Crawl—VulnHub

1. Project Background

I recently wanted to run penetration tests against some of the VMs on VulnHub, but I didn't want to open the website by hand every time, so I put this project together. Overall the code is fairly rough and largely unoptimized; this post is mainly just a record of it.

2. Project Structure

  • Target site: www.vulnhub.com
  • Creates an output folder: Vulnhub
  • Saves one .txt file per VM (expected layout sketched below)
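
A run should leave roughly this layout on disk; the file names here are made-up examples of what the title sanitizer in the code below would produce from real VM titles:

Vulnhub/
    Kioptrix_Level1.txt
    FristiLeaks_1.3.txt
    ...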

3. Project Code

#!/usr/bin/python3
# -*- coding: utf-8 -*-
# --author:valecalida--
from requests import RequestException
from urllib3 import disable_warnings
from urllib3 import exceptions
from bs4 import BeautifulSoup
from random import choice
from os.path import exists
import requests
import os
import re

# Silence the warning urllib3 emits for unverified HTTPS requests
disable_warnings(exceptions.InsecureRequestWarning)


def get_user_agent():
    """Return a random User-Agent string so requests don't all look identical."""
    user_agent = [
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]
    return choice(user_agent)


def get_soup(link):
    """Fetch a page and return it as a BeautifulSoup object."""
    try:
        # verify=False matches the InsecureRequestWarning suppression above;
        # the timeout keeps one stalled request from hanging the whole crawl
        res = requests.get(link, headers={"User-Agent": get_user_agent()},
                           verify=False, timeout=15)
        return BeautifulSoup(res.text, 'lxml')
    except RequestException:
        print("[-] Something seems to have gone wrong while fetching the page, please check it manually...")


def vulnhub_title(soup):
    """Return the entry links, titles, and descriptions from one listing page."""
    # Each entry is rendered as <h1><a href="/entry/...">Title</a></h1>
    content = re.findall(r"<h1><a.*?\"(.*?)\">(.*?)</a></h1>", str(soup.find_all("h1")))
    descs = process_descs(soup.find_all(name="div", attrs={"class": "right"}))
    links, title = separate_link_title(content)
    return links, title, descs


def process_descs(contents):
    """Strip the HTML tags from each description block."""
    descs = []
    for content in contents:
        descs.append(re.sub(r"<.*?>", "", str(content)))
    return descs


def separate_link_title(contents):
    """将传入的参数分割开,分别保存到links跟titles两个列表"""
    links, title = [], []
    for content in contents:
        links.append(content[0])
        title.append(content[1])
    return links, title


def download_links(soup):
    """Extract the mirror names and download URLs from an entry page."""
    # The seventh <ul> on an entry page holds the download list
    html = str(soup.find_all("ul")[6])
    download_site = re.findall(r".*?<b>(.*?)</b>: <a href=\"(.*?)\">.*?</a>", html)
    return download_site


def show_download_links(links):
    """Print each mirror name together with its download URL."""
    for site in links:
        print("\t\t", site[0], ":", site[1])


def detect_folder():
    """Create the output folder on first run."""
    if not exists("Vulnhub"):
        os.mkdir('Vulnhub')


def save_infos(title, desc):
    """Write a VM's description to Vulnhub/<title>.txt."""
    name = os.path.join("Vulnhub", str(title) + ".txt")
    try:
        # Write UTF-8 explicitly so the result doesn't depend on the locale
        with open(name, "w", encoding="utf-8") as f:
            f.write(desc)
    except UnicodeEncodeError:
        print("Hit an encoding problem while writing the file")


def handle_title(title):
    """Sanitize a VM title so it can be used as a file name."""
    title = re.sub(r"\s", "", title)  # \s already covers spaces, \n and \r
    return re.sub(":", "_", title)


def main():
    detect_folder()
    host = "https://www.vulnhub.com"
    # The listing ran to about 44 pages when this was written; adjust as needed
    for page in range(1, 45):
        page_link = host + "/?page=" + str(page)
        print("[..] Processing the entries on %s" % page_link)
        soup = get_soup(page_link)
        if soup is None:
            continue  # request failed; get_soup already printed a warning
        links, titles, descs = vulnhub_title(soup)
        for index in range(len(links)):
            title = handle_title(titles[index])
            print("\tTitle: <<%s>>" % title)
            entry_soup = get_soup(host + links[index])
            if entry_soup is not None:
                show_download_links(download_links(entry_soup))
            save_infos(title, descs[index])


if __name__ == '__main__':
    main()
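
To try it, install the third-party dependencies and run the script. The file name crawl_vulnhub.py and the output lines below are illustrative, not part of the original post; actual titles and mirror names vary by page:

pip3 install requests beautifulsoup4 lxml
python3 crawl_vulnhub.py
[..] Processing the entries on https://www.vulnhub.com/?page=1
	Title: <<...>>
		 Mirror : https://...

Each VM's description also ends up under Vulnhub/<title>.txt, so the notes can be read offline without revisiting the site.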

Author: valecalida
License: Unless otherwise noted, all posts on this blog are licensed under CC BY 4.0. Please credit valecalida when reposting!