Scraping free proxy nodes

Sometimes a project needs a pool of IPs for testing, so I'm recording my approach here. I've removed the site's URL; a quick Baidu search will turn it up. Availability is verified with multiple threads.
Companion proxy software: SocksCap64 (client) with CCProxy (Windows server).
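
The script below scrapes five pages of the proxy site, deduplicates the results, validates each proxy against Baidu across 50 threads, and writes the survivors to proxy_list.txt sorted by response time.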

#-*-coding:utf8-*-
# [email protected]
# Scrape a proxy-list site, then validate and sort the proxies

import urllib2
import re
import threading
import time,os


# Lock so log lines from different threads don't interleave
mutex = threading.Lock()
def mylog(ss):
    mutex.acquire()
    print ss
    mutex.release()

# Shared result lists; list.append is atomic under CPython's GIL,
# so the worker threads can append without extra locking
rawProxyList = []        # "ip:port" strings scraped from the site
checkedProxyList = []    # (proxy, response_time) tuples that passed the check

# Build the list of target pages to scrape
targets=[]
for i in range(1,6):
    #xicidaili
    target = r"http://www.xxx.com/nn/%d" % i
    targets.append(target)
# print targets

# Regex to pull the IP, port, and type cells out of each table row
p = re.compile(r'''<tr class=".+?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>.+?(\d{2,4})</td>.+?<td>(.{4,5})</td>''',re.DOTALL)
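# Groups: 1 = IP, 2 = port, 3 = a 4-5 character cell (presumably the type
# column; it is captured but never used below). re.DOTALL lets .+? span
# the newlines inside each <tr> block.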

# Thread that scrapes one target page for proxies
class ProxyGet(threading.Thread):
    def __init__(self,target):
        threading.Thread.__init__(self)
        self.target = target

    def getProxy(self):
        mylog("Target page: "+self.target)

        # Opener with cookie support and a desktop-Chrome User-Agent,
        # so the site serves the normal page instead of blocking the bot
        cookies = urllib2.HTTPCookieProcessor()
        opener=urllib2.build_opener(cookies)
        opener.addheaders =[('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36')]
            
        req = opener.open(self.target)
        result = req.read()
        matchs = p.findall(result)
        for row in matchs:
            ip = row[0]+":"+row[1]   # join IP and port as "ip:port"
            rawProxyList.append(ip)

    def run(self):
        self.getProxy()

# Thread that validates one slice of the scraped proxies
class ProxyCheck(threading.Thread):
    def __init__(self,proxyList):
        threading.Thread.__init__(self)
        self.proxyList = proxyList
        self.timeout=5
        self.testUrl = "http://www.baidu.com/"
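        # "030173" is part of Baidu's ICP licence number (京ICP证030173号)
        # shown in the homepage footer; finding it proves the proxy
        # actually returned Baidu's page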
        self.testStr = "030173"

    def checkProxy(self):
        cookies = urllib2.HTTPCookieProcessor()
        for proxy in self.proxyList:
            proxyHandler = urllib2.ProxyHandler({"http" : r'http://%s' %(proxy)})
            opener=urllib2.build_opener(cookies,proxyHandler)
            opener.addheaders =[('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36')]
            t1 = time.time()
            try:
                req = opener.open(self.testUrl,timeout=self.timeout)
                result=req.read()
                timeused = time.time()-t1
                pos = result.find(self.testStr)

                # find() returns -1 when the marker is absent; 0 is a valid hit
                if pos >= 0:
                    checkedProxyList.append((proxy,timeused))
            except Exception:
                # the proxy is dead, blocked, or too slow; skip it
                continue

    def run(self):
        self.checkProxy()

if __name__ == "__main__":

    # Seed rawProxyList with proxies saved by a previous run
    if os.path.isfile("proxy_list.txt"):
        f = open("proxy_list.txt",'r')
        for line in f:
            ip = line.strip()
            if ip:
                rawProxyList.append(ip)
        f.close()
        print "Loaded %d saved proxies" % len(rawProxyList)
        

    getThreads=[]
    checkThreads=[]

    # One scraper thread per target page
    for i in range(len(targets)):
        t = ProxyGet(targets[i])
        getThreads.append(t)

    for i in range(len(getThreads)):
        getThreads[i].start()
        time.sleep(1)   # start the threads one second apart to go easy on the site

    for i in range(len(getThreads)):
        getThreads[i].join()

    # Deduplicate
    #print rawProxyList
    rawProxyList = list(set(rawProxyList))
    
    mylog('.'*10+"Fetched %s proxies in total" %len(rawProxyList) +'.'*10)

    # Validation: split the proxies into 50 roughly equal chunks and
    # check each chunk in its own thread
    chunkSize = (len(rawProxyList)+49)/50   # ceiling division (Python 2 int "/")
    for i in range(50):
        t = ProxyCheck(rawProxyList[chunkSize*i : chunkSize*(i+1)])
        checkThreads.append(t)

    for i in range(len(checkThreads)):
        checkThreads[i].start()

    for i in range(len(checkThreads)):
        checkThreads[i].join()

    mylog('.'*10+"%s proxies passed the check" %len(checkedProxyList) +'.'*10)

    # Persist the working proxies, fastest first
    f= open("proxy_list.txt",'w+')
    for proxy in sorted(checkedProxyList, key=lambda x: x[1]):
        print "checked proxy: %s\t%s" %(proxy[0],proxy[1])
        f.write("%s\n"%(proxy[0]))
    f.close()
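
To reuse the list this script writes, you can read proxy_list.txt back and route requests through the first entry; the file is sorted by response time, so that's the fastest proxy. A minimal sketch (the target URL is just an example):

# Sketch: route one request through the fastest proxy in proxy_list.txt
import urllib2

proxies = [line.strip() for line in open("proxy_list.txt") if line.strip()]
if proxies:
    fastest = proxies[0]   # the file is written fastest-first
    opener = urllib2.build_opener(
        urllib2.ProxyHandler({"http": "http://%s" % fastest}))
    print opener.open("http://www.baidu.com/", timeout=5).read()[:100]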


Reprinted from blog.csdn.net/weixin_33928467/article/details/87109694