Scraping web data in Python through proxy IPs

Example 1: Python 3.x HTTP proxy usage with rotating (dynamic) crawler proxy IPs

'''
Python 3.x
Description: this demo shows how to request a web page through rotating (dynamic) crawler proxy IPs; the code uses multiple threads.
Logic: fetch an IP list from the API endpoint every 5 seconds, and start one thread per IP to download the page source.
'''
import requests
import time
import threading
import urllib3

ips = []

# Thread class that crawls the target page
class CrawlThread(threading.Thread):
    def __init__(self, proxyip):
        super(CrawlThread, self).__init__()
        self.proxyip = proxyip
    def run(self):
        # Start timing
        start = time.time()
        # Suppress the warning emitted when certificate verification is disabled
        urllib3.disable_warnings()
        # Request the target URL through the proxy IP; verify=False skips SSL verification (avoids SSL errors)
        html = requests.get(url=targetUrl,
                            proxies={"http": 'http://' + self.proxyip, "https": 'https://' + self.proxyip},
                            verify=False, timeout=15).content.decode()
        # Stop timing
        end = time.time()
        # Print the result
        print(threading.current_thread().name + " used proxy IP " + self.proxyip +
              ", took " + str(end - start) + " seconds, got the following HTML:\n" + html + "\n*************")

# Thread class that periodically fetches proxy IPs
class GetIpThread(threading.Thread):
    def __init__(self, fetchSecond):
        super(GetIpThread, self).__init__()
        self.fetchSecond = fetchSecond
    def run(self):
        global ips
        while True:
            # Fetch the IP list
            res = requests.get(apiUrl).content.decode()
            # Split the response into individual IPs on newlines
            ips = res.split('\n')
            # Use each IP
            for proxyip in ips:
                if proxyip.strip():
                    # Start one crawl thread for this IP
                    CrawlThread(proxyip).start()
            # Sleep until the next fetch
            time.sleep(self.fetchSecond)

if __name__ == '__main__':
    # API endpoint that returns proxy IPs (placeholder)
    apiUrl = "http://xxxx"
    # Target URL to crawl
    targetUrl = "http://ip.chinaz.com/getip.aspx"
    # Interval (seconds) between IP fetches; 5 seconds is recommended
    fetchSecond = 5
    # Start fetching IPs automatically
    GetIpThread(fetchSecond).start()
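
Note that the request inside CrawlThread.run() raises an exception (and silently kills its thread) whenever a proxy is unreachable or times out, which happens often with rotating proxies. Below is a minimal sketch of the same download step wrapped with error handling; the helper name and the assumption of a plain HTTP forward proxy are mine, not part of the original demo:

import requests

def fetch_with_proxy(targetUrl, proxyip, timeout=15):
    # Returns the decoded page body, or None if the proxy failed.
    try:
        resp = requests.get(
            url=targetUrl,
            # Assumes a plain HTTP forward proxy reachable at "ip:port"
            proxies={"http": "http://" + proxyip, "https": "http://" + proxyip},
            verify=False,
            timeout=timeout,
        )
        resp.raise_for_status()
        return resp.content.decode()
    except requests.RequestException as e:
        print("proxy " + proxyip + " failed: " + str(e))
        return None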

Example 2: Python 2.x HTTP proxy with rotating (dynamic) crawler proxy IPs

# -*- coding: GBK -*-
'''
Python 2.x
Description: this demo shows how to request a web page through rotating (dynamic) crawler proxy IPs; the code uses multiple threads.
Logic: fetch an IP list from the API endpoint every 5 seconds, and start one thread per IP to download the page source.
'''
import urllib
import urllib2
import time
import threading
import ssl

ips = []

# Thread class that crawls the target page
class CrawlThread(threading.Thread):
    def __init__(self, proxyip):
        super(CrawlThread, self).__init__()
        self.proxyip = proxyip
    def run(self):
        # Start timing
        start = time.time()
        # Skip SSL certificate verification problems
        ssl._create_default_https_context = ssl._create_unverified_context
        # Fill in the request headers; adjust as needed
        User_Agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0'
        header = {}
        header['User-Agent'] = User_Agent
        # Register the proxy
        proxy = {"http": self.proxyip}
        proxy_support = urllib2.ProxyHandler(proxy)
        opener = urllib2.build_opener(proxy_support)
        urllib2.install_opener(opener)
        # Request the target URL through the proxy IP
        timeout = 15
        req = urllib2.Request(targetUrl, headers=header)
        response = urllib2.urlopen(req, None, timeout)
        html = response.read()
        # Transcode for console output; replace "utf-8" with the target site's actual encoding
        html = html.decode("utf-8").encode("gbk")
        # Stop timing
        end = time.time()
        # Print the result
        print(threading.current_thread().getName() + " used proxy IP " + self.proxyip +
              ", took " + str(end - start) + " seconds, got the following HTML:\n" + html + "\n*************")

# Thread class that periodically fetches proxy IPs
class GetIpThread(threading.Thread):
    def __init__(self, fetchSecond):
        super(GetIpThread, self).__init__()
        self.fetchSecond = fetchSecond
    def run(self):
        global ips
        while True:
            # Fetch the IP list
            res = urllib.urlopen(apiUrl).read().strip("\n")
            # Split the response into individual IPs on newlines
            ips = res.split("\n")
            # Use each IP
            for proxyip in ips:
                if proxyip.strip():
                    # Start one crawl thread for this IP
                    CrawlThread(proxyip).start()
            # Sleep until the next fetch
            time.sleep(self.fetchSecond)

if __name__ == '__main__':
    # API endpoint that returns proxy IPs (placeholder)
    apiUrl = "http://xxxx"
    # Target URL to crawl
    targetUrl = "http://ip.chinaz.com/getip.aspx"
    # Interval (seconds) between IP fetches; 5 seconds is recommended
    fetchSecond = 5
    # Start fetching IPs automatically
    GetIpThread(fetchSecond).start()
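
One design caveat in the Python 2 demo: urllib2.install_opener() replaces the process-wide opener, so concurrent threads can overwrite each other's proxy setting. A minimal sketch of a thread-safe alternative, calling the opener directly instead of installing it globally (a suggested change, not part of the original code; the helper name is mine):

import urllib2

def fetch_with_proxy(targetUrl, proxyip, header, timeout=15):
    proxy_support = urllib2.ProxyHandler({"http": proxyip})
    opener = urllib2.build_opener(proxy_support)
    req = urllib2.Request(targetUrl, headers=header)
    # opener.open() keeps the proxy local to this call instead of
    # changing global state with urllib2.install_opener()
    response = opener.open(req, None, timeout)
    return response.read()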

Example 3: Python 3.x with SOCKS5 proxy IPs

# -*- coding: UTF-8 -*-
'''
Python 3.x
Description: this demo shows how to request a web page through rotating (dynamic) SOCKS5 proxy IPs; the code uses multiple threads.
Logic: fetch an IP list from the API endpoint every 5 seconds, and start one thread per IP to download the page source.
Note: SOCKS support must be installed first: pip3 install 'requests[socks]'
'''
import requests
import time
import threading
import urllib3

ips = []

# Thread class that crawls the target page
class CrawlThread(threading.Thread):
    def __init__(self, proxyip):
        super(CrawlThread, self).__init__()
        self.proxyip = proxyip
    def run(self):
        # Start timing
        start = time.time()
        # Suppress the warning emitted when certificate verification is disabled
        urllib3.disable_warnings()
        # Request the target URL through the SOCKS5 proxy; verify=False skips SSL verification (avoids SSL errors).
        # Replace user and password with your real credentials.
        html = requests.get(url=targetUrl,
                            proxies={"http": 'socks5://user:password@' + self.proxyip,
                                     "https": 'socks5://user:password@' + self.proxyip},
                            verify=False, timeout=15).content.decode()
        # Stop timing
        end = time.time()
        # Print the result
        print(threading.current_thread().name + " used proxy IP " + self.proxyip +
              ", took " + str(end - start) + " seconds, got the following HTML:\n" + html + "\n*************")

# Thread class that periodically fetches proxy IPs
class GetIpThread(threading.Thread):
    def __init__(self, fetchSecond):
        super(GetIpThread, self).__init__()
        self.fetchSecond = fetchSecond
    def run(self):
        global ips
        while True:
            # Fetch the IP list
            res = requests.get(apiUrl).content.decode()
            # Split the response into individual IPs on newlines
            ips = res.split('\n')
            # Use each IP
            for proxyip in ips:
                if proxyip.strip():
                    # Start one crawl thread for this IP
                    CrawlThread(proxyip).start()
            # Sleep until the next fetch
            time.sleep(self.fetchSecond)

if __name__ == '__main__':
    # API endpoint that returns proxy IPs (placeholder)
    apiUrl = "http://xxxx"
    # Target URL to crawl
    targetUrl = "http://ip.chinaz.com/getip.aspx"
    # Interval (seconds) between IP fetches; 5 seconds is recommended
    fetchSecond = 5
    # Start fetching IPs automatically
    GetIpThread(fetchSecond).start()
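
With requests[socks], a socks5:// proxy URL resolves the target hostname locally; switching to the socks5h:// scheme pushes DNS resolution through the proxy as well, which can matter when the local machine cannot resolve the target. A minimal sketch of that variant (the proxy address, user and password below are placeholders):

import requests

proxyip = "1.2.3.4:1080"  # hypothetical SOCKS5 proxy address
proxies = {
    "http": "socks5h://user:password@" + proxyip,   # socks5h = resolve DNS through the proxy
    "https": "socks5h://user:password@" + proxyip,
}
html = requests.get("http://ip.chinaz.com/getip.aspx", proxies=proxies, timeout=15).text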

Example 4: Using an HTTP proxy in Scrapy

'''Add a proxy downloader middleware'''
# Edit the settings.py file
DOWNLOADER_MIDDLEWARES = {
    'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
    'myproxies.middlewares.ProxyMiddleWare': 125,
    'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': None
}

'''Edit the middleware file middlewares.py'''

import requests


class ProxyMiddleWare(object):
    """Downloader middleware that attaches a proxy to each request."""
    def process_request(self, request, spider):
        '''Attach a proxy to the outgoing request'''
        proxy = self.get_random_proxy()
        print("this is request ip:" + proxy)
        request.meta['proxy'] = proxy

    def process_response(self, request, response, spider):
        '''Handle the returned response'''
        # If the response status is not 200, retry the current request with a new proxy
        if response.status != 200:
            proxy = self.get_random_proxy()
            print("this is response ip:" + proxy)
            # Attach the new proxy to the request
            request.meta['proxy'] = proxy
            return request
        return response

    def get_random_proxy(self):
        # API endpoint that returns a proxy IP (placeholder)
        apiUrl = "http://xxxx"
        # The API is assumed to return "ip:port"; request.meta['proxy'] expects a full proxy URL
        proxy = "http://" + requests.get(apiUrl).text.strip()
        return proxy
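
Proxies also fail before any response arrives (connection refused, timeout); Scrapy routes those errors to a downloader middleware's process_exception hook. A minimal sketch of adding that hook to the same ProxyMiddleWare so a fresh proxy is swapped in and the request retried (an addition of mine, not part of the original post):

    # Extra method on ProxyMiddleWare (sketch): retry with a new proxy
    # when the download itself fails, e.g. connection errors or timeouts.
    def process_exception(self, request, exception, spider):
        proxy = self.get_random_proxy()
        print("download failed (" + repr(exception) + "), retrying with " + proxy)
        request.meta['proxy'] = proxy
        # Returning the request reschedules it for download with the new proxy
        return request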

Reposted from blog.csdn.net/qq_36936510/article/details/104662537