一、设置随机请求头
浏览器请求头网站地址:http://www.useragentstring.com/pages/useragentstring.php?typ=Browser
1、添加中间件
class UserAgentDownloaderMiddleware(object):
    """Downloader middleware that sets a random User-Agent on every request.

    Enable it via DOWNLOADER_MIDDLEWARES in settings.py so outgoing
    requests do not all advertise the same client string.
    """

    # Pool of real-browser User-Agent strings, collected from
    # http://www.useragentstring.com/pages/useragentstring.php?typ=Browser
    # NOTE: the original list contained the same Sleipnir string twice,
    # which doubled its selection probability; the duplicate is removed.
    USER_AGENT = [
        "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)",
        "Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB5; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)",
        "Mozilla/4.0 (compatible; Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729); Windows NT 5.1; Trident/4.0)",
        "Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.0; Windows NT 6.0; Trident/5.0)",
        "Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.1; AOLBuild 4334.5012; Windows NT 6.0; WOW64; Trident/5.0)",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20100121 Firefox/3.5.6 Wyzo/3.5.6.1",
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 551; QQDownload 661; TencentTraveler 4.0; (R1 1.5))",
        "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; InfoPath.1; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 1.1.4322; Sleipnir/2.9.2)",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.24 (KHTML, like Gecko) RockMelt/0.9.58.494 Chrome/11.0.696.71 Safari/534.24",
    ]

    def process_request(self, request, spider):
        """Attach a randomly chosen User-Agent header to the request.

        Returning None (implicitly) tells Scrapy to continue processing
        the request through the remaining middlewares.
        """
        import random  # local import keeps the tutorial snippet self-contained

        request.headers['User-Agent'] = random.choice(self.USER_AGENT)
2、settings中启用中间件
# Do not obey robots.txt rules
ROBOTSTXT_OBEY = False
# Wait 2 seconds between requests (politeness / anti-ban)
DOWNLOAD_DELAY = 2
# Default headers sent with every request
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
}
# Enable the custom middleware ('xxxx' is a placeholder for the project/package name)
DOWNLOADER_MIDDLEWARES = {
'xxxx.middlewares.UserAgentDownloaderMiddleware': 543,
}
3、爬虫文件
# -*- coding: utf-8 -*-
import scrapy
import json
class UseragentSpider(scrapy.Spider):
    """Demo spider for verifying the random User-Agent middleware.

    It fetches httpbin's /user-agent endpoint, which echoes back the
    User-Agent header the server received, and prints it between two
    banner lines; then it schedules the same URL again so the rotation
    can be observed across requests.
    """

    name = 'useragent'
    allowed_domains = ['httpbin.org']
    start_urls = ['http://httpbin.org/user-agent']

    def parse(self, response):
        banner = '*' * 30
        payload = json.loads(response.text)
        print(banner)
        print(payload['user-agent'])
        print(banner)
        # Request the same URL again; dont_filter=True bypasses the
        # duplicate-request filter so the request is not dropped.
        yield scrapy.Request(url=self.start_urls[0], dont_filter=True)
4、启动
#-*- coding:utf-8 -*-
# Programmatic launcher: equivalent to running `scrapy crawl useragent`
# from a shell inside the project directory.
from scrapy import cmdline
cmdline.execute("scrapy crawl useragent".split())
二、设置代理IP
1、添加中间件
class IPProxyDownloaderMiddleware(object):
    """Downloader middleware that routes each request through a random proxy.

    Enable it via DOWNLOADER_MIDDLEWARES in settings.py.
    """

    # host:port pairs of (free, public) HTTP proxies; availability is not
    # guaranteed — replace with live proxies before real use.
    IP_PROXY = ['49.70.85.116:9999', '117.69.201.223:9999', '60.13.42.94:9999']

    def process_request(self, request, spider):
        """Assign a randomly chosen proxy to the request.

        Scrapy's built-in HttpProxyMiddleware expects a full proxy *URL*
        in request.meta['proxy'] (e.g. 'http://host:port'); the original
        code stored a bare 'host:port', which is not a valid proxy URL,
        so the scheme is prepended here when missing.
        """
        import random  # local import keeps the tutorial snippet self-contained

        proxy = random.choice(self.IP_PROXY)
        if '://' not in proxy:
            proxy = 'http://' + proxy
        request.meta['proxy'] = proxy
2、settings中启用中间件
# Do not obey robots.txt rules
ROBOTSTXT_OBEY = False
# Wait 2 seconds between requests (politeness / anti-ban)
DOWNLOAD_DELAY = 2
# Default headers sent with every request
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
}
# Enable the custom middleware ('xxxx' is a placeholder for the project/package name)
DOWNLOADER_MIDDLEWARES = {
'xxxx.middlewares.IPProxyDownloaderMiddleware': 200,
}
3、爬虫文件
# -*- coding: utf-8 -*-
import scrapy
import json
class IpproxySpider(scrapy.Spider):
    """Demo spider for verifying the proxy-rotation middleware.

    It fetches httpbin's /ip endpoint, which echoes back the IP address
    the server saw ('origin'), and prints it between two banner lines;
    then it schedules the same URL again so successive requests can show
    different proxy IPs.
    """

    name = 'ipproxy'
    allowed_domains = ['httpbin.org']
    start_urls = ['http://httpbin.org/ip']

    def parse(self, response):
        banner = '*' * 30
        payload = json.loads(response.text)
        print(banner)
        print(payload['origin'])
        print(banner)
        # Request the same URL again; dont_filter=True bypasses the
        # duplicate-request filter so the request is not dropped.
        yield scrapy.Request(url=self.start_urls[0], dont_filter=True)