# Python crawler: build a Redis-backed IP proxy pool (hundreds of thousands of IPs), ready to use.

from bs4 import BeautifulSoup
import requests,os,sys,time,random,redis
from lxml import etree
# Module-wide Redis connection; validated proxies are stored in the set 'proxy'.
# NOTE(review): no password / db index configured — assumes a local unauthenticated Redis.
conn = redis.Redis(host='127.0.0.1',port=6379)
def get_ip(page_url, headers, cookies, sui_ji_time):
    """
    Fetch one listing page of the proxy site and extract candidate proxies.

    Each row of the listing table yields "http://<ip>:<port>" plus its
    type label (HTTP/HTTPS); every candidate is handed to ceshi_ip() for
    validation and storage.

    :param page_url: URL of the listing page to scrape
    :param headers: HTTP headers (User-Agent / Referer) for the request
    :param cookies: cookies dict sent with the request
    :param sui_ji_time: sleep interval in seconds — logged only; the caller sleeps
    :return: None
    """
    print('{}--{}--{}--{}>>{}'.format('此程序睡眠时间', sui_ji_time, '正在爬取第', page_url, '的数据'))
    # timeout keeps one dead page from hanging the whole crawl
    response = requests.get(page_url, headers=headers, cookies=cookies, timeout=10).text
    json_lxml = etree.HTML(response)
    table = json_lxml.xpath('//*[@id="list"]/table/tbody/tr')
    for row in table:
        ip_cell = row.xpath('.//td[1]/text()')
        port_cell = row.xpath('.//td[2]/text()')
        type_cell = row.xpath('.//td[4]/text()')
        # Skip malformed rows instead of crashing with IndexError on [0].
        if not (ip_cell and port_cell and type_cell):
            continue
        daili_ip = '{}{}:{}'.format('http://', ip_cell[0], port_cell[0])
        # The original branched on HTTP vs HTTPS, but both branches made the
        # identical call — collapsed into one.
        ceshi_ip(headers, cookies, sui_ji_time, daili_ip, type_cell[0])

def ceshi_ip(headers, cookies, sui_ji_time, daili_ip, html_ip_lei):
    """
    Probe a candidate proxy and, if it works, persist it to Redis and a file.

    :param headers: unused here, kept for call-signature compatibility
    :param cookies: unused here, kept for call-signature compatibility
    :param sui_ji_time: unused here, kept for call-signature compatibility
    :param daili_ip: proxy URL, e.g. "http://1.2.3.4:8080"
    :param html_ip_lei: proxy type label scraped from the site ("HTTP"/"HTTPS")
    :return: None
    """
    print(daili_ip, '@@@@@@@@@@@@')
    try:
        # BUG FIX: requests matches proxies-dict keys against the *lowercase*
        # URL scheme, so the site's 'HTTP'/'HTTPS' labels must be lowercased
        # or the proxy is silently never applied (the probe then tests the
        # local connection, not the proxy).  timeout stops dead proxies from
        # hanging the crawl indefinitely.
        requests.get('http://wenshu.court.gov.cn/',
                     proxies={str(html_ip_lei).lower(): daili_ip},
                     timeout=10)
    except requests.RequestException:
        print('{}>>{}'.format(daili_ip, '不可用'))
        return
    print('{}>>{}'.format(daili_ip, '可用'))
    # Store in the Redis set 'proxy' as "TYPE+http://ip:port".
    try:
        conn.sadd('proxy', '{}+{}'.format(html_ip_lei, daili_ip))
        print('{}'.format('存储redis成功'))
    except redis.RedisError:
        print('{}'.format('存储redis失败'))
    root_dir = '{}'.format('D:\\web_xiangmu\\biquge_tushu\\代理')
    if not os.path.exists(root_dir):
        # makedirs creates intermediate directories too, unlike mkdir.
        os.makedirs(root_dir)
        print('{}'.format('创建成功'))
    else:
        # Only report "already exists" when we did not just create it
        # (the original printed this unconditionally).
        print('{}'.format('文件存在'))
    # File backup so the pool survives a Redis flush.
    try:
        with open(os.path.join(root_dir, 'daili.text'), "a+") as mon:
            mon.write('{}+{}\n'.format(html_ip_lei, daili_ip))
        print('{}>>>{}'.format(daili_ip, '写入成功'))
    except OSError:
        print('{}'.format('写入失败'))




def main():
    """
    Crawl every listing page of the proxy site.

    Reads the total page count from the pagination widget of the first page,
    then fetches each page with a random per-page delay.

    :return: None
    """
    url = 'https://www.kuaidaili.com/free/inha/'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
        'Referer': 'https://www.kuaidaili.com/free/inha/',
    }
    cookies = {
        'Cookie': 'channelid=0; sid=1575640807483263; _ga=GA1.2.757045199.1575642271; _gid=GA1.2.1903168241.1575642271; _gat=1; Hm_lvt_7ed65b1cc4b810e9fd37959c9bb51b31=1575642272,1575686420; Hm_lpvt_7ed65b1cc4b810e9fd37959c9bb51b31=1575686420',
    }
    response = requests.get(url, headers=headers, cookies=cookies, timeout=10).text
    json_lxml = etree.HTML(response)
    # li[9] of the pagination widget holds the last page number and its href.
    ip_page = json_lxml.xpath('//ul/li[9]/a/text()')[0]
    ip_page_href = json_lxml.xpath('//ul/li[9]/a/@href')[0]
    # Hoisted: the original re-split the href twice on every iteration.
    # e.g. '/free/inha/1' -> ['', 'free', 'inha', '1']
    href_parts = ip_page_href.split('/')
    for page in range(1, int(ip_page) + 1):
        # Pick a fresh random delay per page — the original chose one delay
        # and reused it for the whole crawl, defeating the randomization.
        sui_ji_time = random.choice(list_time_sleep)
        page_url = '{}/{}/{}/{}'.format('https://www.kuaidaili.com', href_parts[1], href_parts[2], page)
        time.sleep(sui_ji_time)
        get_ip(page_url, headers, cookies, sui_ji_time)

if __name__ == '__main__':
    # Candidate sleep intervals (seconds); main() reads this via module scope,
    # so the name must stay as-is.
    list_time_sleep = [5, 10, 15]
    # Stagger startup by a random interval so repeated runs do not hit the
    # target site in lockstep.
    qidong_yanchi = random.choice(list_time_sleep)
    print('{}<<{}>>{}'.format('主程序随机睡眠时间', qidong_yanchi, '秒'))
    time.sleep(qidong_yanchi)
    main()


"""
#使用代理操作
# 导入模块
import redis
import requests
# 连接到Redis服务器
conn = redis.Redis(host='127.0.0.1', port=6379)
# 随机取出一条代理数据
ip=conn.redis.srandmember('proxy')
ip_add= ''.join(ip).split('+')
zhen_ip = ip[1]
print(zhen_ip)
url='https://www.baidu.com'
proxies = {
"http": "http://" + zhen_ip.decode("utf-8")
}

# 使用IP代理访问百度,测试代理地址是否有效
try:
data = requests.get(url=url, proxies=proxies, timeout=5)
except:
# 代理地址无效
删除无效的IP代理
验证IP代理是否无效,如果代理地址无效,可以使用以下命令删除代理,这样可以保证我们代理池中的地址都是有效的

conn.redis.srem('proxy', '无效的IP代理地址')

"""

# Adapted from: https://www.cnblogs.com/duanlinxiao/p/12001618.html