# 【代码整理】python -- scrape free proxy IPs to build a proxy IP pool
#
# Currently only scrapes the IPs from all pages; persisting them to a
# database is still to be done.

# -*-coding:utf-8 -*-
import requests
import parsel
import time

def check_ip(proxies_list, timeout=0.1):
    """Probe each proxy against baidu.com and return the usable ones.

    Args:
        proxies_list: list of dicts in requests' ``proxies`` format,
            e.g. ``{'HTTP': '1.2.3.4:8080'}``.
        timeout: seconds to wait for each probe. Default kept at 0.1 for
            backward compatibility, but note it is very aggressive —
            most real proxies need a larger value (e.g. 1-3 s).

    Returns:
        List of the proxy dicts that answered with HTTP 200.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36'}
    can_use = []
    for proxy in proxies_list:
        try:
            response = requests.get('https://www.baidu.com', headers=headers,
                                    proxies=proxy, timeout=timeout)
        except Exception as e:
            # Bug fix: the original printed 'High Quality!' in a `finally`
            # block, i.e. for every proxy including the ones that just
            # failed. Report success only for proxies that responded.
            print(e)
        else:
            if response.status_code == 200:
                can_use.append(proxy)
                print('IP:', proxy, 'High Quality!')
    return can_use

def count_total_page():
    """Scrape kuaidaili's free-proxy index page and return the page count.

    Returns:
        int: total number of listing pages, read from the last entry of
        the pagination bar.

    Raises:
        RuntimeError: if the pagination element cannot be found (site
            layout changed, or the request was blocked).
    """
    root_url = 'https://www.kuaidaili.com/free/'
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36'}
    response = requests.get(root_url, headers=headers)
    html_data = parsel.Selector(response.text)
    # li[9] is the last pagination item, whose text is the total page
    # number. NOTE(review): this index is layout-dependent — verify if
    # the site changes its pagination markup.
    last_page = html_data.xpath('//*[@id="listnav"]/ul/li[9]/a/text()').extract_first()
    if last_page is None:
        # Bug fix: int(None) would raise an opaque TypeError; fail with
        # an actionable message instead.
        raise RuntimeError('Could not find pagination element on ' + root_url)
    return int(last_page)

def get_proxy_ip(page, proxies_list):
    """Scrape one kuaidaili listing page and append its proxies to the list.

    Args:
        page: 1-based page number to fetch.
        proxies_list: list that receives one ``{protocol: 'ip:port'}``
            dict per table row (mutated in place and also returned).

    Returns:
        The same ``proxies_list``, extended with this page's entries.
    """
    # Bug fix: the original was print('...').format(page), which calls
    # .format on print's return value (None) and raises AttributeError.
    print('==========Now Page {}=========='.format(page))
    base_url = 'http://kuaidaili.com/free/inha/{}/'.format(page)
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36'}
    response = requests.get(base_url, headers=headers)
    html_data = parsel.Selector(response.text)
    rows = html_data.xpath('//table[@class="table table-bordered table-striped"]/tbody/tr')
    for tr in rows:
        http_type = str(tr.xpath('./td[4]/text()').extract_first())
        ip_num = str(tr.xpath('./td[1]/text()').extract_first())
        ip_port = str(tr.xpath('./td[2]/text()').extract_first())
        proxies_list.append({http_type: ip_num + ':' + ip_port})
    # Politeness delay between page requests. Bug fix: the original slept
    # 0.5 s per table ROW, which only slowed down local parsing without
    # spacing out the actual HTTP requests.
    time.sleep(0.5)
    return proxies_list

def main():
    """Collect proxies from every listing page, then keep the working ones."""
    proxies_list = []
    total_page = count_total_page()
    # Bug fixes: range(1, total_page) skipped the last page, and when
    # total_page <= 1 the loop body never ran, leaving the old
    # `value_list` variable undefined (NameError at check_ip below).
    # get_proxy_ip mutates proxies_list in place, so use it directly.
    for page in range(1, total_page + 1):
        get_proxy_ip(page, proxies_list)
    can_use = check_ip(proxies_list)
    print('Total High Quality Proxy IP: ' + str(len(can_use)))

# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    main()

# 猜你喜欢
#
# Reposted from blog.csdn.net/m0_46622606/article/details/105376849