[Python crawler] Use a proxy to crawl girl pictures

Proxy

Usage steps

1. Create a ProxyHandler; the parameter is a dictionary of the form {'protocol': 'proxy IP:port'}
proxy_support = urllib.request.ProxyHandler({})
2. Build a custom opener from the handler
opener = urllib.request.build_opener(proxy_support)
3a. Install the opener globally (urlopen will then use it)
urllib.request.install_opener(opener)
3b. Or call the opener directly
opener.open(url)
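
If you skip step 3a, the opener can be used directly for a single request while urlopen elsewhere keeps the default (no-proxy) behavior. A minimal sketch of this per-call variant, assuming a placeholder proxy address:

import urllib.request as req

# Placeholder proxy address for illustration only
proxy_support = req.ProxyHandler({'http': '127.0.0.1:8080'})
opener = req.build_opener(proxy_support)
# Call the opener directly instead of installing it globally
with opener.open('http://www.whatismyip.com.tw') as response:
    print(response.read().decode('utf-8'))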

Usage example

import urllib.request as req
import random

url = 'http://www.whatismyip.com.tw'
iplist = ['114.101.253.237:9999', '183.166.71.32:9999', '175.43.57.45:9999']
proxy_support = req.ProxyHandler({'http': random.choice(iplist)})
opener = req.build_opener(proxy_support)
opener.addheaders = [('User-Agent',
                      'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36')]
req.install_opener(opener)
response = req.urlopen(url)
html = response.read().decode('utf-8')
print(html)
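
Free proxies such as the ones in iplist go stale quickly, so it can help to filter the list before choosing one. A minimal sketch, assuming http://httpbin.org/ip as a test endpoint and a 5-second timeout (both arbitrary choices, not part of the original code):

import urllib.request as req

def alive_proxies(proxies, test_url='http://httpbin.org/ip', timeout=5):
    # Return only the proxies that answer within the timeout
    ok = []
    for p in proxies:
        opener = req.build_opener(req.ProxyHandler({'http': p}))
        try:
            opener.open(test_url, timeout=timeout)
            ok.append(p)
        except Exception:
            pass  # dead or unreachable proxy; skip it
    return ok

Calling iplist = alive_proxies(iplist) before random.choice(iplist) avoids picking a dead proxy.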

Crawling girl pictures

import os
import time
import requests
import re
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
}
# Request the page
# print('Enter the URL of the site you want to crawl')
# httpurl = input()
httpurl = 'https://www.vmgirls.com/15159.html'
while True:
    try:
        response = requests.get(httpurl, headers=headers)
        # print(response.request.headers)
        # print(response.text)
        html = response.text
        # Parse the page
        # Prefix the URL with view-source: to view the page's HTML source
        dir_name = re.findall('<h1 class="post-title h1">(.*?)</h1>',html)[-1]
        if not os.path.exists(dir_name):
            os.mkdir(dir_name)
        # Regex search: the matched href values are protocol-relative (they start with //)
        urls = re.findall('<a href="(.*?)" alt=".*?" title=".*?">', html)
        print(urls)
        # Save the images
        for url in urls:
            time.sleep(1)
            # Image file name: the last path segment of the URL
            name = url.split('/')[-1]
            response = requests.get('https:' + url, headers=headers)
            print(name + ' downloading...')
            with open(dir_name+'/'+name,'wb') as f:
                f.write(response.content)
        break
    except requests.exceptions.RequestException as e:
        print(e)
        continue
print('Download complete')
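
The crawler above connects directly; to route it through a proxy like the first section does, requests accepts a proxies dictionary mapping each scheme to a proxy URL. A minimal sketch, reusing the placeholder addresses from iplist above:

import random
import requests

iplist = ['114.101.253.237:9999', '183.166.71.32:9999', '175.43.57.45:9999']
proxy = random.choice(iplist)
# requests expects full proxy URLs keyed by scheme
proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy}
response = requests.get('https://www.vmgirls.com/15159.html',
                        headers={'User-Agent': 'Mozilla/5.0'},
                        proxies=proxies, timeout=10)
print(response.status_code)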

Source: blog.csdn.net/qq_36477513/article/details/112309838