python中的框架如何正确使用亿牛云爬虫代理

requests(session)访问HTTPS网站
# -*- coding: utf-8 -*-
import requests
import random
import requests.adapters

# Target pages to fetch (httpbin endpoints that echo the request back,
# handy for confirming which exit IP / headers the proxy produces).
targetUrlList = [
    "https://httpbin.org/ip",
    "https://httpbin.org/headers",
    "https://httpbin.org/user-agent",
]

# Tunnel proxy server address.
proxyHost = "t.16yun.cn"
proxyPort = "31111"

# Credentials for the proxy tunnel.
proxyUser = "username"
proxyPass = "password"

# Assemble the authenticated proxy URL: http://user:pass@host:port
proxyMeta = "http://{user}:{password}@{host}:{port}".format(
    user=proxyUser,
    password=proxyPass,
    host=proxyHost,
    port=proxyPort,
)

# Route both plain-HTTP and HTTPS traffic through the same HTTP proxy.
proxies = {scheme: proxyMeta for scheme in ("http", "https")}

# Fetch every target three times over one keep-alive Session so the
# proxy tunnel keeps assigning the same exit IP across requests.
# Fixes vs. original: requests.Session() (requests.session() is a
# deprecated alias), print() function form (works on both Python 2 and 3;
# the original `print r.text` is a SyntaxError on Python 3), and an
# explicit timeout so a dead proxy cannot hang the script forever.
s = requests.Session()
for i in range(3):
    for url in targetUrlList:
        r = s.get(url, proxies=proxies, timeout=10)
        print(r.text)
想要了解更多点击此链接：http://ip.16yun.cn:817/reg_accounts/register/?sale_user=Fyf_18398513693&agent_user=zsea

猜你喜欢

转载自blog.51cto.com/14201222/2351928