Python crawler: the requests module and proxies

Install the requests module: pip3 install requests

demo.py (requests module):

import requests   # import the module (install it with pip first)

response = requests.get("http://www.baidu.com")  # send a GET request; returns a Response object
# response = requests.post("http://www.baidu.com")
# response = requests.put("http://www.baidu.com")
# response = requests.delete("http://www.baidu.com")

print(response)  # <Response [200]>

# print(response.text)   # <html>... (may come out garbled if the guessed encoding is wrong)
print(response.encoding)   # encoding requests guessed for the body, e.g. ISO-8859-1
response.encoding = "utf-8"   # override the encoding
# print(response.text)   # body decoded with the encoding set above

print(response.content)   # raw response body: b'<html>...'  (bytes)
print(response.content.decode('utf-8'))   # decode() converts bytes to str with the given encoding (default utf-8)

# Inspect the URLs
print(response.url)   # URL of the response: http://www.baidu.com/
print(response.request.url)  # URL of the request: http://www.baidu.com/  (after a redirect the two differ)

# Inspect the status code and headers
print(response.status_code)  # 200  status code
print(response.headers)   # response headers (dict-like)
print(response.request.headers)  # request headers (dict-like)

# Imitate a browser via the request headers
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
response = requests.get("http://www.baidu.com", headers=headers)   # send the request with custom headers (an inline dict literal works just as well as a variable)

# Send a request with query parameters
params = {"wd": "哈哈"}
response = requests.get("https://www.baidu.com/s", params=params)   # requests appends the ? and the encoded query string for you
response = requests.get("https://www.baidu.com/s?wd=哈哈")           # equivalent: write the parameters into the URL yourself
response = requests.post("https://www.baidu.com/s", data=params)   # a POST request sends form parameters via data
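
To see what requests actually built from params, inspect response.request.url. A minimal sketch (the query value is just illustrative, and the comment assumes no redirect rewrites the URL):

params = {"wd": "哈哈"}
response = requests.get("https://www.baidu.com/s", params=params)
print(response.request.url)   # e.g. https://www.baidu.com/s?wd=%E5%93%88%E5%93%88  (params are URL-encoded automatically)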

demo.py (using a proxy IP):

# coding=utf-8
import requests


proxies = {"http": "http://163.177.151.23:80", "https": "https://12.34.56.79:9527"}  # one proxy per URL scheme

headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36"}

r = requests.get("http://www.baidu.com", proxies=proxies, headers=headers)  # proxies: route the request through the proxy
print(r.status_code)
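
Free proxies die often, so in practice the request is wrapped in a try/except so a dead proxy can be skipped. A minimal sketch continuing the snippet above (the proxy entries are placeholders):

try:
    r = requests.get("http://www.baidu.com", proxies=proxies, headers=headers, timeout=5)
    print(r.status_code)
except (requests.exceptions.ProxyError, requests.exceptions.ConnectTimeout) as e:
    print("proxy failed:", e)   # try the next proxy or fall back to a direct request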

demo.py (read the cookies from a response and convert them to a dict):

# coding=utf-8
import requests


headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36"}

response = requests.get("http://www.baidu.com", headers=headers)

# Cookies from the response (a RequestsCookieJar object)
print(response.cookies)  # <RequestsCookieJar[<Cookie delPer=0 for .baidu.com/>, <Cookie BD_HOME=0 for www.baidu.com/>]>

# Convert the response cookies into a plain dict
cookies_dict = requests.utils.dict_from_cookiejar(response.cookies)
print(cookies_dict)   # {'delPer': '0', 'BD_HOME': '0'}

# Convert a dict back into a RequestsCookieJar
cookie_jar = requests.utils.cookiejar_from_dict({'delPer': '0', 'BD_HOME': '0'})
print(cookie_jar)   # <RequestsCookieJar[<Cookie BD_HOME=0 for />, <Cookie delPer=0 for />]>
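
To send cookies on a follow-up request, pass either the dict or the jar via the cookies parameter; a requests.Session persists them automatically. A minimal sketch continuing the snippet above:

# Both a dict and a RequestsCookieJar are accepted
r = requests.get("http://www.baidu.com", headers=headers, cookies=cookies_dict)

# A Session stores response cookies and sends them back on later requests
session = requests.Session()
session.headers.update(headers)
session.get("http://www.baidu.com")
print(requests.utils.dict_from_cookiejar(session.cookies))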

demo.py (URL encoding/decoding, SSL verification, timeout):

# coding=utf-8
import requests


# URL-encode
url = requests.utils.quote("http://www.baidu.com/s?kw=中文")
print(url)  # http%3A//www.baidu.com/s%3Fkw%3D%E4%B8%AD%E6%96%87

# URL-decode
url = requests.utils.unquote("http%3A//www.baidu.com/s%3Fkw%3D%E4%B8%AD%E6%96%87")
print(url)  # http://www.baidu.com/s?kw=中文


# Disable SSL certificate verification
response = requests.get("https://www.12306.cn/mormhweb/", verify=False)  # verify toggles certificate verification; defaults to True


# timeout sets the timeout in seconds; requests.exceptions.Timeout is raised when it is exceeded
response = requests.get(url, timeout=10)
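
verify=False makes urllib3 emit an InsecureRequestWarning, and an exceeded timeout raises requests.exceptions.Timeout; both are usually handled explicitly. A minimal sketch (urllib3 ships as a dependency of requests):

import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)   # silence the verify=False warning

try:
    response = requests.get("https://www.12306.cn/mormhweb/", verify=False, timeout=10)
    print(response.status_code)
except requests.exceptions.Timeout:
    print("request timed out")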

Reposted from blog.csdn.net/houyanhua1/article/details/86376473