Using the Requests library to make HTTP requests in Python 3
1. Install the Requests library
Run `pip install requests`; the console output looks like this:
c:\python>pip install requests
2. Verify that the library is installed successfully
Enter `import requests` in the Python shell; if no error is reported, the installation was successful.
3. Implement a complete GET request and response
#coding:utf-8
# A complete request/response round trip: GET
import requests

response = requests.get('http://www.baidu.com')
# .content is the raw response body as bytes
print(response.content)
4. Implement a complete POST request and response
#coding:utf-8
# A complete request/response round trip: POST
import requests

form_data = {'key': 'value'}
# data= sends the dict as a form-encoded request body
response = requests.post('http://www.baidu.com/login', data=form_data)
print(response.content)
5. GET request with parameters
#coding:utf-8
# GET request carrying query-string parameters
import requests

query = {'Keywords': 'blog:qiyeboy', 'pageindex': 1}
# params= appends the dict to the URL as an encoded query string
response = requests.get('http://zzk.cnblogs.com/s/blogpost', params=query)
# The final URL shows the encoded parameters
print(response.url)
6. Response and encoding
#coding:utf-8
# Response body and encoding
import requests

response = requests.get('http://www.baidu.com')
print('content-->', response.content)    # raw bytes of the body
print('text-->', response.text)          # body decoded with the current encoding
print('encoding-->', response.encoding)  # encoding requests inferred
# Override the encoding; .text is re-decoded with the new value
response.encoding = 'utf-8'
print('new text-->', response.text)
7. chardet: automatic encoding detection
#coding:utf-8
# chardet: detect the response encoding automatically
import requests
import chardet

response = requests.get('http://www.baidu.com')
# chardet.detect returns a dict including an 'encoding' key
print(chardet.detect(response.content))
# Use the detected encoding so .text decodes correctly
response.encoding = chardet.detect(response.content)['encoding']
print(response.text)
8. Request header headers processing
#coding:utf-8
# Sending custom request headers
import requests

agent_string = 'Mozilla/4.0(compatible; MSIE 5.5; Windows NT)'
custom_headers = {'User-Agent': agent_string}
# headers= merges these fields into the outgoing request
response = requests.get('http://www.baidu.com', headers=custom_headers)
print(response.content)
9. Response code and response header processing
#coding:utf-8
# Response status code and response headers
# (fix: the if/else bodies lost their indentation and would not parse)
import requests

r = requests.get('http://www.baidu.com')
if r.status_code == requests.codes.ok:
    print(r.status_code)  # response status code
    print(r.headers)      # all response headers
    # .get() is the recommended accessor: returns None if the field is missing
    print(r.headers.get('content-type'))
    # Bracket access raises KeyError when the field is absent -- not recommended
    print(r.headers['content-type'])
else:
    # Raise an HTTPError describing the failure status
    r.raise_for_status()
10. Cookie handling : get cookies
#coding:utf-8
# Cookie handling: read the cookies the server sent back
# (fix: the for-loop body lost its indentation and would not parse)
import requests

user_agent = 'Mozilla/4.0(compatible; MSIE 5.5; Windows NT)'
headers = {'User-Agent': user_agent}
r = requests.get('http://www.baidu.com', headers=headers)
# Print every cookie field as "name:value"
for cookie in r.cookies.keys():
    print(cookie + ':' + r.cookies.get(cookie))
11. Cookie handling : setting cookies
#coding:utf-8
# Cookie handling: send custom cookies with the request
import requests

agent_string = 'Mozilla/4.0(compatible; MSIE 5.5; Windows NT)'
request_headers = {'User-Agent': agent_string}
# cookies= attaches these name/value pairs to the request
cookie_jar = {'name': 'qiye', 'age': '10'}
response = requests.get('http://www.baidu.com',
                        headers=request_headers,
                        cookies=cookie_jar)
print(response.text)
12. Cookie Handling : Automation
#coding:utf-8
# Cookie handling: let a Session carry cookies across requests automatically
import requests

login_url = 'http://www.baidu.com/login'
session = requests.Session()
# First visit the login page as a guest so the server assigns an initial cookie
resp = session.get(login_url, allow_redirects=True)
credentials = {'name': 'qiye', 'passwd': 'qiye'}
# POST the credentials; on success the guest session is upgraded to a member one
resp = session.post(login_url, data=credentials, allow_redirects=True)
print(resp.text)
13. Redirects and History
#coding:utf-8
# Redirects and request history
import requests

response = requests.get('http://github.com')
print(response.url)          # final URL after any redirects
print(response.status_code)  # status of the final response
print(response.history)      # list of intermediate redirect responses
14. Timeout settings
# Give up if no response arrives within 2 seconds
# (fix: removed the stray trailing "|" that made this line a syntax error)
requests.get('http://github.com', timeout=2)
15. Proxy settings
#coding:utf-8
# Routing requests through proxies
import requests

# Map each URL scheme to the proxy that should carry it
proxy_map = {
    "http": "http://10.10.1.10:3128",
    "https": "http://10.10.1.10:1080",
}
requests.get("http://example.org", proxies=proxy_map)