(2) Web crawlers, too, must follow a code of conduct ("even thieves have principles")
(1) Limitation of web crawlers
-
Source review: check the User-Agent field of the HTTP request header
-
Announcement: Robots Agreement-robots.txt file in the root directory of the website
(3) Hands-on web crawling with the Requests library
(1) Crawling of Jingdong product page
import requests
def getHTMLText(url):
try:
Headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
}
r = requests.get(url,headers=Headers,timeout=30)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text[:1000]
except:
return "产生异常"
if __name__ == "__main__":
url = "https://item.jd.com/100004323294.html"
print(getHTMLText(url))
(2) Crawling of Amazon product page
import requests
def getHTMLText(url):
try:
Headers ={
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
'Cookies': 'session-id=460-2669091-3751610; i18n-prefs=CNY; ubid-acbcn=460-5518508-6686437; x-wl-uid=1ZNig4QusLPyi/2fMRt2+/B9UI77CSP/FURf3oOMMEU5Qkn5d5DWvk3ZFsfCwdw4gQUS8PQ6cQls=; session-token="Tsypc2KcjVgqDHIbYljZ3S6e6UpT8lE0Ep5iBlUEUMOR1c6UOTsT46LMslGbryJIDtKQi9eEPX3DDHl4GrcE39k7YvQKBZkkcJ7Iyz6WJo69+IsEl5RCj4I5lStPd8Aysjq91yFBZT7jviCBycWPKVz+Df2gI+6L5haArRakytUPoYW0t4wASl/nz4LpD8dYh9xlXuBViLQf7en5aVPOhxSU9h7IuM1MDO7wQLYrFiFrICD/rphjTw=="; session-id-time=2082729601l; csm-hit=tb:G7W4MG6KYD3V0Z4C2BPR+s-0AWC79VGM2DFRQZSHM5N|1586669357960&t:1586669357960&adb:adblk_no'
}
r = requests.get(url,headers=Headers,timeout=30)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
return "产生异常"
if __name__ == "__main__":
url = "https://www.amazon.cn/dp/B07FQKB4TM"
print(getHTMLText(url))
(3) Baidu / 360 search keyword submission
- Baidu keyword interface: http://www.baidu.com/s?wd=keyword
import requests
def getHTMLSearch(url, keyword):
try:
Headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
}
kv = {'wd': keyword}
r = requests.get(url, headers=Headers, params=kv, timeout=30)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
return "产生异常"
if __name__ == "__main__":
url = "http://www.baidu.com/s"
print(len(getHTMLSearch(url, keyword='Python')))
- 360 keyword interface: http://www.so.com/s?q=keyword
import requests
def getHTMLSearch(url, keyword):
try:
Headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
}
kv = {'q': keyword}
r = requests.get(url, headers=Headers, params=kv, timeout=30)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
return "产生异常"
if __name__ == "__main__":
url = "http://www.so.com/s"
print(len(getHTMLSearch(url, keyword='Python')))
(4) Crawling and storage of network pictures
import requests
import os
url = "http://image.nationalgeographic.com.cn/2017/0211/20170211061910157.jpg"
root = "D://pics//"
path = root + url.split('/')[-1]
try:
if not os.path.exists(root):
os.mkdir(root)
if not os.path.exists(path):
r = requests.get(url)
with open(path,'wb') as f:
f.write(r.content)
f.close()
print("pic save success")
else:
print("pic already exist")
except:
print("spider fail")
(5) Automatic inquiry of where the IP address belongs
https://www.ip138.com/iplookup.asp?ip={ipaddress}&action=2
import requests
def getHTMLText(url):
try:
Headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
}
r = requests.get(url,headers=Headers,timeout=30)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
return "产生异常"
if __name__ == "__main__":
ip = '202.204.80.112'
url = "https://www.ip138.com/iplookup.asp?ip={}&action=2".format(ip)
print(getHTMLText(url))