[Python] 2. Web Scraping: Network Requests

Copyright notice: original article at https://blog.csdn.net/Edward_is_1ncredible/article/details/82814109
# I. The urllib library: the most basic network request library. It can imitate a browser, send a request to a given server, and save the data that comes back. Commonly used functions:
# 1) urlopen: fetch a page
from urllib import request
response = request.urlopen("http://www.baidu.com")
print(response.read())
# Press Ctrl+B on the function name (IDE shortcut) to jump to its declaration and inspect the available parameters

# 2) urlretrieve: download a file from the web and save it locally
from urllib import request
# The arguments are the URL and the filename to save it as
request.urlretrieve("http://www.baidu.com","baidu.html")

# 3) urlencode, quote_plus: encoding
# Difference: urlencode takes a dict, typically for a=b style query parameters; for a single string, quote / quote_plus is enough
from urllib import request
from urllib import parse
url1 = "https://baike.baidu.com/item/"
url2 = "/129156?fr=aladdin"
# urlencode form:
# params = {"item":"周杰伦"}
# qs = parse.urlencode(params)
params = "周杰伦"
qs = parse.quote_plus(params)  # percent-encode the text
url = url1 + qs + url2  # put the encoded piece back into the URL
response = request.urlopen(url)
print(response.read())
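# The urlencode form mentioned above fits key=value query strings. A minimal sketch (not in
# the original), using a Baidu search URL like the one that appears later in this article:
search_params = parse.urlencode({"wd": "周杰伦"})  # -> "wd=%E5%91%A8%E6%9D%B0%E4%BC%A6"
search_url = "https://www.baidu.com/s?" + search_params
print(search_url)  # the full, percent-encoded search URL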

# 4) parse_qs: decoding
from urllib import parse
params = {"item":"周杰伦"}
# For a full URL string, first run it through urlparse/urlsplit, then decode the query part with parse_qs
qs = parse.urlencode(params)
result = parse.parse_qs(qs)
print(result)

# 5) urlparse, urlsplit: split a URL into its component parts
# Compared with urlparse, urlsplit has no params attribute (which is rarely useful anyway)
from urllib import parse
url = "https://www.baidu.com/s?ie=utf-8&wd=%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E5%AD%A6python"
result = parse.urlsplit(url)
print(result)
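# A minimal sketch (not in the original) of the two points above: urlparse additionally exposes
# a params attribute (the part after ";" in the path, rarely used), and the query component of
# either result can be handed to parse_qs to get the parameters back as a dict:
result2 = parse.urlparse(url)
print(result2.params)                # usually an empty string; urlsplit results have no such attribute
print(parse.parse_qs(result.query))  # {'ie': ['utf-8'], 'wd': ['为什么要学python']}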

# 6) The request.Request class: adding request headers; the example below uses Lagou (lagou.com) job listings:
from urllib import request
from urllib import parse
url = "https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput="
# 1. Without request headers, calling urlopen directly
response1 = request.urlopen(url)
print(response1.read())  # only a small amount of content comes back; Lagou has detected the crawler
# 2. Add request headers (found in the browser's dev tools under Request Headers -> User-Agent)
header ={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"}
re = request.Request(url,headers=header)
response2 = request.urlopen(re)
print(response2.read())
# The full page comes back now, but it is still not what we want; viewing the page source in the browser does not show the job listings either.
# Reason: the job data lives at a different URL (positionAjax.json?needAdditionalResult=false), is embedded as JSON, and is fetched with a POST request (a JSON-parsing sketch follows the notes below)
# 3. Request with the data and method arguments
url = "https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false"
header ={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
         "Referer": "https://www.lagou.com/jobs/list_python?city=%E5%85%A8%E5%9B%BD&cl=false&fromSearch=true&labelWords=&suginput="
        }
# For data, copy the form fields the page itself sends; after a few tries they map to first: whether this is the first page, pn: page number, kd: search keyword
data = {"first":"true","pn":1,"kd":"python"}
method = "POST"
re = request.Request(url,headers=header,data=parse.urlencode(data).encode("utf-8"),method=method)
response = request.urlopen(re)
print(response.read())
    # A few things to note:
    # 1. The data dict cannot be passed in directly; that raises TypeError: can't concat str to bytes, so it has to be encoded first
    # 2. After urlencode it still fails with TypeError: POST data should be bytes, an iterable of bytes, or a file object. It cannot be of type str. Strings in Python 3 are unicode by default, so the urlencoded string also has to be encoded to bytes
    # 3. With only the original request header the wanted data still does not come back; a Referer header has to be added as well
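# Since the job data comes back as JSON (see the note above), the body can be parsed into plain
# Python objects with the standard json module. A minimal sketch, not part of the original
# (response.read() can only be consumed once, so it is kept commented out here):
# import json
# body = response.read().decode("utf-8")
# result = json.loads(body)
# print(result)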

# 7) The ProxyHandler handler (proxy settings):
# 1. Why use it: many sites monitor how often a given IP visits within a time window; if the visits are too concentrated, that IP gets blocked, so proxy servers can be used instead
# 2. How it works: before requesting the target site, the request goes to a proxy server; the proxy requests the site for us, and once it has the target's response it forwards it back to our code
# 3. Commonly used sources: 西部免费代理IP, 快代理, 代理云
# 4. A handy site for testing requests: http://httpbin.org
# 5. Approach:
    # i. pass a proxy dict to urllib.request.ProxyHandler to create a handler
    # ii. use that handler together with urllib.request.build_opener to create an opener
    # iii. call the opener's open method to send the request
from urllib import request
url = "http://httpbin.org/ip"
# 1. Without a proxy
response1 = request.urlopen(url)
print(response1.read())
# 2. With a proxy
# 2-1. Pass the proxy to ProxyHandler to build a handler; it takes a dict of {scheme: "ip:port"}
handler = request.ProxyHandler({"http":"39.135.11.97:80"})
# 2-2. Use the handler created above to build an opener
opener = request.build_opener(handler)
# 2-3. Use the opener to send the request
response2 = opener.open(url)
print(response2.read())
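# A small follow-up (not in the original): request.install_opener can register the proxy opener
# as the process-wide default, so that plain request.urlopen calls also go through the proxy.
# Left commented out here because it would affect every later urlopen call in this file:
# request.install_opener(opener)
# print(request.urlopen(url).read())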

# 8) Using the http.cookiejar module (cookielib in Python 2) and HTTPCookieProcessor to simulate a login
# 1. What a cookie is: a piece of text that the site's server stores in the user's browser in order to identify the user and track the session
# 2. Cookie format (a small parsing sketch follows this list):
    # Set-Cookie: NAME=VALUE;Expires/Max-age=DATE;Path=PATH;Domain=DOMAIN_NAME;SECURE (e.g. H_PS_PSSID=26523_1430_21080_18560_20930; path=/; domain=.baidu.com)
    # NAME: the cookie's name
    # VALUE: the cookie's value
    # Expires: when the cookie expires
    # Path: the path the cookie applies to
    # Domain: the domain the cookie applies to
    # SECURE: whether the cookie is only sent over https
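# To look at these fields programmatically, the standard library's http.cookies.SimpleCookie can
# parse a Set-Cookie style string; a minimal sketch, not part of the original:
from http.cookies import SimpleCookie
c = SimpleCookie()
c.load("H_PS_PSSID=26523_1430_21080_18560_20930; Path=/; Domain=.baidu.com")
for name, morsel in c.items():
    print(name, morsel.value, morsel["path"], morsel["domain"])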
# 3. Using cookies to simulate a login (renren.com)
# login_url = "http://www.renren.com/PLogin.do"
# dapeng_url = "http://www.renren.com/880151247/profile"
    # 3-1. Getting the cookie manually
from urllib import request
dapeng_url = "http://www.renren.com/880151247/profile"
headers={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
         "Cookie":"anonymid=jmfo3rtd-qqw614; depovince=GW; _r01_=1; [email protected]; ln_hurl=http://hdn.xnimg.cn/photos/hdn421/20130724/1005/h_main_4Uln_61b40000018e111a.jpg; __utma=151146938.1018799493.1537755899.1537755899.1537755899.1; __utmz=151146938.1537755899.1.1.utmcsr=renren.com|utmccn=(referral)|utmcmd=referral|utmcct=/361722792/profile; wp_fold=0; jebecookies=5b198591-fc1c-40d2-98d7-a20fb74dbab1|||||; ick_login=ec09b039-4fa0-4d3b-80dc-919c1856500f; _de=B4E32D1A836EA635E2A69D838E8B6F33696BF75400CE19CC; p=b6ed7fa041c525ce392291f7fdcf32572; first_login_flag=1; t=935fd259a10790130472041798ba20372; societyguester=935fd259a10790130472041798ba20372; id=361722792; xnsid=f63825e0; ver=7.0; loginfrom=null; JSESSIONID=abcb6zA3mitavdu89Xjyw; XNESSESSIONID=f1898062548e; WebOnLineNotice_361722792=1"}
re = request.Request(url=dapeng_url,headers=headers)
response = request.urlopen(re)
with open ("test.html","w",encoding="utf-8") as f:
    f.write(response.read().decode("utf-8"))
    # Notes:
        # i. write() must be given a str
        # ii. response.read() returns bytes
        # iii. str -> encode -> bytes, and bytes -> decode -> str (tiny illustration below)
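        # A tiny illustration (not in the original): "中国".encode("utf-8") gives b'\xe4\xb8\xad\xe5\x9b\xbd',
        # and b'\xe4\xb8\xad\xe5\x9b\xbd'.decode("utf-8") gives '中国' again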
    # 3-2. Using HTTPCookieProcessor to obtain the cookie automatically
from urllib import request
from urllib import parse
from http.cookiejar import CookieJar
headers= {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"}
# Create an opener that can capture cookies
def get_opener():
    # Create a CookieJar object, whose main job is to provide storage for cookies
    cookiejar = CookieJar()
    # Use the CookieJar to create an HTTPCookieProcessor object
    handler = request.HTTPCookieProcessor(cookiejar=cookiejar)
    # Use the handler created in the previous step to create an opener
    opener = request.build_opener(handler)
    return opener
# Log in and capture the corresponding cookie
def login(opener):
    # Use the opener to send the login request (renren.com email and password)
    data = {"email":"[email protected]","password":"xxxxxx"}
    data = parse.urlencode(data).encode('utf-8')
    login_url = "http://www.renren.com/PLogin.do"
    re = request.Request(url=login_url,data=data,headers=headers)
    opener.open(re)  # this open() call is where the opener picks up the cookie
# Use the captured cookie to visit the target page
def visit(opener):
    dapeng_url = "http://www.renren.com/880151247/profile"
    # Note: do not build a new opener to fetch the profile page; reuse the previous one, since it already holds the needed cookie
    re = request.Request(url=dapeng_url,headers=headers)
    response = opener.open(re)
    with open("test2.html","w",encoding="utf-8") as f:
        f.write(response.read().decode("utf-8"))
if __name__ == '__main__':
    opener = get_opener()
    login(opener)
    visit(opener)

# 9) Saving cookie information
# Idea: a cookiejar's save method can write the cookies to disk, but it needs a filename
from urllib import request
from http.cookiejar import MozillaCookieJar
headers = {"User-Agent":"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Mobile Safari/537.36"}
# 1. First set a test cookie by visiting: http://httpbin.org/cookies/set?test=sadas123asd645as4d6
# 2. Create an opener
cookiejar = MozillaCookieJar("cookie.txt")  # MozillaCookieJar is a CookieJar with a file path to store the cookies in
handler = request.HTTPCookieProcessor(cookiejar)  # build a handler from it with HTTPCookieProcessor
opener = request.build_opener(handler)  # build an opener from the handler above
# 3. Get the cookie through the opener
re = request.Request(url="http://httpbin.org/cookies/set?test=sadas123asd645as4d6",headers=headers)
response = opener.open(re)
# 4. Save the captured cookies locally (to the path given above)
cookiejar.save(ignore_discard=True)  # these cookies expire when the browser session ends, so ignore_discard is needed to keep such about-to-be-discarded cookies
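# The reverse direction (not in the original): a saved cookie file can be loaded back into a fresh
# MozillaCookieJar before building a new opener; a minimal sketch:
cookiejar2 = MozillaCookieJar("cookie.txt")
cookiejar2.load(ignore_discard=True)  # ignore_discard again, to read back the session cookies we kept
for cookie in cookiejar2:
    print(cookie)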


# II. The requests library: built for human beings!
# Documentation: http://docs.python-requests.org/zh_CN/latest/index.html
# 1) Sending a GET request: just call requests.get; whatever the HTTP method, call the function of the same name
import requests
url = "http://www.baidu.com/"
response = requests.get(url=url)
# 1. Some attributes of the response (the same applies to post and the other request methods)
    # 1-1. params accepts the query parameters as a dict or a string; a dict is turned into a percent-encoded query string automatically, so urlencode() is not needed (get -> params, post -> data)
kw = {"wd":"中国"}
headers = {"User-Agent":"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Mobile Safari/537.36"}
response = requests.get(url="http://www.baidu.com/s",params=kw,headers=headers)
with open("test.html","w",encoding="utf-8") as f:
    f.write(response.content.decode("utf-8"))
    # 1-2. Inspecting the response body:
# The difference between .text and .content: the former returns unicode (requests picks the decoding automatically, so some characters may come out garbled), the latter returns bytes that we decode ourselves
# Note: on the network and on disk, data is always stored as bytes
print(response.text)
print(response.content.decode("utf-8"))
    # 1-3. The full URL that was requested
print(response.url)
    # 1-4. The response's character encoding
print(response.encoding)
    # 1-5. The status code
print(response.status_code)

# 2) Sending a POST request, again with Lagou as the example
import requests
url = "https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false"
headers ={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
         "Referer": "https://www.lagou.com/jobs/list_python?city=%E5%85%A8%E5%9B%BD&cl=false&fromSearch=true&labelWords=&suginput="
        }
data = {"first":"true","pn":1,"kd":"python"}
response = requests.post(url=url,headers=headers,data=data)
print(response.text)
# Since this endpoint returns JSON, the body can also be converted straight into a dict:
# print(response.json())

# 3) Using a proxy with requests:
# Idea: pass a proxies argument to get, post, or whichever request method is used
import requests
url = "http://httpbin.org/ip"
# 1. Without a proxy
response1 = requests.get(url=url)
print(response1.text)
# 2. With a proxy: a dict of {scheme: "ip:port"}
proxy = {"http":"39.135.11.97:80"}
response2 = requests.get(url=url,proxies=proxy)
print(response2.text)

# 4) Cookies with requests:
import requests
url = "http://www.baidu.com/"
# 1. Reading the cookies
response = requests.get(url=url)
print(response.cookies.get_dict())  # get_dict() returns the cookies as a dict
# 2. Using cookies (Session), with renren.com as the example
# Session: a session object (not the session concept from web development); it is the requests equivalent of sharing cookies through an opener in urllib
login_url = "http://www.renren.com/PLogin.do"
dapeng_url = "http://www.renren.com/880151247/profile"
data = {"email":"[email protected]","password":"xxxxxx"}
headers= {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"}
    # 2-1. Log in
session = requests.Session()
session.post(url=login_url,headers=headers,data=data)  # this request is where the cookie is captured
    # 2-2. Visit the profile page
response = session.get(url=dapeng_url)
    # 2-3. Save it locally
# Or just print it: print(response.text)
with open("renren.html","w",encoding="utf-8") as f:
    f.write(response.text)

# 5) Handling untrusted SSL certificates
# For sites whose SSL certificate is trusted (e.g. https://www.baidu.com), a plain requests call is enough; for sites with an untrusted certificate, an extra argument is needed
response = requests.get("--a url--",verify=False)
print(response.content.decode("utf-8"))
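# Note (not in the original): with verify=False, urllib3 emits an InsecureRequestWarning on every
# request; if that noise is unwanted it can be silenced like this:
# import urllib3
# urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)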
