Python Web Scraping: the requests Module

requests.get(url)
Fetches the source of a web page.

r = requests.get(url)
Attributes of the response object:

r.encoding             the encoding currently used to decode r.text
r.encoding = 'utf-8'   set the encoding explicitly
r.text                 the response body as a string, decoded with r.encoding, which is guessed from the response headers
r.content              the response body as raw bytes; gzip and deflate transfer encodings are decoded automatically
r.headers              the response headers as a dict-like object whose keys are case-insensitive; r.headers.get(key) returns None for a missing key (bracket access raises KeyError)
r.status_code          the HTTP status code of the response
r.raw                  the raw urllib3 response object; pass stream=True to the request, then read with r.raw.read()
r.ok                   True when status_code is below 400, so it is a quick success check
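
A minimal sketch exercising these attributes (httpbin.org is just a convenient test service; any reachable URL works):

import requests

r = requests.get('http://httpbin.org/get')
print(r.status_code)                  # e.g. 200
print(r.ok)                           # True for any status below 400
print(r.encoding)                     # encoding guessed from the response headers
print(r.headers['Content-Type'])      # header lookup is case-insensitive
print(r.headers.get('X-Missing'))     # .get() returns None for an absent key
print(type(r.text), type(r.content))  # <class 'str'> <class 'bytes'>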

# Special methods #

r.json()                 the JSON decoder built into requests; returns the body parsed as JSON, provided the body really is JSON, otherwise parsing raises an exception
r.raise_for_status()     raises an exception for a failed request (a 4xx/5xx response)
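
A short sketch of both helpers, again against httpbin.org:

import requests

r = requests.get('http://httpbin.org/get')
r.raise_for_status()    # no-op here; raises on a 4xx/5xx status
data = r.json()         # parse the JSON body into a dict
print(data['url'])      # http://httpbin.org/get

bad = requests.get('http://httpbin.org/status/404')
print(bad.ok)           # False
bad.raise_for_status()  # raises requests.exceptions.HTTPError: 404 Client Error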

Crawling the Baidu homepage

import requests
from requests.exceptions import HTTPError

def get_content(url):
    try:
        # Fetch the page source
        response = requests.get(url)

        # Raise an exception if the status code signals an error;
        # note that raise_for_status() raises requests' own HTTPError,
        # not urllib.error.HTTPError
        response.raise_for_status()

        # Guess the encoding from the body itself so response.text decodes correctly
        response.encoding = response.apparent_encoding
    except HTTPError as e:
        print(e)
    else:
        print(response.status_code)
        # print(response.headers)
        return response.text

if __name__ == '__main__':
    url = 'http://www.baidu.com'
    get_content(url)

Scraping a JD product page

import requests
from requests.exceptions import HTTPError

def get_content(url):
    try:
        response = requests.get(url)

        # Raise HTTPError (requests' own, not urllib's) on a failure status code
        response.raise_for_status()

        # Guess the encoding from the body itself
        response.encoding = response.apparent_encoding
    except HTTPError as e:
        print(e)
    else:
        print(response.status_code)
        # print(response.headers)
        # return response.text     # returns a str
        return response.content    # returns bytes, left undecoded

if __name__ == '__main__':
    url = 'https://item.jd.com/6789689.html'
    content = get_content(url)
    if content:   # get_content returns None on failure
        with open('doc/jingdong.html', 'wb') as f:
            f.write(content)

Scraping an Amazon product page

How do we make the request look like it comes from a browser?

import random
import requests
from requests.exceptions import HTTPError

def get_content(url):
    try:
        # Pick a random browser User-Agent so the request looks like it comes
        # from a real browser rather than the default python-requests client
        user_agents = [
            "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0",
            "Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19",
            "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0",
        ]
        response = requests.get(url, headers={'User-Agent': random.choice(user_agents)})
        response.raise_for_status()  # raise HTTPError on a failure status code
        # Guess the encoding from the body itself
        response.encoding = response.apparent_encoding
    except HTTPError as e:
        print(e)
    else:
        print(response.status_code)
        # print(response.headers)
        # return response.text
        return response.content

if __name__ == '__main__':
    # url = 'https://www.amazon.cn/dp/B01ION3VWI'
    url = 'http://www.cbrc.gov.cn/chinese/jrjg/index.html'
    content = get_content(url)
    if content:   # get_content returns None on failure
        with open('doc/bank.html', 'wb') as f:
            f.write(content)

Submitting data to a server with requests

import requests

# Common HTTP request methods:
#       GET: retrieve a resource
#       POST: submit data to the server

# 1. GET request
response = requests.get('http://www.baidu.com')
print(response.text)

# 2. POST request
# http://httpbin.org/post echoes back whatever is posted to it
response = requests.post('http://httpbin.org/post',
                         data={'name': 'fentiao', 'age': 10})
print(response.text)

# 3. DELETE request
response = requests.delete('http://httpbin.org/delete', data={'name': 'fentiao'})
print(response.text)

# 4. GET request with query parameters
# Equivalent to: https://movie.douban.com/subject/4864908/comments?start=20&limit=40&sort=new_score&status=P
url = 'https://movie.douban.com/subject/4864908/comments'
data = {
    'start': 20,
    'limit': 40,
    'sort': 'new_score',
    'status': 'P'
}
response = requests.get(url, params=data)
print(response.text)
print(response.url)   # the final URL with the encoded query string

Submitting a search keyword to Baidu

import requests

def keyword_post(url, data):
    try:
        user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.109 Safari/537.36"
        response = requests.get(url, params=data, headers={'User-Agent': user_agent})
        response.raise_for_status()   # raise an exception if the status code signals an error
        response.encoding = response.apparent_encoding  # guess the encoding so response.text knows how to decode
    except Exception as e:
        print("scrape failed:", e)
    else:
        print(response.url)
        print("scrape succeeded!")
        return response.content


def baidu():
    url = "https://www.baidu.com"
    keyword = input("Enter a search keyword: ")
    # 'wd' is the query parameter Baidu expects
    data = {
        'wd': keyword
    }
    keyword_post(url, data)


def search360():
    url = "https://www.so.com"
    keyword = input("Enter a search keyword: ")
    # 'q' is the query parameter so.com expects
    data = {
        'q': keyword
    }
    content = keyword_post(url, data)

    if content:   # keyword_post returns None on failure
        with open('360.html', 'wb') as f:
            f.write(content)

if __name__ == '__main__':
    search360()

Posting ChinaUnix login data with requests

POST the login credentials to the site.

import requests

#  1). Submit the login data
url = 'http://bbs.chinaunix.net/member.php?mod=logging&action=login&loginsubmit=yes&loginhash=La2A2'
# url = 'http://account.chinaunix.net/login'
# postData = {
#     'username': 'LVah',
#     'password': 'gf132590'
# }

postData = {
    'username': 'T424117304',
    'password': 'T1997970120'
}
response = requests.post(url, data=postData)

# 2). Write the returned page to a file so we can check whether the login succeeded
with open('doc/chinaunix.html', 'wb') as f:
    f.write(response.content)

# 3). Inspect the site's cookies
print(response.cookies)
for key, value in response.cookies.items():
    print(key + '=' + value)

Parsing JSON data with requests

  • response.content: returns bytes, e.g. when downloading images or video;
  • response.text: returns str; by default the raw bytes are decoded into a string
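
A quick sketch of the difference (httpbin.org is just a convenient echo service; any URL works):

import requests

r = requests.get('http://httpbin.org/get')
print(type(r.content))   # <class 'bytes'>: raw body, right for binary files
print(type(r.text))      # <class 'str'>:   body decoded using r.encoding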
import requests

# Parse a JSON response
ip = input('IP: ')
url = "http://ip.taobao.com/service/getIpInfo.php"
data = {
    'ip': ip
}
response = requests.get(url, params=data)
# Decode the JSON body into native Python data types
content = response.json()
print(content)
print(type(content))
country = content['data']['country']
print(country)

Downloading a specific video (or image)

import requests

def get_content(url):
    try:
        user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.109 Safari/537.36"
        response = requests.get(url, headers={'User-Agent': user_agent})
        response.raise_for_status()   # raise an exception if the status code signals an error
        response.encoding = response.apparent_encoding  # guess the encoding so response.text knows how to decode
    except Exception as e:
        print("scrape failed:", e)
    else:
        # print(response.url)
        print("scrape succeeded!")
        return response.content  # media downloads need the bytes body

if __name__ == '__main__':
    url = 'https://gss0.bdstatic.com/-4o3dSag_xI4khGkpoWK1HF6hhy/baike/w%3D268%3Bg%3D0/sign=4f7bf38ac3fc1e17fdbf8b3772ab913e/d4628535e5dde7119c3d076aabefce1b9c1661ba.jpg'
    # url = "http://gslb.miaopai.com/stream/sJvqGN6gdTP-sWKjALzuItr7mWMiva-zduKwuw__.mp4"
    movie_content = get_content(url)
    print("downloading....")
    if movie_content:   # get_content returns None on failure
        with open('doc/movie.jpg', 'wb') as f:
            f.write(movie_content)
            print("download finished.....")

Common requests usage patterns

import requests

# 1). Uploading files: 'files' carries the file contents
data = {
    'name': 'fentiao'
}
files = {
    # binary files must be opened in 'rb' mode
    'file': open('doc/movie.jpg', 'rb')
}
response = requests.post(url='http://httpbin.org/post', data=data, files=files)
print(response.text)

# 2). Setting proxies
proxies = {
    'http': '116.209.58.116:9999',
    'https': '115.151.5.40:53128'
}
response = requests.get('http://httpbin.org/get', proxies=proxies, timeout=2)
print(response.text)

# 3). Saving and reloading cookies: a Session keeps the cookie jar on the
#     client side, maintaining one conversation with the server
session = requests.Session()
# this httpbin endpoint sets a cookie
response1 = session.get('http://httpbin.org/cookies/set/name/westos')
# this endpoint echoes back the cookies the server received
response2 = session.get('http://httpbin.org/cookies')
print(response2.text)   # the 'name=westos' cookie is still present

# Without a Session, each requests.get() starts from a clean slate,
# so the cookie set by the first call is gone by the second
response1 = requests.get('http://httpbin.org/cookies/set/name/westos')
response2 = requests.get('http://httpbin.org/cookies')
print(response2.text)   # empty cookie jar

Reposted from blog.csdn.net/qq_43273590/article/details/87794023