Small Crawler Examples

Bing Translate: a POST request

import requests

# Bing Translate
url = "https://cn.bing.com/tlookupv3?isVertical=1&&IG=AC43A2DD353A42D292C13DA2ED005444&IID=translator.5028.2"

formdata = {
    'from':'en',
    'to':'zh-Hans',
    'text':'dog'
}

headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'}

res = requests.post(url=url, headers=headers, data=formdata)

print(res.json())
print(res.text)
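
The snippet above prints the raw response without checking whether the request actually succeeded. A minimal defensive variant, assuming only the standard requests API (the JSON layout returned by this endpoint is not documented here, so only transport-level checks are shown):

import requests

url = "https://cn.bing.com/tlookupv3?isVertical=1&&IG=AC43A2DD353A42D292C13DA2ED005444&IID=translator.5028.2"
formdata = {'from': 'en', 'to': 'zh-Hans', 'text': 'dog'}
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'}

# Fail fast instead of hanging, and raise on HTTP error status codes before parsing
res = requests.post(url, headers=headers, data=formdata, timeout=10)
res.raise_for_status()
try:
    print(res.json())
except ValueError:
    # The endpoint may answer with a non-JSON error page; fall back to the raw text
    print(res.text)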

Proxies

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import requests
import random
if __name__ == "__main__":
    # User-Agent strings from different browsers
    header_list = [
        # Maxthon
        {"user-agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)"},
        # Firefox
        {"user-agent": "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1"},
        # Chrome
        {"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"}
    ]
    # A pool of proxy IPs (the scheme prefix is required by recent versions of requests)
    proxy_list = [
        {"http": "http://112.115.57.20:3128"},
        {"http": "http://121.41.171.223:3128"}
    ]
    # Randomly pick a UA and a proxy IP
    header = random.choice(header_list)
    proxy = random.choice(proxy_list)

    url = 'http://www.baidu.com/s?ie=UTF-8&wd=ip'
    # The proxies= keyword argument routes this request through the chosen proxy
    response = requests.get(url=url, headers=header, proxies=proxy)
    response.encoding = 'utf-8'

    # Save the decoded page so the reported IP can be inspected in a browser
    with open('daili.html', 'w', encoding='utf-8') as fp:
        fp.write(response.text)
    # requests does not remember the proxy between calls; passing an empty proxy
    # mapping (or simply omitting proxies) sends the request from the original IP
    requests.get(url, proxies={"http": ""})
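
One way to confirm that the proxy is actually in effect is to ask an IP-echo service which address the request came from. A minimal sketch, assuming http://httpbin.org/ip is reachable (it returns JSON of the form {"origin": "<ip>"}); free proxies like the ones listed above are often dead, hence the broad error handling:

import requests

proxy = {"http": "http://121.41.171.223:3128"}  # one entry from the pool above

try:
    direct = requests.get("http://httpbin.org/ip", timeout=10).json()["origin"]
    via_proxy = requests.get("http://httpbin.org/ip", proxies=proxy, timeout=10).json()["origin"]
    # If the proxy is working, the two addresses differ
    print("direct:", direct, "| via proxy:", via_proxy)
except requests.RequestException as exc:
    print("proxy check failed:", exc)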

Crawling a page behind a login: cookies

import requests
from lxml import etree

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36"
}
# 1. After a login request, the server creates a cookie for the current user
#    (it stores the user's session state and identity).
# 2. A request for the personal home page that carries the cookie from step 1
#    returns that user's profile page data.

# Log in
# Login request URL (captured with a packet-capture tool / browser devtools)
login_url = "http://www.renren.com/ajaxLogin/login?1=1&uniqueTimestamp=2018922053679"
session = requests.session()  # Create a session object; it stores cookies from responses and sends them on later requests
data = {
    "captcha_type":"web_login",
    "domain":"renren.com",
    "email":"18829037944",
    "f":"",
    "icode":"",
    "key_id":"1",
    "origURL":"http://www.renren.com/home",
    "password": "30f28dff42c847e99969e7e91f8356bcb80aa2e9993893add81b6ff76c899be3",
    "rkey": "f1ace095ea75f09850cbb28b87a04b9e",
}
session.post(url=login_url, data=data, headers=headers)  # Send the login request through the session so the cookies it sets are stored on the session

get_url = "http://www.renren.com/968520666/profile"
# Send the profile request through the same session; the stored cookie is attached automatically
response = session.get(url=get_url, headers=headers)
# Set the encoding used to decode the response body
response.encoding = 'utf-8'
page_text = response.text

# Write the profile page to a file
with open('./renren01.html', 'w', encoding="utf-8") as fp:
    fp.write(page_text)
    print("over")

Reposted from www.cnblogs.com/Mr-Feng/p/11274263.html