Python Learning Day 4

How a crawler works:

  Simulate a browser --> send a request to the target site --> receive the response data --> extract the useful data --> save it to local files / a database.

The full crawling process:
  1. Send a request (request libraries)
    - requests module
    - selenium module

  2. Get the response data (returned by the server)

  3. Parse and extract the data (parsing libraries)
    - re (regular expressions)
    - bs4 (BeautifulSoup4)
    - XPath

  4. Save the data (storage)
    - MongoDB
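
Each of the four steps above maps to only a line or two of code. As a warm-up, here is a minimal end-to-end sketch of the whole pipeline (the target URL and the title regex are placeholders; any static page works):

import re
import requests

# 1. Send a request (simulating a browser via the User-Agent header)
response = requests.get('https://example.com',
                        headers={'user-agent': 'Mozilla/5.0'})

# 2. Get the response data
html = response.text

# 3. Parse and extract the useful data (here: the page title, via re)
title = re.findall('<title>(.*?)</title>', html, re.S)[0]

# 4. Save the data locally
with open('result.txt', 'w', encoding='utf-8') as f:
    f.write(title)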

import requests
import re    # regular-expression module
# uuid.uuid4() generates a random, practically unique string
# (it is random, not timestamp-based; uuid1() is the timestamp-based one)
import uuid

# The three steps of crawling

# 1. Send the request
def get_page(url):
    response = requests.get(url)
    return response

# 2. Parse the data
# Parse the home page to get the detail-page IDs of the videos
def parse_index(text):
    # re.findall(pattern, text, flags)
    res = re.findall('<a href="video_(.*?)"', text, re.S)
    # print(res)
    detail_url_list = []
    for m_id in res:
        # Build the detail-page url
        detail_url = 'https://www.pearvideo.com/video_' + m_id
        # print(detail_url)
        detail_url_list.append(detail_url)
    # print(detail_url_list)
    return detail_url_list

# Parse a detail page to get the video url
def parse_detail(text):
    '''
    (.*?): capture what the parentheses match
    .*?:   match without capturing

    The tag we are after looks like:
    <video webkit-playsinline="" playsinline="" x-webkit-airplay="" autoplay="autoplay" src="https://video.pearvideo.com/mp4/adshort/20190613/cont-1566073-14015522_adpkg-ad_hd.mp4" style="width: 100%; height: 100%;"></video>

    First attempt: <video.*?src="(.*?)"
    (that was only the analysis; it does not go into the code)

    Pattern actually used: srcUrl="(.*?)"
    '''
    movie_url = re.findall('srcUrl="(.*?)"', text, re.S)[0]
    return movie_url

# 3. Save the data
def save_movie(movie_url):
    response = requests.get(movie_url)
    # Write the video to a local file
    with open(f'{uuid.uuid4()}.mp4', 'wb') as f:
        f.write(response.content)
        f.flush()

if __name__ == '__main__':  # in PyCharm, typing "main" + Enter expands to this
    # 1. Send a request to the home page
    index_res = get_page(url='https://www.pearvideo.com/')

    # 2. Parse the home page to get the detail-page urls
    detail_url_list = parse_index(index_res.text)
    # print(detail_url_list)

    # 3. Send a request to each detail-page url
    for detail_url in detail_url_list:
        detail_res = get_page(url=detail_url)
        # print(detail_res.text)

        # 4. Parse the detail page to get the video url
        movie_url = parse_detail(detail_res.text)
        print(movie_url)

        # 5. Save the video
        save_movie(movie_url)
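
One caveat about save_movie: response.content buffers the entire video in memory before writing. For large files, requests supports streamed downloads; a sketch of the same step with stream=True (a drop-in alternative, not what the original notes use):

import uuid
import requests

def save_movie_streamed(movie_url):
    # stream=True fetches the body in chunks instead of all at once
    with requests.get(movie_url, stream=True) as response:
        with open(f'{uuid.uuid4()}.mp4', 'wb') as f:
            for chunk in response.iter_content(chunk_size=1024 * 1024):
                f.write(chunk)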

The same crawler, rewritten with a thread pool (multithreaded version):

import requests
import re  # regular-expression module
from concurrent.futures import ThreadPoolExecutor

# Cap the pool at 50 threads
pool = ThreadPoolExecutor(50)

def get_page(url):
    print(f'Async task: {url}')
    response = requests.get(url)
    return response

def parse_index(res):
    # res is a Future; result() returns what get_page returned
    response = res.result()
    m_ids = re.findall('<a href="video_(.*?)"', response.text, re.S)

    for m_id in m_ids:
        detail_url = 'https://www.pearvideo.com/video_' + m_id
        # Fetch each detail page in the pool; parse it in a callback
        pool.submit(get_page, detail_url).add_done_callback(parse_detail)

def parse_detail(res):
    response = res.result()
    movie_url = re.findall('srcUrl="(.*?)"', response.text, re.S)[0]
    movie_name = re.findall('<title>(.*?)<', response.text, re.S)[0]
    pool.submit(save_movie, movie_url, movie_name)

def save_movie(movie_url, movie_name):
    response = requests.get(movie_url)
    with open(f'{movie_name}.mp4', 'wb') as f:
        f.write(response.content)
        f.flush()

if __name__ == '__main__':
    url = 'https://www.pearvideo.com/'
    pool.submit(get_page, url).add_done_callback(parse_index)
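
The callback chain above (get_page -> parse_index -> get_page -> parse_detail -> save_movie) can be hard to follow. For comparison, a callback-free sketch of the same fan-out using as_completed; the URL and regexes repeat the assumptions above (pearvideo's markup as of 2019):

import re
import requests
from concurrent.futures import ThreadPoolExecutor, as_completed

def fetch(url):
    return requests.get(url).text

with ThreadPoolExecutor(10) as pool:
    index_html = fetch('https://www.pearvideo.com/')
    m_ids = re.findall('<a href="video_(.*?)"', index_html, re.S)
    # Submit every detail-page download, then handle results as they finish
    futures = [pool.submit(fetch, 'https://www.pearvideo.com/video_' + m_id)
               for m_id in m_ids]
    for future in as_completed(futures):
        print(re.findall('srcUrl="(.*?)"', future.result(), re.S))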

GET requests explained

User-Agent

# Visiting Zhihu Explore
  Request url:
    https://www.zhihu.com/explore
  Request method:
    GET
  Request headers:
    user-agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36
    cookies

import requests

# Request-header dictionary
# headers = {
#     'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
# }
# Pass the user-agent with the get request
# response = requests.get(url='https://www.zhihu.com/explore', headers=headers)
# print(response.status_code)  # 200
# # print(response.text)
# with open('zhihu.html', 'w', encoding='utf-8') as f:
#     f.write(response.text)

'''
params: request parameters
Searching Baidu for 安徽工程大学 (Anhui Polytechnic University):
https://www.baidu.com/s?wd=安徽工程大学&pn=10   -> page 2
https://www.baidu.com/s?wd=安徽工程大学&pn=20   -> page 3
'''
from urllib.parse import urlencode
# url = 'https://www.baidu.com/s?wd=%E8%94%A1%E5%BE%90%E5%9D%A4'
# url = 'https://www.baidu.com/s?' + urlencode({"wd": "蔡徐坤"})
url = 'https://www.baidu.com/s?'
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
}
# print(url)
# Pass the query string via the params argument of get()
# response = requests.get(url, headers=headers, params={"wd": "安徽工程大学"})
response = requests.get(url, headers=headers, params={"wd": "安徽工程大学", "pn": "20"})
# print(response.text)
with open('gongcheng2.html', 'w', encoding='utf-8') as f:
    f.write(response.text)
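
A quick way to see what params actually does: it percent-encodes the dictionary and appends it as the query string, exactly like urlencode. This snippet builds the request without sending it and prints the final URL:

import requests
from urllib.parse import urlencode

# Prepare the request instead of sending it, then inspect the URL
prepared = requests.Request('GET', 'https://www.baidu.com/s',
                            params={'wd': '安徽工程大学', 'pn': '20'}).prepare()
print(prepared.url)
# The same query string, built by hand:
print('https://www.baidu.com/s?' + urlencode({'wd': '安徽工程大学', 'pn': '20'}))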

Carrying login cookies to get past GitHub's login check

Request url: https://github.com/settings/emails

Request method:
  GET
Request headers:
  User-Agent
  Cookie:

import requests

# Request url
url = 'https://github.com/settings/emails'
# Request headers
headers = {
    'user-agent': '',
    # Option 1: paste the raw cookie string into the request headers
    # 'Cookie': ''
}
# github_res = requests.get(url, headers=headers)

# Option 2: pass cookies via the cookies argument; note that this dict
# maps cookie *names* to values, so a raw "Cookie" header string must
# either be parsed into name/value pairs or go in headers['Cookie']
cookies = {
    'Cookie': ''
}
github_res = requests.get(url, headers=headers, cookies=cookies)
# If the phone number bound to the account appears in the page, login worked
print('15622792660' in github_res.text)
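
requests can also manage cookies for you: a Session object stores any Set-Cookie headers it receives and sends them back on later requests to the same site. A minimal sketch (this alone does not log you in; it only shows the persistence):

import requests

session = requests.Session()             # cookie jar shared across requests
session.get('https://github.com/login')  # cookies set by this response are kept
res = session.get('https://github.com/settings/emails')  # ...and sent back here
print(res.status_code)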

Scraping Douban movies

import requests
import re

url = 'https://movie.douban.com/top250'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
}
# 1. Send a request to the Douban Top 250 page and get the response
response = requests.get(url, headers=headers)

# print(response.text)

# 2. Extract the data with a regular expression:
# detail-page url, poster url, title, rating, number of ratings
movie_content_list = re.findall(
    # pattern ("人评价" is the "people rated" suffix in the page)
    '<div class="item">.*?href="(.*?)">.*?src="(.*?)".*?<span class="title">(.*?)</span>.*?<span class="rating_num".*?>(.*?)</span>.*?<span>(.*?)人评价',

    # text to parse
    response.text,

    # flags
    re.S)

for movie_content in movie_content_list:
    # Unpack the fields of one movie
    detail_url, movie_jpg, name, point, num = movie_content
    data = f'title: {name},  detail url: {detail_url},  poster: {movie_jpg},  rating: {point},  ratings count: {num}\n'
    print(data)

    # 3. Save the data: append each movie's info to a file
    with open('douban.txt', 'a', encoding='utf-8') as f:
        f.write(data)
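
Step 4 of the overview listed MongoDB as the storage library, but these notes never actually use it. A minimal sketch with pymongo, assuming a local mongod on the default port (the database and collection names here are made up):

from pymongo import MongoClient

client = MongoClient('localhost', 27017)
collection = client['douban']['top250']  # db "douban", collection "top250"

for movie_content in movie_content_list:
    detail_url, movie_jpg, name, point, num = movie_content
    collection.insert_one({
        'name': name,
        'detail_url': detail_url,
        'poster': movie_jpg,
        'rating': point,
        'num_ratings': num,
    })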

Reposted from www.cnblogs.com/kinstday/p/11025215.html