"""Scraper for xiaohuar.com videos.

Crawls the listing pages, follows every detail page, extracts the direct
<source src="..."> .mp4 URL and downloads the file to the current directory.

1. Target URL:    http://www.xiaohuar.com/v/
2. Method:        GET
3. Request header: User-Agent
"""
import re
import time
import uuid

# Pre-compiled patterns, hoisted out of the per-page parse calls.
# re.S lets '.' span newlines so attributes split across lines still match.
_DETAIL_URL_RE = re.compile('<div class="items"><a class="imglink" href="(.*?)"', re.S)
_MOVIE_URL_RE = re.compile('<source src="(.*?)">', re.S)


# --- Crawler step 1: send the request ---
def get_page(url):
    """Fetch *url* with a plain GET and return the requests.Response."""
    # Imported lazily so the pure parsing helpers below stay usable
    # even when the third-party `requests` package is not installed.
    import requests
    return requests.get(url)


# --- Crawler step 2: parse the data ---
def parse_index(html):
    """Return every detail-page URL found in a listing page's HTML."""
    return _DETAIL_URL_RE.findall(html)


def parse_detail(html):
    """Return the first <source src="..."> video URL in *html*, or None."""
    matches = _MOVIE_URL_RE.findall(html)
    if matches:
        return matches[0]
    return None  # explicit: no <source> tag on this page


# --- Crawler step 3: save the data ---
def save_video(content):
    """Write binary *content* to a uniquely named .mp4 in the cwd.

    uuid.uuid4() produces a *random* UUID (NOT timestamp-based — that is
    uuid1), so filename collisions are practically impossible.
    """
    with open(f'{uuid.uuid4()}.mp4', 'wb') as f:
        f.write(content)
    print('视频下载完毕...')


def _main():
    """Crawl listing pages 0-5 and download every video they reference."""
    for page in range(6):
        url = f'http://www.xiaohuar.com/list-3-{page}.html'
        # Fetch and parse the listing page.
        response = get_page(url)
        detail_urls = parse_index(response.text)
        # Visit each detail page referenced by the listing.
        for detail_url in detail_urls:
            detail_res = get_page(detail_url)
            movie_url = parse_detail(detail_res.text)
            # Only download when a video URL was actually found.
            if movie_url:
                print(movie_url)
                # Fetch the raw video bytes and persist them locally.
                movie_res = get_page(movie_url)
                save_video(movie_res.content)


if __name__ == '__main__':
    _main()