Writing a Crawler Again - Scraping the Maoyan Movies Top 100 Board


tags: python, yield, csv, re, urllib

I recently learned the scrapy framework and kept seeing yield everywhere, so I decided to use this little example to practice it, and to get a taste of the legendary pain of parsing data with regular expressions. To make the fruits of the labor easier to enjoy, the scraped data is stored in CSV format. I noticed that a few movies' data failed to parse, yet when I ran the regex separately over the source fragments that failed to extract, it matched fine, so the regex clearly needed some more work. The likely culprit is a too-strict release-date pattern: when an entry lacks a full YYYY-MM-DD date, the lazy .*? runs past it into the next dd block and swallows a neighboring movie, so the release-time capture below has been loosened.
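
Since yield is the star of this post, here is a minimal, self-contained sketch (not part of the crawler; the function name is made up) of what a generator function actually does: calling it returns a generator object immediately, and its body only runs, pausing at each yield, as values are pulled out.

def count_offsets(n):
    # The body does not run when the function is called;
    # it executes lazily, pausing at every yield
    for i in range(n):
        yield i * 10  # hand one page offset to the caller, then pause

gen = count_offsets(3)
print(type(gen))  # <class 'generator'>
print(next(gen))  # 0  -- runs up to the first yield
print(next(gen))  # 10
print(list(gen))  # [20] -- draining it raises StopIteration internally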

import csv
import time
import re
import urllib.request
# import os, http.cookiejar


# Save the scraped data to a CSV file
def save2csv(generator, filepath, field_names):
    with open(filepath, 'w', encoding="utf-8", newline='') as fp:
        # Write rows as dicts; personally I recommend this style
        write = csv.DictWriter(fp, fieldnames=field_names)
        # Write the header row
        write.writeheader()
        while True:
            try:
                msg_list = next(generator)
                # for msg in msg_list:
                #     write.writerow(msg)
                # Since we already have a list, why not use writerows
                # (which internally uses the higher-order map function)?
                write.writerows(msg_list)
            except StopIteration:
                # The generator is exhausted; there is no more data
                print("StopIteration. data is none.")
                break
            except Exception as e:
                print(e)
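
The while True / next() / StopIteration dance above is written out only to make the generator protocol visible; it is exactly what a for loop does under the hood. An equivalent sketch (same behavior, shorter):

import csv

# Equivalent consumption with a for loop: it calls next() for us
# and quietly swallows the final StopIteration
def save2csv_short(generator, filepath, field_names):
    with open(filepath, 'w', encoding="utf-8", newline='') as fp:
        writer = csv.DictWriter(fp, fieldnames=field_names)
        writer.writeheader()
        for msg_list in generator:
            writer.writerows(msg_list)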


# Walk through the ten pages (a generator function)
def request_url(start_url, headers):
    # The Top 100 board has 100 movies, 10 per page, so the loop count is hard-coded
    for i in range(10):
        url = start_url.format(i * 10)
        print(url)
        request = urllib.request.Request(url, headers=headers)
        # No longer needed; the headers dict is passed straight to Request
        # request.add_header("User-Agent", headers["User-Agent"])
        # request.add_header("Cookie", headers["Cookie"])
        response = urllib.request.urlopen(request)
        msg_list = pick_movie_msgs(response)
        print(len(msg_list), msg_list)
        if msg_list:
            yield msg_list
        # Be polite: don't hammer the server, and don't get banned
        time.sleep(3)
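
One thing worth seeing with your own eyes: calling request_url sends no request at all, because a generator function's body does not run at call time. Nothing touches the network until the first value is pulled (the headers below are a made-up stand-in):

gen = request_url("https://maoyan.com/board/4?offset={}", {"User-Agent": "..."})
print(type(gen))  # <class 'generator'> -- no URL printed, nothing downloaded yet
# The first urlopen only fires once the consumer asks for a value:
# first_batch = next(gen)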


# Precompiled regex; this is where the power of the lazy .*? finally clicked for me
_rule = re.compile(r'<dd>.*?>(\d+)</i>'  # ranking number
                   + r'.*?src="https(.*?)".*?>'  # poster url (the part after "https")
                   + r'.*?title="(.*?)"'  # movie title
                   + r'.*?star">(.*?)</p>'  # starring actors
                   + r'.*?releasetime">.*?[::](.*?)</p>'  # release time; loose capture so entries without a full YYYY-MM-DD date still match
                   + r'.*?integer">(\d+\.).*?fraction">(\d).*?</dd>', flags=re.S)  # score, integer and fraction parts
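
The difference the ? makes: .* is greedy and backtracks from the end of the text, while .*? stops at the first possible match. A tiny demo on a fake score snippet:

import re

snippet = '<i class="integer">9.</i><i class="fraction">6</i>'
print(re.findall(r'">(.*)</i>', snippet))   # ['9.</i><i class="fraction">6'] -- greedy overshoots
print(re.findall(r'">(.*?)</i>', snippet))  # ['9.', '6'] -- lazy stops at the nearest </i>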


# Extract the useful fields from the page
def pick_movie_msgs(response):
    page_source_data = response.read()
    page_source = page_source_data.decode("utf-8")
    # print(page_source)
    data_list = _rule.findall(page_source)
    # print(data_list)
    msg_list = []
    for data in data_list:
        # data is a tuple of the captured groups
        if data:
            msg = {
                "order_number": data[0].strip(),
                "img_url": "".join(("https", data[1].strip())),
                "title": data[2].strip(),
                # Drop the "主演:" label; accept either the full-width or ASCII colon
                "star": re.split(r"[::]", data[3].strip(), maxsplit=1)[-1],
                "release_time": data[4].strip(),
                "score": "".join((data[5].strip(), data[6].strip())),
            }
            msg_list.append(msg)
    return msg_list
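
To sanity-check the regex and this parsing logic without hitting the site, you can feed _rule a hand-written dd fragment that mimics the board page's structure (every value below is fake):

sample = ('<dd><i class="board-index">1</i>'
          '<img data-src="https://img.example.com/poster.jpg" alt="">'
          '<a title="某电影" href="/films/1">'
          '<p class="star">主演:张三,李四</p>'
          '<p class="releasetime">上映时间:1993-01-01</p>'
          '<p class="score"><i class="integer">9.</i><i class="fraction">6</i></p></dd>')
print(_rule.findall(sample))
# [('1', '://img.example.com/poster.jpg', '某电影', '主演:张三,李四', '1993-01-01', '9.', '6')]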


# Entry-point function
def main():
    # Maoyan movies Top 100 board
    start_url = "https://maoyan.com/board/4?offset={}"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
        "Cookie": "...",
    }
    filepath = './maoyan.csv'  # output file path
    data_head = ["order_number", "img_url", "title", "star", "release_time", "score"]
    # Create the generator (nothing is fetched yet)
    generator = request_url(start_url, headers)
    print(type(generator))
    save2csv(generator, filepath, data_head)


# Start crawling
if __name__ == "__main__":
    main()
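
After a run finishes, a quick way to check the output is to read maoyan.csv back with csv.DictReader (a sketch, assuming the file was written by the code above):

import csv

with open('./maoyan.csv', encoding="utf-8", newline='') as fp:
    rows = list(csv.DictReader(fp))
print(len(rows))         # should be 100 if every page parsed cleanly
print(rows[0]["title"])  # title of the top-ranked movie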


Reposted from www.cnblogs.com/trent-fzq/p/11204474.html