import requests
import re
import json
import time
from requests.exceptions import RequestException

def get_one_page(url):
    # Fetch the HTML returned for one page URL; return None on any failure.
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None

def parse_one_page(html):
    # Pick out the fields we need from the page with a regular expression.
    pattern = re.compile(
        '<dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)".*?name.*?a.*?>(.*?)</a>.*? star.*?>(.*?)</p>.*?releasetime.*?>(.*?)</p>.*?integer.*?>(.*?)</i>.*?fraction.*?>(.*?)</i>.*?</dd>', re.S
    )
    items = re.findall(pattern, html)
    # print(items)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2].strip(),
            # Drop the 3-character "主演：" prefix from the actor field.
            'actor': item[3].strip()[3:] if len(item[3]) > 3 else '',
            # Drop the 5-character "上映时间：" prefix from the release time.
            'time': item[4].strip()[5:] if len(item[4]) > 5 else '',
            'score': item[5].strip() + item[6].strip()
        }

def write_to_file(content):
    # Append each record as one JSON line to the local file 'TOP100.json'.
    with open('TOP100.json', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')

def main(offset):
    # Build the paginated board URL, fetch it, and save each parsed item.
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    if html is None:
        # The request failed; skip this page instead of crashing in the parser.
        return
    items = parse_one_page(html)
    for item in items:
        print(item)
        write_to_file(item)

if __name__ == '__main__':
    # Crawl the 10 pages (offset 0, 10, ..., 90) and use time.sleep
    # to space out the requests.
    for i in range(10):
        main(offset=i * 10)
        time.sleep(1)
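
The regular expression in parse_one_page is the densest part of the script. As a quick sanity check, here is a minimal sketch that runs it against a synthetic <dd> fragment; the fragment and its values are invented for illustration, built only to contain the tokens the pattern looks for (board-index, data-src, name, star, releasetime, integer, fraction), and are not real Maoyan markup:

    # sample_html is a made-up fixture, not real page data. Note the extra
    # "movie" class: the pattern as written expects a space right before "star".
    sample_html = '''
    <dd>
      <i class="board-index">1</i>
      <img data-src="http://example.com/poster.jpg">
      <p class="name"><a href="/films/1">示例电影</a></p>
      <p class="movie star">主演：张三,李四</p>
      <p class="releasetime">上映时间：1994-09-10</p>
      <i class="integer">9.</i><i class="fraction">5</i>
    </dd>
    '''

    for item in parse_one_page(sample_html):
        print(item)
    # Expected output (one dict):
    # {'index': '1', 'image': 'http://example.com/poster.jpg', 'title': '示例电影',
    #  'actor': '张三,李四', 'time': '1994-09-10', 'score': '9.5'}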
That is the source code, served up directly; the overall code framework comes from the book 《Python3网络爬虫开发实战》 (Python 3 Web Crawler Development in Practice).
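
One usage note: write_to_file appends one JSON object per line (JSON Lines) rather than a single JSON array, so reading the results back needs a line-by-line parse. A minimal sketch, assuming the 'TOP100.json' file name from the code above; read_results is a hypothetical helper, not part of the original script:

    import json

    def read_results(path='TOP100.json'):
        # Parse the file line by line, one JSON object per non-empty line.
        results = []
        with open(path, encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if line:
                    results.append(json.loads(line))
        return results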