(1)每个电影的电影名、导演、编剧、主演、类型、国家、上映日期、片长,电影评分,以及每个星级评分的百分比数据。
(2)每个电影热门点评中的前100个评分及其评分人。
(3)进入每个评分人的主页,爬取其看过的电影信息,以及对电影的评分。(少于300部则全部爬取,多于300部则仅爬取前300个)
将上述数据均写入数据库。三张表:电影信息,用户(用户名、主页),用户对电影的评分表。写入数据时要注意电影与用户的去重。
一、创建项目,创建 spiders
二、修改配置文件
三、进入浏览器查看华语、欧美、韩国、日本电影每个标签下按评价排序的全部电影的url
华语url:page_start=0的值可以自己设定
欧美url:
韩国url:
日本url:可见只有tag=的值不同
四、提取每个电影链接,可利用浏览器插件Xpath-helper快速完成。
五、点击进入链接,提取每个电影的详细信息
电影名 //h1/span[@property]
导演 //div[@id='info']//span[@class='attrs']/a[@rel]
编剧 //div[@id='info']//span[@class="attrs"]取第二个元素就是编剧了
全部演员 //div[@id='info']//span[@class]/a[@rel='v:starring']
类型 //div[@id='info']//span[@property='v:genre']
国家 //div[@id='info'],先把全部取出来,再找国家信息
上映时间 //div[@id='info']//span[@property='v:initialReleaseDate']
时长 //div[@id='info']//span[@property='v:runtime']
豆瓣评分 //strong
分数百分比 //div[@class='ratings-on-weight']/div/span
六、到目前为止看似这个方法行得通,但电影列表信息是以 JSON 格式传过来的,用 XPath 提取电影链接会失败。在审查元素时我们可以看到实际请求地址是 https://movie.douban.com/j/search_subjects?
七、上代码
# -*- coding: utf-8 -*-
import scrapy
import json
from NewDouBan.items import MovieInfo
from urllib import urlencode
class NewDouBan(scrapy.Spider):
    """Douban movie spider.

    The explore page loads its movie list via XHR, so instead of parsing
    HTML we hit the JSON endpoint (search_subjects), follow each movie's
    detail-page URL, and yield one MovieInfo item per movie.
    """
    name = 'douban'
    allowed_domains = ['douban.com']
    # Query parameters for the JSON listing API. Only 'tag' differs between
    # the Chinese / Western / Korean / Japanese listings; 'page_start' is
    # the pagination offset and 'page_limit' the page size.
    data = {
        'type': 'movie',
        'tag': '热门',
        'sort': 'recommend',
        'page_limit': 20,
        'page_start': 0
    }
    start_urls = ['https://movie.douban.com/j/search_subjects?' + urlencode(data)]

    def parse(self, response):
        """Parse the JSON listing and schedule each movie's detail page.

        The response body is JSON of the form {"subjects": [...]}; each
        subject carries the detail-page 'url' we follow with parse_item.
        """
        subjects = json.loads(response.text)["subjects"]
        for subject in subjects:
            yield scrapy.Request(subject['url'], callback=self.parse_item)

    def parse_item(self, response):
        """Extract one movie's details from its detail page into MovieInfo."""
        item = MovieInfo()
        # Movie title.
        item['name'] = response.xpath("//h1/span[@property]/text()").extract()[0]
        # Main cast list.
        item['rolename'] = response.xpath(
            "//div[@id='info']//span[@class]/a[@rel='v:starring']/text()"
        ).extract()
        # The country is embedded in free text inside #info, so keep the
        # raw block here and post-process it downstream.
        item['country'] = response.xpath("//div[@id='info']").extract()
        # Per-star rating percentages; strip layout newlines/spaces.
        raw_rates = response.xpath(
            "//div[@class='ratings-on-weight']/div/span/text()"
        ).extract()
        item['startrate'] = [r.replace("\n", "").replace(" ", "") for r in raw_rates]
        # Detail-page URL doubles as a unique key for deduplication.
        item['url'] = response.url
        yield item
管道文件代码
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
class NewdoubanPipeline(object):
    """Append every scraped item as one numbered UTF-8 JSON line in douban.json."""

    def __init__(self):
        # Running count of items written; used as a line-number prefix.
        # Instance attribute (not class-level) so counts never leak
        # between pipeline instances.
        self.count = 0
        # Opened once per pipeline instance; closed in close_spider.
        self.filename = open("douban.json", "w")

    def process_item(self, item, spider):
        """Serialize *item* to JSON (non-ASCII preserved) and write it.

        Returns the item unchanged so later pipelines still receive it.
        """
        self.count += 1
        text = str(self.count) + json.dumps(dict(item), ensure_ascii=False) + '\n'
        self.filename.write(text.encode("utf-8"))
        return item

    def close_spider(self, spider):
        """Close the output file when the spider finishes."""
        self.filename.close()
类文件代码
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class MovieInfo(scrapy.Item):
    """Item holding the fields scraped for a single movie.

    Covers title, director, screenwriter, cast, genre, country, release
    date, runtime, overall score, per-star rating percentages, and the
    detail-page URL.
    """
    name = scrapy.Field()       # movie title
    director = scrapy.Field()   # director(s)
    adaptor = scrapy.Field()    # screenwriter(s)
    rolename = scrapy.Field()   # main cast
    type = scrapy.Field()       # genre(s)
    country = scrapy.Field()    # country/region (raw #info block)
    date = scrapy.Field()       # release date
    length = scrapy.Field()     # runtime
    grade = scrapy.Field()      # Douban score
    startrate = scrapy.Field()  # per-star rating percentages
    url = scrapy.Field()        # detail-page URL (dedup key)