爬虫获取的数据可以存放到本地,也可以直接存放到指定的数据库。
1、存放到本地中,pipeline文件代码:
import json
class DongguanPipeline(object):
    """Scrapy pipeline that appends each scraped item to xxx.json as one
    UTF-8 encoded JSON object per line (comma-terminated)."""

    def __init__(self):
        # File is opened in binary mode: the JSON text is encoded to UTF-8
        # by hand in process_item before being written.
        self.filename = open("xxx.json", "wb")

    def process_item(self, item, spider):
        """Serialize *item* to JSON, append it to the file, and pass the
        item along to any later pipelines."""
        serialized = "{},\n".format(json.dumps(dict(item), ensure_ascii=False))
        self.filename.write(serialized.encode("utf-8"))
        return item

    def close_spider(self, spider):
        """Release the file handle once the spider has finished."""
        self.filename.close()
2、存放到mongodb数据库中,pipeline文件代码:
import pymongo
from scrapy.conf import settings
class DoubanPipeline(object):
    """Scrapy pipeline that stores every scraped item in a MongoDB collection.

    Connection parameters are read from the project settings:
    MONGODB_HOST, MONGODB_PORT, MONGODB_DBNAME, MONGODB_SHEETNAME.
    """

    def __init__(self):
        host = settings["MONGODB_HOST"]
        port = settings["MONGODB_PORT"]
        dbname = settings["MONGODB_DBNAME"]
        sheetname = settings["MONGODB_SHEETNAME"]
        # Create the MongoDB client connection.
        client = pymongo.MongoClient(host=host, port=port)
        # Select the target database.
        mydb = client[dbname]
        # Collection ("sheet") that will hold the scraped items.
        self.sheet = mydb[sheetname]

    def process_item(self, item, spider):
        """Insert *item* as a document and return it for later pipelines."""
        data = dict(item)
        # Collection.insert() was deprecated in pymongo 3.x and removed in
        # 4.x; insert_one() is the supported single-document API.
        self.sheet.insert_one(data)
        return item
3、存放到mongodb数据库时,在setting文件中的配置
ITEM_PIPELINES = {
'douban.pipelines.DoubanPipeline': 300,
}
# MongoDB host name
MONGODB_HOST = "127.0.0.1"
# MongoDB port number
MONGODB_PORT = 27017
# Database name
MONGODB_DBNAME = "Douban"
# Name of the collection ("sheet") that stores the scraped data
MONGODB_SHEETNAME = "doubanmovies"