Scrapy crawler for Daomu Biji (盗墓笔记), v0.1: saving to MongoDB

This example uses Scrapy to crawl Daomu Biji (盗墓笔记) and store the results in MongoDB. Robo 3T 1.1.1 is used as the GUI client to inspect the database.

Environment: Windows 10, Python 3.6, Scrapy 1.6
IDE: PyCharm

main.py

from scrapy import cmdline

# Equivalent to running "scrapy crawl dmoz" on the command line
cmdline.execute('scrapy crawl dmoz'.split())
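
If you prefer not to shell out to the scrapy command, the same spider can also be started programmatically with CrawlerProcess. A minimal sketch, assuming main.py sits in the project root next to scrapy.cfg so the project settings can be located:

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

# Load the project's settings.py (including ITEM_PIPELINES), schedule the
# spider by its name, and block until the crawl finishes
process = CrawlerProcess(get_project_settings())
process.crawl('dmoz')
process.start()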

items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy

class NoverspiderItem(scrapy.Item):
    # Book name (the part of the page heading before the colon)
    bookName = scrapy.Field()
    # Book title (the part after the colon)
    bookTitle = scrapy.Field()
    # Chapter number
    chapterNum = scrapy.Field()
    # Chapter name
    chapterName = scrapy.Field()
    # Chapter URL
    chapterURL = scrapy.Field()
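
A Scrapy Item behaves like a dictionary, which is exactly what the pipeline below relies on when it calls dict(item). A quick illustration with made-up values:

from Noverspider.items import NoverspiderItem

item = NoverspiderItem()
item['bookName'] = 'Example book'        # hypothetical value, for illustration only
item['chapterName'] = 'Example chapter'
print(dict(item))   # {'bookName': 'Example book', 'chapterName': 'Example chapter'}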

pipelines.py

from Noverspider.items import NoverspiderItem
from scrapy.conf import settings   # project settings (scrapy.conf is deprecated; see the sketch below)
import pymongo                     # Python driver for MongoDB

class NoverspiderPipeline(object):
    def __init__(self):            # connect at startup; the database here has no password set
        host = settings['MONGODE_HOST']
        port = settings['MONGODE_PORT']
        dbName = settings['MONGODE_DBNAME']
        client = pymongo.MongoClient(host=host, port=port)
        tdb = client[dbName]
        self.post = tdb[settings['MONGODE_DOCNAME']]

    # Insert each returned dict-like item into the collection
    def process_item(self, item, spider):
        bookInfo = dict(item)
        self.post.insert_one(bookInfo)   # insert_one() replaces pymongo's deprecated insert()
        return item
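
scrapy.conf still works in Scrapy 1.6 but is deprecated and removed in Scrapy 2.x. A more idiomatic version reads the same MONGODE_* settings through from_crawler; roughly like this (an untested sketch with a hypothetical class name, not a drop-in part of this project):

import pymongo

class MongoPipeline(object):
    """Sketch: same behaviour, but using crawler.settings instead of scrapy.conf."""

    def __init__(self, host, port, db_name, doc_name):
        self.client = pymongo.MongoClient(host=host, port=port)
        self.post = self.client[db_name][doc_name]

    @classmethod
    def from_crawler(cls, crawler):
        # Pull the connection parameters from the crawler's settings object
        s = crawler.settings
        return cls(s['MONGODE_HOST'], s.getint('MONGODE_PORT'),
                   s['MONGODE_DBNAME'], s['MONGODE_DOCNAME'])

    def process_item(self, item, spider):
        self.post.insert_one(dict(item))
        return item

    def close_spider(self, spider):
        self.client.close()

To actually use this version, the ITEM_PIPELINES entry in settings.py would have to point at it instead of NoverspiderPipeline.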

settings.py

# -*- coding: utf-8 -*-

# Most of the lines below are generated by Scrapy when the project is created
# and need no extra configuration.
BOT_NAME = 'Noverspider'    # project name, filled in automatically by scrapy startproject

SPIDER_MODULES = ['Noverspider.spiders']
NEWSPIDER_MODULE = 'Noverspider.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Noverspider (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Concurrent download requests
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Cookies (enabled by default); kept enabled here
COOKIES_ENABLED = True
# MongoDB connection settings
MONGODE_HOST = '127.0.0.1'
MONGODE_PORT = 27017
MONGODE_DBNAME = 'Test'
MONGODE_DOCNAME = 'Book'
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'Noverspider.middlewares.NoverspiderSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'Noverspider.middlewares.NoverspiderDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Enable the item pipeline; required so that scraped items reach NoverspiderPipeline
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'Noverspider.pipelines.NoverspiderPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
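
After a crawl, a quick way to confirm that documents really landed in the Test database's Book collection configured above is a short pymongo check (a minimal sketch; count_documents needs pymongo 3.7 or newer):

import pymongo

client = pymongo.MongoClient('127.0.0.1', 27017)
collection = client['Test']['Book']

# Number of stored chapter records, plus a peek at the first few
print(collection.count_documents({}))
for doc in collection.find().limit(3):
    print(doc.get('bookName'), doc.get('chapterNum'), doc.get('chapterName'))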

Noverspider.py

from scrapy.spiders import Spider
from Noverspider.items import NoverspiderItem   # the item class defined in items.py

class Novspider(Spider):
    name = 'dmoz'
    # List of book index pages to crawl
    start_urls = ['http://www.daomubiji.com/dao-mu-bi-ji-1', 'http://www.daomubiji.com/dao-mu-bi-ji-2',
                  'http://www.daomubiji.com/dao-mu-bi-ji-3', 'http://www.daomubiji.com/dao-mu-bi-ji-4',
                  'http://www.daomubiji.com/dao-mu-bi-ji-5', 'http://www.daomubiji.com/dao-mu-bi-ji-6',
                  'http://www.daomubiji.com/dao-mu-bi-ji-7', 'http://www.daomubiji.com/dao-mu-bi-ji-8',
                  'http://www.daomubiji.com/dao-mu-bi-ji-2015', 'http://www.daomubiji.com/sha-hai',
                  'http://www.daomubiji.com/zang-hai-hua'
                  ]

    def parse(self, response):
        # Book name from the page heading
        bookName = response.xpath('//div[@class="container"]/h1/text()').extract()
        # Grab the enclosing chapter block first, then drill down into it
        Chapter = response.xpath('//div[@class="excerpts"]')
        # Link text and href of every chapter inside the block
        content = Chapter.xpath('.//article/a/text()').extract()
        url = Chapter.xpath('.//article/a/@href').extract()
        # Loop over the chapters and build one item per chapter
        for i in range(len(url)):
            # Create a fresh item each time so values cannot leak between iterations
            item = NoverspiderItem()
            try:
                item['bookName'] = bookName[0].split(':')[0]
                item['bookTitle'] = bookName[0].split(':')[1]
            except IndexError:
                # The heading contains no colon: keep the whole heading as the book name
                item['bookName'] = bookName[0]

            item['chapterURL'] = url[i]

            try:
                item['chapterNum'] = content[i].split(' ')[1]
                item['chapterName'] = content[i].split(' ')[2]
            except IndexError:
                # The link text has only two space-separated parts
                item['chapterNum'] = content[i].split(' ')[1]
            yield item

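The two try/except blocks in parse() exist because not every page heading contains a colon and not every chapter link splits into three space-separated parts. A tiny standalone illustration of the same fallback idea, using a made-up heading (the real text on the site may differ):

heading = 'Daomubiji Part One'   # hypothetical heading with no colon in it
try:
    book_name, book_title = heading.split(':', 1)
except ValueError:               # no colon found: keep the whole heading as the name
    book_name, book_title = heading, ''
print(book_name, repr(book_title))
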
The scraped data:
[screenshot of the scraped records]
PS: This is based on the introductory crawler course from Jikexueyuan (极客学院). Since that video is quite old and the site's structure has changed, the code here has been rewritten.


Reposted from blog.csdn.net/qq_40258748/article/details/88679613