Scraping Programming-Category Book Information from Dangdang.com

Approach

The idea is straightforward: iterate over every listing page, extract the book information from each page, and store it in MongoDB through an item pipeline.
URL construction:

url = "http://category.dangdang.com/pg" + str(i) + "-cp01.54.06.00.00.00.html"

The crawler is built with the Scrapy framework.
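
As a quick illustration (not part of the project code), the page URLs the spider will visit can be enumerated like this; note that page 1 is the bare category URL used as the start URL, while pages 2-100 carry the pgN- prefix:

# Illustrative sketch: enumerate the category page URLs.
base = "http://category.dangdang.com/"
start_url = base + "cp01.54.06.00.00.00.html"                                     # page 1
page_urls = [base + "pg%d-cp01.54.06.00.00.00.html" % i for i in range(2, 101)]   # pages 2-100
print(page_urls[0])   # http://category.dangdang.com/pg2-cp01.54.06.00.00.00.html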

Project Layout

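For reference, the layout produced by scrapy startproject dangdang, plus the hand-written main.py entry script shown below, looks roughly like this (a sketch of the standard scaffold, not a verbatim listing of the original tree):

dangdang/
├── scrapy.cfg
├── main.py                 # entry script (location assumed; see main.py below)
└── dangdang/
    ├── __init__.py
    ├── items.py            # DangdangItem definition
    ├── middlewares.py
    ├── pipelines.py        # MongoDB pipeline
    ├── settings.py
    └── spiders/
        ├── __init__.py
        └── dangdang_spider.py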

Code

dangdang_spider.py

# -*- coding: utf-8 -*-
import scrapy
from dangdang.items import DangdangItem
from scrapy.http import Request


class DangdangSpiderSpider(scrapy.Spider):
    name = 'dangdang_spider'
    allowed_domains = ['dangdang.com']
    start_urls = ['http://category.dangdang.com/cp01.54.06.00.00.00.html']

    def parse(self, response):
        li_list = response.xpath('//ul[@class="bigimg"]//li')
        for li in li_list:
            title = li.xpath('./a/@title').get()
            link = li.xpath('./a/@href').get()
            detail = li.xpath('./p[@class="detail"]/text()').get()
            price = li.xpath('./p[@class="price"]/span[@class="search_now_price"]/text()').get()
            comment_num = li.xpath('./p[@class="search_star_line"]/a/text()').get()
            author = li.xpath('./p[@class="search_book_author"]//span[1]/a/text()').get()
            public_time = (li.xpath('./p[@class="search_book_author"]//span[2]/text()').get() or "").replace(" /", "")
            press = li.xpath('./p[@class="search_book_author"]//span[3]/a/text()').get()
            item = DangdangItem(
                title=title,
                link=link,
                detail=detail,
                price=price,
                comment_num=comment_num,
                author=author,
                public_time=public_time,
                press=press
            )
            yield item
        # Queue the remaining listing pages once per response, outside the item loop;
        # Scrapy's duplicate filter drops URLs that have already been requested.
        for i in range(2, 101):
            url = "http://category.dangdang.com/pg" + str(i) + "-cp01.54.06.00.00.00.html"
            yield Request(url=url, callback=self.parse)
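
An equivalent and arguably cleaner approach is to schedule all 100 listing pages once in start_requests() instead of re-yielding them from parse(); the sketch below shows that alternative (it is not the code used above):

    # Alternative sketch: generate every page request up front, inside the spider class.
    def start_requests(self):
        for i in range(1, 101):
            if i == 1:
                url = "http://category.dangdang.com/cp01.54.06.00.00.00.html"
            else:
                url = "http://category.dangdang.com/pg" + str(i) + "-cp01.54.06.00.00.00.html"
            yield Request(url=url, callback=self.parse)

With this variant, parse() only extracts items and the trailing pagination loop can be dropped.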

pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo


class DangdangPipeline(object):
    def __init__(self):
        self.client = pymongo.MongoClient(host='127.0.0.1', port=27017)
        self.db = self.client['dangdang']

    def process_item(self, item, spider):
        print(item)
        data = dict(item)
        self.db.books.insert_one(data)
        return item

    def close_spider(self, spider):
        self.client.close()
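
To sanity-check what the pipeline stored, the collection can be queried directly with pymongo; a minimal sketch, assuming MongoDB is running locally on the default port and the crawl has finished:

import pymongo

client = pymongo.MongoClient(host='127.0.0.1', port=27017)
books = client['dangdang']['books']
print(books.count_documents({}))             # total number of stored books
for doc in books.find().limit(3):            # peek at a few documents
    print(doc.get('title'), doc.get('price'))
client.close()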

items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class DangdangItem(scrapy.Item):
    title = scrapy.Field()
    link = scrapy.Field()
    detail = scrapy.Field()
    price = scrapy.Field()
    author = scrapy.Field()
    public_time = scrapy.Field()
    comment_num = scrapy.Field()
    press = scrapy.Field()

settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for dangdang project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'dangdang'

SPIDER_MODULES = ['dangdang.spiders']
NEWSPIDER_MODULE = 'dangdang.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'dangdang (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'dangdang.middlewares.DangdangSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#    'dangdang.middlewares.DangdangDownloaderMiddleware': 543,
# }

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'dangdang.pipelines.DangdangPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

main.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time:    2020/2/9 11:01
# @Author:  Martin
# @File:    main.py
# @Software:PyCharm
from scrapy import cmdline

# cmdline.execute("scrapy crawl dangdang_spider".split())
cmdline.execute("scrapy crawl dangdang_spider --nolog".split())

Results

[screenshots: crawl results]

Summary

Scrapy crawls quite efficiently: all 100 pages of book listings were fetched in just a few minutes.


Source: blog.csdn.net/Deep___Learning/article/details/104233340