Crawling Beijing second-hand housing data with Python: how long does the average person have to work to afford a home?

Original link: https://mp.weixin.qq.com/s?src=11&timestamp=1571726151&ver=1927&signature=6Uu*6AWn5BPvyEmq8aj1SGlNHqmhL274G3X2prvfEAy19vM*kdMCwfd43b3VQ8mXTPSAL7IacOSNigao1SarQnp1FyLL5mFQsN2waY5T33i*8HuwTrr41EiHGAmKCIG8&new=1

Crawling Ganji Beijing second-hand housing data with Python

I have been learning web crawling for a month, so to consolidate what I've learned I practice by crawling each site three ways: with XPath, with Beautiful Soup, and with regular expressions. The data comes from the following source:
[Screenshot: Ganji.com Beijing second-hand housing listings page]
XPath crawling:

The main problem solved here is how to use XPath to check whether certain elements exist. For example, some listings carry no decoration information; without such a check, a missing element will interrupt the crawl.
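
One way to guard against missing nodes is to take the first XPath match when it exists and fall back to a default otherwise. Below is a minimal sketch of that idea; the helper name first_or_default is my own and not part of the original script.

def first_or_default(element, path, default=''):
    # xpath() returns a list; an empty list means the node is missing.
    result = element.xpath(path)
    return result[0] if result else default

# usage: first_or_default(div, 'dl/dd[2]/span[9]/text()')

The full script below takes a slightly different route and simply skips any listing that raises an IndexError.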

import requests
from lxml import etree
from requests.exceptions import RequestException
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}

def get_one_page(url):
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None

def parse_one_page(content):
    if content is None:
        return
    selector = etree.HTML(content)
    ALL = selector.xpath('//*[@id="f_mew_list"]/div[6]/div[1]/div[3]/div[1]/div')
    for div in ALL:
        # Some listings are missing fields (e.g. no decoration info); skip
        # them instead of letting an IndexError abort the whole crawl.
        try:
            yield {
                'Name': div.xpath('dl/dd[1]/a/text()')[0],
                'Type': div.xpath('dl/dd[2]/span[1]/text()')[0],
                'Area': div.xpath('dl/dd[2]/span[3]/text()')[0],
                'Towards': div.xpath('dl/dd[2]/span[5]/text()')[0],
                'Floor': div.xpath('dl/dd[2]/span[7]/text()')[0].strip().replace('\n', ''),
                'Decorate': div.xpath('dl/dd[2]/span[9]/text()')[0],
                # The address needs a little extra handling
                'Address': div.xpath('dl/dd[3]//text()')[1] + div.xpath('dl/dd[3]//text()')[3].replace('\n', '') + div.xpath('dl/dd[3]//text()')[4].strip(),
                'TotalPrice': div.xpath('dl/dd[5]/div[1]/span[1]/text()')[0] + div.xpath('dl/dd[5]/div[1]/span[2]/text()')[0],
                'Price': div.xpath('dl/dd[5]/div[2]/text()')[0]
            }
        except IndexError:
            continue

def main():
    for i in range(1, 500):  # Crawl 500 pages here; set however many pages you like, within the available range
        url = 'http://bj.ganji.com/fang5/o{}/'.format(i)
        content = get_one_page(url)
        print('Page {} fetched'.format(i))
        for div in parse_one_page(content):
            print(div)

if __name__ == '__main__':
    main()

Beautiful Soup crawling:

import requests
import re
from requests.exceptions import RequestException
from bs4 import BeautifulSoup
import csv
import time

headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}

def get_one_page(url):
    try:
        response = requests.get(url,headers = headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None

def parse_one_page(content):
    if content is None:
        return
    soup = BeautifulSoup(content, 'html.parser')
    items = soup.find('div', class_=re.compile('js-tips-list'))
    if items is None:
        return
    for div in items.find_all('div', class_=re.compile('ershoufang-list')):
        # Some listings are missing fields (e.g. decoration or floor info);
        # skip them instead of letting the crawl abort.
        try:
            yield {
                'Name': div.find('a', class_=re.compile('js-title')).text,
                # A tag's .contents attribute returns its child nodes as a list
                'Type': div.find('dd', class_=re.compile('size')).contents[1].text,
                'Area': div.find('dd', class_=re.compile('size')).contents[5].text,
                'Towards': div.find('dd', class_=re.compile('size')).contents[9].text,
                'Floor': div.find('dd', class_=re.compile('size')).contents[13].text.replace('\n', ''),
                'Decorate': div.find('dd', class_=re.compile('size')).contents[17].text,
                'Address': div.find('span', class_=re.compile('area')).text.strip().replace(' ', '').replace('\n', ''),
                'TotalPrice': div.find('span', class_=re.compile('js-price')).text + div.find('span', class_=re.compile('yue')).text,
                'Price': div.find('div', class_=re.compile('time')).text
            }
        except (AttributeError, IndexError):
            continue

def main():
    fieldnames = ['Name', 'Type', 'Area', 'Towards', 'Floor', 'Decorate', 'Address', 'TotalPrice', 'Price']
    for i in range(1, 50):
        url = 'http://bj.ganji.com/fang5/o{}/'.format(i)
        content = get_one_page(url)
        print('Page {} fetched'.format(i))
        # Data.csv is saved to the current working directory unless a full path is given.
        with open('Data.csv', 'a', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            if i == 1:
                writer.writeheader()  # write the header row only once
            for item in parse_one_page(content):
                print(item)
                writer.writerow(item)
        time.sleep(3)  # Throttle the crawl; at first I crawled too aggressively and the site demanded verification.

if __name__=='__main__':
    main()
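
The time.sleep(3) throttle is the simplest fix for getting blocked. If the site still asks for verification, a randomized delay plus a simple retry can help; the sketch below is an untested variation on get_one_page, not something from the original post.

import random
import time
import requests

HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
                         '(KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}

def get_one_page_with_retry(url, retries=3):
    # Try a few times, pausing a random 2-5 seconds between attempts
    # so the requests look less machine-like.
    for _ in range(retries):
        try:
            response = requests.get(url, headers=HEADERS, timeout=10)
            if response.status_code == 200:
                return response.text
        except requests.exceptions.RequestException:
            pass
        time.sleep(random.uniform(2, 5))
    return None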

Regex crawling: I worked on this for a long time and still haven't gotten it working.
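
For reference, a regex version might start out like the sketch below. The pattern is purely illustrative: it assumes a simplified form of the listing markup (a js-title link followed by a js-price span) rather than the real Ganji HTML, which isn't reproduced here.

import re

# Illustrative pattern only; the real page structure is an assumption here.
LISTING_PATTERN = re.compile(
    r'class="[^"]*js-title[^"]*"[^>]*>(?P<name>.*?)</a>.*?'
    r'class="[^"]*js-price[^"]*"[^>]*>(?P<price>.*?)</span>',
    re.S)

def parse_with_regex(content):
    if content is None:
        return
    for match in LISTING_PATTERN.finditer(content):
        yield {'Name': match.group('name').strip(),
               'TotalPrice': match.group('price').strip()}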

Problems that are easy to run into in this process:

  • Some listings are missing pieces of information, such as decoration details. You have to add a check for this; without one, a missing element simply terminates the crawl (I fell into this pit in a big way).
  • Data.csv is stored, by default, in Python's current working directory. Here is how to view and change the working directory:
import os 
# Check Python's current working directory
print(os.getcwd())

# Change the working directory
os.chdir('e:\\workpython')
print(os.getcwd())
# Output:
# e:\workpython

What the crawler prints is dictionaries: each house's information is one dictionary. Python's Excel-related libraries are a blind spot in my knowledge, so during the crawl I simply loop over the dictionaries and write them straight to CSV.
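
To sanity-check what ended up in Data.csv, the rows can be read back with csv.DictReader; a small sketch, assuming the file was produced by the script above:

import csv

with open('Data.csv', newline='') as f:
    for row in csv.DictReader(f):
        # Each row comes back as a dict keyed by the header fields.
        print(row['Name'], row['TotalPrice'], row['Price'])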

The PyCharm output looks like this:
[Screenshot: PyCharm console output of the crawled dictionaries]
Writing the dictionaries to CSV in a loop gives the following result:
[Screenshot: the resulting Data.csv]
too expensive ~~~~

Origin: blog.csdn.net/fei347795790/article/details/102682036