爬取网站小猪短租的少量信息及详细介绍--爬虫案例篇

版权声明:如有侵权,请联系作者删除该文件! https://blog.csdn.net/Programmer_huangtao/article/details/83010801
#!/usr/bin/env python
# -*- coding:utf-8 -*- 
# @Time    : 18-10-10 下午9:21
import requests #导入requests包;发请求网页
from bs4 import BeautifulSoup #导入bs4包;解析网页
import time #导入time包 时间作用本文为:睡眠时间
# Request headers: present a desktop Chrome User-Agent so the site serves
# the normal HTML pages instead of blocking the default requests UA.
headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}
def sex_xiebie(class_name):
    """Map a host avatar's CSS class list to a gender string.

    Female hosts carry the 'member_ico1' class on the detail page
    (found via the browser devtools); any other class list is
    treated as male.
    """
    return '女' if class_name == ['member_ico1'] else '男'

def get_links(url):
    """Fetch one listing-index page and scrape every detail page it links to.

    Each <a> under '#page_list > ul > li' points at a single listing's
    detail page; its href is handed to get_info() for extraction.
    """
    response = requests.get(url, headers=headers)
    page = BeautifulSoup(response.text, 'lxml')
    for anchor in page.select('#page_list > ul > li > a'):
        get_info(anchor.get("href"))

def get_info(url):
    """Fetch one listing's detail page and print its scraped fields.

    Extracts the title, address, nightly price, host avatar URL, host
    name and host gender from the page, then prints one dict per
    listing found.

    :param url: absolute URL of a listing detail page.
    """
    # BUG FIX: the original passed the string 'headers=headers' as the
    # second positional argument (requests' `params`), so the custom
    # User-Agent header was never sent. Pass the dict as a keyword.
    wb_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    # CSS selectors for each field on the detail page (found via devtools).
    titles = soup.select('div.pho_info > h4')
    addresses = soup.select('span.pr5')
    prices = soup.select('#pricePart > div.day_l > span')
    imgs = soup.select('#floatRightBox > div.js_box.clearfix > div.member_pic > a > img')
    names = soup.select('#floatRightBox > div.js_box.clearfix > div.w_240 > h6 > a')
    sexs = soup.select('#floatRightBox > div.js_box.clearfix > div.member_pic > div')
    for title, address, price, img, name, sex in zip(titles, addresses, prices, imgs, names, sexs):
        data = {
            '主题': title.get_text().strip(),
            '地址': address.get_text().strip(),
            '价格': price.get_text(),
            '图片': img.get("src"),
            '名称': name.get_text(),
            # BUG FIX: the original called the bs4 Tag itself
            # (`sex(sex.get("class"))`), which raises TypeError; route the
            # class list through the gender-mapping helper instead.
            '性别': sex_xiebie(sex.get("class")),
        }
        print(data)

if __name__ == '__main__':
    # Entry point: the listing-index URL embeds the page number, so the
    # first 15 pages can be built directly from the template.
    urls = ['http://bj.xiaozhu.com/search-duanzufang-p{}-0/'.format(number)
            for number in range(1, 16)]
    # BUG FIX: the original `for` loop sat at column 0, outside the
    # __main__ guard, so the crawl also ran on import; and time.sleep(3)
    # executed once after ALL pages instead of once per page as the
    # comment intended. Both now live inside the guard/loop.
    for single_url in urls:
        get_links(single_url)
        # Pause 3 seconds between index pages to avoid being blocked.
        time.sleep(3)

猜你喜欢

转载自blog.csdn.net/Programmer_huangtao/article/details/83010801