Python practice examples

Example 1: Scraping the Douban Movie Top 250

from bs4 import BeautifulSoup
from urllib.request import urlopen
import pandas as pd
import numpy as np
from pandas import DataFrame,Series
import re
def split(string,regular):  # split a string with a regular expression (renamed from str to avoid shadowing the builtin)
    return re.split(regular,string)

def trans_list(main_list,sub_list):
    # Flatten sub_list into main_list in place: insert its elements at
    # sub_list's position, then remove the nested list itself.
    index=main_list.index(sub_list)
    sub_list.reverse()  # reverse so each insert lands before the previous one
    for ele in sub_list:
        main_list.insert(index,ele)
    main_list.pop(main_list.index(sub_list))
    return main_list

def extract_info(li_tag):
    # .stripped_strings yields every text node with surrounding whitespace stripped
    info=[]
    for string in li_tag.stripped_strings:
        info.append(string)
    #info=['1', '肖申克的救赎', '/\xa0The Shawshank Redemption', '/\xa0月黑高飞(港)  /  刺激1995(台)',
    #'[可播放]', '导演: 弗兰克·德拉邦特 Frank Darabont\xa0\xa0\xa0主演: 蒂姆·罗宾斯 Tim Robbins /...',
    #'1994\xa0/\xa0美国\xa0/\xa0犯罪 剧情', '9.6', '693081人评价', '希望让人自由。']

    if '[可播放]' in info:
        index=info.index('[可播放]')
        info.pop(index)  # drop the "[playable]" marker; later indices shift down by one
    class_hd=li_tag.find('div',{'class':'hd'})
    if len(class_hd.a.find_all('span'))==2:  # one of the two name variants is missing
        if '  /  ' in info[2]:
            info.insert(2,np.nan)  # English name missing: insert NaN, minding the index
            info[3]=info[3][2:]
        else:
            info[2]=info[2][2:]
            info.insert(3,np.nan)  # other name missing
    else:
        info[2]=info[2][2:]  # EnglishName; \xa0 is a non-breaking space (hex A0), a single character
        info[3]=info[3][2:]  # OtherName
    Dir_and_Act=split(info[4],r':|\xa0\xa0\xa0')  # split the director/actor string with a regex
    if len(Dir_and_Act)<4:
        Dir_and_Act.append('NaN')  # actor field missing
    Yea_Cou_Gen=split(info[5],r'\xa0/\xa0')
    info[4]=Dir_and_Act
    info[5]=Yea_Cou_Gen
    info=trans_list(info,Dir_and_Act)
    info=trans_list(info,Yea_Cou_Gen)
    info.pop(4)  # drop the '导演' (director) label
    info.pop(5)  # drop the '主演' (starring) label
    return info  # one movie's data as a flat list

def collecting_data(url,database):
    soup=BeautifulSoup(urlopen(url),'lxml')
    movie_grid=soup.find_all('ol',{'class':'grid_view'})  # locate the movie list
    movie=movie_grid[0].find_all('li')
    for li in movie:
        database.append(extract_info(li))  # for a list data, DataFrame([data]) lays it out as a row, DataFrame(data) as a column (see the demo after this script)
    return database  # database=[[],[],[],....]



def collect_all(url):
    database=[]
    collecting_data(url,database)
    data=pd.DataFrame(database)
    return data  # one page of movies as a DataFrame



# main
#url=r'https://movie.douban.com/top250?start=0&filter='  # Douban Movie Top 250 URL
page=[]
for sequence in range(0,250,25):
    url=r'https://movie.douban.com/top250?start=%d&filter=' %sequence  # URL of each Top 250 page
    page.append(collect_all(url))  # collect one page of data

GeneralData=pd.concat(page,ignore_index=True)  # pd.concat takes a list of DataFrames

# Save the data for later cleaning and analysis
GeneralData=GeneralData.drop(0,axis=1)  # drop the ranking column
column=['MovieName','EnglishName','OtherName','Director',
        'Actors','Year','Country','Genre','Rating10','RatingNum',
        'Description']
GeneralData.columns=column
GeneralData.to_csv('MovieTop250.csv',encoding='utf-8')  # to_csv defaults to utf-8, but omitting encoding when saving can cause errors on read-back
GeneralData.to_csv('Movie.csv')
print("Data saved successfully")

Example 2: Scraping with regular expressions

import requests
import re
from requests.exceptions import RequestException

def gethtml(url):
    try:
        # fetch the page HTML
        response = requests.get(url)
        print(response)
        # use the status code to check whether the request succeeded
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None
def parse_html(html):
    pattern=re.compile('<a.*?>.*?title">(.*?)</span>.*?other">(.*?)</span>.*?</div>',re.S)
    items=re.findall(pattern,html)
    print(items)
    print("-------------")
    # turn each match tuple into a dict
    for item in items:
        # yield turns this function into a generator
        yield {
            "name":item[0],
            "other":item[1].strip(" '&nbsp;/&nbsp;")
        }

def main():
    url = "https://movie.douban.com/top250"
    html=gethtml(url)
    for item in parse_html(html):
        print(item)


if __name__ == '__main__':
    main()
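
The re.S (DOTALL) flag in the pattern above makes . match newline characters too, which matters because the title and other spans sit on different lines of the HTML. A minimal illustration with a made-up string:

import re

text = "start\nmiddle\nend"
print(re.findall('start(.*?)end', text))        # [] - without re.S, '.' stops at newlines
print(re.findall('start(.*?)end', text, re.S))  # ['\nmiddle\n']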

Example 3: requests + XPath

In the browser, use Inspect Element and Copy XPath to obtain each field's XPath expression.

# -*- coding: utf-8 -*-
import requests
from lxml import etree
url = 'https://movie.douban.com/subject/1292052/'
data = requests.get(url).text
s=etree.HTML(data)
film=s.xpath('//*[@id="content"]/h1/span[1]/text()')
director=s.xpath('//*[@id="info"]/span[1]/span[2]/a/text()')
time=s.xpath('//*[@id="info"]/span[10]/text()')
runtime=s.xpath('//*[@id="info"]/span[13]/text()')
print("Movie:",film)
print("Director:",director)
print("Release date:",time)
print("Runtime:",runtime)

Example 4: Scraping Douban Top 250 book information

# -*- coding: utf-8 -*-
from lxml import etree
import requests
import time

for a in range(10):
    url = 'https://book.douban.com/top250?start={}'.format(a*25)
    data = requests.get(url).text

    s=etree.HTML(data)
    file=s.xpath('//*[@id="content"]/div/div[1]/div/table')
    time.sleep(3)  # pause between pages to go easy on the server

    for div in file:
        title = div.xpath("./tr/td[2]/div[1]/a/@title")[0]
        href = div.xpath("./tr/td[2]/div[1]/a/@href")[0]
        score=div.xpath("./tr/td[2]/div[2]/span[2]/text()")[0]
        num=div.xpath("./tr/td[2]/div[2]/span[3]/text()")[0].strip("(").strip().strip(")").strip()
        scrible=div.xpath("./tr/td[2]/p[2]/span/text()")

        if len(scrible) > 0:
            print("Title: {}, URL: {}, Rating: {}, Ratings count: {}, Quote: {}\n".format(title,href,score,num,scrible[0]))
        else:
            print("{},{},{},{}\n".format(title,href,score,num))

Example 5: Scraping rental listings

1. Single page

# -*- coding: utf-8 -*-
from lxml import etree
import requests
import time
url = "http://sz.xiaozhu.com/"
data = requests.get(url).text
s=etree.HTML(data)
file=s.xpath('//*[@id="page_list"]/ul/li')
time.sleep(1)

for name in file:
    title = name.xpath('./div[2]/div/a/span/text()')[0]
    price = name.xpath('./div[2]/span[1]/i/text()')[0]
    scrible = name.xpath('./div[2]/div/em/text()')[0].strip()
    pic = name.xpath('./a/img/@lazy_src')[0]
    print("Title: {}, Price: {}, Description: {}, Picture: {}\n".format(title,price,scrible,pic))

2. Multiple pages, saving the data locally

# -*- coding: utf-8 -*-
from lxml import etree
import requests
import time
with open(r'D:\PycharmProjects\test.txt','w',encoding='utf-8') as f:
    for a in range(1,10):
        url = 'http://sz.xiaozhu.com/search-duanzufang-p{}-0/'.format(a)
        data = requests.get(url).text
        s=etree.HTML(data)
        file=s.xpath('//*[@id="page_list"]/ul/li')
        time.sleep(1)
        for name in file:
            title = name.xpath('./div[2]/div/a/span/text()')[0]
            price = name.xpath('./div[2]/span[1]/i/text()')[0]
            scrible = name.xpath('./div[2]/div/em/text()')[0].strip()
            pic = name.xpath('./a/img/@lazy_src')[0]
            print("Title: {}, Price: {}, Description: {}, Picture: {}\n".format(title,price,scrible,pic))
            f.write("Title: {}, Price: {}, Description: {}, Picture: {}\n".format(title,price,scrible,pic))

If you save the data as CSV, the file may open as garbled text (mojibake) in Excel. One workaround: open the file in Notepad, choose Save As with the encoding set to "ANSI", then reopen it.
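
Alternatively, writing the file as UTF-8 with a BOM lets Excel detect the encoding on open, skipping the Notepad round-trip. A minimal sketch with made-up values:

# 'utf-8-sig' prepends a BOM so Excel recognizes the file as UTF-8
with open('test.csv','w',encoding='utf-8-sig') as f:
    f.write("title,price,description,picture\n")  # header row
    f.write("Cozy room near the metro,138,Entire flat,http://example.com/pic.jpg\n")  # sample row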

More examples to come!!!


Reposted from blog.csdn.net/weixin_40586270/article/details/83000020