python爬虫-爬取天气预报内容

使用requests模块获取网页内容,用bs4清洗数据,用pandas保存数据,一般保存为csv格式。
import requests
from bs4 import BeautifulSoup
import pandas as pd

def Get_data(url):
    """Fetch one month of historical weather data and return it as a DataFrame.

    Parameters
    ----------
    url : str
        URL of a tianqihoubao.com monthly-history page (GBK-encoded HTML).

    Returns
    -------
    pandas.DataFrame
        Columns: '日期' (date), '温度' (temperature), '天气情况' (conditions).

    Raises
    ------
    requests.HTTPError
        If the server responds with an error status.
    """
    # Fetch the page; a timeout prevents hanging forever, and raise_for_status
    # fails fast instead of silently parsing an error page.
    res = requests.get(url, timeout=10)
    res.raise_for_status()
    # The site serves GBK-encoded HTML, so decode explicitly.
    html = res.content.decode('gbk')
    soup = BeautifulSoup(html, 'html.parser')
    # Each <tr> after the header row holds one day's record.
    tr_list = soup.find_all('tr')
    dates = []
    temps = []
    conditions = []
    for row in tr_list[1:]:
        cells = row.text.split()
        # Skip malformed rows that lack the expected 6 whitespace-separated
        # fields (would otherwise raise IndexError or produce garbage columns).
        if len(cells) < 6:
            continue
        dates.append(cells[0])
        temps.append(''.join(cells[3:6]))
        conditions.append(''.join(cells[1:3]))
    table = pd.DataFrame()
    table['日期'] = dates
    table['温度'] = temps
    table['天气情况'] = conditions
    return table

# Scrape September-November 2019 for Beijing; a loop replaces three
# copy-pasted calls that differed only in the month string.
base_url = 'http://www.tianqihoubao.com/lishi/beijing/month/{}.html'
monthly_frames = [Get_data(base_url.format(ym)) for ym in ('201909', '201910', '201911')]
# Concatenate the monthly tables into one and renumber rows from 0.
v = pd.concat(monthly_frames).reset_index(drop=True)
# Save as CSV (no index column) in UTF-8.
v.to_csv('BeiJing.csv', index=False, encoding='utf-8')
天气预报爬虫

猜你喜欢

转载自www.cnblogs.com/eddycomeon/p/11972188.html