Web Crawler: XPath


# XPath: finds information in HTML; traverses the elements of an XML document and extracts attributes
# XML: designed to transport data; its structure closely resembles HTML; it is a markup language

"""
xpath常见的语法:

nodename:选取此节点中的所有子节点
/:从根节点开始查找
//: 匹配节点,不考虑节点位置
. :选取当前节点
..:选取当前节点的父节点
@:取标签的属性  a/@href 取a标签的href属性
a/text():取标签的文本
a[@class='123]:根据某个(class)属性寻找标签  a[@id='123]
a[@id='123'][last()]:取最后一个id为123的a标签
a[@id='123'][position()<2]:取id为123的前两个标签
li[@id="123"][position()=2]:取第二个id为1123的li标签
"""
import os
import re

import requests
from lxml import etree
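
# A minimal, self-contained demo of the XPath syntax listed above, run
# against made-up markup (the HTML snippet here is invented purely for
# illustration and is not from the target site):
def xpath_syntax_demo():
    doc = etree.HTML(
        '<div><ul>'
        '<li id="123"><a class="u" href="/a">first</a></li>'
        '<li id="123"><a class="u" href="/b">second</a></li>'
        '</ul></div>'
    )
    print(doc.xpath('//a/@href'))    # ['/a', '/b']
    print(doc.xpath('//a/text()'))   # ['first', 'second']
    print(doc.xpath('//li[@id="123"][last()]/a/text()'))        # ['second']
    print(doc.xpath('//li[@id="123"][position()<3]/a/text()'))  # ['first', 'second']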

# Example: crawl the audio channel, which is paginated via the URL path
# http://www.budejie.com/audio/
# http://www.budejie.com/audio/2
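
# The page number is just the last path segment, so the page URLs can also be
# built up front instead of being derived recursively as load_page_data does
# below (a minimal sketch; the 10-page upper bound is an assumption):
def build_page_urls(max_page=10):
    return ['http://www.budejie.com/audio/%d' % n for n in range(1, max_page + 1)]
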
def load_page_data(url):
    """
    下载器根据页面源码获取url分页地址
    :param url:
    :return:
    """
    # proxies = {
    #     'http': '59.37.33.62:50686',
    #     'https': '61.128.208.94:3128',
    #     # for an authenticated (private) proxy use: 'https': 'http://username:password@ip:port'
    # }

    req_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
    }
    response = requests.get(url, headers=req_headers)
    response.encoding = 'utf-8'
    if response.status_code == 200:
        print('request succeeded')
        status = parse_page_data(response.text)
        # with open('page.html','w',encoding='utf-8') as file:
        #     file.write(response.text)
        if status:
            # request the next page
            pattern = re.compile(r'\d+')
            # current page number (the only digits in the url are the page number)
            cur_page = re.search(pattern, response.url).group()
            # next page number
            next_page = int(cur_page) + 1
            # next page url
            next_page_url = re.sub(pattern, str(next_page), response.url)
            load_page_data(next_page_url)


def parse_page_data(html):
    """
    使用xpath从页面源码提取数据

    :param html:
    :return:
    """
    # parse the page source with etree
    html_element = etree.HTML(html)
    auto_list = html_element.xpath('//div[@class="j-r-c"]/div[@class="j-r-list"]/ul/li')
    # print(auto_list)
    # print(type(auto_list))
    for auto in auto_list:
        auto_data = {}
        # poster name
        auto_data['name'] = auto.xpath('.//a[@class="u-user-name"]/text()')[0]
        # post content
        auto_data['content'] = auto.xpath('.//div[@class="j-r-list-c-desc"]/text()')[0]
        # publish time
        auto_data['publishTime'] = auto.xpath('.//span[@class="u-time  f-ib f-fr"]/text()')[0]
        # upvote count
        auto_data['zanNum'] = auto.xpath('.//li[@class="j-r-list-tool-l-up"]/span/text()')[0]
        # downvote count (the trailing space in the class name matches the page source)
        auto_data['lowNum'] = auto.xpath('.//li[@class="j-r-list-tool-l-down "]/span/text()')[0]
        # audio url
        auto_data['url'] = auto.xpath('.//div[@class=" j-audio"]/@data-mp3')[0]
        print(auto_data)
        download_audio_by_url(auto_data['url'], auto_data)
    # a non-empty list means the page had items, so pagination should continue
    return len(auto_list) > 0
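
# Note: indexing xpath() results with [0], as above, raises IndexError as soon
# as one node is missing. A small helper (not in the original post) makes the
# extraction tolerant of missing nodes:
def first_or_default(element, query, default=''):
    """Return the first XPath match, or `default` when nothing matches."""
    result = element.xpath(query)
    return result[0] if result else default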

def download_audio_by_url(url, auto):
    # proxies = {
    #     'http': '59.37.33.62:50686',
    #     'https': '61.128.208.94:3128',
    #     # for an authenticated (private) proxy use: 'https': 'http://username:password@ip:port'
    # }
    # to route through a proxy, pass proxies=proxies to requests.get below
    req_header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
    }
    response = requests.get(url, headers=req_header)
    if response.status_code == 200:
        # print(response.url, 'downloaded')
        # take the last 17 characters of the url as the filename
        filename = response.url[-17:]
        # the output directory must exist before writing
        os.makedirs('baisibudejie', exist_ok=True)
        with open('baisibudejie/' + filename, 'wb') as file:
            file.write(response.content)
            auto['localpath'] = 'baisibudejie/' + filename
            print('done')
        # store the record in the database
        save_data_to_db(auto)
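
# For large files, requests can stream the body instead of loading it into
# memory at once. A hedged variant (not part of the original post):
def download_audio_streaming(url, filepath):
    response = requests.get(url, stream=True)
    if response.status_code == 200:
        with open(filepath, 'wb') as file:
            for chunk in response.iter_content(chunk_size=8192):
                file.write(chunk)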

def save_data_to_db(audio):
    print(audio)
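
# save_data_to_db above is only a stub. A minimal persistence sketch using the
# standard-library sqlite3 module (the database file, table, and column names
# are assumptions, not from the original post):
def save_data_to_sqlite(audio):
    import sqlite3
    conn = sqlite3.connect('budejie.db')
    conn.execute(
        'CREATE TABLE IF NOT EXISTS audio ('
        'name TEXT, content TEXT, publishTime TEXT, zanNum TEXT, '
        'lowNum TEXT, url TEXT, localpath TEXT)'
    )
    conn.execute(
        'INSERT INTO audio VALUES (?, ?, ?, ?, ?, ?, ?)',
        (audio.get('name'), audio.get('content'), audio.get('publishTime'),
         audio.get('zanNum'), audio.get('lowNum'), audio.get('url'),
         audio.get('localpath')),
    )
    conn.commit()
    conn.close()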

if __name__ == "__main__":
    start_url = 'http://www.budejie.com/audio/1'
    load_page_data(start_url)

Reposted from blog.csdn.net/weixin_34161032/article/details/87243986