Web Scraping: Extracting Data

1. Regex
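The idea: fetch the page source with requests, then pull the target fields out with a regular expression. Here is a minimal sketch of the re.findall(..., re.S) idiom the scraper below relies on (the HTML snippet is invented for illustration):

import re

# re.S lets '.' match newlines, so the pattern can span multiple lines of HTML
html = '<div class="thumb">\n<img src="//pic.example.com/a.jpg" alt="demo">\n</div>'
ex = '<div class="thumb">.*?<img src="(.*?)" alt.*?</div>'
print(re.findall(ex, html, re.S))  # ['//pic.example.com/a.jpg']

The full gallery scraper: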

# Qiushibaike image gallery
import requests
import re
import os

dirname = './imgfile'
if not os.path.exists(dirname):
    os.mkdir(dirname)
url = 'https://www.qiushibaike.com/imgrank/page/%d/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'
}
for i in range(1, 4):  # pages 1-3
    url_new = url % i
    page_text = requests.get(url_new, headers=headers).text
    ex = '<div class="thumb">.*?<img src="(.*?)" alt.*?</div>'
    src_list = re.findall(ex, page_text, re.S)
    for src in src_list:
        src = 'https:' + src  # the page uses protocol-relative URLs
        img_name = src.split('/')[-1]
        img_path = dirname + '/' + img_name
        img_data = requests.get(src, headers=headers).content
        with open(img_path, 'wb') as fp:
            fp.write(img_data)
        print(img_name, 'downloaded!')

2. bs4
Instantiate a BeautifulSoup object and load the page source to be parsed into it, then call the object's methods and attributes to locate tags and extract data.
Load a locally stored HTML page into the instantiated object:
BeautifulSoup(fp, 'lxml')
Load page source fetched from the internet into the instantiated object:
BeautifulSoup(page_text, 'lxml')

from bs4 import BeautifulSoup
fp = open('./test.html', 'r', encoding='utf-8')
soup = BeautifulSoup(fp, 'lxml')
soup.find('div', class_='song')
soup.find('a', id='feng')
soup.find_all('div')[1]  # find_all returns a list; [1] takes the second match
# select by class
soup.select('.song')
# select by id
soup.select('#feng')
# hierarchical selectors
soup.select('.tang > ul > li')  # '>' matches direct children only
soup.select('.tang li')         # a space matches any descendant
# extract text
a_tag = soup.select('#feng')[0]
a_tag.text    # all text in the subtree, returned as one string
a_tag.string  # only the tag's own direct text
# extract an attribute
a_tag = soup.select('#feng')[0]
a_tag['href']
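The difference between .text and .string matters once a tag contains child tags. A quick illustration (the HTML snippet is made up):

from bs4 import BeautifulSoup

html = '<div>hello <span>world</span></div>'
soup = BeautifulSoup(html, 'lxml')
div = soup.find('div')
print(div.text)    # 'hello world' -- joins all descendant text
print(div.string)  # None -- .string is only defined when the tag has a single text child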

Scraping a novel from a fiction site:

from bs4 import BeautifulSoup
import requests

# file name to save the book under
d = input('Enter a file name (include the extension): ')
fp = open(d, 'w', encoding='utf-8')
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'
}
# main page of the novel
main_url = input("Enter the novel's main-page url: ")
# e.g. main_url = 'http://www.diyibanzhu99.com/0/329/'
page_text = requests.get(main_url, headers=headers).text
soup = BeautifulSoup(page_text, 'lxml')
c = input('Enter the number of chapters: ')
# the first three entries in the chapter list are not chapters, so start at index 3
for i in range(3, int(c) + 3):
    tag = soup.select('.bd > .list > li > a')[i]
    name = tag.string
    # the site splits each chapter across sub-pages; the script assumes 20 per chapter
    for f in range(1, 21):
        a = tag['href']
        # strip the .html suffix
        b = a.split('.')[0]
        new_url = 'http://www.diyibanzhu99.com' + b + '_%d.html'
        # url of one content sub-page
        next_url = new_url % f
        page_text_2 = requests.get(next_url, headers=headers).text
        soup_2 = BeautifulSoup(page_text_2, 'lxml')
        content = soup_2.find('div', class_='page-content font-large').text
        fp.write(name + ':' + content + '\n')
fp.close()
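As written, one failed request aborts the whole download. A minimal hardening sketch, with retries and a polite delay between requests (the fetch helper, retry count, and timeout are my additions, not part of the original script):

import time
import requests

def fetch(url, headers, retries=3, delay=1.0):
    """GET a page, retrying a few times and pausing between attempts."""
    for attempt in range(retries):
        try:
            resp = requests.get(url, headers=headers, timeout=10)
            resp.raise_for_status()
            return resp.text
        except requests.RequestException:
            time.sleep(delay)
    return None  # caller decides what to do with a permanently failed page

Replacing each bare requests.get(...).text call above with fetch(...) and skipping None results keeps one bad chapter page from killing the run.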

3. XPath
Instantiate an etree object and load the page source to be parsed into it.
Instantiating the etree object:
From a local page source:
etree.parse('test.html')
From a remote page source:
etree.HTML(page_text)

from lxml import etree
test = etree.parse('./test.html')
test.xpath('/html/body/div')
test.xpath('//ul')
test.xpath('/html/body/div/p')
test.xpath('/html/body//p')
# locate by attribute
test.xpath('/html/body/div[@class="song"]')
# locate by index (1-based)
test.xpath('//li[7]')
# direct text of a tag: /text()
test.xpath('//a[@id="feng"]/text()')[0]
# all text in a subtree: //text()
test.xpath('//div[@class="song"]//text()')
# extract an attribute
test.xpath('//a[@id="feng"]/@href')
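XPath also supports the | union operator, which lets one query cover targets under different parents. A small sketch against the same test.html (assuming it contains the .song and .tang divs the earlier examples reference):

from lxml import etree

test = etree.parse('./test.html')
# one call returns the <li> tags under .tang plus the div with class "song"
tags = test.xpath('//div[@class="tang"]//li | //div[@class="song"]')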
# scrape author names and post content from a page
from lxml import etree
import requests

fp = open('./qiutu_author.txt', 'w', encoding='utf-8')
url = 'https://www.qiushibaike.com/imgrank/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'
}

page_text = requests.get(url, headers=headers).text
text = etree.HTML(page_text)

div_list = text.xpath('//div[@class="content-block clearfix"]/div[2]/div')

for i in div_list:
    # './' makes the expression relative to the current div
    author = i.xpath('./div[1]/a[2]/h2/text()')[0]
    content = i.xpath('./a[1]/div[1]//text()')
    content = ''.join(content)
    fp.write(author + content + '\n')
fp.close()
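Note the leading ./ in the loop's xpath calls: it evaluates the expression relative to the current element, whereas // always searches from the document root, even when called on an element. A minimal illustration (HTML made up):

from lxml import etree

tree = etree.HTML('<div id="a"><p>one</p></div><div id="b"><p>two</p></div>')
divs = tree.xpath('//div')
print(divs[0].xpath('.//p/text()'))  # ['one']        -- relative to the first div
print(divs[0].xpath('//p/text()'))   # ['one', 'two'] -- searches the whole document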
# scrape images from a wallpaper site
from lxml import etree
import requests
import os

dirname = './meinv'
if not os.path.exists(dirname):
    os.mkdir(dirname)
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'
}
url = 'https://pic.netbian.com/4kmeinv/index_%d.html'
for page in range(1, 11):
    # the first page has no numeric suffix on this site
    if page == 1:
        new_url = 'https://pic.netbian.com/4kmeinv/index.html'
    else:
        new_url = url % page
    page_text = requests.get(new_url, headers=headers).text
    tree = etree.HTML(page_text)
    tag_list = tree.xpath('//ul[@class="clearfix"]/li/a')
    for tag in tag_list:
        img_src = 'https://pic.netbian.com' + tag.xpath('./img/@src')[0]
        img_name = tag.xpath('./b/text()')[0]
        # the site serves GBK, but requests decoded it as ISO-8859-1;
        # round-trip through those codecs to recover the Chinese file name
        img_name = img_name.encode('iso-8859-1').decode('gbk')
        img_data = requests.get(img_src, headers=headers).content
        img_path = dirname + '/' + img_name + '.jpg'
        with open(img_path, 'wb') as fp:
            fp.write(img_data)
            print(img_name, 'downloaded')
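The encode('iso-8859-1').decode('gbk') trick works because when a response declares no charset, requests falls back to ISO-8859-1; re-encoding with that codec recovers the original GBK bytes. An alternative is to fix the encoding once on the whole response before parsing (sketch below; it relies on requests' apparent_encoding guess being correct for this site):

import requests

resp = requests.get('https://pic.netbian.com/4kmeinv/index.html')
resp.encoding = resp.apparent_encoding  # detected from the body, typically GBK here
page_text = resp.text                   # decoded correctly in one place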

Reposted from blog.csdn.net/qq_37369726/article/details/114978669