Python web scraping: the bs4 module

  • Fetching pages: urllib, requests
  • Parsing page content: regular expressions, BeautifulSoup4 (BS4)

Introduction

Beautiful Soup provides simple, Pythonic functions for navigating, searching, and modifying a parse tree. It is a toolkit that extracts the data you need by parsing a document. Beautiful Soup automatically converts input documents to Unicode and output documents to UTF-8, so you normally don't need to think about encodings; only when a document doesn't declare its encoding do you have to state the original encoding yourself.
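
As a small illustration (a sketch, assuming only that bs4 is installed): feed Beautiful Soup GBK-encoded bytes with a declared charset, and it hands back Unicode.

from bs4 import BeautifulSoup

# GBK-encoded bytes; the <meta> charset tells Beautiful Soup how to decode them.
raw = '<html><head><meta charset="gbk"></head><body><p>你好</p></body></html>'.encode('gbk')
soup = BeautifulSoup(raw, 'html.parser')
print(soup.original_encoding)   # gbk (the encoding Beautiful Soup detected)
print(soup.p.get_text())        # 你好, already decoded to Unicode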

import re
from bs4 import BeautifulSoup

html = """
<html>
<head><title>story12345</title></head>
<body>
<p class="title" name="dromouse"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1"><span>westos</span><!-- Elsie --></a>,
<a href="http://example.com/lacie" class="sister1" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""

soup = BeautifulSoup(html, 'html.parser')
# print(soup.prettify())

# 1. Getting content by tag name

# ****************** Common tag attributes ************************
# If the document has only one <title>, it can be fetched directly by tag name
print(soup.title)
print(type(soup.title))    # output: <class 'bs4.element.Tag'>
print(soup.title.name)     # the tag's name; output: title

# Get a tag's attribute information
print(soup.a.attrs)        # attributes of the first <a> tag
print(soup.a.attrs['href'])


# ******************* Common tag methods *************************
# 1) get() returns the value of an attribute on a tag. This is an important
#    method that comes up constantly; e.g. to get the image URL from an
#    <img src="#"> tag, use soup.img.get('src')
print(soup.a.get('href'))
print(soup.a.get('class'))


# 2) string returns the text inside a tag, but only when the tag has no child
#    tags (or exactly one child); otherwise it returns None;
# 3) get_text() returns all the text inside a tag, including the text of its
#    descendants; this is the most commonly used method
print(soup.a.string)      # None here: the first <a> holds a <span> plus a comment
print(soup.a.get_text())  # westos


# ******************* Modifying a fetched attribute ***********************
print(soup.a.get('href'))
soup.a['href'] = 'http://www.baidu.com'
print(soup.a.get('href'))
print(soup.a)

# 2. Object-style matching
# find_all returns every tag that satisfies the condition;
aTagObj = soup.find_all('a')
print(aTagObj)
for item in aTagObj:
    print(item)


# Requirement: get all <a> tags whose class is "sister"
# (class_ has a trailing underscore because class is a Python keyword)
aTagObj = soup.find_all('a', class_="sister")
print(aTagObj)

# 3. Matching by text content
print(soup.find_all(text="story"))
print(soup.find_all(text=re.compile(r'story\d+')))
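
Putting these lookups together, a common task is extracting every link's URL and text; a minimal sketch against the sample document above:

# Print (href, text) for every <a> tag in the sample document.
for a in soup.find_all('a'):
    print(a.get('href'), a.get_text())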

Additional notes

import re
from bs4 import BeautifulSoup

html = """
<html>
<head><title class='title'>story12345</title></head>
<body>
<p class="title" name="dromouse">The Dormouse's story</p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1"><span>westos</span><!-- Elsie --></a>,
<a href="http://example.com/lacie" class="sister1" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>

<input type="text">
<input type="password">
"""

soup = BeautifulSoup(html, 'html.parser')
# The lxml parser requires installing the third-party lxml package;
# soup = BeautifulSoup(html, 'lxml')

# 1. Return the first tag that matches the condition
print(soup.title)
print(soup.p)
print(soup.find('p', class_=re.compile(r'^ti.*?')))

# 2. Return all tags that match the condition
print(soup.find_all('p'))
print(soup.find_all('p', class_='title', text=re.compile(r'.*?story.*?')))

# 3. Get tags matching any of several names (<title> or <a> here)
print(soup.find(['title', 'a']))
print(soup.find_all(['title', 'a']))
print(soup.find_all(['title', 'a'], class_=['title', 'sister']))


# 4. CSS selectors
# tag selector
print(soup.select("title"))
# class selector (.classname)
print(soup.select(".sister"))
# id selector (#idname)
print(soup.select("#link1"))
# regular expressions are not supported here;
# print(soup.select(re.compile(r"#link\d+")))
# attribute selector
print(soup.select("input[type='password']"))

Official Chinese documentation: https://beautifulsoup.readthedocs.io/zh_CN/v4.4.0/

The table below lists the main parsers, how to invoke them, and their trade-offs:

  • Python standard library
    Usage: BeautifulSoup(markup, "html.parser")
    Advantages: built into Python; moderate speed; lenient with malformed documents
    Disadvantages: poor leniency in versions before Python 2.7.3 / 3.2.2

  • lxml HTML parser
    Usage: BeautifulSoup(markup, "lxml")
    Advantages: fast; lenient with malformed documents
    Disadvantages: requires installing the lxml C library

  • lxml XML parser
    Usage: BeautifulSoup(markup, ["lxml-xml"]) or BeautifulSoup(markup, "xml")
    Advantages: fast; the only parser that supports XML
    Disadvantages: requires installing the lxml C library

  • html5lib
    Usage: BeautifulSoup(markup, "html5lib")
    Advantages: best leniency; parses documents the way a browser does; produces valid HTML5
    Disadvantages: very slow; depends on an external Python package
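
Since lxml may or may not be installed, one reasonable pattern (a sketch, not from the original post) is to prefer it and fall back to the standard-library parser:

from bs4 import BeautifulSoup

def make_soup(markup):
    # Prefer the fast lxml parser; fall back to the built-in html.parser.
    try:
        import lxml  # noqa: F401 -- only checking that the package is available
        return BeautifulSoup(markup, 'lxml')
    except ImportError:
        return BeautifulSoup(markup, 'html.parser')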

Scraping a personal blog: stripping navigation and ads

import requests
from bs4 import BeautifulSoup

def get_content(url):
    try:
        user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.109 Safari/537.36"
        response = requests.get(url, headers={'User-Agent': user_agent})
        response.raise_for_status()   # raise an exception for 4xx/5xx status codes
        response.encoding = response.apparent_encoding  # detect the page encoding so response.text decodes correctly
    except Exception as e:
        print("Fetch failed:", e)
    else:
        print(response.url)
        print("Fetch succeeded!")
        return response.content

def parser_content(htmlContent):
    # Instantiate a soup object for easier processing;
    soup = BeautifulSoup(htmlContent, 'html.parser')
    # Keep the page's <head> so the saved file retains its charset declaration
    # and title (avoids mojibake when the file is opened later)
    headObj = soup.head

    # Extract only the content we want: the blog post body;
    divObj = soup.find_all('div', class_="blog-content-box")[0]

    # scriptObj = soup.script
    with open('doc/csdn.html', 'w', encoding='utf-8') as f:
        # Write the head (charset declaration and title)
        f.write(str(headObj))
        # Write the blog post body;
        f.write(str(divObj))
        print("Download complete......")

        # f.write(str(scriptObj))
if __name__ == '__main__':
    url = "https://blog.csdn.net/King15229085063/article/details/87380182"
    content = get_content(url)
    if content:    # get_content returns None when the fetch fails
        parser_content(content)

Building an index of the personal blog

import requests
from bs4 import BeautifulSoup
import re


def get_content(url):
    try:
        user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.109 Safari/537.36"
        response = requests.get(url, headers={'User-Agent': user_agent})
        response.raise_for_status()   # raise an exception for 4xx/5xx status codes
        response.encoding = response.apparent_encoding  # detect the page encoding so response.text decodes correctly
    except Exception as e:
        print("Fetch failed:", e)
    else:
        print(response.url)
        print("Fetch succeeded!")
        return response.content


def parser_content(htmlContent):

    # Instantiate a soup object for easier processing;
    soup = BeautifulSoup(htmlContent, 'html.parser')

    # 1). Get each post's container: a div whose class is
    #     "article-item-box csdn-tracking-statistics"
    #  <div class="article-item-box csdn-tracking-statistics" data-articleid="85718923">
    divObjs = soup.find_all('div', class_="article-item-box")

    # 2). Walk every div and pull out the post title
    #     The title lives in the <a> inside the <h4>
    # Skip the first div, which holds the default ad rather than a real post;
    for divObj in divObjs[1:]:
        # 2-1. Post title: drop the leading "original/reposted" label, keep only the name;
        title = divObj.h4.a.get_text().split()[1]
        # 2-2. Post link: the href of that same <a> tag;
        blogUrl = divObj.h4.a.get('href')
        global bloginfo
        # Append everything scraped to the global list [(blogtitle, blogurl)]
        bloginfo.append((title, blogUrl))


if __name__ == '__main__':
    blogPage = 3
    # Global variable holding the info for every post;
    bloginfo = []
    for page in range(1, blogPage+1):
        url = "https://blog.csdn.net/King15229085063/article/list/%s" %(page)
        content = get_content(url)
        if content:    # get_content returns None when the fetch fails
            parser_content(content)
        print("Page %d done...." %(page))


    with open('doc/myblog.md', 'a') as f:
        for index, info in enumerate(bloginfo[::-1]):
            f.write('- Post %d: [%s](%s)\n' %(index+1, info[0], info[1]))
    print("Done.....")

Scraping proxy IPs from Xici (xicidaili.com)

import requests
from bs4 import BeautifulSoup

def get_content(url):
    try:
        user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.109 Safari/537.36"
        response = requests.get(url, headers={'User-Agent': user_agent})
        response.raise_for_status()   # raise an exception for 4xx/5xx status codes
        response.encoding = response.apparent_encoding  # detect the page encoding so response.text decodes correctly
    except Exception as e:
        print("%s fetch failed: %s" %(url, e))
    else:
        print(response.url)
        print("%s fetched successfully!" %(url))
        return response.content


def crawl_ips(page):
    headers = {"user-agent": "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:15.0) Gecko/20100101 Firefox/15.0.1"}
    ip_list = []
    for i in range(1, page+1):
        response = requests.get("http://www.xicidaili.com/nn/{0}".format(i), headers=headers)
        # Instantiate a soup object for easier processing;
        soup = BeautifulSoup(response.content, 'html.parser')
        # The original post stops here; the lines below are one way to finish it,
        # assuming the site's layout at the time: one proxy per <tr>, with the IP
        # in the 2nd <td> and the port in the 3rd.
        for tr in soup.find_all('tr')[1:]:
            tds = tr.find_all('td')
            if len(tds) >= 3:
                ip_list.append((tds[1].get_text(), tds[2].get_text()))
    return ip_list
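
A hedged usage sketch (not part of the original script): verify that a scraped proxy actually works by routing a request through it, using httpbin.org purely as a test endpoint.

if __name__ == '__main__':
    for ip, port in crawl_ips(1):
        proxies = {'http': 'http://%s:%s' % (ip, port)}
        try:
            # A short timeout keeps dead proxies from stalling the loop.
            requests.get('http://httpbin.org/ip', proxies=proxies, timeout=5)
            print("usable proxy: %s:%s" % (ip, port))
        except Exception:
            pass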

Scraping Douban Top 250 movie data

import re

import requests
from bs4 import BeautifulSoup

def get_content(url):
    try:
        user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.109 Safari/537.36"
        response = requests.get(url, headers={'User-Agent': user_agent})
        response.raise_for_status()   # raise an exception for 4xx/5xx status codes
        response.encoding = response.apparent_encoding  # detect the page encoding so response.text decodes correctly
    except Exception as e:
        print("Fetch failed:", e)
    else:
        print(response.url)
        print("Fetch succeeded!")
        return response.content

def parser_content(htmlContent):
    # Instantiate a soup object for easier processing;
    soup = BeautifulSoup(htmlContent, 'html.parser')
    #  1). Movie entries are stored in <li> tags inside the <ol>:
    #  <ol class="grid_view">
    olObj = soup.find_all('ol', class_='grid_view')[0]

    #  2). Each movie's details sit in one <li> tag;
    details = olObj.find_all('li')


    for detail in details:
        # 3). Movie title;
        movieName = detail.find('span', class_='title').get_text()

        # 4). Movie rating:
        movieScore = detail.find('span', class_='rating_num').get_text()

        # 5). Number of ratings ***************
        # find(text=...) returns a NavigableString, so convert it to a plain str
        movieCommentNum = str(detail.find(text=re.compile(r'\d+人评价')))


        # 6). Short review blurb
        movieCommentObj = detail.find('span', class_='inq')
        if movieCommentObj:
            movieComment = movieCommentObj.get_text()
        else:
            movieComment = "no blurb"

        movieInfo.append((movieName, movieScore, movieCommentNum, movieComment))




import openpyxl


def create_to_excel(wbname, data, sheetname='Sheet1'):
    """
    Save the given data to a newly created Excel workbook;

    :param wbname: workbook file name
    :param data: the rows of data to store in the workbook;
    :param sheetname: worksheet title
    :return:
    """

    print("Creating workbook %s......" % (wbname))

    # wb = openpyxl.load_workbook(wbname)
    # If the file does not exist, instantiate a new Workbook object;
    wb = openpyxl.Workbook()
    # Get the active worksheet
    sheet = wb.active
    # Rename the worksheet
    sheet.title = sheetname
    # Write the data into the worksheet;
    print("Writing data........")
    for row, item in enumerate(data):  # each item is one row: (name, score, ratings count, blurb)
        print(item)
        for column, cellValue in enumerate(item):

            # cell = sheet.cell(row=row + 1, column=column + 1, value=cellValue)
            cell = sheet.cell(row=row+1, column=column + 1)
            cell.value = cellValue

    wb.save(wbname)
    print("Workbook %s saved......." % (wbname))


if __name__ == '__main__':
    doubanTopPage = 2
    perPage = 25
    # [(), (), ()]
    movieInfo = []
    for page in range(1, doubanTopPage+1):
        # start = (current page - 1) * movies per page (25)
        url = "https://movie.douban.com/top250?start=%s" %((page-1)*perPage)
        content = get_content(url)
        if content:    # get_content returns None when the fetch fails
            parser_content(content)


    create_to_excel('/tmp/hello.xlsx', movieInfo, sheetname="Douban movies")
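
As a quick sanity check (a sketch, assuming the workbook was written to /tmp/hello.xlsx as above), the file can be loaded back with openpyxl and its rows printed:

import openpyxl

# Reopen the saved workbook and print each row to verify the export.
wb = openpyxl.load_workbook('/tmp/hello.xlsx')
sheet = wb.active
for row in sheet.iter_rows(values_only=True):
    print(row)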

Reposted from blog.csdn.net/qq_43273590/article/details/87601006