Web Scraping Basics - 2

bs4, re, and lxml

1: Features and usage of bs4 (success)

from bs4 import BeautifulSoup
import requests

r = requests.get('https://python123.io/ws/demo.html')
demo = r.text

soup = BeautifulSoup(demo, 'html.parser')
print(soup.prettify())  # print the parsed HTML with indentation showing the tree structure

tag = soup.a                # the first <a> tag in the document
print(tag.attrs)            # all attributes of the tag, as a dict
print(tag.attrs['class'])   # the value of the class attribute
print(type(tag.attrs))      # <class 'dict'>
print(soup.a.prettify())

newsoup = BeautifulSoup('<a>I understand how to use bs4</a>', 'html.parser')
print(newsoup.prettify())
print(soup.contents)        # the top-level children of the parsed document
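
Beyond attribute access, most real scraping with bs4 goes through find_all(). A minimal sketch against the same demo page; the "course" class and the link structure are taken from that page, so treat them as assumptions if it changes:

from bs4 import BeautifulSoup
import requests

demo = requests.get('https://python123.io/ws/demo.html').text
soup = BeautifulSoup(demo, 'html.parser')

# find_all returns every matching tag; here, all links in the page
for link in soup.find_all('a'):
    print(link.get('href'), link.string)

# find_all also accepts attribute filters, e.g. every <p> with class "course"
for p in soup.find_all('p', class_='course'):
    print(p.get_text(strip=True)[:60])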

2: Scraping Taobao with re (failure)

import requests
import re

def getHTMLText(url):
    try:
        # the dict key must be exactly 'cookie'; the value is your own cookie
        # string, copied from a logged-in Taobao session in the browser
        kv = {
            'cookie': 'miid=203611311525240540; cna=p4jIFMhUXRgCAdOPVVzSELT4; thw=cn; t=51ba7f4725b4845f6918a391269ddf56; cookie2=11c628615d2c0c1650fcfcd041acdc16; _tb_token_=f50a338eee6e7; hng=CN%7Czh-CN%7CCNY%7C156; _samesite_flag_=true; sgcookie=Ezqi43cYHHBojs4uKPBzl; unb=3412336767; uc3=nk2=F5RBzeTls8w6hyg%3D&lg2=VFC%2FuZ9ayeYq2g%3D%3D&vt3=F8dBxGR1TQo7h3gRbQI%3D&id2=UNQ2ksN395ZfQQ%3D%3D; csg=057ac389; lgc=tb481462048; cookie17=UNQ2ksN395ZfQQ%3D%3D; dnk=tb481462048; skt=c05da993d590c9ef; existShop=MTU4NzY1MjEyNA%3D%3D; uc4=nk4=0%40FY4KqBBaipV5At3qkX8xWKmKndSNGg%3D%3D&id4=0%40UgP9qSTPgyDsoX4fInVcWnXzo6Jx; tracknick=tb481462048; _cc_=UIHiLt3xSw%3D%3D; _l_g_=Ug%3D%3D; sg=87a; _nk_=tb481462048; cookie1=VAXdfn%2BDZxlz9LU374OBNTFHBPhokK%2BA8sZt425gt%2F0%3D; tfstk=cfWdBdYq1P4HULsegTFMFqIz9ijRakmprDT-eughp8MiGlk-FsvCntdpsWtrJdEO.; mt=ci=0_1; v=0; uc1=cookie16=W5iHLLyFPlMGbLDwA%2BdvAGZqLg%3D%3D&cookie14=UoTUPcqZfD%2BAoA%3D%3D&cookie21=W5iHLLyFeYZ1WM9hVnmS&cookie15=UtASsssmOIJ0bQ%3D%3D&existShop=false&pas=0; l=eBTR0uqHQ2UjGEHDBOfwPurza77OSIRAguPzaNbMiT5PO2fp5l2RWZjjZw89C3GVh6WvR3oGfmN0BeYBqIX7bM9nIosM_Ckmn; isg=BIKCcDBxaPzO9HQgWf5FTk2a04jkU4ZtbtV0ocybovWgHyKZtOG0fb5dzxtjT_4F',
            'user-agent': 'Mozilla/5.0'
        }
        r = requests.get(url, timeout = 30, headers = kv)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        return "crawl failed"

def parsePage(glist, html):
    try:
        # the search page embeds the goods data as JSON in the raw HTML, so we
        # match the "view_price" and "raw_title" fields directly
        price_list = re.findall(r'\"view_price\"\:\"[\d\.]*\"', html)
        name_list = re.findall(r'\"raw_title\"\:\".*?\"', html)
        for i in range(len(price_list)):
            price = eval(price_list[i].split(":")[1])  # eval strips the surrounding quotes
            name = eval(name_list[i].split(":")[1])
            glist.append([price, name])
    except:
        print("parse failed")

def printGoodList(glist):
    tplt = "{0:^4}\t{1:^6}\t{2:^10}"
    print(tplt.format("No.", "Price", "Product name"))
    count = 0
    for g in glist:
        count = count + 1
        print(tplt.format(count, g[0], g[1]))

goods_name = "书包"  # search keyword ("backpack")
start_url = "https://s.taobao.com/search?q=" + goods_name
info_list = []
page = 3

count = 0
for i in range(page):
    count += 1
    try:
        url = start_url + "&s=" + str(44 * i)  # each results page holds 44 items, so s advances by 44
        html = getHTMLText(url)
        parsePage(info_list, html)
        print("\r爬取页面当前进度: {:.2f}%".format(count * 100 / page), end = "")
    except:
        continue

printGoodList(info_list)

Reason for failure: the cookie header was filled in incorrectly. The headers dict must map the key 'cookie' to a fresh cookie string copied from your own logged-in browser session; without a valid login cookie, Taobao's search returns a login page instead of results, and the regexes match nothing.
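
One way to see whether the cookie actually worked is to inspect the fetched page before parsing: a real search response embeds the goods JSON, while a login redirect does not. A minimal sketch reusing getHTMLText above (checking for the "view_price" field is an assumption about Taobao's page layout and may change):

html = getHTMLText("https://s.taobao.com/search?q=书包")
# a logged-in search result embeds the goods JSON; a login page does not
if '"view_price"' in html:
    print("cookie accepted, goods data present")
else:
    print("no goods data; the cookie was probably rejected (login page returned)")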

3: Scraping the DXY (丁香园) forum with lxml (failure)

from lxml import etree
import requests

url = "http://www.dxy.cn/bbs/thread/626626#626626"

req = requests.get(url)
html = req.text

tree = etree.HTML(html)
print(tree)

# the "auth" div holds the poster's username; "postbody" holds the reply text
# (these class names come from the forum page's markup at the time of writing)
user = tree.xpath('//div[@class="auth"]/a/text()')
content = tree.xpath('//td[@class="postbody"]')

results = []
for i in range(0, len(user)):
    # string(.) concatenates all the text nodes inside the post body element
    results.append(user[i].strip() + ":  " + content[i].xpath('string(.)').strip())

for i, result in enumerate(results):
    print("user" + str(i + 1) + "-" + result)
    print("*" * 100)

Reason for failure:
Is the xpath method being used incorrectly here? Just recording it for now, haha.
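
To rule out a syntax problem, the same xpath patterns can be checked against a tiny inline document first; if this runs cleanly, the failure lies in the page itself (login wall, changed markup) rather than in the xpath. A minimal sketch, where the class names simply mirror the ones assumed above:

from lxml import etree

# a tiny stand-in for one forum post, using the same class names as above
snippet = '''
<table><tr>
  <td><div class="auth"><a>some_user</a></div></td>
  <td class="postbody"> I agree with the idea above </td>
</tr></table>
'''
tree = etree.HTML(snippet)
user = tree.xpath('//div[@class="auth"]/a/text()')   # -> ['some_user']
body = tree.xpath('//td[@class="postbody"]')[0]
print(user[0] + ":  " + body.xpath('string(.)').strip())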


Reposted from blog.csdn.net/DZZ18803835618/article/details/105720076