Python web scraper: scraping the ZenTao BUG list


import requests
from bs4 import BeautifulSoup

url = "http://192.168.1.55:81/zentao/bug-browse-4--byModule-28.html"
headers = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
  'Accept-Encoding': 'gzip, deflate',
  'Accept-Language': 'zh-CN,zh;q=0.9',
  'Cache-Control': 'max-age=0',
  'Connection': 'keep-alive',
  # Cookie copied from a logged-in browser session; the zentaosid session ID expires, so refresh it before running
  'Cookie': 'lang=zh-cn; device=desktop; theme=default; preBranch=0; keepLogin=on; za=huangpeng; lastProject=12; lastBuild=25; zp=7426e0294ba92658a77807f8286d9c5bc3db31d3; qaBugOrder=id_desc; pagerBugBrowse=500; ajax_dragSelected=on; preProductID=13; lastProduct=13; bugModule=36; windowHeight=937; windowWidth=1013; zentaosid=jv6ih00rfe2mtoh0cro3mc15p6',
  'Host': '192.168.1.55:81',
  'Referer': 'http://192.168.1.55:81/zentao/bug-browse-13--byModule-36.html',
  'Upgrade-Insecure-Requests': '1',
  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
}
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.text, 'html.parser')  # start parsing
links_th = soup.find_all('tr')  # all <tr> rows as a list
listdic = []
for i in links_th[1:]:  # skip the table header row
    text = i.find_all("td")  # all <td> cells under this row
    dic = {
        "id": int(text[0].find("a").get_text()),  # bug ID parsed from the link text
        'pri': text[1].get_text(),
        'title': text[2].get_text(),
        "severity": text[3].get_text(),
        'type': text[4].get_text(),
        'status': text[5].get_text(),
        'activatedDate': text[6].get_text(),
        'openedBy': text[7].get_text(),
        'openedDate': text[8].get_text(),
        'primary': text[9].get_text(),
        'lastEditedDate': text[11].get_text()
    }
    listdic.append(dic)
    print(dic)
print(len(listdic))  # total number of bugs scraped (printed once, after the loop)
#input("..........................")



Parsing HTML with Python

  1. Extract only the <a> tags, i.e. all hyperlinks
from bs4 import BeautifulSoup

soup = BeautifulSoup(open('1500.html', mode='r'), 'html.parser')  # start parsing
# find_all('a') returns every <a> tag as a list; print each node's name, href and text
links = soup.find_all('a')
list_url = []
for i in links:
    if "/2020/01/17" not in i['href']:
        print(i.name, i['href'], i.get_text())
        list_url.append(i['href'])
print(len(list_url))
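Note that i['href'] raises a KeyError on an <a> tag that has no href attribute. A slightly more defensive sketch of the same extraction (the 1500.html input and utf-8 encoding are assumptions) uses a CSS selector that only matches anchors carrying href:

from bs4 import BeautifulSoup

with open('1500.html', mode='r', encoding='utf-8') as f:
    soup = BeautifulSoup(f, 'html.parser')

# 'a[href]' matches only anchors that actually have an href attribute
list_url = [a['href'] for a in soup.select('a[href]')
            if '/2020/01/17' not in a['href']]
print(len(list_url))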

  2. Extract the ID, value, time, and link into a dict
from bs4 import BeautifulSoup

soup = BeautifulSoup(open('1500.html', mode='r'), 'html.parser')  # start parsing

links_th = soup.find_all('tr')
listdic = []

for i in links_th[2:]:
    # keep only rows whose link points at 2020/01/15 or 2020/01/16
    if '2020/01/15' in i.find("a")['href'] or '2020/01/16' in i.find("a")['href']:
        list_td = i.find_all('td')
        dic = {
            "ID": int(list_td[0].get_text().replace('\n', '').replace('\t', '')),
            "url": i.find("a")['href'],
            "val": list_td[2].get_text().replace('\n', '').replace('\t', ''),
            "time": list_td[3].get_text().replace('\n', '').replace('\t', ''),
        }
        print(dic)
        listdic.append(dic)
print(len(listdic))
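The two hard-coded date strings generalize naturally to a range check. Below is a minimal sketch, assuming the links embed dates in the yyyy/mm/dd form seen in the filter above:

import re
from datetime import date

DATE_RE = re.compile(r'(\d{4})/(\d{2})/(\d{2})')  # yyyy/mm/dd embedded in the href

def href_in_range(href, start, end):
    """Return True if the first yyyy/mm/dd date found in href lies in [start, end]."""
    m = DATE_RE.search(href)
    if not m:
        return False
    return start <= date(*map(int, m.groups())) <= end

# equivalent to the '2020/01/15' or '2020/01/16' check above
print(href_in_range('/2020/01/16/post.html', date(2020, 1, 15), date(2020, 1, 16)))  # True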




Reposted from blog.csdn.net/qq_42846555/article/details/104040908