Crawl job-listing data from 51job.com into a pandas DataFrame and export it to a CSV file.

import requests
from lxml import etree
from pandas import DataFrame

# Crawl "product manager" (产品经理) job listings from 51job.com and export
# the results (post, company, place, salary) to a GBK-encoded CSV file.
url = 'https://search.51job.com/list/120800,000000,0000,32,9,99,%25E4%25BA%25A7%25E5%2593%2581%25E7%25BB%258F%25E7%2590%2586,2,1.html'
res = requests.get(url)
res.encoding = 'gbk'  # 51job serves GBK-encoded pages; set before reading .text
print(res)
# Build an lxml element tree so we can query the page with XPath
root = etree.HTML(res.text)
print(root)
# Extract the fields of interest via XPath.
# NOTE(review): XPath expressions assume the 2019-era 51job markup — verify
# against the live page, as the site layout may have changed since.
position = root.xpath('//p[@class="t1 "]/span/a/@title')
extract = root.xpath('//p[@class="t1 "]/span/a/text()')
extract = [text.strip() for text in extract]  # trim surrounding whitespace
company = root.xpath('//span[@class="t2"]/a/@title')
place = root.xpath('//div[@class="el"]/span[@class="t3"]/text()')
salary = root.xpath('//div[@class="el"]/span[@class="t4"]/text()')
# Each xpath result is one column; transpose so each row is one job posting.
jobinfo = DataFrame([position, company, place, salary]).T
jobinfo.columns = ['post', 'company', 'place', 'salary']
# BUG FIX: the original was missing the comma between the filename and the
# encoding keyword argument, which is a SyntaxError.
jobinfo.to_csv('51jbob.csv', encoding='GBK')

You may also like

Origin www.cnblogs.com/tiankong-blue/p/11566034.html