爬虫实战:爬取前程无忧(51job)python相关职位信息

import requests
from bs4 import BeautifulSoup
import re
import time

# NOTE(review): setting DEFAULT_RETRIES alone does NOT make requests retry —
# retries only apply through a mounted HTTPAdapter(max_retries=...).
# Kept for backward compatibility; a Session with an HTTPAdapter would be the real fix.
requests.adapters.DEFAULT_RETRIES = 3
# First results page for "python" jobs in Guangdong (area code 030000) on 51job.
start_url = 'https://search.51job.com/list/030000%252C00,000000,0000,00,9,99,python,2,1.html'
ua = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36'}
resp = requests.get(start_url, headers=ua, timeout=10)
# 51job serves GBK; 'gbk' is a superset of gb2312 and decodes rare characters correctly.
resp.encoding = 'gbk'
# Extract the total page count ("共N页") to bound the crawl loop.
matches = re.findall(r'共(\d+)页', resp.text)
if not matches:
    raise RuntimeError('无法解析总页数,页面结构可能已变化')
pages = int(matches[0])
info_list = []  # accumulates one tab-separated line per job posting

def get_info(url):
    """Fetch one 51job search-result page and append each job row to info_list.

    Each row is stored as the five result cells (title, company, location,
    salary, date) joined by tabs. Rows missing any cell — or a page without
    a result list (blocked / layout change) — are skipped instead of crashing.

    Args:
        url: URL of one search-result page.
    """
    resp = requests.get(url, headers=ua, timeout=10)
    # 'gbk' is a superset of gb2312; avoids mojibake on rare characters.
    resp.encoding = 'gbk'
    soup = BeautifulSoup(resp.text, 'lxml')
    result_list = soup.find(id="resultList")
    if result_list is None:
        # Page layout changed or request was blocked; skip rather than crash.
        return
    for row in result_list.find_all(class_='el'):
        cells = [row.find(class_=c) for c in ('t1', 't2', 't3', 't4', 't5')]
        if any(cell is None for cell in cells):
            continue  # incomplete row (e.g. an ad or layout variant)
        info_list.append('\t'.join(cell.text.strip() for cell in cells))

if __name__=='__main__':
    # Crawl every result page, throttling slightly between requests.
    page_no = 1
    while page_no <= pages:
        url = 'https://search.51job.com/list/030000%252C00,000000,0000,00,9,99,python,2,'+str(page_no)+'.html'
        print('\r当前进度:{0}/{1}'.format(page_no,pages),end = '')
        get_info(url)
        time.sleep(0.2)  # be polite to the server
        page_no += 1
    # Dump all collected rows, one tab-separated job per line.
    with open('F:/前程无忧广东省python.txt','w',encoding='utf8') as f:
        f.write('\n'.join(info_list))
    print('爬取结束!')
    
        



猜你喜欢

转载自blog.csdn.net/qq_36936510/article/details/88842111