Python Crawler Exercise: Stock Data

Python version: 3.7 (third-party packages used: requests, beautifulsoup4)

import requests
import re
from bs4 import BeautifulSoup
import traceback

def getHTMLText(url):
	# fetch a page and return its text; return "" on any failure (timeout, bad status, etc.)
	try:
		r = requests.get(url, timeout = 30)
		r.raise_for_status()
		r.encoding = r.apparent_encoding
		return r.text
	except:
		return ""

def getStockList(lst, stockURL):
	# collect stock codes such as sh600000 / sz000001 from the links on the listing page
	html = getHTMLText(stockURL)
	soup = BeautifulSoup(html, 'html.parser')
	a = soup.find_all('a')
	for i in a:
		try:
			href = i.attrs['href']
			lst.append(re.findall(r'[s][hz]\d{6}', href)[0])
		except:
			continue

def getStockInfo(lst, stockURL, fpath):
	# crawl each stock's detail page and append its fields to the file at fpath
	for stock in lst:
		url = stockURL + stock + '.html'
		html = getHTMLText(url)
		try:
			if html == "":
				continue
			infoDict = {}
			soup = BeautifulSoup(html, 'html.parser')
			stockInfo = soup.find('div', attrs = {'class':'stock-bets'})
			name = stockInfo.find_all(attrs = {'class':'bets-name'})[0]
			infoDict.update({'股票名称':name.text.split()[0]})
			# the <dt>/<dd> pairs on the page hold the field names and their values
			keyList = stockInfo.find_all('dt')
			valueList = stockInfo.find_all('dd')
			for i in range(len(keyList)):
				key = keyList[i].text
				val = valueList[i].text
				infoDict[key] = val

			with open(fpath, 'a', encoding = 'utf-8') as f:
				f.write(str(infoDict) + '\n')

		except:
			traceback.print_exc()
			continue

def main():
	stock_list_url = 'http://quote.eastmoney.com/stocklist.html'   # page listing all the stock codes
	stock_info_url = 'https://gupiao.baidu.com/stock/'             # base URL of the per-stock detail pages
	output_file = 'D://BaiduStockInfo.txt'
	slist = []
	getStockList(slist, stock_list_url)
	getStockInfo(slist, stock_info_url, output_file)
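
The listing above defines main() but never calls it. Assuming the code is saved as a single script, the crawl can be started by adding a standard entry-point guard (not shown in the original post) at the end:

if __name__ == '__main__':
	main()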

On this basis, two optimizations can be made: 1. set the page encoding directly instead of detecting it; 2. display a progress indicator while crawling.

To that end, the code is modified as follows:

def getHTMLText(url, code = 'utf-8'):
	# the encoding is now passed in, so apparent_encoding no longer has to be detected for every request
	try:
		r = requests.get(url, timeout = 30)
		r.raise_for_status()
		r.encoding = code
		return r.text
	except:
		return ""

def getStockList(lst, stockURL):
	# the listing page uses GB2312, so pass the encoding explicitly
	html = getHTMLText(stockURL, 'GB2312')
	soup = BeautifulSoup(html, 'html.parser')
	a = soup.find_all('a')
	for i in a:
		try:
			href = i.attrs['href']
			lst.append(re.findall(r'[s][hz]\d{6}', href)[0])
		except:
			continue

def getStockInfo(lst, stockURL, fpath):
	count = 0   # number of stocks processed so far, used for the progress display
	for stock in lst:
		url = stockURL + stock + '.html'
		html = getHTMLText(url)
		try:
			if html == "":
				continue
			infoDict = {}
			soup = BeautifulSoup(html, 'html.parser')
			stockInfo = soup.find('div', attrs = {'class':'stock-bets'})
			name = stockInfo.find_all(attrs = {'class':'bets-name'})[0]
			infoDict.update({'股票名称':name.text.split()[0]})
			keyList = stockInfo.find_all('dt')
			valueList = stockInfo.find_all('dd')
			for i in range(len(keyList)):
				key = keyList[i].text
				val = valueList[i].text
				infoDict[key] = val

			with open(fpath, 'a', encoding = 'utf-8') as f:
				f.write(str(infoDict) + '\n')
			count = count + 1
			# '\r' returns to the start of the line, so the progress ('当前进度' = current progress) overwrites itself
			print('\r当前进度:{:.2f}%'.format(count*100/len(lst)), end = '')
		except:
			traceback.print_exc()
			count = count + 1
			print('\r当前进度:{:.2f}%'.format(count*100/len(lst)), end = '')
			continue
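
Since each record is written to the output file as the str() of a dictionary, the results can be loaded back for further processing with ast.literal_eval. Below is a minimal sketch; the helper name loadStockInfo is illustrative and not part of the original post:

import ast

def loadStockInfo(fpath):
	# each line in the output file is the repr of a dict, e.g. {'股票名称': '...', ...}
	records = []
	with open(fpath, 'r', encoding = 'utf-8') as f:
		for line in f:
			line = line.strip()
			if line:
				records.append(ast.literal_eval(line))
	return records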

Reposted from blog.csdn.net/LightInDarkness/article/details/81153088