These notes are from the Python course series on the Chinese University MOOC platform, taught by teacher Song Tian of the Beijing Institute of Technology.
Reprinted from: http://www.jianshu.com/p/98d0139dacac
7. Getting started with the Re (regular expression) library
regular expression = regex = RE
is a general string expression framework, which is used to succinctly express the expression of a set of strings, and can also be used to determine the feature attribution of a string
- Regular Expression Syntax
Basic usage of Re library
- The representation type of the regular expression is the raw string type (the native string type) , which is represented as r'text'
The main functions of the Re library
re.search(pattern,string,flags=0)
re.match(pattern, string, flags=0)
Because match starts from the starting position, add if to judge whether the returned result is empty, otherwise an error will be reported
re.findall(pattern,string,flags=0)
re.split(pattern, string, maxsplit=0, flags=0)
maxsplit is the maximum number of divisions, and the rest is output as the last element
re.finditer(pattern,string,flags=0)
re.sub(pattern,repl,string,count=0,flags=0)
repl is the string used to replace, count is the number of replacements
Another equivalent usage of the Re library: besides the one-off functional usage, there is an object-oriented usage — compile the pattern once with regex = re.compile(pattern, flags=0), and the resulting regex object can then be reused for multiple matching operations.
The match object of the Re library
- Greedy matching and minimal matching in the Re library
The Re library uses greedy matching by default, i.e. it outputs the longest substring that matches.
8. Example 2: Taobao commodity price comparison directional crawler (requests-re)
Step 1: Submit a product search request and obtain pages in a loop
Step 2: For each page, extract the product name and price information
Step 3: Display the information output
import requests
import re
def getHTMLText(url):
    """Fetch *url* and return its decoded text, or "" on any request failure.

    Uses apparent_encoding so pages with a missing or wrong charset
    header still decode readably.
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()  # turn HTTP 4xx/5xx into an exception
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Best-effort fetch: callers treat "" as "page unavailable".
        # (Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        return ""
def parsePage(ilt, html):
    """Extract (price, title) pairs from a Taobao search-result page.

    The page embeds product data as JSON-ish fragments such as
    "view_price":"128.00" and "raw_title":"...". Matching pairs are
    appended to *ilt* as [price, title] (both plain strings).
    """
    try:
        plt = re.findall(r'"view_price":"[\d.]*"', html)
        tlt = re.findall(r'"raw_title":".*?"', html)
        # zip() pairs prices with titles and safely truncates if the
        # counts differ (the original indexed both lists by range(len)).
        for p, t in zip(plt, tlt):
            # split(':', 1) keeps titles that themselves contain ':'
            # intact; strip('"') replaces the original eval(), which
            # executed scraped text and was both fragile and unsafe.
            price = p.split(':', 1)[1].strip('"')
            title = t.split(':', 1)[1].strip('"')
            ilt.append([price, title])
    except Exception:
        # Preserve the original best-effort contract: a malformed page
        # yields no entries rather than aborting the crawl.
        print("")
def printGoodsList(ilt):
    """Print the collected [price, title] records as a numbered table."""
    tplt = "{:4}\t{:8}\t{:16}"
    # Header row: serial number / price / product name.
    print(tplt.format("序号", "价格", "商品名称"))
    for seq, goods in enumerate(ilt, start=1):
        print(tplt.format(seq, goods[0], goods[1]))
def main():
    """Crawl `depth` Taobao result pages for `goods` and print a price table."""
    goods = '书包'
    depth = 3
    start_url = 'https://s.taobao.com/search?q=' + goods
    infoList = []
    for i in range(depth):
        try:
            # Taobao paginates via the 's' offset parameter, 44 items/page.
            url = start_url + '&s=' + str(44 * i)
            html = getHTMLText(url)
            parsePage(infoList, html)
        except Exception:
            # Skip a broken page rather than abort the whole crawl.
            continue
    printGoodsList(infoList)


# Guarded entry point: the original called main() unconditionally,
# which ran the crawler as a side effect of importing this module.
if __name__ == "__main__":
    main()
9. Example 3: stock data directional crawler (requests-bs4-re)
Step 1: Get the stock list from Dongfang Fortune.com
Step 2: Get individual stock information from Baidu stock one by one according to the stock list
Step 3: Store the result to a file
#CrawBaiduStocksB.py
import requests
from bs4 import BeautifulSoup
import traceback
import re
def getHTMLText(url, code="utf-8"):
    """Fetch *url* and return its text decoded with *code*, or "" on failure.

    Passing the known charset via *code* avoids the cost of
    apparent_encoding detection on every page.
    """
    try:
        r = requests.get(url)
        r.raise_for_status()  # HTTP error status -> exception
        r.encoding = code
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:`; callers treat "" as
        # "page unavailable".
        return ""
def getStockList(lst, stockURL):
    """Append every stock code (sh/sz + 6 digits) linked from *stockURL* to *lst*."""
    # The Eastmoney listing page is GB2312-encoded.
    page = getHTMLText(stockURL, "GB2312")
    soup = BeautifulSoup(page, 'html.parser')
    for anchor in soup.find_all('a'):
        try:
            target = anchor.attrs['href']
            code = re.findall(r"[s][hz]\d{6}", target)[0]
            lst.append(code)
        except:
            # Anchors without an href, or whose href holds no stock
            # code, are simply skipped.
            continue
def getStockInfo(lst, stockURL, fpath):
    """Fetch each stock's detail page and append its parsed fields to *fpath*.

    One line per stock is written as the str() of a dict; progress is
    reported on a single console line after every stock, whether it
    succeeded, failed, or was skipped.
    """
    total = len(lst)  # hoisted: invariant across the loop
    count = 0
    for stock in lst:
        url = stockURL + stock + ".html"
        html = getHTMLText(url)
        try:
            if html == "":
                continue
            infoDict = {}
            soup = BeautifulSoup(html, 'html.parser')
            stockInfo = soup.find('div', attrs={'class': 'stock-bets'})
            name = stockInfo.find_all(attrs={'class': 'bets-name'})[0]
            infoDict.update({'股票名称': name.text.split()[0]})
            # <dt> tags hold field names, <dd> tags the matching values,
            # in document order.
            keyList = stockInfo.find_all('dt')
            valueList = stockInfo.find_all('dd')
            for key_tag, val_tag in zip(keyList, valueList):
                infoDict[key_tag.text] = val_tag.text
            with open(fpath, 'a', encoding='utf-8') as f:
                f.write(str(infoDict) + '\n')
        except Exception:
            # A malformed page is skipped; enable traceback.print_exc()
            # here when debugging parse failures.
            pass
        finally:
            # The original duplicated this progress code in both the try
            # and except branches, and never counted pages skipped via
            # `continue` — so the counter could not reach 100%. `finally`
            # runs on success, failure, and continue alike.
            count = count + 1
            print("\r当前进度: {:.2f}%".format(count * 100 / total), end="")
def main():
    """Crawl the stock list, then each stock's detail page, into a text file."""
    stock_list_url = 'http://quote.eastmoney.com/stocklist.html'
    stock_info_url = 'https://gupiao.baidu.com/stock/'
    output_file = 'D:/BaiduStockInfo.txt'
    slist = []
    getStockList(slist, stock_list_url)
    getStockInfo(slist, stock_info_url, output_file)


# Guarded entry point: the original called main() unconditionally,
# which started the crawl as a side effect of importing this module.
if __name__ == "__main__":
    main()
These notes are from the Python course series on the Chinese University MOOC platform, taught by teacher Song Tian of the Beijing Institute of Technology.
Reprinted from: http://www.jianshu.com/p/98d0139dacac
7. Getting started with the Re (regular expression) library
regular expression = regex = RE
is a general string expression framework, which is used to succinctly express the expression of a set of strings, and can also be used to determine the feature attribution of a string
- Regular Expression Syntax
Basic usage of Re library
- The representation type of the regular expression is the raw string type (the native string type) , which is represented as r'text'
The main functions of the Re library
re.search(pattern,string,flags=0)
re.match(pattern, string, flags=0)
Because match starts from the starting position, add if to judge whether the returned result is empty, otherwise an error will be reported
re.findall(pattern,string,flags=0)
re.split(pattern, string, maxsplit=0, flags=0)
maxsplit is the maximum number of divisions, and the rest is output as the last element
re.finditer(pattern,string,flags=0)
re.sub(pattern,repl,string,count=0,flags=0)
repl is the string used to replace, count is the number of replacements
Another equivalent usage of the Re library: besides the one-off functional usage, there is an object-oriented usage — compile the pattern once with regex = re.compile(pattern, flags=0), and the resulting regex object can then be reused for multiple matching operations.
The match object of the Re library
- Greedy matching and minimal matching in the Re library
The Re library uses greedy matching by default, i.e. it outputs the longest substring that matches.
8. Example 2: Taobao commodity price comparison directional crawler (requests-re)
Step 1: Submit a product search request and obtain pages in a loop
Step 2: For each page, extract the product name and price information
Step 3: Display the information output
import requests
import re
def getHTMLText(url):
    """Fetch *url* and return its decoded text, or "" on any request failure.

    Uses apparent_encoding so pages with a missing or wrong charset
    header still decode readably.
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()  # turn HTTP 4xx/5xx into an exception
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Best-effort fetch: callers treat "" as "page unavailable".
        # (Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        return ""
def parsePage(ilt, html):
    """Extract (price, title) pairs from a Taobao search-result page.

    The page embeds product data as JSON-ish fragments such as
    "view_price":"128.00" and "raw_title":"...". Matching pairs are
    appended to *ilt* as [price, title] (both plain strings).
    """
    try:
        plt = re.findall(r'"view_price":"[\d.]*"', html)
        tlt = re.findall(r'"raw_title":".*?"', html)
        # zip() pairs prices with titles and safely truncates if the
        # counts differ (the original indexed both lists by range(len)).
        for p, t in zip(plt, tlt):
            # split(':', 1) keeps titles that themselves contain ':'
            # intact; strip('"') replaces the original eval(), which
            # executed scraped text and was both fragile and unsafe.
            price = p.split(':', 1)[1].strip('"')
            title = t.split(':', 1)[1].strip('"')
            ilt.append([price, title])
    except Exception:
        # Preserve the original best-effort contract: a malformed page
        # yields no entries rather than aborting the crawl.
        print("")
def printGoodsList(ilt):
    """Print the collected [price, title] records as a numbered table."""
    tplt = "{:4}\t{:8}\t{:16}"
    # Header row: serial number / price / product name.
    print(tplt.format("序号", "价格", "商品名称"))
    for seq, goods in enumerate(ilt, start=1):
        print(tplt.format(seq, goods[0], goods[1]))
def main():
    """Crawl `depth` Taobao result pages for `goods` and print a price table."""
    goods = '书包'
    depth = 3
    start_url = 'https://s.taobao.com/search?q=' + goods
    infoList = []
    for i in range(depth):
        try:
            # Taobao paginates via the 's' offset parameter, 44 items/page.
            url = start_url + '&s=' + str(44 * i)
            html = getHTMLText(url)
            parsePage(infoList, html)
        except Exception:
            # Skip a broken page rather than abort the whole crawl.
            continue
    printGoodsList(infoList)


# Guarded entry point: the original called main() unconditionally,
# which ran the crawler as a side effect of importing this module.
if __name__ == "__main__":
    main()
9. Example 3: stock data directional crawler (requests-bs4-re)
Step 1: Get the stock list from Dongfang Fortune.com
Step 2: Get individual stock information from Baidu stock one by one according to the stock list
Step 3: Store the result to a file
#CrawBaiduStocksB.py
import requests
from bs4 import BeautifulSoup
import traceback
import re
def getHTMLText(url, code="utf-8"):
    """Fetch *url* and return its text decoded with *code*, or "" on failure.

    Passing the known charset via *code* avoids the cost of
    apparent_encoding detection on every page.
    """
    try:
        r = requests.get(url)
        r.raise_for_status()  # HTTP error status -> exception
        r.encoding = code
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:`; callers treat "" as
        # "page unavailable".
        return ""
def getStockList(lst, stockURL):
    """Append every stock code (sh/sz + 6 digits) linked from *stockURL* to *lst*."""
    # The Eastmoney listing page is GB2312-encoded.
    page = getHTMLText(stockURL, "GB2312")
    soup = BeautifulSoup(page, 'html.parser')
    for anchor in soup.find_all('a'):
        try:
            target = anchor.attrs['href']
            code = re.findall(r"[s][hz]\d{6}", target)[0]
            lst.append(code)
        except:
            # Anchors without an href, or whose href holds no stock
            # code, are simply skipped.
            continue
def getStockInfo(lst, stockURL, fpath):
    """Fetch each stock's detail page and append its parsed fields to *fpath*.

    One line per stock is written as the str() of a dict; progress is
    reported on a single console line after every stock, whether it
    succeeded, failed, or was skipped.
    """
    total = len(lst)  # hoisted: invariant across the loop
    count = 0
    for stock in lst:
        url = stockURL + stock + ".html"
        html = getHTMLText(url)
        try:
            if html == "":
                continue
            infoDict = {}
            soup = BeautifulSoup(html, 'html.parser')
            stockInfo = soup.find('div', attrs={'class': 'stock-bets'})
            name = stockInfo.find_all(attrs={'class': 'bets-name'})[0]
            infoDict.update({'股票名称': name.text.split()[0]})
            # <dt> tags hold field names, <dd> tags the matching values,
            # in document order.
            keyList = stockInfo.find_all('dt')
            valueList = stockInfo.find_all('dd')
            for key_tag, val_tag in zip(keyList, valueList):
                infoDict[key_tag.text] = val_tag.text
            with open(fpath, 'a', encoding='utf-8') as f:
                f.write(str(infoDict) + '\n')
        except Exception:
            # A malformed page is skipped; enable traceback.print_exc()
            # here when debugging parse failures.
            pass
        finally:
            # The original duplicated this progress code in both the try
            # and except branches, and never counted pages skipped via
            # `continue` — so the counter could not reach 100%. `finally`
            # runs on success, failure, and continue alike.
            count = count + 1
            print("\r当前进度: {:.2f}%".format(count * 100 / total), end="")
def main():
    """Crawl the stock list, then each stock's detail page, into a text file."""
    stock_list_url = 'http://quote.eastmoney.com/stocklist.html'
    stock_info_url = 'https://gupiao.baidu.com/stock/'
    output_file = 'D:/BaiduStockInfo.txt'
    slist = []
    getStockList(slist, stock_list_url)
    getStockInfo(slist, stock_info_url, output_file)


# Guarded entry point: the original called main() unconditionally,
# which started the crawl as a side effect of importing this module.
if __name__ == "__main__":
    main()