# 版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/VABTC/article/details/84784033
import urllib.request
import urllib.error  # explicit import: URLError is referenced below; do not rely on urllib.request importing it
import ssl  # kept from the original (unused here)
import re  # regular expressions, used to extract article links

# Crawl the Sina news front page: read the raw HTML bytes of the page.
data = urllib.request.urlopen("http://news.sina.com.cn/").read()
# headers=("","")  # a fake HTTP request header could be installed here (see earlier posts)
# opener...
# opener
data2 = data.decode("utf-8", "ignore")  # decode to text, skipping undecodable bytes
# Non-greedy capture of every same-site URL inside an href="..." attribute.
pat = 'href="(http://news.sina.com.cn/.*?)"'
allurl = re.compile(pat).findall(data2)
# Download each extracted article page to a numbered local .html file.
for i, thisurl in enumerate(allurl):
    try:
        print("第"+str(i)+"次爬取")
        file = "E:/practice/sinanews/"+str(i)+".html"
        urllib.request.urlretrieve(thisurl, file)  # download the page to disk
        print("---成功---")
    except urllib.error.URLError as e:
        # Log the HTTP status code and/or failure reason, then continue with the next URL.
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
import urllib.request
import urllib.error  # explicit import: URLError is referenced below; do not rely on urllib.request importing it
import re

# Crawl the CSDN front page with a browser-like User-Agent (CSDN blocks the
# default urllib UA), then download every linked blog post.
url = "http://blog.csdn.net/"
headers = ("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.53 Safari/537.36")
opener = urllib.request.build_opener()
opener.addheaders = [headers]
# Install globally so urlopen/urlretrieve below also send the spoofed header.
urllib.request.install_opener(opener)
data = urllib.request.urlopen(url).read().decode("utf-8", "ignore")
# Non-greedy capture of every blog-post URL inside an href="..." attribute.
pat = 'href="(https://blog.csdn.net/.*?)"'
result = re.compile(pat).findall(data)
# Download each extracted post to a numbered local .html file.
for i, thisurl in enumerate(result):
    try:
        print("第"+str(i)+"次爬取")
        file = "E:/practice/csdntitle/"+str(i)+".html"
        urllib.request.urlretrieve(thisurl, file)  # download the page to disk
        print("成功")
    except urllib.error.URLError as e:
        # Log the HTTP status code and/or failure reason, then continue with the next URL.
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)