8.13 Crawler Notes 1

The crawling process
1. First, understand the business requirements.
2. Based on those requirements, find a target website.
3. Fetch the site's data to the local machine (e.g. with the urllib or requests package).
4. Locate the target data (re, XPath, CSS selectors, JSON, etc.).
5. Store the data (MySQL, Redis, or plain files). Steps 3-5 are sketched end to end right below.
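To make the steps concrete, here is a minimal sketch of steps 3 through 5 (the URL, the regular expression, and the output file name are hypothetical placeholders, not from the original notes):

from urllib import request
import re

# Step 3: fetch the page to the local machine (hypothetical URL)
url = 'http://www.example.com'
html = request.urlopen(url).read().decode('utf-8')

# Step 4: locate the data, here the page title, via a hypothetical regex
titles = re.findall(r'<title>(.*?)</title>', html)

# Step 5: store the data; simplest case, a plain text file
with open('titles.txt', 'w', encoding='utf-8') as f:
    f.write('\n'.join(titles))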

Part 1: A minimal crawler structure, fetching the Baidu HTML page
(1. Fetch the content)
from urllib import request
url = 'http://www.baidu.com'
response = request.urlopen(url)
info = response.read()
print(info)

(2. Fetch the content and write it to a new HTML file in the same directory)
from urllib import request
# Set the target URL
base_url = 'http://www.baidu.com'
# Send the HTTP request; urlopen returns a file-like object
response = request.urlopen(url=base_url)
html = response.read()
html = html.decode('utf-8')
# Write the result to a new HTML file in the same directory
with open('baidu.html', 'w', encoding='utf-8') as f:
    f.write(html)

With the code above, some sites return no data: the server rejects the default Python client. In that case a headers dict carrying a browser User-Agent must be added to the request.
Part 2: The xicidaili.com proxy-list HTML page
from urllib import request

url = 'http://www.xicidaili.com/'

user_agent = 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Mobile Safari/537.36'
headers = {
    'User-Agent': user_agent
}

req = request.Request(url,headers=headers)
response = request.urlopen(req)
info = response.read()  # note: the response body can only be read once

with open('xicidaili.html', 'wb') as f:
    f.write(info)
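Step 3 of the process also mentions the requests package; the same fetch looks like this with it (a sketch, assuming the third-party requests library is installed; it is not part of the standard library):

import requests  # third-party: pip install requests

url = 'http://www.xicidaili.com/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/68.0.3440.106 Mobile Safari/537.36'
}

# requests builds and sends the request in one call
response = requests.get(url, headers=headers)

with open('xicidaili.html', 'wb') as f:
    f.write(response.content)  # raw bytes, like urlopen(...).read()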

Part 3: Wrapping the code into functions (Baidu Translate)
from urllib import request, parse
from urllib.error import HTTPError, URLError

# a. get(url, headers=None)
def get(url, headers=None):
    return urlrequests(url, headers=headers)

# b. post(url, form, headers=None)
def post(url, form, headers=None):
    return urlrequests(url, form, headers=headers)

Steps inside urlrequests:
  1. Take the url
  2. Pick a default user_agent
  3. Build the headers
  4. Build the Request object
  5. Call urlopen
  6. Return the byte string

def urlrequests(url, form=None, headers=None):
    user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
    # If the caller supplied headers, keep them; otherwise fall back to a default User-Agent
    if headers is None:
        headers = {
            'User-Agent': user_agent
        }
    html_bytes = b''
    try:
        if form:
            # POST
            # 2.1 Encode the form dict into a str
            form_str = parse.urlencode(form)
            # print(form_str)
            # 2.2 Encode the str into bytes
            form_bytes = form_str.encode('utf-8')
            req = request.Request(url, data=form_bytes, headers=headers)
        else:
            # GET
            req = request.Request(url, headers=headers)
        response = request.urlopen(req)
        html_bytes = response.read()
    except HTTPError as e:
        print(e)
    except URLError as e:
        print(e)

    return html_bytes

if __name__ == '__main__':
    # url = 'http://fanyi.baidu.com/sug'
    # form = {
    #     'kw': '呵呵'
    # }
    # html_bytes = post(url, form=form)
    # print(html_bytes)

    url = 'http://www.baidu.com'
    html_byte = get(url)
    print(html_byte)
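To exercise the POST path, the commented-out Baidu Translate lines can be swapped in. The sug endpoint returns JSON, so the returned bytes can be decoded and parsed; a sketch (the exact response layout shown in the comment is an assumption, not confirmed by the original notes):

import json

url = 'http://fanyi.baidu.com/sug'
form = {'kw': '呵呵'}

html_bytes = post(url, form=form)
if html_bytes:
    # Decode the raw bytes, then parse the JSON body
    result = json.loads(html_bytes.decode('utf-8'))
    print(result)  # assumed shape: {'errno': 0, 'data': [...]}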
