[Python Web Scraping] Using the urllib Library

Python version: 3.6
urllib official documentation

urllib consists of several URL-related modules:

  • urllib.request
    for opening and reading URLs
  • urllib.error
    containing the exceptions raised by urllib.request
  • urllib.parse
    for parsing URLs
  • urllib.robotparser
    for parsing robots.txt files
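
The first three modules are used throughout the rest of this post; urllib.robotparser does not appear again below, so here is a minimal sketch of it (the target site is only a placeholder):

from urllib.robotparser import RobotFileParser

# Parse a site's robots.txt and check whether a given URL may be crawled
rp = RobotFileParser()
rp.set_url('http://www.baidu.com/robots.txt')
rp.read()
print(rp.can_fetch('*', 'http://www.baidu.com/'))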

urlopen

urllib.request.urlopen(url, data=None, [timeout, ]*, cafile=None, capath=None, cadefault=False, context=None)

import urllib.request

# Send a GET request with urlopen
response = urllib.request.urlopen('http://www.baidu.com')
print(response.read().decode('utf-8'))


# Passing the data argument turns this into a POST request
import urllib.parse
data = bytes(urllib.parse.urlencode({'word': 'hello'}), encoding='utf8')
response = urllib.request.urlopen('http://httpbin.org/post', data=data)
print(response.read().decode('utf-8'))


# Specify a timeout; if the request exceeds it, an exception is raised
response = urllib.request.urlopen('http://httpbin.org/get', timeout=0.1)
print(response.read())

# Exception handling for timeouts
import socket
import urllib.error

try:
    response = urllib.request.urlopen('http://httpbin.org/get', timeout=0.1)
except urllib.error.URLError as e:
    if isinstance(e.reason, socket.timeout):
        print('TIME OUT')
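
The signature above also takes a context argument for HTTPS requests. A minimal sketch, assuming you want to supply your own ssl.SSLContext (here one that skips certificate verification, for testing only):

import ssl
import urllib.request

# Build an SSL context that does not verify certificates (testing only)
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
response = urllib.request.urlopen('https://www.python.org', context=context)
print(response.status)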

Response information (response)

import urllib.request
response = urllib.request.urlopen('https://www.python.org')
print(type(response)) #<class 'http.client.HTTPResponse'>


# Status code and response headers
response = urllib.request.urlopen('https://www.python.org')
print(response.status)
print(response.getheaders()) # returns a list of (name, value) tuples
print(response.getheader('Server'))

Request information (Request)

Besides passing a URL directly to urlopen, you can also build a Request object and pass that to urlopen.

import urllib.request

request = urllib.request.Request('https://python.org')
response = urllib.request.urlopen(request)
print(response.read().decode('utf-8'))


# POST request with custom request headers
import urllib.parse
url = 'http://httpbin.org/post'
headers = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
    'Host': 'httpbin.org'
}
form = {
    'name': 'Germey'
}
data = bytes(urllib.parse.urlencode(form), encoding='utf8')
req = urllib.request.Request(url=url, data=data, headers=headers, method='POST')
response = urllib.request.urlopen(req)
print(response.read().decode('utf-8'))
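
Headers can also be set one at a time with Request.add_header instead of the headers argument; a minimal sketch of the same request built that way:

# Equivalent request built with add_header
req = urllib.request.Request(url=url, data=data, method='POST')
req.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)')
response = urllib.request.urlopen(req)
print(response.status)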

Handler

  • Proxy
import urllib.request

# Set up a proxy handler
proxy_handler = urllib.request.ProxyHandler({
    'http': 'http://127.0.0.1:9743',
    'https': 'https://127.0.0.1:9743'
})
opener = urllib.request.build_opener(proxy_handler)
response = opener.open('http://www.baidu.com')
print(response.read().decode('utf-8'))

  • Cookie

Crawlers that involve logging in may need to work with cookies.

import http.cookiejar, urllib.request
cookie = http.cookiejar.CookieJar()
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')
# A cookie is essentially a small set of key-value pairs stored on the client
for item in cookie:
    print(item.name+"="+item.value)

# Save cookies to a file
import http.cookiejar, urllib.request
filename = "cookie.txt"
cookie = http.cookiejar.MozillaCookieJar(filename)
# cookie = http.cookiejar.LWPCookieJar(filename)  # an alternative cookie storage format
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')
# Save the cookies to cookie.txt
cookie.save(ignore_discard=True, ignore_expires=True)

import http.cookiejar, urllib.request
# The jar class must match the format the file was saved in (MozillaCookieJar above)
cookie = http.cookiejar.MozillaCookieJar()
# Load cookies from the local file
cookie.load('cookie.txt', ignore_discard=True, ignore_expires=True)
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')
print(response.read().decode('utf-8'))

Exception handling

The urllib.error module

from urllib import request, error

try:
    response = request.urlopen('http://cuiqingcai.com/index.htm')
except error.HTTPError as e:
    print(e.reason, e.code, e.headers, sep='\n')
except error.URLError as e:
    print(e.reason)
else:
    print('Request Successfully')

URL parsing

The urllib.parse module

from urllib.parse import urlparse

result = urlparse('http://www.baidu.com/index.html;user?id=5#comment')
print(type(result), result)
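
urlparse returns a ParseResult, a named tuple whose six components can also be read by name; a short sketch using the result from above:

# scheme, netloc, path, params, query, fragment
print(result.scheme, result.netloc, result.path)
print(result.params, result.query, result.fragment)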

from urllib.parse import urlunparse

data = ['http', 'www.baidu.com', 'index.html', 'user', 'a=6', 'comment']
print(urlunparse(data))

from urllib.parse import urljoin

print(urljoin('http://www.baidu.com', 'FAQ.html'))
print(urljoin('http://www.baidu.com', 'https://cuiqingcai.com/FAQ.html'))
print(urljoin('http://www.baidu.com/about.html', 'https://cuiqingcai.com/FAQ.html'))
print(urljoin('http://www.baidu.com/about.html', 'https://cuiqingcai.com/FAQ.html?question=2'))
print(urljoin('http://www.baidu.com?wd=abc', 'https://cuiqingcai.com/index.php'))
print(urljoin('http://www.baidu.com', '?category=2#comment'))
print(urljoin('www.baidu.com', '?category=2#comment'))
print(urljoin('www.baidu.com#comment', '?category=2'))
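
The rule behind urljoin: components missing from the second argument are filled in from the base URL, while a scheme, netloc, or path present in the second argument takes precedence. For example, the expected results of the first two calls above are:

print(urljoin('http://www.baidu.com', 'FAQ.html'))
# -> http://www.baidu.com/FAQ.html (the relative path is joined onto the base)
print(urljoin('http://www.baidu.com', 'https://cuiqingcai.com/FAQ.html'))
# -> https://cuiqingcai.com/FAQ.html (an absolute URL replaces the base entirely)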


from urllib.parse import urlencode

params = {
    'name': 'germey',
    'age': 22
}
base_url = 'http://www.baidu.com?'
url = base_url + urlencode(params)
print(url)
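# Expected result: http://www.baidu.com?name=germey&age=22 (dict keys keep insertion order in Python 3.6+)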

Reposted from blog.csdn.net/u012557610/article/details/80448240