Learning web scraping with Python 3.x

Copyright notice: this is an original article by the author and may not be reproduced without permission. https://blog.csdn.net/songqiu65/article/details/69055063

First, know that in Python 3.x urllib.request is an extensible library for opening URLs.
I. 1. The simplest crawler just saves an entire web page locally, which breaks down into the following steps:
①. access the URL
②. read the page
③. save the page
Implementation code:

#encoding:UTF-8
from urllib.request import urlopen

def main():
    url = "http://www.python.org"
    response = urlopen(url)                  #1 access the URL
    data = response.read().decode("utf-8")   #2 read the page
    filename = "index.html"
    f = open(filename, 'w', encoding="utf-8")  #3 save the page
    f.write(data)
    f.close()
    #print(data)  # dump the page source
    print(type(response))
    print(response.geturl())   # URL of the resource actually retrieved; useful for checking whether a redirect was followed
    print(response.info())     # page meta-information (headers) as an email.message_from_string() instance (see the HTTP header quick reference)
    print(response.getcode())  # HTTP status code of the response
    response.close()

if __name__ == '__main__':
    main()

2. Building a variable URL (for example, a Baidu search)
First you need to understand urllib.parse.urlencode().
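As a quick standalone illustration (the keys and values here are made up, just to show the escaping behaviour), urlencode() turns a dict into a properly escaped query string:

from urllib.parse import urlencode

# hypothetical query parameters, only to show how values are escaped
params = {'word': 'hello world', 'pn': 10}
print(urlencode(params))  # e.g. word=hello+world&pn=10 (key order follows the dict)

With that in mind, the Baidu search URL is built as follows: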

import urllib.request
import urllib.parse

data = {}
data['word'] = 'songqiu65'

url_values = urllib.parse.urlencode(data)
url = "http://www.baidu.com/s?"
full_url = url + url_values  # join into the complete URL

data = urllib.request.urlopen(full_url).read()
data = data.decode('UTF-8')
print(data)

3. Pretending to be a browser
Some sites only respond properly to requests that look like they come from a browser, so we add request headers.
Minimal headers:

import urllib.request
weburl = 'http://www.baidu.com/'
webheader = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
req = urllib.request.Request(url=weburl, headers=webheader)
webPage=urllib.request.urlopen(req)
data = webPage.read()
data = data.decode('UTF-8')
print(data)
print(type(webPage))
print(webPage.geturl())
print(webPage.info())
print(webPage.getcode())

More headers:

import urllib.request
weburl = 'http://www.baidu.com/'
webheader = {
    'Connection': 'Keep-Alive',
    'Accept': 'text/html, application/xhtml+xml, */*',
    'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Host': 'www.baidu.com',
    'DNT': '1'
    }
req = urllib.request.Request(url=weburl, headers=webheader)
webPage=urllib.request.urlopen(req)
data = webPage.read()
data = data.decode('UTF-8')
print(data)
print(type(webPage))
print(webPage.geturl())
print(webPage.info())
print(webPage.getcode())

II. 1. Scraping the images on a single page (requires regular expressions)

#encoding:utf-8
import urllib.request
import re
import os

def buildcdw():
    try:
        targetDir = os.getcwd()  # directory to save files in
        os.chdir(targetDir)
        try:
            os.mkdir('pic')
            print("created directory pic")
        except:
            print("the pic directory already exists under " + targetDir + "; saving there directly")
        finally:
            os.chdir("pic")
        targetDir = os.getcwd()
    except:
        print("error")
        os._exit(1)
    return targetDir

def destFile(x, pat):
    pos = pat.rindex('/')             # position of the last '/'
    t = os.path.join(x, pat[pos+1:])  # pat[pos+1:] is the file name part of the link
    return t

if __name__ == "__main__":  # program entry point
    weburl = 'http://www.douban.com/'
    webheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
    req = urllib.request.Request(url=weburl, headers=webheaders)  # build the request with headers
    webpage = urllib.request.urlopen(req)  # send the request
    content = webpage.read().decode("utf-8")
    x = buildcdw()
    for link, t in set(re.findall(r'(https?://[^\s"\']*?\.(jpg|png|gif))', content)):  # regular expression to find all image links
        print(link + "\t" + t)
        try:
            urllib.request.urlretrieve(link, destFile(x, link))  # download the image
        except:
            print('download failed')  # swallow the error and continue

2. Parsing the links on a page

from html.parser import HTMLParser
from bs4 import BeautifulSoup
import io
from urllib.request import urlopen
from urllib.parse import urljoin
URLS = (
    'http://python.org',
)

def output(x):
    print ('\n'.join(sorted(set(x))))

def simpleBS(url, f):
    'simpleBS() - use BeautifulSoup to parse all tags to get anchors'
    output(urljoin(url, x['href']) for x in BeautifulSoup(
        f,"html5lib").findAll('a'))

def htmlparser(url, f):
    'htmlparser() - use HTMLParser to parse anchor tags'
    class AnchorParser(HTMLParser):
        def handle_starttag(self, tag, attrs):
            if tag != 'a':
                return
            if not hasattr(self, 'data'):
                self.data = []
            for attr in attrs:
                if attr[0] == 'href':
                    self.data.append(attr[1])
    parser = AnchorParser()
    parser.feed(f.read())
    output(urljoin(url, x) for x in parser.data)

def process(url, data):
    print ('\n*** simple BS')
    simpleBS(url, data)
    data.seek(0)
    print ('\n*** HTMLParser')
    htmlparser(url, data)

def main():
    for url in URLS:
        f = urlopen(url)
        data = io.StringIO(f.read().decode("utf-8"))
        f.close()
        process(url, data)

if __name__ == '__main__':
    main()

The code above shows two approaches, BeautifulSoup and HTMLParser, which rely on different libraries; if either is missing, install it first (for example, beautifulsoup4 and the html5lib parser via pip).
III. 1. Simulating a login to Zhihu
PIL is required; you can install it from the command line with pip install pillow==3.4.2.
When I wrote this I only had a piece of code on hand that did not fetch the captcha, and I could not make sense of it, so I used the Fiddler tool the author recommended and stepped through the page source myself to find the answers, such as the GET and POST data and how to obtain _xsrf. The main problem was the captcha (the original code had none). Captchas come in two kinds: one where you type the characters shown in an image, and one where you click on parts of an image. For the character kind I can download the image, display it locally with the imaging library, and type the characters in by hand; I did not look into the click kind. The next question was where the captcha comes from, since it is not in the page source. With F12 or Fiddler you can see that the request is mostly fixed; only the t in https://www.zhihu.com/captcha.gif?r="+t+"&type=login changes. I could not work that out on my own and had to search for it: t is the current time.
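A minimal sketch of just that idea, building the captcha URL from the current time in milliseconds (separate from the full script below):

import time

# r is the current time in milliseconds, as observed in the captured requests
t = str(int(time.time() * 1000))
captcha_url = "https://www.zhihu.com/captcha.gif?r=" + t + "&type=login"
print(captcha_url)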

Code:

#coding:utf-8
"""手机号登录知乎 r=1失败0成功"""
import gzip
import re
import sys
import http.cookiejar
import urllib.request
import urllib.parse
from PIL import Image
import io
import time


# Build the header; it should contain at least the items below, which were worked out from captured packets.
header = {
    'Connection': 'Keep-Alive',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Upgrade-Insecure-Requests': '1',
    'Accept-Language': 'zh-CN,zh;q=0.8',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36',
    'Host': 'www.zhihu.com',
    'DNT': '1'
}

def getyzm(data, i):  # fetch the captcha
    cer = re.compile("captcha-module\" data-type=\"en\"", flags=0)
    str1 = cer.search(data)
    if str1:
        t = str(int(time.time()*1000))
        t = "https://www.zhihu.com/captcha.gif?r="+t+"&type=login"
        file = opener.open(t)
        file = io.BytesIO(file.read())
        im = Image.open(file)
        im.show()
        x = input("Enter the characters shown in the image: ")
        return x
    else:
        if i < 10:
            print("Failed to fetch the captcha, retrying!")
            i += 1
            return getyzm(data, i)
        else:
            print("Unable to fetch the captcha image, exiting!")
            sys.exit(0)

# decompression helper
def ungzip(data):
    try:  # try to decompress
        print('Decompressing...')
        data = gzip.decompress(data)
        print('Done decompressing!')
    except:
        print('Not compressed, no decompression needed')
    return data


# extract the _xsrf token
def getXSRF(data):
    cer = re.compile("name=\"_xsrf\" value=\"(.*)\"", flags=0)
    strlist = cer.findall(data)
    return strlist[0]


# build an opener with the request headers
def getOpener(head):
    # set up a cookie handler that stores cookies received from the server and attaches them to later requests
    cj = http.cookiejar.CookieJar()
    pro = urllib.request.HTTPCookieProcessor(cj)
    opener = urllib.request.build_opener(pro)
    header = []
    for key, value in head.items():
        elem = (key, value)
        header.append(elem)
    opener.addheaders = header
    return opener

def getIdPass():
    i=input("请输入知乎手机号:\n")
    p=input("请输入知乎密码:\n")
    return (i,p)

def main():
    global opener

    # fetch the captcha and the _xsrf token
    url = 'https://www.zhihu.com/'
    opener = getOpener(header)
    op = opener.open(url)
    data = op.read()
    data = ungzip(data).decode('utf-8')  # decompress first, then decode
    yzm=getyzm(data,0)
    _xsrf = getXSRF(data)

    # read the account and password
    id,password = getIdPass()

    # Build the POST data; the fields were worked out from the captured packets.
    postDict = {
        '_xsrf': _xsrf,  # site-specific token; other sites may differ
        'phone_num': id,
        'password': password,
        'captcha':yzm
    }
    # the POST data has to be URL-encoded
    url='https://www.zhihu.com/login/phone_num'
    postData = urllib.parse.urlencode(postDict).encode()
    op = opener.open(url, postData)
    data = op.read()
    data = ungzip(data).decode("utf-8")
    print(data)

if __name__=="__main__":
    main()

2. Scraping jokes from Qiushibaike
The code has a flaw: while scraping, every joke from one without a top comment up to the first one that does have a top comment gets treated as having no top comment.
The first text page is http://www.qiushibaike.com/text/, while page 2 and later look like http://www.qiushibaike.com/text/page/2/?s=4971603, i.e. they carry a page number and an s value, so the getID function is used to fetch s.
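For reference, a minimal sketch of how those page URLs can be assembled (page_url is a hypothetical helper; the s value 4971603 is just the one seen above and differs per session):

def page_url(page, s):
    # page 1 has no /page/ part; later pages carry the page number and the s value
    base = "http://www.qiushibaike.com/text/"
    if page <= 1:
        return base
    return base + "page/" + str(page) + "/?s=" + str(s)

print(page_url(1, 4971603))  # http://www.qiushibaike.com/text/
print(page_url(2, 4971603))  # http://www.qiushibaike.com/text/page/2/?s=4971603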

Code:

#coding:utf-8
"""获取糗事百科文字糗事(有神评版)"""
import re
import urllib.parse
import urllib.request
# build the request header
header = {
    'Connection': 'Keep-Alive',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Upgrade-Insecure-Requests': '1',
    'Accept-Language': 'zh-CN,zh;q=0.8',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36',
    'Host': 'www.qiushibaike.com',
    'DNT': '1'
}
url = 'http://www.qiushibaike.com/text/'
def simpleBS(f):  # match and print the jokes
    i=1
    x = re.compile("<a href=\"/users/([0-9]{8})/\" target=\"_blank\" title=\"(.+)\">([\s\S]*?)<span>(.*)</span>([\s\S]*?)<i class=\"number\">(\d+)</i> 好笑</span>([\s\S]*?)<i class=\"number\">(\d+)</i> 评论([\s\S]*?)<div class=\"single-share\">(([\s\S]*?)<span class=\"cmt-name\">(.+)</span>(\n*)<div class=\"main-text\">(\n*)(.+)(\n*)<div class=\"likenum\">){0,1}", flags=0)
    for y in x.findall(f):
        if "<a href=\"/users/" not in y[10]:
            print(("No.{0}: " + "id:" + y[0] + "\t" + "name:" + y[1] + "\t" + "laughs:" + y[5] + "\t" + "comments:" + y[7] + "\ncontent:\n" + y[3].replace("<br/>", "\n") + "\n" + "top commenter:" + y[11] + "\t" + "top comment:" + y[14] + "\n\n").format(i))
            i += 1
        else:
            x1 = re.compile("<a href=\"/users/([0-9]{8})/\" target=\"_blank\" title=\"(.+)\">([\s\S]*?)<span>(.*)</span>([\s\S]*?)<i class=\"number\">(\d+)</i> 好笑</span>([\s\S]*?)<i class=\"number\">(\d+)</i> 评论([\s\S]*?)<div class=\"single-share\">", flags=0)
            for z in x1.findall(y[10]):
                print(("No.{0}: " + "id:" + z[0] + "\t" + "name:" + z[1] + "\t" + "laughs:" + z[5] + "\t" + "comments:" + z[7] + "\ncontent:\n" + z[3].replace("<br/>", "\n") + "\n\n").format(i))
                i += 1

def getnew(i, ii):  # fetch one page of jokes
    urlnew=url+"page/"+str(i)+"/?s="+str(ii)
    op=urllib.request.Request(urlnew,None,header)
    op=urllib.request.urlopen(op)
    data=op.read().decode("utf-8")
    #print(data)
    simpleBS(data)

def getID():  # extract the s value from the page links
    op = urllib.request.Request(url, None, header)
    op = urllib.request.urlopen(op)
    data = op.read().decode("utf-8")
    x = re.compile("s = (\d+)\" rel=\"nofollow\">")
    y = x.findall(data)
    return y[0] if y else ""  # findall returns a list; only the s value itself is needed

if __name__=="__main__":
    i1=int(input("你需要从第几页开始的糗事:"))
    i2 = int(input("你需要到第几页结束的糗事:"))
    i2+=1
    i3=getID()
    if(i1<i2):
        for ii in range(i1,i2):
            print("第"+str(ii)+"页:\n")
            getnew(ii,i3)


A Scrapy version of the Qiushibaike crawler is attached here.
