Building a table of contents by scraping a 博客园 (cnblogs) personal homepage

Python 2 code

#-*- coding:utf-8 -*-

import urllib2
from lxml import etree

class CrawlJs():
    # Fetch the raw HTML of the given URL
    def getArticle(self,url):
        print '█████████████◣ Start crawling'
        my_headers = {
            'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36',
        }
        request = urllib2.Request(url,headers=my_headers)
        content = urllib2.urlopen(request).read()
        return content

    # Extract post titles and links from the page and save them
    def save(self,content):
        xml = etree.HTML(content)
        title = xml.xpath('//*[@class="postTitle"]/a/text()')
        link = xml.xpath('//*[@class="postTitle"]/a/@href')
        print (title,link)
        # print(zip(title,link))
        # print(map(lambda x,y:[x,y], title,link))
        # Append one "title  link" line per post to the output file
        with open('bokeyuan.txt','a+') as f:
            for t,li in zip(title,link):
                print(t+li)
                f.write(t.encode('utf-8')+'  '+li.encode('utf-8')+'\n')
        print '█████████████◣ Crawling finished!'

# Entry point of the script
if __name__ == '__main__':
    page = int(raw_input('Enter the total number of pages to crawl: '))
    for num in range(page):
        # Replace this with your own personal homepage URL
        url = 'http://www.cnblogs.com/zhouxinfei/default.html?page=%s'%(num+1)
        js = CrawlJs()
        content = js.getArticle(url)
        js.save(content)
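
Both versions rely on the same two XPath expressions, which assume each post title on the homepage sits inside an element with class "postTitle" that wraps a link. A minimal, self-contained sketch of that extraction (runs under Python 2 or 3; the sample markup is a hypothetical, trimmed-down stand-in for the real cnblogs page):

from lxml import etree

# Hypothetical, simplified version of the cnblogs post-list markup
# that the two XPath expressions above are written against.
sample = '''
<div class="postTitle"><a href="http://www.cnblogs.com/zhouxinfei/p/1.html">Post one</a></div>
<div class="postTitle"><a href="http://www.cnblogs.com/zhouxinfei/p/2.html">Post two</a></div>
'''

xml = etree.HTML(sample)
titles = xml.xpath('//*[@class="postTitle"]/a/text()')  # ['Post one', 'Post two']
links = xml.xpath('//*[@class="postTitle"]/a/@href')    # the two href values
print(list(zip(titles, links)))                          # paired (title, link) tuples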

Python 3 code

#-*- coding:utf-8 -*-

import urllib.request
from lxml import etree

class CrawlJs():
    # Fetch the raw HTML of the given URL
    def getArticle(self,url):
        print ('█████████████◣ Start crawling')
        my_headers = {
            'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36',
        }
        request = urllib.request.Request(url,headers=my_headers)
        content = urllib.request.urlopen(request).read()
        return content

    # Extract post titles and links from the page and save them
    def save(self,content):
        xml = etree.HTML(content)
        title = xml.xpath('//*[@class="postTitle"]/a/text()')
        link = xml.xpath('//*[@class="postTitle"]/a/@href')
        print (title,link)
        # print(zip(title,link))
        # print(map(lambda x,y:[x,y], title,link))
        # Append one "title  link" line per post to the output file
        # (explicit UTF-8 so Chinese titles are written correctly on any platform)
        with open('bokeyuan.txt','a+',encoding='utf-8') as f:
            for t,li in zip(title,link):
                print(t+li)
                f.write(t+'  '+li+'\n')
        print('█████████████◣ Crawling finished!')

# Entry point of the script
if __name__ == '__main__':
    page = int(input('Enter the total number of pages to crawl: '))
    for num in range(page):
        # Replace this with your own personal homepage URL
        url = 'http://www.cnblogs.com/zhouxinfei/default.html?page=%s'%(num+1)
        js = CrawlJs()
        content = js.getArticle(url)
        js.save(content)
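
Usage is the same for either version: run the script with the matching interpreter and enter the number of homepage pages to fetch when prompted. Each post is printed to the console and appended to bokeyuan.txt in the working directory, one post per line in the form "title  link" (separated by two spaces). The homepage URL is hard-coded, so swap http://www.cnblogs.com/zhouxinfei/default.html for your own blog address before running.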

Reposted from blog.csdn.net/xc_zhou/article/details/80557061