Scraping Weibo

The script below uses Selenium to log in to Weibo search, pages through the monthly results for the query 一带一路 (Belt and Road, double URL-encoded in the URLs below), and writes each post's text together with its repost, comment and like counts to one CSV file per month.

import requests
import json
import codecs
import time
import random
import csv
from fake_useragent import UserAgent
import pandas as pd
from selenium import webdriver
from lxml import etree
from bs4 import BeautifulSoup as bs
from selenium.webdriver.common.proxy import Proxy, ProxyType

url_page = 'http://s.weibo.com/weibo/%25E4%25B8%2580%25E5%25B8%25A6%25E4%25B8%2580%25E8%25B7%25AF&xsort=hot&suball=1&timescope=custom:2017-07-01:2017-08-01&page={}'

url_2016 = ['http://s.weibo.com/weibo/%25E4%25B8%2580%25E5%25B8%25A6%25E4%25B8%2580%25E8%25B7%25AF&xsort=hot&suball=1&timescope=custom:2016-01-01:2016-02-01&page={}',  # 17139025785
            'http://s.weibo.com/weibo/%25E4%25B8%2580%25E5%25B8%25A6%25E4%25B8%2580%25E8%25B7%25AF&xsort=hot&suball=1&timescope=custom:2016-02-02:2016-03-01&page={}',  # 14556642563
            'http://s.weibo.com/weibo/%25E4%25B8%2580%25E5%25B8%25A6%25E4%25B8%2580%25E8%25B7%25AF&xsort=hot&suball=1&timescope=custom:2016-03-02:2016-04-01&page={}',  # 17326257480
            'http://s.weibo.com/weibo/%25E4%25B8%2580%25E5%25B8%25A6%25E4%25B8%2580%25E8%25B7%25AF&xsort=hot&suball=1&timescope=custom:2016-04-02:2016-05-01&page={}',  # [email protected]
            'http://s.weibo.com/weibo/%25E4%25B8%2580%25E5%25B8%25A6%25E4%25B8%2580%25E8%25B7%25AF&xsort=hot&suball=1&timescope=custom:2016-05-02:2016-06-01&page={}',  # 17139025785
            'http://s.weibo.com/weibo/%25E4%25B8%2580%25E5%25B8%25A6%25E4%25B8%2580%25E8%25B7%25AF&xsort=hot&suball=1&timescope=custom:2016-06-02:2016-07-01&page={}',  # 14556642563
            'http://s.weibo.com/weibo/%25E4%25B8%2580%25E5%25B8%25A6%25E4%25B8%2580%25E8%25B7%25AF&xsort=hot&suball=1&timescope=custom:2016-07-02:2016-08-01&page={}',  # 17326257480
            'http://s.weibo.com/weibo/%25E4%25B8%2580%25E5%25B8%25A6%25E4%25B8%2580%25E8%25B7%25AF&xsort=hot&suball=1&timescope=custom:2016-08-02:2016-09-01&page={}',  # [email protected]
            'http://s.weibo.com/weibo/%25E4%25B8%2580%25E5%25B8%25A6%25E4%25B8%2580%25E8%25B7%25AF&xsort=hot&suball=1&timescope=custom:2016-09-02:2016-10-01&page={}',  # 17139025785
            'http://s.weibo.com/weibo/%25E4%25B8%2580%25E5%25B8%25A6%25E4%25B8%2580%25E8%25B7%25AF&xsort=hot&suball=1&timescope=custom:2016-10-02:2016-11-01&page={}',  # 14556642563
            'http://s.weibo.com/weibo/%25E4%25B8%2580%25E5%25B8%25A6%25E4%25B8%2580%25E8%25B7%25AF&xsort=hot&suball=1&timescope=custom:2016-11-02:2016-12-01&page={}',  # 17326257480
            'http://s.weibo.com/weibo/%25E4%25B8%2580%25E5%25B8%25A6%25E4%25B8%2580%25E8%25B7%25AF&xsort=hot&suball=1&timescope=custom:2016-12-02:2016-12-31&page={}'   # [email protected]
            ]

file1 = open('ip.txt', 'r')
ips = file1.readlines()
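
ip.txt itself isn't shown in the post; for the Proxy built in get_ip() below to work, it presumably holds one HTTP proxy per line in host:port form, roughly like this (the addresses here are placeholders):

1.2.3.4:8080
5.6.7.8:3128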

def get_ip():
    # pick a random proxy from ip.txt; strip() drops the trailing newline,
    # which would otherwise corrupt the httpProxy value
    ip = ips[random.randint(0, len(ips) - 1)].strip()
    print(ip)
    proxy = Proxy(
        {
            'proxyType': ProxyType.MANUAL,
            'httpProxy': ip
        }
    )
    return proxy

def get_html(url, s_proxy, user_name, pass_word):
    driver = webdriver.Firefox(proxy=s_proxy)
    driver.maximize_window()
    driver.get(url)
    time.sleep(random.randint(3, 9))
    driver.find_element_by_xpath('//a[@node-type="loginBtn"]').click()  # open the login form
    time.sleep(random.randint(5, 13))
    driver.find_element_by_xpath('//input[@node-type="username"]').clear()
    driver.find_element_by_xpath('//input[@node-type="username"]').send_keys(user_name)  # locate the username input and type the username
    time.sleep(random.randint(4, 10))
    driver.find_element_by_xpath('//input[@node-type="password"]').clear()
    driver.find_element_by_xpath('//input[@node-type="password"]').send_keys(pass_word)  # locate the password input and type the password
    time.sleep(random.randint(2, 7))
    driver.find_element_by_xpath('//a[@node-type="submitBtn"]').click()  # find the login button and click it
    time.sleep(random.randint(11, 28))
    data = driver.page_source
    if data:
        parse_detail(data)
    page = 1
    while True:
        page += 1
        print('page is --------', page)
        try:
            driver.find_element_by_xpath('//a[@class="page next S_txt1 S_line1"]').click()  # find the "next page" link and click it
            driver.execute_script("window.scrollTo(0,400)")
            time.sleep(random.uniform(1.2, 3.5))
            driver.execute_script("window.scrollTo(0,2000)")
            time.sleep(random.uniform(1.2, 3.5))
            driver.execute_script("window.scrollTo(0,3000)")
            time.sleep(random.uniform(1.2, 3.5))
            driver.execute_script("window.scrollTo(0,5000)")
            time.sleep(random.uniform(4.1, 10.6))
            next_data = driver.page_source
            parse_detail(next_data)
        except Exception:
            # no "next page" link any more: stop paging
            break
    driver.quit()
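
A note on API versions: find_element_by_xpath and the proxy keyword on webdriver.Firefox are Selenium 3 calls and are gone in current Selenium 4 releases. If you need to run this under Selenium 4, a minimal sketch (assuming geckodriver is on the PATH; make_driver is a hypothetical helper, not part of the original script) would route the proxy through Firefox preferences and use By locators:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options

def make_driver(ip):
    # ip is assumed to be 'host:port', as read from ip.txt
    host, port = ip.split(':')
    options = Options()
    options.set_preference('network.proxy.type', 1)        # 1 = manual proxy configuration
    options.set_preference('network.proxy.http', host)
    options.set_preference('network.proxy.http_port', int(port))
    return webdriver.Firefox(options=options)

# the locator calls then become, for example:
# driver.find_element(By.XPATH, '//a[@node-type="loginBtn"]').click()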

def parse_detail(parse_data):
    soup = bs(parse_data, 'lxml')
    writer = csv.writer(file)

    divs = soup.find_all('div', class_="WB_cardwrap S_bg2 clearfix")
    aa = 0
    for content in divs:
        aa += 1
        print('--- post %d ---' % aa)
        # defaults, so the CSV row is still written when a card lacks body text or an action bar
        text = transmit_amount_content = comment_amount_content = praise_amount_content = ''
        p_content = content.find('p', class_="comment_txt")
        amounts = content.find('ul', class_="feed_action_info feed_action_row4")
        if amounts:
            amount = amounts.find_all('li')
            transmit_amount_li = amount[1]  # the <li> holding the repost count
            transmit_amount_content = transmit_amount_li.find('span', class_="line S_line1").get_text()  # e.g. '转发 ...'
            print(transmit_amount_content)

            comment_amount_li = amount[2]  # the <li> holding the comment count
            comment_amount_content = comment_amount_li.find('span', class_="line S_line1").get_text()  # e.g. '评论 ...'
            print(comment_amount_content)

            praise_amount_li = amount[3]  # the <li> holding the like count
            praise_amount_content = praise_amount_li.find('span', class_="line S_line1").get_text()  # bare number, so add the label
            praise_amount_content = '赞' + praise_amount_content
            print(praise_amount_content)

        if p_content:
            headers = {'User-Agent': UserAgent().random}
            if p_content.find_all('a', class_="WB_text_opt"):
                # post longer than 140 characters: fetch the full text from the ajax endpoint
                action_data = p_content.find('a', class_="WB_text_opt")['action-data']
                url = 'http://s.weibo.com/ajax/direct/morethan140?' + action_data + '&_t=0&__rnd=1532056726026'
                response = requests.get(url, headers=headers)
                response.encoding = 'utf-8'
                text1 = json.loads(response.text)
                text = text1['data']['html']
            else:
                text = p_content.get_text().strip()
            print(text)

        writer.writerow([text, transmit_amount_content, comment_amount_content, praise_amount_content])

if __name__ == '__main__':
    for date_url in url_2016:
        # rotate accounts: a different account each month, four accounts in total
        if '2016-01-01' in date_url or '2016-05-02' in date_url or '2016-09-02' in date_url:
            username = '17139025785'
            password = 'hn12021'
        elif '2016-02-02' in date_url or '2016-06-02' in date_url or '2016-10-02' in date_url:
            username = '14556642563'
            password = 'hn12021'
        elif '2016-03-02' in date_url or '2016-07-02' in date_url or '2016-11-02' in date_url:
            username = '17326257480'
            password = 'hn12021'
        elif '2016-04-02' in date_url or '2016-08-02' in date_url or '2016-12-02' in date_url:
            username = '[email protected]'
            password = 'lijiaojiao2010'

        name = '_'.join(date_url.split(':')[2].split('-')[:2])  # e.g. '2016_01'
        file = open(name, 'w', encoding='utf-8-sig', newline='')

        proxy = get_ip()
        get_html(date_url.format(1), proxy, username, password)  # fill in the page placeholder, starting from page 1
        file.close()
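
pandas is imported at the top but never used during scraping; presumably the monthly files are meant to be loaded back for analysis later. A minimal sketch, assuming the output of the January run and made-up column names (the CSVs are written without a header row):

df = pd.read_csv('2016_01', names=['text', 'reposts', 'comments', 'likes'], encoding='utf-8-sig')
print(df.head())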

Reposted from blog.csdn.net/qq_42717902/article/details/82624274