Building a word cloud of Nazha's Weibo posts with Python

import requests
from time import sleep
from urllib.parse import urlencode
headers={
    'Host': 'm.weibo.cn',
    'Referer': 'https://m.weibo.cn/u/1350995007',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest',
}
def get_user_info(uid):
    # Send the request
    result = requests.get('https://m.weibo.cn/api/container/getIndex?type=uid&value={}'
                          .format(uid), headers=headers)
    if result.status_code != 200:
        raise RuntimeError('request failed with status {}'.format(result.status_code))
    json_data = result.json().get('data')  # JSON payload of the response
    userinfo = {
        'name': json_data['userInfo']['screen_name'],                    # screen name
        'description': json_data['userInfo']['description'],             # profile description
        'follow_count': json_data['userInfo']['follow_count'],           # number of accounts followed
        'followers_count': json_data['userInfo']['followers_count'],     # number of followers
        'profile_image_url': json_data['userInfo']['profile_image_url'], # avatar URL
        'verified_reason': json_data['userInfo']['verified_reason'],     # verification info
        'containerid': json_data['tabsInfo']['tabs'][1]['containerid']   # needed later when fetching posts
    }

    # Gender: Weibo uses 'm' for male and 'f' for female
    if json_data['userInfo']['gender'] == 'm':
        gender = '男'
    elif json_data['userInfo']['gender'] == 'f':
        gender = '女'
    else:
        gender = '未知'
    userinfo['gender'] = gender
    return userinfo
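# (Aside, my addition: the branching above collapses to a dict lookup with a
# default, which behaves identically:
#     gender = {'m': '男', 'f': '女'}.get(json_data['userInfo']['gender'], '未知')
# )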
userinfo = get_user_info('1350995007')
print(userinfo)
base_url='https://m.weibo.cn/api/container/getIndex?'
posts = []
def get_all_post(page):
    # Fetch one page of posts; text is accumulated in the module-level `posts` list above
    params = {
        'type': 'uid',
        'value': '1350995007',
        'containerid': '1076031350995007',
        'page': page,
    }
    try:
        url=base_url+urlencode(params)
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            data=response.json().get('data')
            if data:
                items=data.get('cards')
                for item in items:
                    blog = item.get('mblog')
                    if blog:
                        post = blog.get('text')
                        posts.append(post)
    except requests.ConnectionError as e:
        print('Error:', e.args)
    return posts
# Pages are 1-based in this API; fetch the first 15 pages, pausing between requests
for i in range(1, 16):
    posts = get_all_post(i)
    sleep(1)
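# Optional guard (my addition, not in the original post): cards can
# occasionally repeat across pages; dict.fromkeys de-duplicates the
# texts while preserving their order:
#     posts = list(dict.fromkeys(posts))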
print(len(posts))
import jieba.analyse
from html2text import html2text

content = '\n'.join([html2text(i) for i in posts])

# Use jieba's TextRank to extract the top 1000 keywords with their weights
result = jieba.analyse.textrank(content, topK=1000, withWeight=True)

# Build a keyword -> weight dict
keywords = dict()
for i in result:
    keywords[i[0]] = i[1]
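# Aside (my addition, not in the original post): jieba also ships a TF-IDF
# extractor with the same (keyword, weight) output shape, so it can be swapped
# in for TextRank without touching the rest of the pipeline; and since `result`
# is a list of pairs, dict(result) builds the same mapping in one step:
#     result = jieba.analyse.extract_tags(content, topK=1000, withWeight=True)
#     keywords = dict(result)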
from PIL import Image, ImageSequence
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud, ImageColorGenerator

# Load the mask image
image = Image.open('images/nazha.jpg')
graph = np.array(image)

# Generate the cloud. WordCloud has no Chinese-capable default font,
# so a Chinese font (SimHei here) must be loaded explicitly
wc = WordCloud(font_path='fonts/simhei.ttf',
               background_color='white', max_words=300, mask=graph)
wc.generate_from_frequencies(keywords)
image_color = ImageColorGenerator(graph)
plt.imshow(wc.recolor(color_func=image_color))  # color the words from the mask image
plt.axis("off")
plt.show()
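As a small aside (not part of the original walkthrough), when the figure is only needed on disk, WordCloud can skip matplotlib entirely and write the rendered image directly:

wc.to_file('nazha_wordcloud.png')  # illustrative output path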

Turning it into a Flask application

from flask import Flask, request, render_template
import requests
from urllib.parse import urlencode
import jieba.analyse
from html2text import html2text
from PIL import Image, ImageSequence
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud, ImageColorGenerator
app = Flask(__name__)


@app.route('/')
def hello_world():
    return 'Hello World!'

headers={
    'Host': 'm.weibo.cn',
    'Referer': 'https://m.weibo.cn/u/1350995007',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest',
}
def get_user_info(uid):
    # Send the request
    result = requests.get('https://m.weibo.cn/api/container/getIndex?type=uid&value={}'
                          .format(uid), headers=headers)
    if result.status_code != 200:
        raise RuntimeError('request failed with status {}'.format(result.status_code))
    json_data = result.json().get('data')  # JSON payload of the response
    userinfo = {
        'name': json_data['userInfo']['screen_name'],                    # screen name
        'description': json_data['userInfo']['description'],             # profile description
        'follow_count': json_data['userInfo']['follow_count'],           # number of accounts followed
        'followers_count': json_data['userInfo']['followers_count'],     # number of followers
        'profile_image_url': json_data['userInfo']['profile_image_url'], # avatar URL
        'verified_reason': json_data['userInfo']['verified_reason'],     # verification info
        'containerid': json_data['tabsInfo']['tabs'][1]['containerid']   # needed later when fetching posts
    }

    # Gender: Weibo uses 'm' for male and 'f' for female
    if json_data['userInfo']['gender'] == 'm':
        gender = '男'
    elif json_data['userInfo']['gender'] == 'f':
        gender = '女'
    else:
        gender = '未知'
    userinfo['gender'] = gender
    return userinfo

base_url='https://m.weibo.cn/api/container/getIndex?'
def get_all_post(uid, containerid):
    # Collect post texts page by page; `posts` is local so repeated requests
    # don't accumulate results from previously queried users
    posts = []
    for page in range(1, 16):  # pages are 1-based in this API
        params = {
            'type': 'uid',
            'value': uid,
            'containerid': containerid,
            'page': page,
        }
        try:
            url = base_url + urlencode(params)
            response = requests.get(url, headers=headers)
            if response.status_code == 200:
                data = response.json().get('data')
                if data:
                    items = data.get('cards')
                    for item in items:
                        blog = item.get('mblog')
                        if blog:
                            post = blog.get('text')
                            posts.append(post)
        except requests.ConnectionError as e:
            print('Error:', e.args)
    return posts

def get_ciyun(posts,uid):
    content = '\n'.join([html2text(i) for i in posts])

    # Use jieba's TextRank to extract the top 1000 keywords with their weights
    result = jieba.analyse.textrank(content, topK=1000, withWeight=True)

    # Build a keyword -> weight dict
    keywords = dict()
    for i in result:
        keywords[i[0]] = i[1]

    # Load the mask image
    image = Image.open('./static/images/nazha.jpg')
    graph = np.array(image)

    # Generate the cloud. WordCloud has no Chinese-capable default font,
    # so a Chinese font (SimHei here) must be loaded explicitly
    wc = WordCloud(font_path='./static/fonts/simhei.ttf',
                   background_color='white', max_words=300, mask=graph)
    wc.generate_from_frequencies(keywords)
    image_color = ImageColorGenerator(graph)
    plt.imshow(wc.recolor(color_func=image_color))
    plt.axis("off")
    # Save to a per-user file so the mask image itself is never overwritten;
    # no plt.show() here, since it would block inside a server process
    dest_img = './static/images/ciyun_{}.jpg'.format(uid)
    plt.savefig(dest_img)
    return dest_img
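# Note (my addition): in a headless server process matplotlib's default GUI
# backend can fail or block; forcing the non-interactive Agg backend before
# pyplot is first imported avoids that:
#     import matplotlib
#     matplotlib.use('Agg')
#     import matplotlib.pyplot as plt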

@app.route('/index', methods=['GET', 'POST'])
def index():
    userinfo = {}
    if request.method == 'POST' and request.form.get('uid'):
        uid = request.form.get('uid')
        userinfo = get_user_info(uid)
        posts = get_all_post(uid, userinfo['containerid'])
        dest_img = get_ciyun(posts, uid)
        userinfo['personas'] = dest_img

    return render_template('index.html', **userinfo)

if __name__ == '__main__':
    app.run()
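With the development server running (Flask's default address is 127.0.0.1:5000), a quick smoke test of the endpoint might look like this (illustrative, assuming that default address):

import requests
resp = requests.post('http://127.0.0.1:5000/index', data={'uid': '1350995007'})
print(resp.status_code)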
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Search Weibo</title>
</head>
<body>
<div>
    <form action="/index" method="post" id="search-form">
        <input type="text" name="uid" class="search" placeholder="微博用户ID">
        <input type="submit" class="submit" value="生成画像">

    </form>
</div>
<!-- This is a comment -->
</body>
</html>
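The form above only submits the uid; to display what render_template('index.html', **userinfo) passes in, the template also needs a results section. A minimal sketch (my addition; the variable names follow the userinfo dict built earlier):

<div>
    {% if name %}
    <h2>{{ name }}</h2>
    <p>{{ description }}</p>
    <p>Following: {{ follow_count }} | Followers: {{ followers_count }}</p>
    <img src="{{ personas }}" alt="word cloud">
    {% endif %}
</div>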


Reposted from blog.csdn.net/qq_37312720/article/details/83818075