Python exercise: the jieba library

Using jieba to count word frequencies in a text and sort them by frequency

# Count word frequencies in a Chinese text with jieba and print the top 15.
import jieba
from collections import Counter

# Read the article; 'gbk' matches the source file's encoding and
# errors='replace' keeps undecodable bytes from aborting the read.
# A context manager guarantees the file handle is closed.
with open("文章.txt", "r", encoding="gbk", errors="replace") as f:
    txt = f.read()

words = jieba.lcut(txt)

# Single-character tokens are mostly punctuation and particles — skip them.
counts = Counter(word for word in words if len(word) > 1)

# most_common(15) sorts by frequency (descending) and safely returns fewer
# entries when there are fewer than 15 distinct multi-character words
# (the original indexed items[i] and could raise IndexError).
for word, count in counts.most_common(15):
    print("{0:<10}{1:>5}".format(word, count))

Word cloud

from wordcloud import WordCloud
import matplotlib.pyplot as plt
import jieba


def create_word_cloud(filename='文章.txt'):
    """Generate, display, and save a word cloud from a Chinese text file.

    Args:
        filename: path of the UTF-8 text file to read. The original code
            ignored this parameter and hard-coded '文章.txt'; it now
            defaults to that file, so existing behavior is preserved.

    Side effects:
        Shows the cloud in a matplotlib window and writes 'img_book.png'.
    """
    # Context manager ensures the file handle is closed (the original leaked it).
    with open(file=filename, encoding='utf-8') as f:
        text = f.read()

    # Full mode (cut_all=True) emits every possible segmentation, which
    # inflates the vocabulary fed to the cloud.
    wordlist = jieba.cut(text, cut_all=True)
    wl = " ".join(wordlist)

    wc = WordCloud(
        background_color="black",
        max_words=2000,
        font_path='msyhl.ttf',  # a font with CJK glyphs is required for Chinese text
        height=1200,
        width=1600,
        max_font_size=100,
        random_state=100,  # fixed seed -> reproducible layout
    )
    myword = wc.generate(wl)

    plt.imshow(myword)
    plt.axis("off")
    plt.show()
    wc.to_file('img_book.png')


if __name__ == '__main__':
    # The original passed 'mytext' while the function ignored its argument;
    # now that the parameter is honored, pass the real input file.
    create_word_cloud('文章.txt')

Word cloud reference: https://blog.csdn.net/weixin_40902527/article/details/86717490

https://www.jb51.net/article/142134.htm

 

Guess you like

Origin www.cnblogs.com/linjiaxin59/p/12650734.html