Word segmentation with the jieba library, combined with word-frequency statistics

Define the function to get the text content

def get_content(path):
    # Read a text file and return its content as a single string with line breaks stripped.
    with open(path, 'r', encoding='utf-8', errors='ignore') as f:
        content = ''
        for l in f:
            l = l.strip()   # drop the trailing newline and surrounding whitespace
            content += l
        return content
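
As a quick sanity check (sample.txt is a hypothetical file, not part of the article), the function strips each line and concatenates them with no separator:

text = get_content('sample.txt')  # assume the file contains the two lines "hello" and "world"
print(text)                       # prints "helloworld"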

Define the word-frequency statistics function

def get_TF(words, topK=10):
    # Count how many times each word occurs.
    tf_dic = {}
    for w in words:
        tf_dic[w] = tf_dic.get(w, 0) + 1
    # Return the topK most frequent (word, count) pairs, sorted by count in descending order.
    return sorted(tf_dic.items(), key=lambda x: x[1], reverse=True)[:topK]
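
For illustration (the toy token list below is hypothetical, not from the article), get_TF returns the topK most frequent (word, count) pairs:

print(get_TF(['a', 'b', 'a', 'c', 'a', 'b'], topK=2))
# -> [('a', 3), ('b', 2)]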

Define the stop-words function

def stop_words(path):
    # Read the stop-word list, assumed to contain one word per line (UTF-8 encoded).
    with open(path, encoding='utf-8') as f:
        return [l.strip() for l in f]
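
Because every segmented token is checked against this collection, a variant that returns a set makes the membership tests faster; a minimal sketch under the same file-format assumption (one stop word per line):

def stop_words(path):
    # Same assumption: one stop word per line, UTF-8 encoded.
    with open(path, encoding='utf-8') as f:
        return set(l.strip() for l in f)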

Define the main function

import jieba

def main():
    corpus = get_content('word.txt')
    # Load the stop words once and keep them in a set, instead of re-reading the file for every token.
    stops = set(stop_words('stoplist.txt'))
    words = [x for x in jieba.cut(corpus) if x not in stops]
    print(corpus)
    print('Segmentation result: ' + '/ '.join(words))
    print('Top 10 words: ' + str(get_TF(words)))

main()
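
If jieba is not already available in the environment, it can be installed from PyPI with pip install jieba before running the script; word.txt and stoplist.txt are the corpus and stop-word list the article expects to sit alongside the script.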

