Machine Learning Algorithms in Python: k-means Text Clustering

# -*- coding: utf-8 -*-
# This code was written to run in a Jupyter notebook. Author: huzhifei, created 2018/8/14.
# The script implements text clustering with k-means in Python.

# Import the required packages
import numpy as np
import pandas as pd
import re
import os
import codecs
from sklearn import feature_extraction
import jieba

# Segment the title text with jieba
f1 = open("title.txt", "r", encoding='utf-8', errors='ignore')
f2 = open("title_fenci.txt", 'w', encoding='utf-8', errors='ignore')
for line in f1:
    seg_list = jieba.cut(line, cut_all=False)
    f2.write((" ".join(seg_list)).replace("\t\t\t", "\t"))
f1.close()
f2.close()


# Segment the summary text (called "content" here) with jieba
f1 = open("content.txt", "r", encoding='utf-8', errors='ignore')
f2 = open("content_fenci.txt", 'w', encoding='utf-8', errors='ignore')
for line in f1:
    seg_list = jieba.cut(line, cut_all=False)
    f2.write((" ".join(seg_list)).replace("\t\t\t", "\t"))
f1.close()
f2.close()


# Load the segmented title and content text
titles = open('title_fenci.txt', encoding='utf-8', errors='ignore').read().split('\n')
print(str(len(titles)) + ' titles')
contents = open('content_fenci.txt', encoding='utf-8', errors='ignore').read().split('\n')
contents = contents[:len(titles)]
print(str(len(contents)) + ' contents')


# Chinese stop words
def get_custom_stopwords(stop_words_file):
    with open(stop_words_file, encoding='utf-8') as f:
        stopwords = f.read()
    stopwords_list = stopwords.split('\n')
    custom_stopwords_list = [i for i in stopwords_list]
    return custom_stopwords_list

# Load the stop word list
stop_words_file = "stopwordsHIT.txt"
stopwords = get_custom_stopwords(stop_words_file)


# Build the TF-IDF matrix
from sklearn.feature_extraction.text import TfidfVectorizer
max_df = 0.8
min_df = 2
# The content is already segmented with jieba and space-separated, so no custom tokenizer
# is needed; pass the Chinese stop word list loaded above rather than the built-in English one.
tfidf_vectorizer = TfidfVectorizer(max_df=max_df, min_df=min_df, max_features=200000,
                                   stop_words=stopwords,
                                   use_idf=True, token_pattern=u'(?u)\\b[^\\d\\W]\\w+\\b',
                                   ngram_range=(1, 2))

%time tfidf_matrix = tfidf_vectorizer.fit_transform(contents)

print(tfidf_matrix.shape)


# Get the feature terms (in scikit-learn >= 1.0 use get_feature_names_out() instead)
terms = tfidf_vectorizer.get_feature_names()
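
# Optional sanity check (a sketch added here, not from the original post): list the terms
# with the highest average TF-IDF weight across the corpus, to confirm the vocabulary and
# stop word filtering look reasonable. It uses the numpy imported at the top of the script.
mean_tfidf = np.asarray(tfidf_matrix.mean(axis=0)).ravel()  # average weight of each term over all documents
top_idx = mean_tfidf.argsort()[::-1][:20]                   # indices of the 20 highest-weighted terms
for idx in top_idx:
    print(terms[idx], round(float(mean_tfidf[idx]), 4))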

# k-means clustering
from sklearn.cluster import KMeans

num_clusters = 6

km = KMeans(n_clusters=num_clusters)

%time km.fit(tfidf_matrix)

clusters = km.labels_.tolist()
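
# Optional sketch (not in the original post): num_clusters=6 is picked by hand above. A rough
# way to check that choice is the silhouette score (higher is better); it can be slow on a
# large sparse matrix, so a sample is scored. The sample_size and random_state values are arbitrary.
from sklearn.metrics import silhouette_score
score = silhouette_score(tfidf_matrix, km.labels_, sample_size=5000, random_state=42)
print('silhouette score for %d clusters: %.4f' % (num_clusters, score))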


# Save the k-means model to a .pkl file and load it back with joblib
# (sklearn.externals.joblib was removed in newer scikit-learn; use `import joblib` there)
from sklearn.externals import joblib

joblib.dump(km, 'y_cluster.pkl')
km = joblib.load('y_cluster.pkl')
clusters = km.labels_.tolist()
print(len(clusters))


# Store the results in a pandas DataFrame
# (the original 'rank' column is dropped because `ranks` is never defined in this script;
#  titles, contents and clusters all have the same length by construction)
import pandas as pd
films = {'title': titles, 'synopsis': contents, 'cluster': clusters}
frame = pd.DataFrame(films, index=[films['cluster']], columns=['cluster', 'title', 'synopsis'])

# Cluster size counts
frame['cluster'].value_counts()
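
# Optional sketch (not in the original post): peek at a few titles in each cluster to get a
# feel for what it contains; showing 3 titles per cluster is an arbitrary choice.
for label, group in frame.groupby('cluster'):
    print('cluster %d (%d docs):' % (label, len(group)))
    for t in group['title'].head(3):
        print('   ', t.strip())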

# Print the top terms for each cluster
print("Top terms per cluster:")
print()
# Sort each cluster centroid's term weights in descending order
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
for i in range(num_clusters):
    print("Cluster %d words:" % i, end='')
    # terms are printed directly; the original referenced a `vocab_frame` that is never built here
    for ind in order_centroids[i, :50]:
        print(' %s' % terms[ind], end=',')
    print()
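
# Optional sketch (not in the original post): persist the cluster assignments for later review.
# The file name clusters_result.csv is just an example; utf-8-sig keeps Chinese readable in Excel.
frame.to_csv('clusters_result.csv', index=False, encoding='utf-8-sig')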


Reprinted from blog.csdn.net/hellozhxy/article/details/82083239