【NLP】NO5:文本聚类

在这里插入图片描述

一、主要步骤

语料加载->分词->去停用词->抽取词向量模型特征->基于tf-idf和word2vec进行kmeans中文文本聚类

import multiprocessing
import random

import gensim
import jieba
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from gensim.models import Word2Vec
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer
from sklearn.preprocessing import scale
# Load the stopword list; the txt file can be swapped out per project.
stopwords = pd.read_csv('stopword.txt', index_col=False, quoting=3,
                        sep='\t', names=['stopwords'], encoding='utf-8')
# Use a set: preprocess() tests membership once per token, and set
# lookup is O(1) versus O(n) on the original ndarray.
stopwords = set(stopwords['stopwords'].values)
# Load the corpus.
# BUG FIX: the original passed seq=',' — read_csv has no such keyword
# (TypeError); the correct argument is sep.
data = pd.read_csv('data.csv', encoding='utf-8', sep=',')
data.dropna(inplace=True)
# DataFrame column -> plain list of strings.
# NOTE(review): assumes the csv has a 'segment' column — confirm schema.
data = data.segment.values.tolist()
#分词、去停用词
def preprocess(data):
    """Tokenize each line with jieba and filter out noise tokens.

    Drops pure-digit tokens, whitespace-only tokens, single-character
    tokens and stopwords, then joins the surviving tokens with spaces
    so the downstream TfidfVectorizer can split them back into words.

    Returns a list of space-joined token strings, one per input line
    that tokenized successfully; lines that raise are printed and skipped.
    """
    # BUG FIX: the original appended to a module-level `sentences` that
    # does not exist before this function returns (NameError); make it
    # a local accumulator instead.
    sentences = []
    for line in data:
        try:
            segs = jieba.lcut(line)                              # segment
            segs = [w for w in segs if not str(w).isdigit()]     # drop digits
            segs = [w for w in segs if w.strip()]                # drop blanks
            segs = [w for w in segs if len(w) > 1]               # drop 1-char tokens
            segs = [w for w in segs if w not in stopwords]       # drop stopwords
            # BUG FIX: join with a space — the original "".join merged
            # every word into one unbroken string, so the vectorizer
            # could not tokenize the document.
            sentences.append(" ".join(segs))
        except Exception:
            # Best-effort: report the offending line and keep going.
            print(line)
            continue
    return sentences
sentences = preprocess(data)

# --- tf-idf features --------------------------------------------------
# TfidfVectorizer builds the vocabulary AND applies tf-idf weighting in
# a single step.
# BUG FIX: the original piped the output through an extra
# TfidfTransformer.fit_transform, which applied the idf weighting a
# second time and distorted the feature values.
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5)
tfidf = vectorizer.fit_transform(sentences)
# All vocabulary terms of the bag-of-words model.
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2
# in favor of get_feature_names_out() — adjust per installed version.
word = vectorizer.get_feature_names()
# weight[i][j] = tf-idf weight of term j in document i.
weight = tfidf.toarray()
print('Features length: ' + str(len(word)))

# --- k-means clustering on the tf-idf matrix --------------------------
# DBSCAN or hierarchical clustering would also work here.
numClass = 4  # number of clusters
clf = KMeans(n_clusters=numClass, max_iter=10000, init="k-means++", tol=1e-6)
pca = PCA(n_components=10)          # reduce dimensionality before clustering
TnewData = pca.fit_transform(weight)
s = clf.fit(TnewData)
#可视化聚类结果
 def plot_cluster(result,newData,numClass):
        plt.figure(2)
        Lab = [[] for i in range(numClass)]
        index = 0
        for labi in result:
            Lab[labi].append(index)
            index += 1
        color = ['oy', 'ob', 'og', 'cs', 'ms', 'bs', 'ks', 'ys', 'yv', 'mv', 'bv', 'kv', 'gv', 'y^', 'm^', 'b^', 'k^',
                 'g^'] * 3 
        for i in range(numClass):
            x1 = []
            y1 = []
            for ind1 in newData[Lab[I]]:
                # print ind1
                try:
                    y1.append(ind1[1])
                    x1.append(ind1[0])
                except:
                    pass
            plt.plot(x1, y1, color[I])

        #绘制初始中心点
        x1 = []
        y1 = []
        for ind1 in clf.cluster_centers_:
            try:
                y1.append(ind1[1])
                x1.append(ind1[0])
            except:
                pass
        plt.plot(x1, y1, "rv") #绘制中心
        plt.show()
# For visualization, first compress with PCA, then embed to 2-D with TSNE
# (PCA-then-TSNE is the usual pipeline for a more representative layout).
pca = PCA(n_components=4)           # intermediate reduction to 4 dims
newData = pca.fit_transform(weight)
from sklearn.manifold import TSNE
newData = TSNE(2).fit_transform(newData)  # final 2-D embedding for plotting
# BUG FIX: the original called clf.predict(newData) on the 2-D TSNE
# embedding, but clf was fitted on 10-D PCA features, so scikit-learn
# raises a feature-count mismatch. The labels assigned during fit are
# exactly what we want to color by.
result = list(clf.labels_)
plot_cluster(result, newData, numClass)
发布了60 篇原创文章 · 获赞 55 · 访问量 3万+

猜你喜欢

转载自blog.csdn.net/MARY197011111/article/details/100072530