[文本语义相似] 基于tf-idf的余弦距离(sklearn实现)

文本相似在问答系统中有很重要的应用,如基于知识的问答系统(Knowledge-based QA),基于文档的问答系统(Document-based QA),以及基于FAQ的问答系统(Community-QA)等。对于问题的内容,需要进行相似度匹配,从而选择出与问题最接近,同时最合理的答案。本节介绍基于tf-idf的余弦距离计算相似度。

基于sklearn的方式如下:
 

import os
import jieba
import pickle
import logging
import numpy as np
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer 
 
 
class StopWords(object):
    """Load a stopword list from a file and filter token lists against it.

    FIX: the original default argument (``stopwords_file=stopwords_file``)
    referenced an undefined global and raised NameError at class-definition
    time; the file was also opened without closing it and without an
    explicit encoding.  The default is now ``None`` ("no stopwords"), so
    the bare ``StopWords()`` call below keeps working.
    """

    def __init__(self, stopwords_file=None):
        # One stopword per line; the context manager closes the handle.
        if stopwords_file is None:
            self.stopwords = set()
        else:
            with open(stopwords_file, 'r', encoding='utf-8') as f:
                self.stopwords = {word.strip() for word in f}

    def del_stopwords(self, words):
        """Return *words* with every stopword removed (order preserved)."""
        return [word for word in words if word not in self.stopwords]
 
# Module-level stopword filter shared by _seg_word below.
# NOTE(review): StopWords() relies on its default stopwords_file argument,
# which references an undefined global at class-definition time in this
# file — confirm the intended stopwords file path.
stop_word = StopWords()
 
# 是否分词、  及其停用词语
def _seg_word(words_list, jieba_flag=True, del_stopword=False):
    if jieba_flag:
        word_list = [[stop_word.del_stopwords(words) if del_stopword else word for word in jieba.cut(words)] for words in words_list]
    else:
        word_list = [[stop_word.del_stopwords(words) if del_stopword else word for word in words] for words in words_list]
    return [ ' '.join(word) for word in word_list  ]
 
 
# Do not use sklearn's built-in stopword list; tokens are runs of one or
# more word characters (the default pattern would drop single characters).
vectorizer = CountVectorizer(stop_words=None, token_pattern=r'(?u)\b\w\w*\b')
transformer = TfidfTransformer()

# Fit the vocabulary and tf-idf weights on a tiny demo corpus.
corpus = ['我爱北京天安门', '你好,在干嘛呢', '这个什么价钱']
word_list = _seg_word(corpus)
dic = vectorizer.fit_transform(word_list)
tfidf = transformer.fit_transform(dic)
 
# Persist the fitted bag-of-words vocabulary and tf-idf weights to disk.
dic_path = './bow.model'
tfidf_model_path = 'tfidf.model'
with open(dic_path, 'wb') as model_file:
    pickle.dump(vectorizer, model_file)
with open(tfidf_model_path, 'wb') as model_file:
    pickle.dump(transformer, model_file)

# Reload both models (demonstrates the round-trip).
# NOTE: pickle.load is unsafe on untrusted files — only load models you wrote.
with open(dic_path, 'rb') as model_file:
    vectorizer = pickle.load(model_file)
with open(tfidf_model_path, 'rb') as model_file:
    transformer = pickle.load(model_file)
 
 
def _normalize(x):
   x /= (np.array(x)**2).sum(axis=1, keepdims=True)**0.5 
   return x
 
# Compute an L2-normalized tf-idf vector for each sentence.
# FIX: word1/word2 are already lists, so they are passed to _seg_word
# directly — the original wrapped them in another list (`_seg_word([word1])`),
# which fed a list (not a string) to jieba.cut and crashed.
word1 = ['你好,在干嘛呢']
tfidf_embedding1 = transformer.transform(vectorizer.transform(_seg_word(word1)))
tfidf_embedding1 = tfidf_embedding1.toarray().sum(axis=0)
tfidf_embedding1 = tfidf_embedding1[np.newaxis, :].astype(float)
tfidf_embedding1 = _normalize(tfidf_embedding1)

word2 = ['这个什么价钱']
tfidf_embedding2 = transformer.transform(vectorizer.transform(_seg_word(word2)))
tfidf_embedding2 = tfidf_embedding2.toarray().sum(axis=0)
tfidf_embedding2 = tfidf_embedding2[np.newaxis, :].astype(float)
tfidf_embedding2 = _normalize(tfidf_embedding2)

# Cosine similarity between the two sentence vectors: since both rows are
# unit-norm (or zero), the dot product is the cosine score.
score = np.dot(tfidf_embedding1[0], tfidf_embedding2[0])
 
原创文章 377 获赞 835 访问量 187万+

猜你喜欢

转载自blog.csdn.net/u014365862/article/details/105850457