[Text Semantic Similarity] BERT-based cosine distance (implemented with bert4keras)

Text similarity has important applications in question-answering systems, such as knowledge-based QA, document-based QA, and FAQ-based QA (community QA). In all of these, the incoming question must be matched against candidates by similarity, so that the closest and most appropriate answer can be selected. This section introduces computing similarity with a BERT-based cosine distance.

To learn about BERT itself, see: https://blog.csdn.net/u014365862/article/details/104412737
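
The idea: encode each sentence into a fixed-size vector, L2-normalize it, and score a pair of sentences by the dot product of the normalized vectors, i.e. their cosine similarity. A minimal standalone sketch of that scoring step (toy vectors, not the output of the model below):

import numpy as np

def cosine_similarity(a, b):
    # Cosine similarity = dot product of L2-normalized vectors
    a = a / np.linalg.norm(a)
    b = b / np.linalg.norm(b)
    return float(np.dot(a, b))

# Toy sentence vectors; in this post they come from an ALBERT encoder
v1 = np.array([0.2, 0.8, 0.1])
v2 = np.array([0.25, 0.7, 0.2])
print(cosine_similarity(v1, v2))  # close to 1.0 for similar sentences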

Training:


from keras.layers import *

from bert4keras.backend import keras, set_gelu
from bert4keras.bert import build_bert_model
from bert4keras.optimizers import Adam
from bert4keras.snippets import sequence_padding, DataGenerator
from bert4keras.tokenizer import Tokenizer
import pandas as pd
import numpy as np

set_gelu('tanh')  # switch to the tanh GeLU approximation

maxlen = 32
batch_size = 16
num_classes = 2
epochs = 20
learning_rate = 2e-5 


# Pretrained ALBERT tiny (Chinese) model files
config_path = 'albert_tiny_google_zh_489k/albert_config.json'
checkpoint_path = 'albert_tiny_google_zh_489k/albert_model.ckpt'
dict_path = 'albert_tiny_google_zh_489k/vocab.txt'


def load_data(filename):
    """Load a CSV whose rows are (text1, text2, label) into a list of tuples."""
    D = []
    data = pd.read_csv(filename)
    data.dropna(axis=0, how='any', inplace=True)  # drop rows with missing fields
    data = data.values.tolist()
    for per_data in data:
        D.append((per_data[0], per_data[1], int(per_data[2])))
    return D


# Load the dataset
train_val_data = load_data('train_data.csv')
# test_data = load_data('dev.csv')
# Inspect a sample
print('train>>>>', train_val_data[0])
print('train+validation size:', len(train_val_data))

# Shuffle the indices in place, then hold out every i % 5 == 1 example (~20%) for validation
random_order = list(range(len(train_val_data)))
np.random.shuffle(random_order)
train_data = [train_val_data[j] for i, j in enumerate(random_order) if i % 5 != 1]
valid_data = [train_val_data[j] for i, j in enumerate(random_order) if i % 5 == 1]
test_data = valid_data
print('train size:', len(train_data))
print('validation size:', len(valid_data))
print('test size:', len(test_data))



# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)


class data_generator(DataGenerator):
    """Data generator: yields padded, batched (token_ids, segment_ids) with labels.
    """

    def __iter__(self, random=False):
        idxs = list(range(len(self.data)))
        if random:
            np.random.shuffle(idxs)
        batch_token_ids, batch_segment_ids, batch_labels = [], [], []
        for i in idxs:
            text1, text2, label = self.data[i]
#             print(text1, text2, label)
            token_ids, segment_ids = tokenizer.encode(text1, text2, max_length=maxlen)
            batch_token_ids.append(token_ids)
            batch_segment_ids.append(segment_ids)
            batch_labels.append([label])
            if len(batch_token_ids) == self.batch_size or i == idxs[-1]:
                batch_token_ids = sequence_padding(batch_token_ids)
                batch_segment_ids = sequence_padding(batch_segment_ids)
                batch_labels = sequence_padding(batch_labels)
                yield [batch_token_ids, batch_segment_ids], batch_labels
                batch_token_ids, batch_segment_ids, batch_labels = [], [], []


# Load the pretrained model
bert = build_bert_model(
    model='albert', 
    config_path=config_path,
    checkpoint_path=checkpoint_path,
    with_pool=True,
    return_keras_model=False,
)
output = Dropout(rate=0.1)(bert.model.output)
output = Dense(units=num_classes,
               activation='softmax',
               kernel_initializer=bert.initializer)(output)

model = keras.models.Model(bert.model.input, output)
model.summary()

model.compile(
    loss='sparse_categorical_crossentropy',
    optimizer=Adam(learning_rate),  # use a sufficiently small learning rate
    # optimizer=PiecewiseLinearLearningRate(Adam(5e-5), {10000: 1, 30000: 0.1}),
    metrics=['accuracy'],
)

# Wrap the datasets in generators
train_generator = data_generator(train_data, batch_size)
valid_generator = data_generator(valid_data, batch_size)
test_generator = data_generator(test_data, batch_size)


def evaluate(data):
    """Compute classification accuracy over a data generator."""
    total, right = 0., 0.
    for x_true, y_true in data:
        y_pred = model.predict(x_true).argmax(axis=1)
        y_true = y_true[:, 0]
        total += len(y_true)
        right += (y_true == y_pred).sum()
    return right / total


class Evaluator(keras.callbacks.Callback):
    def __init__(self):
        super(Evaluator, self).__init__()
        self.best_val_acc = 0.

    def on_epoch_end(self, epoch, logs=None):
        val_acc = evaluate(valid_generator)
        if val_acc > self.best_val_acc:
            self.best_val_acc = val_acc
            model.save_weights('best_model.weights')
        test_acc = evaluate(test_generator)
        print(u'val_acc: %.5f, best_val_acc: %.5f, test_acc: %.5f\n'
              % (val_acc, self.best_val_acc, test_acc))


evaluator = Evaluator()
model.fit_generator(train_generator.forfit(),
                    steps_per_epoch=len(train_generator),
                    epochs=epochs,
                    callbacks=[evaluator])

model.load_weights('best_model.weights')
print(u'final test acc: %.5f\n' % (evaluate(test_generator)))
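

Prediction: the script below rebuilds the ALBERT encoder, loads the fine-tuned weights by name, and serves L2-normalized sentence vectors, so a dot product against a pre-encoded candidate list yields one cosine similarity per candidate.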


import numpy as np
import tensorflow as tf
from bert4keras.backend import keras, set_gelu
from bert4keras.bert import build_bert_model
from bert4keras.tokenizer import Tokenizer
from bert4keras.snippets import sequence_padding

set_gelu('tanh')

# Pretrained config/vocab, plus the fine-tuned weights saved by the Evaluator above
albert_config_path = 'albert_tiny_google_zh_489k/albert_config.json'
albert_dict_path = 'albert_tiny_google_zh_489k/vocab.txt'
albert_checkpoint_path = 'best_model.weights'

class ALBertEmbedding():
    '''Compute sentence vectors with ALBERT.
    '''
    def __init__(self, words_list=None,
                       config_path=albert_config_path,
                       checkpoint_path=albert_checkpoint_path,
                       dict_path=albert_dict_path):
        self.session = tf.Session()
        keras.backend.set_session(self.session)
        # Build the ALBERT graph only; the fine-tuned weights are loaded by name below
        self.bert = build_bert_model(
                     model='albert',
                     config_path=config_path,
                     with_pool=True,
                     return_keras_model=False,)
        self.encoder = keras.models.Model(self.bert.model.inputs, self.bert.model.outputs[0])
        self.encoder.load_weights(checkpoint_path, by_name=True)
        self.tokenizer = Tokenizer(dict_path, do_lower_case=True)
    
    def init(self, words_list=None, update=True):
        """Pre-encode the candidate sentence list into L2-normalized vectors."""
        token_ids_list, segment_ids_list = [], []
        for words in words_list:
            token_ids, segment_ids = self.tokenizer.encode(words)
            token_ids_list.append(token_ids)
            segment_ids_list.append(segment_ids)
        token_ids_list = sequence_padding(token_ids_list)
        segment_ids_list = sequence_padding(segment_ids_list)
        self.words_list_pre = self.encoder.predict([token_ids_list, segment_ids_list])
        self.words_list_pre = self._normalize(self.words_list_pre)
        return self

    def _normalize(self, x):
        # L2-normalize each row so that dot products equal cosine similarities
        x /= (np.array(x)**2).sum(axis=1, keepdims=True)**0.5
        return x
    
    # Normalized sentence vector for a single text
    def _predict(self, words):
        with self.session.as_default():
            with self.session.graph.as_default():
                token_ids, segment_ids = self.tokenizer.encode( words )
                pre = self.encoder.predict([np.array([token_ids]), np.array([segment_ids])])
                pre = self._normalize(pre)
        return pre
        
    # Sentence vector: encode the query and score it against the pre-encoded candidates
    def predict(self, words):
        with self.session.as_default():
            with self.session.graph.as_default():
                token_ids, segment_ids = self.tokenizer.encode(words)
                pre = self.encoder.predict([np.array([token_ids]), np.array([segment_ids])])
                pre = self._normalize(pre)
        return np.dot(self.words_list_pre, pre[0])  # one cosine similarity per candidate
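
A minimal usage sketch under the assumptions above (the candidate sentences and the query are made-up examples): pre-encode a candidate list with init, then predict returns one cosine score per candidate.

# Hypothetical example: rank made-up FAQ candidates against a query
candidates = ['怎么修改登录密码', '如何申请退货', '客服电话是多少']
emb = ALBertEmbedding().init(words_list=candidates)
scores = emb.predict('我想改一下密码')  # one cosine similarity per candidate
print(scores, '-> best match:', candidates[int(np.argmax(scores))])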
