Deep AutoEncoder and CDL Code

Training Deep AutoEncoders for Collaborative Filtering (2017)

The core idea of the Deep AutoEncoder is to train the network with backpropagation to minimize the error between the input and the reconstructed output; the reconstruction then serves as the predicted ratings for movies the user has not yet rated.
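In symbols: the network learns f(x) = decode(encode(x)) on each user's rating vector x by minimizing the reconstruction error ||x - f(x)||^2; the reconstructed values at the unrated positions become the predictions.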

import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import tensorflow as tf
# Load the ratings file as (user, item, rating, timestamp); adjust sep to match your file's delimiter
df = pd.read_csv('ratings.dat', sep='\t', names=['user', 'item', 'rating', 'timestamp'], header=None)
num_items = df.item.nunique()
num_users = df.user.nunique()
print("USERS: {} ITEMS: {}".format(num_users, num_items))

# Normalize the ratings into [0, 1]
from sklearn import preprocessing
r = df['rating'].values.astype(float)
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(r.reshape(-1,1))
df['rating'] = pd.DataFrame(x_scaled)
# Pivot the DataFrame into a user-item rating matrix; unrated entries become NaN
matrix = df.pivot(index='user', columns='item', values='rating')
matrix.fillna(0, inplace=True)

users = matrix.index.tolist()
items = matrix.columns.tolist()
matrix = matrix.values  # convert the DataFrame to a NumPy array
# Network dimensions
num_input = num_items
num_hidden_1 = 10
num_hidden_2 = 5
# Initialize the weight and bias variables for the hidden layers
weights = {
    'encoder_h1': tf.Variable(tf.random_normal([num_input, num_hidden_1], dtype=tf.float64)),
    'encoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_hidden_2], dtype=tf.float64)),
    'decoder_h1': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_1], dtype=tf.float64)),
    'decoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_input], dtype=tf.float64)),
}

biases = {
    'encoder_b1': tf.Variable(tf.random_normal([num_hidden_1], dtype=tf.float64)),
    'encoder_b2': tf.Variable(tf.random_normal([num_hidden_2], dtype=tf.float64)),
    'decoder_b1': tf.Variable(tf.random_normal([num_hidden_1], dtype=tf.float64)),
    'decoder_b2': tf.Variable(tf.random_normal([num_input], dtype=tf.float64)),
}
# Build the encoder and decoder
def encoder(x):
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))  # relu(x @ W + b)
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, weights['encoder_h2']), biases['encoder_b2']))
    return layer_2

def decoder(x):
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
    layer_2 =  tf.nn.relu(tf.add(tf.matmul(layer_1, weights['decoder_h2']), biases['decoder_b2']))
    return layer_2

X = tf.placeholder(tf.float64, [None, num_input])
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
y_pred = decoder_op
y_true = X
loss = tf.losses.mean_squared_error(y_true, y_pred)
optimizer = tf.train.RMSPropOptimizer(0.03).minimize(loss)
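
Note: tf.losses.mean_squared_error above is computed over every cell of the matrix, including the zero-filled unrated entries, whereas the 2017 paper optimizes a masked MSE over observed ratings only. A minimal masked variant in the same TF1 style (this masking code is a sketch added for illustration, not part of the original script):

# Masked MSE: only observed (non-zero) ratings contribute to the loss
mask = tf.cast(tf.not_equal(X, 0), tf.float64)
masked_loss = tf.reduce_sum(tf.square((y_pred - X) * mask)) / tf.reduce_sum(mask)
# optimizer = tf.train.RMSPropOptimizer(0.03).minimize(masked_loss)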
# Precision metric placeholders (defined here but not used in the training loop below)
eval_x = tf.placeholder(tf.int32)
eval_y = tf.placeholder(tf.int32)
pre, pre_op = tf.metrics.precision(labels=eval_x, predictions=eval_y)

init = tf.global_variables_initializer()
local_init = tf.local_variables_initializer()

predictions = pd.DataFrame()
with tf.Session() as session:
    epochs = 100
    batch_size = 250

    session.run(init)
    session.run(local_init)

    num_batches = int(matrix.shape[0] / batch_size)
    matrix = np.array_split(matrix, num_batches)

    for i in range(epochs):

        avg_cost = 0

        for batch in matrix:
            _, l = session.run([optimizer, loss], feed_dict={X: batch})
            avg_cost += l

        avg_cost /= num_batches

        print("Epoch: {} Loss: {}".format(i + 1, avg_cost))

    print("Predictions...")

    matrix = np.concatenate(matrix, axis=0)

    preds = session.run(decoder_op, feed_dict={X: matrix})

    predictions = pd.concat([predictions, pd.DataFrame(preds)])  # collect the reconstructed rating matrix

    predictions = predictions.stack().reset_index(name='rating')
    predictions.columns = ['user', 'item', 'rating']
    predictions['user'] = predictions['user'].map(lambda value: users[value])
    predictions['item'] = predictions['item'].map(lambda value: items[value])
# Top-10 recommendations (user 10 as an example)
df = pd.read_csv('ratings.dat', sep='\t', names=['user', 'item', 'rating', 'timestamp'], header=None)
rateditem = list(df[df['user'] == 10]['item'])
df2 = predictions[predictions['user'] == 10]
df2 = df2.sort_values('rating', ascending=False)  # sort a copy; avoids the inplace-on-slice warning
result = []
count = 0
for i in df2['item']:
    if i not in rateditem:
        result.append(i)
        count += 1
        if count == 10:
            break
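
The same filter can also be written as a single pandas expression; this is an equivalent sketch of the loop above:

# Top-10 unseen items for user 10, vectorized
user_preds = predictions[predictions['user'] == 10]
result = user_preds[~user_preds['item'].isin(rateditem)].nlargest(10, 'rating')['item'].tolist()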

CDL (Collaborative Deep Learning): A Deep-Learning-Based Recommender System

  Key points of the paper

(1).   CDL extracts deep features from item content and captures the similarity between items (or users); this kind of representation learning is useful beyond recommendation.

(2).   The learning target is not simple classification or reconstruction: the paper uses a probabilistic framework to optimize a more complex objective that couples the autoencoder with collaborative filtering (see the objective sketched after this list).

(3).   Inference uses maximum a posteriori (MAP) estimation, sampling for the fully Bayesian treatment of CDL, and a Bayesian generalization of backpropagation.

(4).   A Bayesian model is used to bridge deep learning (DL) and recommender systems (RS).
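
For reference, the MAP objective minimized in the CDL paper (Wang, Wang & Yeung, KDD 2015) couples matrix factorization with a stacked denoising autoencoder. With $u_i$ the user vectors, $v_j$ the item vectors, $X_0$/$X_c$ the noisy/clean item content, $f_e$ the encoder, $f_r$ the full autoencoder, and $C_{ij}$ confidence weights:

$$
\min \; \frac{\lambda_u}{2}\sum_i \|u_i\|_2^2
+ \frac{\lambda_w}{2}\sum_l \big(\|W_l\|_F^2 + \|b_l\|_2^2\big)
+ \frac{\lambda_v}{2}\sum_j \|v_j - f_e(X_{0,j*})\|_2^2
+ \frac{\lambda_n}{2}\sum_j \|f_r(X_{0,j*}) - X_{c,j*}\|_2^2
+ \sum_{i,j}\frac{C_{ij}}{2}\big(R_{ij} - u_i^{T} v_j\big)^2
$$

The Keras code below mirrors these terms: lamda_u and lamda_v appear as embedding regularizers, lamda_w as the autoencoder weight regularizer, and lamda_n as the weight on the reconstruction loss.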
 

import numpy as np
from keras.layers import Input, Dense, Dropout, GaussianNoise, Embedding, Flatten, Add, Dot
from keras.models import Model
from keras.regularizers import l2
from keras.initializers import RandomNormal
from keras.optimizers import RMSprop

class CollaborativeDeepLearning:
    
    def __init__(self, item_mat, hidden_layers):
        '''
        item_mat: item feature matrix, shape (# of items, # of item features)
        hidden_layers: a list of three integers [item_feat_dim, hidden_dim, embedding_dim]
                       giving the layer sizes of the stacked autoencoder
        '''
        assert(len(hidden_layers)==3)
        self.item_mat = item_mat
        self.hidden_layers = hidden_layers
        self.item_dim = hidden_layers[0]
        self.embedding_dim = hidden_layers[-1]
        
    def pretrain(self, lamda_w=0.1, encoder_noise=0.1, dropout_rate=0.1, activation='sigmoid', batch_size=64, epochs=10):
        '''
        layer-wise pretraining on item features (item_mat)
        '''
        self.trained_encoders = []
        self.trained_decoders = []
        X_train = self.item_mat
        
        for input_dim, hidden_dim in zip(self.hidden_layers[:-1], self.hidden_layers[1:]):
            
            pretrain_input = Input(shape=(input_dim,))
            encoded = GaussianNoise(stddev=encoder_noise)(pretrain_input)
            encoded = Dropout(dropout_rate)(encoded)
            encoder = Dense(hidden_dim, activation=activation, kernel_regularizer=l2(lamda_w), bias_regularizer=l2(lamda_w))(encoded)
            decoder = Dense(input_dim, activation=activation, kernel_regularizer=l2(lamda_w), bias_regularizer=l2(lamda_w))(encoder)
            
            # autoencoder
            ae = Model(inputs=pretrain_input, outputs=decoder)
            
            # encoder
            ae_encoder = Model(inputs=pretrain_input, outputs=encoder)
            
            # decoder
            encoded_input = Input(shape=(hidden_dim,))
            
            decoder_layer = ae.layers[-1] # the last layer
            ae_decoder = Model(encoded_input, decoder_layer(encoded_input))

            ae.compile(loss='mse', optimizer='rmsprop')
            ae.fit(X_train, X_train, batch_size=batch_size, epochs=epochs, verbose=2)

            self.trained_encoders.append(ae_encoder)
            self.trained_decoders.append(ae_decoder)
            # greedy layer-wise pretraining: this layer's encoding becomes the next layer's input
            X_train = ae_encoder.predict(X_train)

    def fineture(self, train_mat, test_mat, lamda_u=0.1, lamda_v=0.1, lamda_n=0.1, lr=0.001, batch_size=64, epochs=10):
        '''
        Fine-tuning with rating prediction
        '''
        num_user = int( max(train_mat[:,0].max(), test_mat[:,0].max()) + 1 )
        num_item = int( max(train_mat[:,1].max(), test_mat[:,1].max()) + 1 )

        # item autoencoder 
        itemfeat_InputLayer = Input(shape=(self.item_dim,), name='item_feat_input')
        encoded = self.trained_encoders[0](itemfeat_InputLayer)
        encoded = self.trained_encoders[1](encoded)
        decoded = self.trained_decoders[1](encoded)
        decoded = self.trained_decoders[0](decoded)

        # user embedding
        user_InputLayer = Input(shape=(1,), dtype='int32', name='user_input')
        user_EmbeddingLayer = Embedding(input_dim=num_user, output_dim=self.embedding_dim,
                                        input_length=1, name='user_embedding', 
                                        embeddings_regularizer=l2(lamda_u),
                                        embeddings_initializer=RandomNormal(mean=0, stddev=1))(user_InputLayer)
        user_EmbeddingLayer = Flatten(name='user_flatten')(user_EmbeddingLayer)

        # item embedding
        item_InputLayer = Input(shape=(1,), dtype='int32', name='item_input')
        item_OffsetVector = Embedding(input_dim=num_item, output_dim=self.embedding_dim, 
                                      input_length=1, name='item_offset_vector',
                                      embeddings_regularizer=l2(lamda_v), 
                                      embeddings_initializer=RandomNormal(mean=0, stddev=1))(item_InputLayer)
        item_OffsetVector = Flatten(name='item_flatten')(item_OffsetVector)
        item_EmbeddingLayer = Add()([encoded, item_OffsetVector]) 
        
        # rating prediction: dot product of user and item latent vectors
        dotLayer = Dot(axes=-1, name='dot_layer')([user_EmbeddingLayer, item_EmbeddingLayer])

        # keep handles so build() can later assemble a content-only prediction model
        self.user_InputLayer, self.itemfeat_InputLayer = user_InputLayer, itemfeat_InputLayer
        self.user_EmbeddingLayer, self.encoded = user_EmbeddingLayer, encoded

        # joint loss: rating MSE plus a lamda_n-weighted reconstruction MSE
        self.cdl_model = Model(inputs=[user_InputLayer, item_InputLayer, itemfeat_InputLayer], outputs=[dotLayer, decoded])
        self.cdl_model.compile(optimizer=RMSprop(lr=lr), loss=['mse', 'mse'], loss_weights=[1, lamda_n])

        train_user, train_item, train_item_feat, train_label = self.matrix2input(train_mat)
        test_user, test_item, test_item_feat, test_label = self.matrix2input(test_mat)

        model_history = self.cdl_model.fit([train_user, train_item, train_item_feat], 
                                           [train_label, train_item_feat], 
                                           epochs=epochs, batch_size=batch_size, 
                                           validation_data=([test_user, test_item, test_item_feat], [test_label, test_item_feat]))
        return model_history

    def matrix2input(self, rating_mat):
        # split a (user, item, rating) array into the model's input arrays
        train_user = rating_mat[:, 0].reshape(-1, 1).astype(int)
        train_item = rating_mat[:, 1].reshape(-1, 1).astype(int)
        train_label = rating_mat[:, 2].reshape(-1, 1)
        train_item_feat = self.item_mat[train_item.flatten()]  # look up each item's feature row
        return train_user, train_item, train_item_feat, train_label
    
    def build(self):
        # content-only rating prediction (e.g. for cold-start items): dot the user
        # embedding with the encoder output; requires fineture() to have been called first
        prediction_layer = Dot(axes=-1, name='prediction_layer')([self.user_EmbeddingLayer, self.encoded])
        self.model = Model(inputs=[self.user_InputLayer, self.itemfeat_InputLayer], outputs=[prediction_layer])
        
    def getRMSE(self, test_mat):
        test_user, test_item, test_item_feat, test_label = self.matrix2input(test_mat)
        pred_out = self.cdl_model.predict([test_user, test_item, test_item_feat])
        return np.sqrt(np.mean(np.square(test_label.flatten() - pred_out[0].flatten())))

Running the code

# To run (assumes item_mat, item_feat_dim, train_mat, and test_mat are already prepared; see the sketch below)
model = CollaborativeDeepLearning(item_mat, [item_feat_dim, 16, 8])
model.pretrain(lamda_w=0.001, encoder_noise=0.3, epochs=10)
model_history = model.fineture(train_mat, test_mat, lamda_u=0.01, lamda_v=0.1, lamda_n=0.1, lr=0.01, epochs=4)
model.getRMSE(test_mat)
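
The run above assumes item_mat, item_feat_dim, train_mat, and test_mat already exist. A minimal, hypothetical preparation sketch (the 80/20 split and the random stand-in for item content features are illustrative assumptions, not part of the original post):

import numpy as np

# Rating triples (user_id, item_id, rating); ids are assumed to start at 0
ratings = df[['user', 'item', 'rating']].values.astype(float)
np.random.shuffle(ratings)
split = int(0.8 * len(ratings))
train_mat, test_mat = ratings[:split], ratings[split:]

# Item content features, one row per item id; replace the random matrix
# with real bag-of-words (or similar) features in practice
item_feat_dim = 100
num_items = int(ratings[:, 1].max()) + 1
item_mat = np.random.rand(num_items, item_feat_dim)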


Reposted from blog.csdn.net/serenysdfg/article/details/104564567