Variational Autoencoder (VAE) in Practice

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data',one_hot=True)
import matplotlib.pyplot as plt
import numpy as np

class Net:
    def __init__(self):
        self.x = tf.placeholder(dtype=tf.float32,shape=[None,784])# placeholder for the real samples to imitate
        # build one encoder and one decoder
        self.encode = Encoder()
        self.decode = Decoder()
    """
    在前向计算中,需要获得样本的真实分布的均值和方差
    然后生成一些随机数来使得其分布尽量接近样本的分布
    """
    def forward(self):
        # use the encoder to get the mean and the log-variance of the sample distribution
        self.mean,self.logVar = self.encode.forward(self.x)
        # recover the variance and the standard deviation
        self.Var = tf.exp(self.logVar)
        self.std = tf.sqrt(self.Var)
        # draw standard-normal random noise
        normal_y = tf.random_normal(shape=tf.shape(self.mean))# one noise vector per sample; the latent size 128 is a hyperparameter fixed by the encoder's output
        y = self.mean+self.std*normal_y# reparameterization: add noise to the sample mean, scaled by the sample's standard deviation
        # decode the sampled latent vector to get the output
        self.output = self.decode.forward(y)
    """
    得到两个损失,分别是输出损失和KL损失
    然后再将两个损失相加得到总的损失,将总的损失拿过去优化
    """
    def backward(self):
        out_loss = tf.reduce_mean((self.output-self.x)**2)# reconstruction loss (MSE)
        kl_loss = tf.reduce_mean(0.5*(-self.logVar+self.mean**2+self.Var-1))# KL(N(mean,var) || N(0,1)) = (-log(var)+mean^2+var-1)/2
        self.loss = out_loss+kl_loss
        self.opt = tf.train.AdamOptimizer().minimize(self.loss)
    """用于生成随机数带进生成网络进行测试看看效果好坏"""
    def Decode(self):
        normal_x = tf.random_normal(shape=[1,128])# [1,128] instead of [None,128]: generate a single image to inspect how well the network has trained
        return self.decode.forward(normal_x)
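The kl_loss in backward is the closed-form KL divergence between the encoder's Gaussian N(mean, var) and the standard normal N(0, 1), i.e. KL = (-log(var) + mean^2 + var - 1) / 2. As a sanity check, here is a minimal NumPy sketch (not part of the original script; mu and var are illustrative toy values) that compares the closed form against a Monte Carlo estimate:

import numpy as np

# Hypothetical sanity check for the closed-form KL term in Net.backward().
mu, var = 0.5, 2.0
closed_form = 0.5 * (-np.log(var) + mu**2 + var - 1)

# Monte Carlo estimate: draw z ~ N(mu, var) and average log q(z) - log p(z).
z = np.random.normal(mu, np.sqrt(var), size=1000000)
log_q = -0.5 * (np.log(2 * np.pi * var) + (z - mu)**2 / var)
log_p = -0.5 * (np.log(2 * np.pi) + z**2)
mc_estimate = np.mean(log_q - log_p)

print(closed_form, mc_estimate)  # the two numbers should roughly agree

For a large enough sample the two values agree to a few decimal places, which is a quick way to convince yourself the formula in the comment is right.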
class Encoder:
    def __init__(self):
        self.w = tf.Variable(tf.truncated_normal(dtype=tf.float32,shape=[784,100],stddev=0.1))
        self.b = tf.Variable(tf.zeros([100]))
        self.logvar_w = tf.Variable(tf.random_normal(dtype=tf.float32,shape=[100,128],stddev=0.1))
        self.mean_w = tf.Variable(tf.random_normal(dtype=tf.float32,shape=[100,128],stddev=0.1))
    def forward(self,x):
        # x holds the real samples, shape [None,784]; we need their mean and log-variance
        y = tf.nn.relu(tf.matmul(x,self.w)+self.b)# [None,100]
        mean = tf.matmul(y,self.mean_w)# [None,128]
        logvar = tf.matmul(y,self.logvar_w)
        return mean,logvar
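These two heads feed the reparameterization step in Net.forward: rather than sampling z directly from N(mean, var), which would block gradients from reaching the encoder, we draw noise from N(0, 1) and then shift and scale it. A minimal standalone sketch of that trick in TF 1.x (the toy tensors below are hypothetical, standing in for the real encoder outputs):

import tensorflow as tf

# Hypothetical toy tensors standing in for the encoder's two heads.
mean = tf.constant([[0.0, 1.0]])
logvar = tf.constant([[0.0, 0.5]])
std = tf.sqrt(tf.exp(logvar))
eps = tf.random_normal(shape=tf.shape(mean))  # eps ~ N(0, 1), no trainable dependency
z = mean + std * eps                          # z ~ N(mean, std^2)

with tf.Session() as sess:
    print(sess.run(z))

Because z is an affine function of mean and std, gradients of the loss flow back into the encoder weights even though the sample itself is random.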
class Decoder:
    # task: map [None,128] latent vectors back to [None,784] images
    def __init__(self):
        self.w = tf.Variable(tf.random_normal(dtype=tf.float32,shape=[128,100],stddev=0.1))
        self.b = tf.Variable(tf.zeros([100]))
        self.out_w = tf.Variable(tf.random_normal(dtype=tf.float32,shape=[100,784],stddev=0.1))
    def forward(self,x):
        y = tf.nn.relu(tf.matmul(x,self.w)+self.b)
        return tf.matmul(y,self.out_w)
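Note that the decoder's output layer is linear and therefore unbounded, while MNIST pixels lie in [0, 1]. A common variant (not what the original code does) squashes the output with a sigmoid so reconstructions stay in the pixel range; a sketch of that alternative forward:

    # Hypothetical variant of Decoder.forward with a bounded output;
    # the original returns the raw linear layer instead.
    def forward(self,x):
        y = tf.nn.relu(tf.matmul(x,self.w)+self.b)
        return tf.nn.sigmoid(tf.matmul(y,self.out_w))# pixel values in (0, 1)

With a sigmoid output one would typically pair it with a cross-entropy reconstruction loss, though the plain MSE used in backward also works.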
if __name__ == '__main__':
    net = Net()
    net.forward()
    net.backward()
    init = tf.global_variables_initializer()
    test = net.Decode()
    with tf.Session() as sess:
        sess.run(init)
        plt.ion()
        for i in range(10000):
            x,_ = mnist.train.next_batch(100)
            loss,_,out = sess.run([net.loss,net.opt,net.output],feed_dict={net.x:x})
            # every 100 steps, run a test to inspect the generated image
            if i%100 == 0:
                print(loss)
                test_img = sess.run(test)
                test_img = np.reshape(test_img,[28,28])
                plt.imshow(test_img)
                plt.pause(0.1)
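After training, one can also decode several random latent vectors at once and tile the results into a grid of generated digits. A hypothetical extension (the 4x4 layout and variable names below are illustrative, not from the original script), placed inside the with tf.Session() block after the training loop:

        # Hypothetical post-training extension: decode 16 random latent
        # vectors and tile them into a 4x4 grid of generated digits.
        samples = sess.run(net.decode.forward(tf.random_normal(shape=[16,128])))
        samples = np.reshape(samples,[4,4,28,28])
        grid = np.vstack([np.hstack(row) for row in samples])
        plt.ioff()
        plt.imshow(grid,cmap='gray')
        plt.show()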


Reprinted from blog.csdn.net/weixin_38241876/article/details/85600995