TensorFlow Implementations of Generative Adversarial Networks (DCGAN, LSGAN, WGAN, WGAN-GP, SNGAN, RSGAN)

Copyright notice: this is an original post by the author and may not be reposted without permission. https://blog.csdn.net/Geoffrey_MT/article/details/81198504

Papers:

DCGAN: Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks

WGAN: Wasserstein GAN

WGAN-GP:  Improved Training of Wasserstein GANs

LSGAN:  Least Squares Generative Adversarial Networks

SNGAN:  Spectral Normalization for Generative Adversarial Networks

RSGAN:  The relativistic discriminator: a key element missing from standard GAN

The loss functions of each method are listed below:

DCGAN:
    L_D = -E_{x\sim p_{data}(x)}[\log D(x)] - E_{z\sim p(z)}[\log(1 - D(G(z)))]
    L_G = -E_{z\sim p(z)}[\log D(G(z))]

WGAN:
    L_D = -E_{x\sim p_{data}(x)}[D(x)] + E_{z\sim p(z)}[D(G(z))]
    L_G = -E_{z\sim p(z)}[D(G(z))]

WGAN-GP:
    L_D = -E_{x\sim p_{data}(x)}[D(x)] + E_{z\sim p(z)}[D(G(z))] + \lambda\, E_{\hat{x}}[(\|\nabla_{\hat{x}} D(\hat{x})\|_{2} - 1)^{2}]
    L_G = -E_{z\sim p(z)}[D(G(z))]

LSGAN:
    L_D = E_{x\sim p_{data}(x)}[0.5\,(D(x) - 1)^{2}] + E_{z\sim p(z)}[0.5\,D(G(z))^{2}]
    L_G = E_{z\sim p(z)}[0.5\,(D(G(z)) - 1)^{2}]

SNGAN (same losses as DCGAN; spectral normalization is applied to the discriminator weights):
    L_D = -E_{x\sim p_{data}(x)}[\log D(x)] - E_{z\sim p(z)}[\log(1 - D(G(z)))]
    L_G = -E_{z\sim p(z)}[\log D(G(z))]

RSGAN:
    L_D = -E_{(x_{r}, x_{f})\sim(P, Q)}[\log(\mathrm{sigmoid}(D(x_{r}) - D(x_{f})))]
    L_G = -E_{(x_{r}, x_{f})\sim(P, Q)}[\log(\mathrm{sigmoid}(D(x_{f}) - D(x_{r})))]

Here x_{r} = x is a real sample and x_{f} = G(z) is a generated sample.
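In the WGAN-GP discriminator loss, the gradient penalty is evaluated at random interpolates between real and generated samples. Matching the code below (which uses penalty weight \lambda = 10), the interpolate is

    \hat{x} = \epsilon x + (1 - \epsilon) G(z), \qquad \epsilon \sim U[0, 1]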

Samples generated by the different GANs after various numbers of training iterations are compared in the results figure of the original post.

For the complete code, please see my GitHub:

https://github.com/MingtaoGuo/DCGAN_WGAN_WGAN-GP_LSGAN_SNGAN_RSGAN_RaSGAN_TensorFlow

import tensorflow as tf

# batchsize, img_H, img_W, img_C, epsilon and GAN_type are module-level settings;
# the Discriminator and Generator classes (DCGAN-style architectures) are defined
# elsewhere in the repo.
class GAN:
    # The generator and discriminator architectures follow DCGAN.
    def __init__(self):
        self.Z = tf.placeholder("float", [batchsize, 100])
        self.img = tf.placeholder("float", [batchsize, img_H, img_W, img_C])
        D = Discriminator("discriminator")
        G = Generator("generator")
        self.fake_img = G(self.Z)
        if GAN_type == "DCGAN":
            #DCGAN, paper: UNSUPERVISED REPRESENTATION LEARNING WITH DEEP CONVOLUTIONAL GENERATIVE ADVERSARIAL NETWORKS
            self.fake_logit = tf.nn.sigmoid(D(self.fake_img))
            self.real_logit = tf.nn.sigmoid(D(self.img, reuse=True))
            self.d_loss = - (tf.reduce_mean(tf.log(self.real_logit + epsilon)) + tf.reduce_mean(tf.log(1 - self.fake_logit + epsilon)))
            self.g_loss = - tf.reduce_mean(tf.log(self.fake_logit + epsilon))
            self.opt_D = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.d_loss, var_list=D.var)
            self.opt_G = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.g_loss, var_list=G.var)
        elif GAN_type == "WGAN":
            #WGAN, paper: Wasserstein GAN
            self.fake_logit = D(self.fake_img)
            self.real_logit = D(self.img, reuse=True)
            self.d_loss = -tf.reduce_mean(self.real_logit) + tf.reduce_mean(self.fake_logit)
            self.g_loss = -tf.reduce_mean(self.fake_logit)
            # Weight-clipping ops for the Lipschitz constraint; tf.assign is needed so
            # that running these ops actually updates the discriminator variables.
            self.clip = []
            for var in D.var:
                self.clip.append(tf.assign(var, tf.clip_by_value(var, -0.01, 0.01)))
            self.opt_D = tf.train.RMSPropOptimizer(5e-5).minimize(self.d_loss, var_list=D.var)
            self.opt_G = tf.train.RMSPropOptimizer(5e-5).minimize(self.g_loss, var_list=G.var)
        elif GAN_type == "WGAN-GP":
            #WGAN-GP, paper: Improved Training of Wasserstein GANs
            self.fake_logit = D(self.fake_img)
            self.real_logit = D(self.img, reuse=True)
            e = tf.random_uniform([batchsize, 1, 1, 1], 0, 1)
            x_hat = e * self.img + (1 - e) * self.fake_img
            grad = tf.gradients(D(x_hat, reuse=True), x_hat)[0]
            self.d_loss = tf.reduce_mean(self.fake_logit - self.real_logit) + 10 * tf.reduce_mean(tf.square(tf.sqrt(tf.reduce_sum(tf.square(grad), axis=[1, 2, 3])) - 1))
            self.g_loss = tf.reduce_mean(-self.fake_logit)
            self.opt_D = tf.train.AdamOptimizer(1e-4, beta1=0., beta2=0.9).minimize(self.d_loss, var_list=D.var)
            self.opt_G = tf.train.AdamOptimizer(1e-4, beta1=0., beta2=0.9).minimize(self.g_loss, var_list=G.var)
        elif GAN_type == "LSGAN":
            #LSGAN, paper: Least Squares Generative Adversarial Networks
            self.fake_logit = D(self.fake_img)
            self.real_logit = D(self.img, reuse=True)
            self.d_loss = tf.reduce_mean(0.5 * tf.square(self.real_logit - 1) + 0.5 * tf.square(self.fake_logit))
            self.g_loss = tf.reduce_mean(0.5 * tf.square(self.fake_logit - 1))
            self.opt_D = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.d_loss, var_list=D.var)
            self.opt_G = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.g_loss, var_list=G.var)
        elif GAN_type == "SNGAN":
            #SNGAN, paper: SPECTRAL NORMALIZATION FOR GENERATIVE ADVERSARIAL NETWORKS
            self.fake_logit = tf.nn.sigmoid(D(self.fake_img, is_sn=True))
            self.real_logit = tf.nn.sigmoid(D(self.img, reuse=True, is_sn=True))
            self.d_loss = - (tf.reduce_mean(tf.log(self.real_logit + epsilon) + tf.log(1 - self.fake_logit + epsilon)))
            self.g_loss = - tf.reduce_mean(tf.log(self.fake_logit + epsilon))
            self.opt_D = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.d_loss, var_list=D.var)
            self.opt_G = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.g_loss, var_list=G.var)
        elif GAN_type == "RSGAN":
            # RSGAN, paper: The relativistic discriminator: a key element missing from standard GAN
            self.fake_logit = D(self.fake_img)
            self.real_logit = D(self.img, reuse=True)
            self.d_loss = - tf.reduce_mean(tf.log(tf.nn.sigmoid(self.real_logit - self.fake_logit) + epsilon))
            self.g_loss = - tf.reduce_mean(tf.log(tf.nn.sigmoid(self.fake_logit - self.real_logit) + epsilon))
            self.opt_D = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.d_loss, var_list=D.var)
            self.opt_G = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.g_loss, var_list=G.var)
        elif GAN_type == "RaHingeGAN":
            self.fake_logit = D(self.fake_img)
            self.real_logit = D(self.img, reuse=True)
            d_tiled_r = self.real_logit - tf.reduce_mean(self.fake_logit, 0)
            d_tiled_f = self.fake_logit - tf.reduce_mean(self.real_logit, 0)
            self.d_loss = tf.reduce_mean(tf.maximum(0., 1. - d_tiled_r)) + tf.reduce_mean(tf.maximum(0., 1. + d_tiled_f))
            self.g_loss = tf.reduce_mean(tf.maximum(0., 1. - d_tiled_f)) + tf.reduce_mean(tf.maximum(0., 1. + d_tiled_r))
            self.opt_D = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.d_loss, var_list=D.var)
            self.opt_G = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.g_loss, var_list=G.var)
        elif GAN_type == "RSGAN-GP":
            self.fake_logit = D(self.fake_img)
            self.real_logit = D(self.img, reuse=True)
            e = tf.random_uniform([batchsize, 1, 1, 1], 0, 1)
            x_hat = e * self.img + (1 - e) * self.fake_img
            grad = tf.gradients(D(x_hat, reuse=True), x_hat)[0]
            self.d_loss = - tf.reduce_mean(tf.log(tf.nn.sigmoid(self.real_logit - self.fake_logit) + epsilon)) + 10 * tf.reduce_mean(tf.square(tf.sqrt(tf.reduce_sum(tf.square(grad), axis=[1, 2, 3])) - 1))
            self.g_loss = - tf.reduce_mean(tf.log(tf.nn.sigmoid(self.fake_logit - self.real_logit) + epsilon))
            self.opt_D = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.d_loss, var_list=D.var)
            self.opt_G = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.g_loss, var_list=G.var)
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
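
The class above only builds the training graph; the actual training loop lives elsewhere in the repo. As a rough illustration of how the class would be driven (a minimal sketch, not the repo's exact code), assuming a hypothetical batch-loading helper get_batch and the module-level batchsize and GAN_type settings:

import numpy as np

def train(gan, iterations=20000, n_critic=1):
    # n_critic is typically set to 5 for WGAN / WGAN-GP (several critic updates per generator update).
    for step in range(iterations):
        for _ in range(n_critic):
            z = np.random.uniform(-1, 1, [batchsize, 100])
            imgs = get_batch(batchsize)  # hypothetical loader returning [batchsize, img_H, img_W, img_C]
            gan.sess.run(gan.opt_D, feed_dict={gan.Z: z, gan.img: imgs})
            if GAN_type == "WGAN":
                gan.sess.run(gan.clip)  # enforce weight clipping after each critic update
        # Real images are still fed for the generator step because the relativistic losses use them.
        z = np.random.uniform(-1, 1, [batchsize, 100])
        gan.sess.run(gan.opt_G, feed_dict={gan.Z: z, gan.img: imgs})
        if step % 100 == 0:
            d_l, g_l = gan.sess.run([gan.d_loss, gan.g_loss], feed_dict={gan.Z: z, gan.img: imgs})
            print("step %d, d_loss: %.4f, g_loss: %.4f" % (step, d_l, g_l))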

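For the SNGAN branch, D(..., is_sn=True) assumes the discriminator divides each weight matrix by an estimate of its largest singular value. That logic lives inside the Discriminator class in the repo; a generic TF1 power-iteration version (an illustrative sketch, not necessarily the repo's exact code) looks like this:

def spectral_norm(w, name, n_iters=1):
    # Normalize weight tensor w by an estimate of its largest singular value (power iteration).
    w_shape = w.shape.as_list()
    w_mat = tf.reshape(w, [-1, w_shape[-1]])  # flatten to [prod(other dims), out_channels]
    u = tf.get_variable(name + "_u", [1, w_shape[-1]],
                        initializer=tf.random_normal_initializer(), trainable=False)
    u_hat = u
    for _ in range(n_iters):
        v_hat = tf.nn.l2_normalize(tf.matmul(u_hat, tf.transpose(w_mat)), 1)
        u_hat = tf.nn.l2_normalize(tf.matmul(v_hat, w_mat), 1)
    sigma = tf.matmul(tf.matmul(v_hat, w_mat), tf.transpose(u_hat))  # shape [1, 1]
    with tf.control_dependencies([u.assign(u_hat)]):  # persist the power-iteration state across steps
        w_norm = tf.reshape(w_mat / sigma, w_shape)
    return w_norm

Each convolutional or dense layer in the discriminator would then use spectral_norm(W, layer_name) in place of W whenever is_sn is True.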