Implementing an MNIST Map with an Autoencoder

  Autoencoder structure: Input -> Encoder -> Embedding -> Decoder -> Output

  The first half compresses the input data into a low-dimensional encoded vector; this part is called the encoder because it produces a low-dimensional embedding (encoding). The second half is not a feed-forward network that maps the embedding to a label; instead it is the inverse of the encoder and reconstructs the original input. This part is called the decoder.

  An autoencoder is a neural network similar to PCA: it is an unsupervised learning method whose target output is its own input.
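
  To make the PCA analogy concrete, the short sketch below (not part of the original post, and assuming scikit-learn is installed) projects the MNIST test images to two dimensions with a linear PCA; the autoencoder built later in this post learns a nonlinear version of the same 2-D compression.

# Illustrative only: a linear 2-D projection of MNIST with PCA, for comparison
from sklearn.decomposition import PCA
from torchvision import datasets, transforms

mnist = datasets.MNIST(root='./data', train=False, download=True,
                       transform=transforms.ToTensor())
X = mnist.data.view(len(mnist), -1).numpy() / 255.0    # (10000, 784), values in [0, 1]
pca_codes = PCA(n_components=2).fit_transform(X)        # linear 2-D embedding
print(pca_codes.shape)                                   # (10000, 2)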

### Load libraries and configuration parameters ###
import os
import pdb
import torch
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
from torchvision.utils import save_image
from torchvision import datasets
import matplotlib.pyplot as plt

# Configuration parameters 
torch.manual_seed(1)  # set the random seed to ensure reproducible results
batch_size = 128
learning_rate = 1e-2
num_epochs = 10

# Download the dataset and preprocess it
train_dataset = datasets.MNIST(
    root='./data',  # where the data is stored
    train=True,     # use the training set
    # convert a PIL.Image in the range [0, 255] to a torch.FloatTensor in the range [0, 1.0]
    transform=transforms.ToTensor(),
    download=True
)
test_dataset=datasets.MNIST(
    root='./data',
    train=False,
    transform=transforms.ToTensor()
)
# Load the data in batches of size batch_size
# for the training set, shuffle should be set to True so that samples are drawn in random order
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader=DataLoader(test_dataset,batch_size=10000,shuffle=False)
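
  As a quick sanity check (not part of the original code), one batch can be pulled from the train loader to confirm the shapes and value range the model will see:

# Illustrative check: inspect a single training batch
images, labels = next(iter(train_loader))
print(images.shape)                               # torch.Size([128, 1, 28, 28])
print(labels.shape)                               # torch.Size([128])
print(images.min().item(), images.max().item())  # ToTensor scales pixels into [0, 1]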

#### Autoencoder model ####
#################
class autoencoder(nn.Module):
    def __init__(self):
        super(autoencoder,self).__init__()
        self.encoder=nn.Sequential(
            nn.Linear(28*28,1000),
            nn.ReLU(True),
            nn.Linear(1000,500),
            nn.ReLU(True),
            nn.Linear(500,250),
            nn.ReLU(True),
            nn.Linear(250,2)
        )
        self.decoder=nn.Sequential(
            nn.Linear(2,250),
            nn.ReLU(True),
            nn.Linear(250,500),
            nn.ReLU(True),
            nn.Linear(500,1000),
            nn.ReLU(True),
            nn.Linear(1000,28*28),
            nn.Tanh()
        )
    def forward(self,x):
        x=self.encoder(x)
        x=self.decoder(x)
        return x

model = autoencoder().cuda()
criterion=nn.MSELoss()
optimizer=torch.optim.Adam(
    model.parameters(),lr=learning_rate,weight_decay=1e-5
)
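
  Before training, a quick forward pass on dummy data (an illustrative check, not in the original post) confirms that the encoder bottleneck is 2-dimensional and that the reconstruction has 28*28 = 784 values:

# Illustrative check: verify the encoder and full autoencoder output shapes
dummy = torch.randn(4, 28 * 28).cuda()
print(model.encoder(dummy).shape)   # torch.Size([4, 2])   -- the 2-D embedding
print(model(dummy).shape)           # torch.Size([4, 784]) -- the reconstruction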

### Train the model ###
for epoch in range(num_epochs):
    for data in train_loader:
        img,_=data
        img=img.view(img.size(0),-1)
        img=Variable(img).cuda()
        ###forward###
        output=model(img)
        loss=criterion(output,img)
        ###backward###
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ####log#####
    print('epoch [{}/{}],loss:{:.4f}'.format(epoch+1,num_epochs,loss.item()))
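
  save_image is imported above but never used in the original snippet; one possible use (a sketch, with an assumed output file name) is to write the last batch's reconstructions to disk once training finishes:

# Illustrative use of save_image: write the final reconstructions as an image grid
pic = output.data.cpu().view(-1, 1, 28, 28)
pic = (pic + 1) / 2   # map the Tanh output range [-1, 1] back to [0, 1]
save_image(pic, './reconstruction_final.png')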


### Test the model ###
# Because BatchNorm and Dropout behave differently during training and evaluation,
# the model must be switched to evaluation mode before testing
model.eval()
for data in test_loader:  # the test set is processed in a single batch
    img, label = data
    img = img.view(img.size(0), -1)
    img = Variable(img).cuda()
    with torch.no_grad():
        # use the 2-dimensional encoder output (the embedding) for the scatter plot
        code = model.encoder(img)
    code = code.data.cpu().numpy()
    y = label.numpy()
    plt.scatter(code[:, 0], code[:, 1], c=y)
    plt.colorbar()
    plt.title('autoencoder embedding of the MNIST test set')
    plt.show()
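
  The decoder can also be used in the opposite direction to build a picture grid of the learned 2-D map: sample a regular grid of points in the embedding plane and decode each point back into a 28x28 image. The grid range of [-3, 3] and the output file name below are illustrative assumptions, not taken from the original post.

# Sketch: decode a 10x10 grid of points from the 2-D embedding plane into images
xs = torch.linspace(-3, 3, 10).tolist()
grid = torch.tensor([[x, y] for x in xs for y in xs]).cuda()   # (100, 2) grid points
with torch.no_grad():
    decoded = model.decoder(grid).view(-1, 1, 28, 28).cpu()
save_image((decoded + 1) / 2, './decoded_grid.png', nrow=10)   # map Tanh range to [0, 1]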

Origin www.cnblogs.com/candyRen/p/12110953.html