【AI】_02_RNN_Practical_Example

【一】Data Preparation and Preprocessing
  • Data
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from torch.autograd import Variable

# normalize the dataset between 0 and 1 with sklearn preprocessing
T = np.arange(0, 100, 0.1)
dataset = np.sin(T)
scaler = MinMaxScaler()
# reshape to (n_samples, n_features) as sklearn expects, then flatten back
dataset = scaler.fit_transform(dataset.reshape(-1, 1)).ravel()
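  • A quick sanity check that the scaling worked: the series now spans exactly [0, 1]
print(dataset.min(), dataset.max())  # 0.0 1.0
print(dataset.shape)                 # (1000,)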
  • Split into training and test sets
# convert an array of values into a dataset matrix: x is the current value,
# y is the value look_back steps ahead
def create_dataset(dataset, look_back=2):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        dataX.append(dataset[i])
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)
# split into train and test sets
train_size = int(len(dataset) * 0.7)
test_size = len(dataset) - train_size
train, test = dataset[: train_size], dataset[train_size:]
trainX, trainY = create_dataset(train)
testX, testY = create_dataset(test)  
# resize_(sequence length, batch_size, n_features);
# 700 training points minus look_back=2 leaves 698 (300 - 2 = 298 for test)
trainX = torch.from_numpy(trainX).float().resize_(698, 1, 1)
trainY = torch.from_numpy(trainY).float().resize_(698, 1, 1)

testX = torch.from_numpy(testX).float().resize_(298, 1, 1)
testY = torch.from_numpy(testY).float().resize_(298, 1, 1)

# Y (red) leads X (blue, dashed) by look_back = 2 steps
plt.plot(trainX.numpy().ravel()[:30], 'b--')
plt.plot(trainY.numpy().ravel()[:30], 'r')
【二】Linear Model
  • Build a model of the form y = wx + b, learn the parameters w and b, and look at the shape of the output
import torch.nn as nn

class Model(nn.Module):

    def __init__(self, input_size, output_size):
        super(Model, self).__init__()
        # a single fully connected layer: y = wx + b
        self.fc = nn.Linear(input_size, output_size)

    def forward(self, input):
        output = self.fc(input)
        return output
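  • The excerpt never instantiates this model; presumably it is created with one input feature and one output feature
model = Model(1, 1)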
  • Start training and print the loss
learning_rate = 0.02 # learning rate
for k in range(100):
    loss = 0
    for i in range(trainX.size()[0]):
        x = Variable(trainX[i])
        y = model(x)
        target = Variable(trainY[i])
        loss += (y - target) ** 2
    # train the model: average the loss over all samples
    loss = loss / trainX.size()[0]
    loss.backward() # backpropagate
    for p in model.parameters():
        p.data -= learning_rate * p.grad.data # update the parameters
        p.grad.data.zero_() # zero the gradients
    # model.zero_grad()
    print(loss)
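  • To see what the fit converged to, read the learned parameters straight out of the fc layer. Since the target sin(t + 0.2) is not a function of sin(t) alone, no single (w, b) can fit it exactly; the printed values are the least-squares compromise
for name, p in model.named_parameters():
    print(name, p.data)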
  • Predict on the training set (fit the red line Y with the blue line X)
Y = []
for i in range(trainY.size()[0]):
    x = Variable(trainX[i])  # predict from the input X, not the target
    y = model(x)
    Y.append(y.data.numpy())
        
plt.figure()
plt.plot(np.array(Y).ravel(), 'r')
plt.ylim(0, 1)

Y_real = [y.numpy() for y in trainY] 
Y_real = np.array(Y_real).ravel()

plt.plot(Y_real, 'b--')
plt.show()
【三】Neural Network Model
  • This time, predict the next-step change of the curve rather than the value itself. This is a task that genuinely requires memory: the next value cannot be predicted directly from the current value alone
  • So the target stores the difference between consecutive values, which is what has to be remembered
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        dataX.append(dataset[i])
        # store the difference as the target
        dataY.append(dataset[i + look_back] - dataset[i])
    return np.array(dataX), np.array(dataY)
  • Split the training and test sets and plot them: trainX follows a sin curve while trainY follows a cos curve (the discrete derivative of sin)
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# trainX: sin, centered and scaled by 0.1 to match the magnitude of the differences
plt.plot((trainX - trainX.mean())[:60]*0.1, 'b--', label = 'trainx')
# trainY: cos (the one-step difference of sin)
plt.plot(trainY[:60], 'r', label = 'trainy')
plt.legend()
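  • As a quick check (using T, scaler, train_size and look_back from above), the difference target is approximately the scaled derivative scale * 0.1 * cos(t); the discrepancy is first-order in the step size
scale = scaler.scale_[0]
approx = scale * 0.1 * np.cos(T[: train_size - look_back])
# maximum deviation is on the order of 1e-3
print(np.abs(trainY - approx).max())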
  • Convert the data to PyTorch tensors
# (sequence length, batch_size, n_features): 700 - 1 = 699 train, 300 - 1 = 299 test
trainX = torch.from_numpy(trainX).float().resize_(699, 1, 1)
trainY = torch.from_numpy(trainY).float().resize_(699, 1, 1)

testX = torch.from_numpy(testX).float().resize_(299, 1, 1)
testY = torch.from_numpy(testY).float().resize_(299, 1, 1)
  • Build a neural network with a single hidden layer and see whether x can now predict y well
class Model(nn.Module):

    def __init__(self, input_size, hidden_size, output_size):
        super(Model, self).__init__()
        # parameters of a network with a single hidden layer
        self.i2h = nn.Parameter(torch.randn(input_size, hidden_size))
        self.h2o = nn.Parameter(torch.randn(hidden_size, output_size))
        self.bh = nn.Parameter(torch.randn(1, hidden_size))
        self.bo = nn.Parameter(torch.randn(1, output_size))

    def forward(self, inputs):
        # input -> hidden (ReLU) -> output (linear)
        hidden = F.relu(inputs.matmul(self.i2h) + self.bh)
        output = hidden.matmul(self.h2o) + self.bo

        return output

model = Model(1, 64, 1)
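  • A quick shape check of a single forward pass: one sample of shape (1, 1) goes through a hidden layer of width 64 and back to (1, 1)
print(model(Variable(trainX[0])).size())  # torch.Size([1, 1])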
  • Start training
learning_rate = 1e-2
for k in range(500):
    loss = 0
    for i in range(trainX.size()[0]):
        x = Variable(trainX[i])
        y = model(x)
        loss += (y - Variable(trainY[i])) ** 2
    loss = loss / trainX.size()[0]
    loss.backward(retain_graph=True)
    for p in model.parameters():
        p.data.add_(p.grad.data, alpha=-learning_rate) # gradient-descent step
    model.zero_grad()
    print(loss)
  • Whether one or two layers, the feed-forward network cannot find a good solution: this is a problem a memoryless pointwise mapping cannot solve. For a given value of sin(t), the target cos(t) can take two different values (rising vs. falling branch), so no static transformation of the current input can aggregate the historical information needed, and the fit fails
Y = []

for i in range(testX.size()[0]):
    x = Variable(testX[i])
    y = model(x)
    Y.append(y.data.numpy())

plt.figure()
plt.plot(np.array(Y).ravel(), 'r', label = 'predicted')
Y_real = [y.numpy() for y in testY] 
Y_real = np.array(Y_real).ravel()
plt.plot(Y_real, 'b--', label = 'real y')
plt.legend()

plt.figure()
plt.plot(np.array(Y).ravel(), 'r', label = 'predicted')
X_real = [x.numpy() for x in testX] 
X_real = np.array(X_real).ravel()
plt.plot((X_real - X_real.mean()) * 0.1, 'g--', label = 'x')
plt.legend()
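  • The ambiguity is easy to verify numerically: sin takes the same value on its rising and falling branches, but the derivative cos has opposite signs there, so no function of the current value alone can recover the target
t1, t2 = 0.5, np.pi - 0.5
print(np.sin(t1), np.sin(t2))  # same input value
print(np.cos(t1), np.cos(t2))  # targets with opposite signs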
    
【四】RNN
  • RNN network architecture, written from scratch
# create and fit an RNN network
class RNN(nn.Module):
    
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()

        self.hidden_size = hidden_size

        self.i2h = nn.Parameter(torch.randn(input_size, hidden_size))
        self.h2h = nn.Parameter(torch.randn(hidden_size, hidden_size))
        self.h2o = nn.Parameter(torch.randn(hidden_size, output_size))
        self.bh  =  nn.Parameter(torch.zeros(1, hidden_size))
        self.bo = nn.Parameter(torch.zeros(1, output_size))

    def forward(self, input, hidden):
        # the new hidden state mixes the current input with the previous hidden state
        hidden = torch.tanh(input.matmul(self.i2h) + hidden.matmul(self.h2h) + self.bh)
        output = hidden.matmul(self.h2o) + self.bo
        return output, hidden

    def initHidden(self):
        return Variable(torch.zeros(1, self.hidden_size))
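  • A minimal sketch of how this cell would be unrolled by hand over the tensors from section 【三】: the hidden state is carried from step to step, which is exactly the memory the feed-forward model lacked
rnn = RNN(1, 32, 1)
hidden = rnn.initHidden()
outputs = []
for i in range(trainX.size()[0]):
    # feed one time step and carry the hidden state forward
    output, hidden = rnn(Variable(trainX[i]), hidden)
    outputs.append(output)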
  • Alternatively, call nn.RNN directly and let it process the whole sequence at once
class RNN(nn.Module):

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.rnn = nn.RNN(
            input_size = input_size,
            hidden_size = hidden_size,
            num_layers = 1,     # number of stacked RNN layers
            batch_first = True, # batch is the first dimension
            )
        self.out = nn.Linear(hidden_size, output_size)

    def forward(self, x, h_state):
        # x       (batch, time_step, input_size)
        # r_out   (batch, time_step, hidden_size)
        # h_state (n_layers * n_directions, batch, hidden_size)
        r_out, h_state = self.rnn(x, h_state) # h_state is also fed back in as an input
        # the key step: read out the hidden output at every time step,
        # i.e. the temporal encoding of the whole sequence
        outs = []
        for time_step in range(r_out.size(1)):
            outs.append(self.out(r_out[:, time_step, :]))
        return torch.stack(outs, dim=1), h_state
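  • The training loop below iterates over a train_loader that the post never defines. A minimal sketch of one, assuming it should serve the 699-step tensors from section 【三】 as windows of 100 steps, two windows per batch, so each batch reshapes cleanly to (2, 100, 1)
from torch.utils.data import TensorDataset, DataLoader

seq_len = 100
n_seq = trainX.size()[0] // seq_len  # 6 full windows of 100 steps
x_seqs = trainX[: n_seq * seq_len].view(n_seq, seq_len, 1)
y_seqs = trainY[: n_seq * seq_len].view(n_seq, seq_len, 1)
train_loader = DataLoader(TensorDataset(x_seqs, y_seqs), batch_size=2)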
  • Start training
rnn = RNN(1, 32, 1)
def train(epochs):
    # h_state has shape (n_layers, batch, hidden_size) = (1, 2, 32)
    hidden = Variable(torch.randn(1, 2, 32))
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(rnn.parameters(), lr = 1e-2)
    for i in range(epochs):
        running_loss = 0
        for idx, (x, target) in enumerate(train_loader):
            # each batch: 2 sequences of 100 time steps, 1 feature
            x, target = Variable(x).view(2, 100, 1), Variable(target).view(2, 100, 1)
            y, hidden = rnn(x, hidden)
            # detach so gradients do not flow back across batch boundaries
            hidden = hidden.detach()
            loss = criterion(y, target)
            # backpropagate
            loss.backward()
            running_loss += loss.item()
            # optimizer step
            optimizer.step()
            # zero the gradients
            optimizer.zero_grad()
        print(running_loss)
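  • The excerpt stops before the model is actually run; under the assumptions above, training and inspecting the fit on the test set could look like this
train(100)
# roll the trained network over the whole test sequence with a fresh hidden state
h = torch.zeros(1, 1, 32)
with torch.no_grad():
    pred, h = rnn(testX.view(1, -1, 1), h)
plt.figure()
plt.plot(pred.view(-1).numpy(), 'r', label = 'predicted')
plt.plot(testY.view(-1).numpy(), 'b--', label = 'real y')
plt.legend()
plt.show()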
Reposted from blog.csdn.net/qq_34330456/article/details/99103270