Predicting a cos curve from a sin curve with an RNN

1. Import the libraries

import torch
from torch import nn
import numpy as np
import matplotlib.pyplot as plt

# fix the random seed for reproducibility
torch.manual_seed(1)

2. Define our hyperparameters

# hyperparameters
TIME_STEP = 10   # number of time steps per training sequence
INPUT_SIZE = 1   # dimension of the input at each time step
LR = 0.02        # learning rate

3. Look at our goal

# show the data
steps = np.linspace(0, np.pi * 2, 100, dtype=np.float32)
x_np = np.sin(steps)
y_np = np.cos(steps)
# plot the input and the target
plt.plot(steps, x_np, 'b-', label='input(sin)')
plt.plot(steps, y_np, 'r-', label='target(cos)')
plt.legend(loc='best')
plt.show()

(Figure: the input sin curve in blue and the target cos curve in red)
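A quick aside (my own check, not from the original post) on why the network needs memory at all: a single sin value does not determine the cos value, because two different phases can share the same sin, so the RNN must use its hidden state to tell them apart.

# sin(pi/4) == sin(3*pi/4), but the cos values differ in sign,
# so predicting cos from one sin sample alone is ambiguous
for t in (np.pi / 4, 3 * np.pi / 4):
    print('sin=%+.3f  cos=%+.3f' % (np.sin(t), np.cos(t)))
# sin=+0.707  cos=+0.707
# sin=+0.707  cos=-0.707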

4. Build our RNN

  • Our initial h_state is None. After every training step, the h_state produced by the current data is saved, and then passed back in (as the previous memory) along with the next batch of training data.
  • The arguments to the forward pass are therefore not just the input data but also our h_state (the memory).
  • Below is a flow chart of our RNN:
    (Figure: RNN flow chart)
  • Note that the input data x must be in the format (batch_size, time_step, input_size); a shape sanity check follows the code below.
class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()

        self.rnn = nn.RNN(
            input_size=INPUT_SIZE,      # size of each input
            hidden_size=32,             # number of hidden units
            num_layers=1,               # number of stacked RNN layers
            batch_first=True,           # batch is the first dimension
        )
        self.out = nn.Linear(32, 1)

    def forward(self, x, h_state):
        # we treat x as the input at every time step
        # x       (batch, time_step, input_size)
        # h_state (n_layers, batch, hidden_size)
        # r_out   (batch, time_step, hidden_size)
        r_out, h_state = self.rnn(x, h_state)

        outs = []   # store the prediction for each time step
        # compute the output for each time step
        for time_step in range(r_out.size(1)):    # pass each time step's hidden output through the linear layer
            outs.append(self.out(r_out[:, time_step, :]))
        # stack the results; the items in outs must be Tensors
        return torch.stack(outs, dim=1), h_state

# instantiate the RNN
rnn = RNN()
print(rnn)
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
loss_func = nn.MSELoss()
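As a quick sanity check (my own sketch, not part of the original post), we can push a dummy batch through the model to confirm the shapes:

# dummy input of shape (batch=1, time_step=TIME_STEP, input_size=INPUT_SIZE)
dummy_x = torch.zeros(1, TIME_STEP, INPUT_SIZE)
dummy_out, dummy_h = rnn(dummy_x, None)   # None -> zero initial hidden state
print(dummy_out.shape)   # torch.Size([1, 10, 1])
print(dummy_h.shape)     # torch.Size([1, 1, 32]) = (n_layers, batch, hidden_size)

Since nn.Linear is applied to the last dimension, the per-time-step loop in forward could be replaced by the single call self.out(r_out); the loop is kept here because it makes the time-step structure explicit.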

5. Train our RNN

Precautions

  • Do not forget to pass in the initial hidden state, and to detach the returned state after every step (see the note after the training loop).
  • Do not forget to reshape the input data to (batch, time_step, input_size).
h_state = None   # initial hidden state

plt.figure(1, figsize=(12, 5))
# interactive mode: keep the figure open so we can keep drawing on it
plt.ion()

for step in range(100):
    start, end = step * np.pi, (step + 1) * np.pi  # time range
    # use sin to predict cos
    steps = np.linspace(start, end, TIME_STEP, dtype=np.float32, endpoint=False)

    x_np = np.sin(steps)
    y_np = np.cos(steps)

    # shape (batch, time_step, input_size)
    x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])
    y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])

    prediction, h_state = rnn(x, h_state)
    # !! the next line is important
    h_state = h_state.data  # repack the hidden state, cutting the link to the last iteration

    loss = loss_func(prediction, y)  # compute the loss
    optimizer.zero_grad()            # clear the gradients
    loss.backward()                  # backpropagate the error
    optimizer.step()                 # apply the gradients

    # plot
    plt.plot(steps, y_np.flatten(), 'r-')   # flatten: [[1, 2]] --> [1, 2]
    plt.plot(steps, prediction.data.numpy().flatten(), 'b-')
    plt.draw()
    plt.pause(0.05)

plt.ioff()
plt.show()
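One note on the "important" line above: h_state.data works, but in current PyTorch the idiomatic spelling is .detach(), which returns the same tensor cut off from the autograd graph so that backward() does not try to reach back into previous iterations:

# equivalent to h_state.data, and preferred in modern PyTorch
h_state = h_state.detach()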
