学习笔记第八篇之rnn预测序列数据

       这两天帮师兄处理一些论文要用到的数据。数据格式是已知序列的前面标签值,预测后面每条数据的标签值。 

       我开始的思路是用sklearn这个机器学习包用Python来写一个神经网络,用已知标签的数据去训练网络模型,然后预测后面的数据。但是结果总是不对,后来发现sklearn.neural_network的MLPClassifier只能用来处理分类问题,而我的数据标签是连续的数值,不属于分类问题。所以我改为用rnn循环神经网络来处理。

       下面是我的代码:

        

#coding:utf-8
"""
Created on 2018-01-10

LSTM regression script: trains on the first 15 labelled rows of
walking_data.csv and predicts the label column for the remaining rows.
(The original listing used `,,,` as the docstring delimiter, which is a
syntax error; restored to triple quotes.)
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

# ------------------------- constants -------------------------
rnn_unit=10       # hidden layer units
input_size=2      # number of feature columns fed to the network
output_size=1     # single continuous regression target
lr=0.0006         # learning rate
# ------------------------- load data -------------------------
# Pass the path directly so pandas manages the file handle (the original
# opened the file manually and never closed it).
df=pd.read_csv('walking_data.csv')    # walking data: two features + one label
data_x = df.iloc[:,0:2].values  # features: the first two columns
data_y = df.iloc[:15,2].values  # labels: third column, only the first 15 rows are labelled
#获取训练集
def get_train_data(batch_size=13,time_step=1,train_begin=0,train_end=15):
    """Slice the labelled rows into training windows.

    Reads the module globals ``data_x`` / ``data_y``.

    Args:
        batch_size: kept for interface compatibility; not used (the
            original built a ``batch_index`` list from it but never
            returned it).
        time_step: rows per window.
        train_begin, train_end: row range of ``data_x`` to use.

    Returns:
        (train_x, train_y): nested lists; each ``train_x`` entry has
        shape [time_step, input_size], each ``train_y`` entry
        [time_step, 1].
    """
    data_train=data_x[train_begin:train_end]
    train_x,train_y=[],[]
    # +1 so the final window (ending on the last labelled row) is kept;
    # the original range(len - time_step) silently dropped one sample.
    for i in range(len(data_train)-time_step+1):
        x=data_train[i:i+time_step,:]
        y=data_y[i:i+time_step,np.newaxis]
        train_x.append(x.tolist())
        train_y.append(y.tolist())
    return train_x,train_y



#获取测试集
def get_test_data(time_step=1,test_begin=15):
    """Slice the unlabelled tail of ``data_x`` into test windows.

    Args:
        time_step: rows per window.
        test_begin: first unlabelled row of ``data_x``.

    Returns:
        test_x: list of windows, each [time_step, input_size]; the final
        window may be shorter when the row count is not a multiple of
        ``time_step``.
    """
    data_test = data_x[test_begin:]
    test_x = []
    # Walk the data in non-overlapping windows. The original combined
    # range(len(data_test)) with stride slicing, which for time_step > 1
    # appended empty windows past the end of the data.
    for start in range(0, len(data_test), time_step):
        window = data_test[start:start+time_step, :]
        test_x.append(window.tolist())
    return test_x

# get_test_data()
#—————————————————— network variables ——————————————————
# Weights and biases for the input and output projections, shared by
# every call to lstm() below.

weights={
         'in':tf.Variable(tf.random_normal([input_size,rnn_unit])),   # feature -> hidden projection
         'out':tf.Variable(tf.random_normal([rnn_unit,1]))            # hidden -> prediction projection
        }
biases={
        'in':tf.Variable(tf.constant(0.1,shape=[rnn_unit,])),
        'out':tf.Variable(tf.constant(0.1,shape=[1,]))
       }

#——————————————————定义神经网络变量——————————————————
def lstm(X):
    """Single-layer LSTM regressor (TensorFlow 1.x graph code).

    Args:
        X: float32 tensor of shape [batch, time_step, input_size].

    Returns:
        (pred, final_states): ``pred`` is a [batch * time_step, 1]
        prediction tensor; ``final_states`` is the cell's final state.
    """
    batch_size=tf.shape(X)[0]
    time_step=tf.shape(X)[1]
    w_in=weights['in']
    b_in=biases['in']
    # Flatten to 2-D for the input projection, then restore the time
    # axis so the tensor can feed the LSTM cell.
    inputs=tf.reshape(X,[-1,input_size])   # renamed from `input`, which shadowed the builtin
    input_rnn=tf.matmul(inputs,w_in)+b_in
    input_rnn=tf.reshape(input_rnn,[-1,time_step,rnn_unit])
    cell=tf.nn.rnn_cell.BasicLSTMCell(rnn_unit)
    init_state=cell.zero_state(batch_size,dtype=tf.float32)
    # output_rnn holds each step's hidden state; final_states is the
    # state after the last step.
    output_rnn,final_states=tf.nn.dynamic_rnn(cell, input_rnn,initial_state=init_state, dtype=tf.float32)
    output=tf.reshape(output_rnn,[-1,rnn_unit])  # flatten for the output projection
    w_out=weights['out']
    b_out=biases['out']
    pred=tf.matmul(output,w_out)+b_out
    return pred,final_states



#——————————————————训练和预测模型——————————————————
def train_lstm(batch_size=13,time_step=1,train_begin=0,train_end=15):
    """Train the LSTM on the labelled rows, then predict the unlabelled tail.

    Args:
        batch_size, time_step, train_begin, train_end: forwarded to
            get_train_data() (the original hard-coded the literals there,
            silently ignoring its own parameters).

    Returns:
        test_predict: flat list of predicted label values, one per test row.
    """
    X=tf.placeholder(tf.float32, shape=[None,time_step,input_size])
    Y=tf.placeholder(tf.float32, shape=[None,time_step,output_size])
    train_x,train_y=get_train_data(batch_size,time_step,train_begin,train_end)
    with tf.variable_scope('sec_lstm'):
        pred,_=lstm(X)
    # Mean-squared-error loss over the flattened predictions.
    loss=tf.reduce_mean(tf.square(tf.reshape(pred,[-1])-tf.reshape(Y, [-1])))
    train_op=tf.train.AdamOptimizer(lr).minimize(loss)
    saver=tf.train.Saver(max_to_keep=1)
    best_loss = 1  # best (lowest) loss so far; the original misleadingly called this max_loss
    with tf.Session() as sess:
        # initialize_all_variables() has been deprecated in favour of
        # global_variables_initializer().
        sess.run(tf.global_variables_initializer())
        # Train for 2000 iterations, checkpointing whenever the loss improves.
        for i in range(2000):
            _,loss_=sess.run([train_op,loss],feed_dict={X:train_x,Y:train_y})
            if loss_ < best_loss:
                best_loss = loss_
                # print-as-function works on both Python 2 and 3.
                print("保存模型: " + saver.save(sess,'walking.model'))
        # Predict each test window with the trained weights.
        test_x = get_test_data(time_step)
        test_predict = []
        for step in range(len(test_x)):
            prob = sess.run(pred, feed_dict={X: [test_x[step]]})
            predict = prob.reshape((-1))
            test_predict.extend(predict)
    return test_predict
if __name__ == '__main__':
    # Run training + prediction, then dump one prediction per line.
    predictions = train_lstm()
    with open('predict_data.txt', 'w') as out_file:
        for value in predictions:
            out_file.writelines(str(value) + '\n')
数据是下列格式
1,2,3


0.2,300,0.338551763
0.3,300,0.351245825
0.4,300,0.360065945
0.5,300,0.367681455
0.6,1,0.36542
0.6,100,0.36952
0.6,200,0.37162
0.6,300,0.3715
0.6,400,0.37199
0.6,500,0.37162
0.6,600,0.37091
0.6,700,0.36838
0.7,300,0.372535982
0.8,300,0.372188818
0.9,300,0.373901261
0.2,1	
0.2,100	
0.2,200	
0.2,400	
0.2,500	
0.2,600	
0.2,700	
0.3,1	
0.3,100	
0.3,200	
0.3,400	
0.3,500	
0.3,600	
0.3,700	
0.4,1	
0.4,100	
0.4,200	
0.4,400	
0.4,500	
0.4,600	
0.4,700	
0.5,1	
0.5,100	
0.5,200	
0.5,400	
0.5,500	
0.5,600	
0.5,700	
0.7,1	
0.7,100	
0.7,200	
0.7,400	
0.7,500	
0.7,600	
0.7,700	
0.8,1	
0.8,100	
0.8,200	
0.8,400	
0.8,500	
0.8,600	
0.8,700	
0.9,1	
0.9,100	
0.9,200	
0.9,400	
0.9,500	
0.9,600	
0.9,700	
迭代2000次的最终的结果如下
0.364152
0.366082
0.366085
0.366085
0.366085
0.366085
0.232465
0.364304
0.366082
0.366085
0.366085
0.366085
0.366085
0.277275
0.364443
0.366082
0.366085
0.366085
0.366085
0.366085
0.322069
0.364572
0.366083
0.366085
0.366085
0.366085
0.366085
0.406077
0.364798
0.366083
0.366085
0.366085
0.366085
0.366085
0.443003
0.364898
0.366083
0.366085
0.366085
0.366085
0.366085
0.475439
0.36499
0.366083
0.366085
0.366085
0.366085
0.366085

猜你喜欢

转载自blog.csdn.net/zhangye_2017/article/details/79052043