TensorFlow - Generating Batch Data - Method 2

The previous note, "Tensorflow - 将序列处理成embedding - 方法1 - keras调包" (processing sequences into embeddings with Keras), also covered how to generate batch data. It is suited to text-sequence data for classification, and can be regarded as "TensorFlow - Generating Batch Data - Method 1".

In this note, Example 1 suits regression-style prediction on sequence data, while Example 2 suits classification of text sequences.

The mechanics also differ:

the previous note yields only one batch of data at a time;

this one generates all the data for an epoch in one go.
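To make the contrast concrete, here is a minimal sketch of the two patterns (toy data, not from either note):

import numpy as np

toy = np.arange(10)   # toy data, illustration only

# Method 1 style: yield one batch at a time
def batch_generator(data, batch_size):
    for start in range(0, len(data), batch_size):
        yield data[start:start + batch_size]

# Method 2 style: materialize every batch of the epoch up front
def all_batches(data, batch_size):
    return [data[start:start + batch_size]
            for start in range(0, len(data), batch_size)]

for batch in batch_generator(toy, 4):
    print(batch)              # [0 1 2 3] / [4 5 6 7] / [8 9]
print(all_batches(toy, 4))    # the same three batches, all held in memory at once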

Example 1: 

#coding=utf-8
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

rnn_unit=10       #hidden layer units
input_size=7
output_size=1
lr=0.0006
f=open('D:/DL/lstm_blog_case/stock_dataset/dataset_2.csv')
df = pd.read_csv(f)     # read in the stock data
data=df.iloc[:, 2:10].values  # all rows, columns 3 through 10


# Build the training set
def get_train_data(batch_size=60,time_step=20,train_begin=0,train_end=5800):
    batch_index=[]
    data_train=data[train_begin:train_end]
    normalized_train_data=(data_train-np.mean(data_train,axis=0))/np.std(data_train,axis=0)  # standardize
    train_x, train_y = [],[]   # training set
    for i in range(len(normalized_train_data)-time_step):
        if i % batch_size==0:
            batch_index.append(i)
        x=normalized_train_data[i:i+time_step, :7]
        # [[-0.13319847 -0.15049982 -0.12760952 -0.13653869 -0.57690395  0.20655675  -0.53271994] ...] 20 rows
        y=normalized_train_data[i:i+time_step,7, np.newaxis]
        # [[-0.13687717] ...] 20 rows
        train_x.append(x.tolist())
        train_y.append(y.tolist())
    #print(len(train_y)) # 5780
    #print(train_y)
    batch_index.append((len(normalized_train_data)-time_step))  # append the index of the final (partial) batch
    # train_x : 5780 * 20 * 7, a 3-D tensor [[[ ]]]
    # print(np.array(train_x[1:2]))
    # print("train_y:",np.array(train_y))  # with np.newaxis, train_y is 3-D too: np.array(train_y).shape is (5780, 20, 1)
    print(" batch_index", batch_index)  # e.g. [0, 80, 160, 240, 320, 400 ... 3780] when batch_size=80
    return batch_index,train_x,train_y


batch_index,train_x,train_y = get_train_data()
print(train_x[0:2])
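For intuition: get_train_data slides a window of length time_step forward one row at a time, so consecutive samples overlap in all but one row. A quick shape check on a made-up toy array (not the stock data):

import numpy as np

toy = np.arange(50, dtype=float).reshape(25, 2)   # 25 rows, 2 columns, toy only
time_step = 5
windows = [toy[i:i + time_step] for i in range(len(toy) - time_step)]
print(np.array(windows).shape)   # (20, 5, 2): len(toy)-time_step overlapping windows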


# Build the test set
def get_test_data(time_step=20,test_begin=5800):
    data_test=data[test_begin:]
    mean=np.mean(data_test,axis=0)
    std=np.std(data_test,axis=0)
    normalized_test_data=(data_test-mean)/std  # standardize
    size=(len(normalized_test_data)+time_step-1)//time_step  # number of samples (ceiling division)
    test_x,test_y=[],[]  
    for i in range(size-1):
        x=normalized_test_data[i*time_step:(i+1)*time_step,:7]
        y=normalized_test_data[i*time_step:(i+1)*time_step,7]
        test_x.append(x.tolist())
        test_y.extend(y)
    test_x.append((normalized_test_data[(i+1)*time_step:,:7]).tolist())
    test_y.extend((normalized_test_data[(i+1)*time_step:,7]).tolist())
    return mean,std,test_x,test_y
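Note the difference from the training set: here the windows do not overlap (stride = time_step), and whatever rows remain at the end are appended as one final, shorter sample, which is why prediction() below iterates only up to len(test_x)-1. A sketch with made-up sizes:

import numpy as np

toy = np.arange(46).reshape(23, 2)                   # 23 toy rows
time_step = 5
size = (len(toy) + time_step - 1) // time_step       # 5
chunks = [toy[i * time_step:(i + 1) * time_step] for i in range(size - 1)]
chunks.append(toy[(size - 1) * time_step:])          # the leftover 3-row tail
print([len(c) for c in chunks])                      # [5, 5, 5, 5, 3]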


# —————————————————— Define the network variables ——————————————————

weights={
         'in':tf.Variable(tf.random_normal([input_size,rnn_unit])),
         'out':tf.Variable(tf.random_normal([rnn_unit,1]))
        }
biases={
        'in':tf.Variable(tf.constant(0.1,shape=[rnn_unit,])),
        'out':tf.Variable(tf.constant(0.1,shape=[1,]))
       }


# —————————————————— Define the LSTM network ——————————————————
def lstm(X):     
    batch_size=tf.shape(X)[0]
    time_step=tf.shape(X)[1]
    w_in=weights['in']
    b_in=biases['in']  
    input=tf.reshape(X,[-1,input_size])  # input_size is 7, rnn_unit is 10; flatten so one matmul feeds the LSTM
    input_rnn=tf.matmul(input,w_in)+b_in
    input_rnn=tf.reshape(input_rnn,[-1,time_step,rnn_unit])  # back to 3-D as input to the lstm cell
    cell=tf.nn.rnn_cell.BasicLSTMCell(rnn_unit)
    init_state=cell.zero_state(batch_size,dtype=tf.float32)
    output_rnn, final_states=tf.nn.dynamic_rnn(cell, input_rnn,initial_state=init_state, dtype=tf.float32)
    output=tf.reshape(output_rnn,[-1,rnn_unit])  # flatten as input to the output layer
    # note the shape here; pred is reshaped back to 1-D later, in the loss
    w_out=weights['out']
    b_out=biases['out']
    pred=tf.matmul(output,w_out)+b_out
    return pred,final_states
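The two reshapes around the cell exist so a single weight matrix can be applied to every time step with one matmul. A numpy sketch of the shape bookkeeping (toy shapes, same idea):

import numpy as np

batch, time_step, input_size, rnn_unit = 4, 15, 7, 10   # toy shapes
X = np.zeros((batch, time_step, input_size))
w_in = np.zeros((input_size, rnn_unit))
flat = X.reshape(-1, input_size)              # (60, 7): merge batch and time axes
proj = flat @ w_in                            # (60, 10): one matmul covers all steps
back = proj.reshape(-1, time_step, rnn_unit)  # (4, 15, 10): 3-D again for the cell
print(flat.shape, proj.shape, back.shape)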


# —————————————————— Train the model ——————————————————
def train_lstm(batch_size=80,time_step=15,train_begin=2000,train_end=5800):
    X=tf.placeholder(tf.float32, shape=[None,time_step,input_size])
    Y=tf.placeholder(tf.float32, shape=[None,time_step,output_size])
    batch_index,train_x,train_y= get_train_data(batch_size,time_step,train_begin,train_end)
    pred,_=lstm(X)
    # loss function (mean squared error)
    loss=tf.reduce_mean(tf.square(tf.reshape(pred,[-1])-tf.reshape(Y, [-1])))
    train_op=tf.train.AdamOptimizer(lr).minimize(loss)
    saver=tf.train.Saver(tf.global_variables(),max_to_keep=15)
    #module_file = tf.train.latest_checkpoint()
    module_file = 'D:/DL/lstm_blog_case/ckpt/stock2.ckpt'
    with tf.Session() as sess:
        try :
            saver.restore(sess, module_file)
            print("成功加载模型参数")
        except:
            sess.run(tf.global_variables_initializer())
            print("未加载模型参数,文件被删除或者第一次运行")
        for i in range(2000):  # epoch
            for step in range(len(batch_index)-1):
                # each slice train_x[a:b] is a 3-D list: batch_size * time_step * input_size
                # batch_index is an arithmetic sequence with step batch_size (80 here)
                _,loss_=sess.run([train_op,loss],feed_dict={X:train_x[batch_index[step]:batch_index[step+1]],Y:train_y[batch_index[step]:batch_index[step+1]]})
            print(i,loss_)
            if i % 200==0:
                print("保存模型:",saver.save(sess,'D:/DL/lstm_blog_case/ckpt/stock2.ckpt',global_step=i))

# train_lstm()
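The inner loop above walks consecutive pairs of batch_index to slice out one batch per step. An equivalent way to write it (a sketch, not the author's code), which some readers find clearer:

# Equivalent slicing with explicit (start, end) pairs:
for start, end in zip(batch_index[:-1], batch_index[1:]):
    batch_x = train_x[start:end]   # batch_size samples, each time_step * input_size
    batch_y = train_y[start:end]
    # _, loss_ = sess.run([train_op, loss], feed_dict={X: batch_x, Y: batch_y})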


# ———————————————— Prediction ————————————————————
def prediction(time_step=20):
    X=tf.placeholder(tf.float32, shape=[None,time_step,input_size])
    #Y=tf.placeholder(tf.float32, shape=[None,time_step,output_size])
    mean,std,test_x,test_y=get_test_data(time_step)
    pred,_=lstm(X)     
    saver=tf.train.Saver(tf.global_variables())
    with tf.Session() as sess:
        # restore parameters
        module_file = tf.train.latest_checkpoint('D:/DL/lstm_blog_case/ckpt/')
        saver.restore(sess, module_file)
        print("tf.train.latest_checkpoint():", tf.train.latest_checkpoint('D:/DL/lstm_blog_case/ckpt/'))
        test_predict = []
        for step in range(len(test_x)-1):
            prob=sess.run(pred,feed_dict={X: [test_x[step]]})
            predict=prob.reshape((-1))
            test_predict.extend(predict)
        test_y=np.array(test_y)*std[7]+mean[7]
        test_predict=np.array(test_predict)*std[7]+mean[7]
        acc=np.average(np.abs(test_predict-test_y[:len(test_predict)])/test_y[:len(test_predict)])  # mean relative deviation
        # plot the results as a line chart
        plt.figure()
        plt.plot(list(range(len(test_predict))), test_predict, color='b')
        plt.plot(list(range(len(test_y))), test_y,  color='r')
        plt.show()

#prediction()
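prediction() multiplies by std[7] and adds mean[7] to undo the standardization, so the curves are plotted on the original price scale. The round trip, on made-up numbers:

import numpy as np

raw = np.array([10.0, 12.0, 14.0])          # made-up values
mean, std = raw.mean(), raw.std()
normalized = (raw - mean) / std             # what the network sees
restored = normalized * std + mean          # same move as test_predict*std[7]+mean[7]
print(np.allclose(restored, raw))           # True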

Example 2:

import csv
import numpy as np


def load_csv(filename, dtype):
    matrix_data = []
    with open(filename, 'r') as csvfile:
        csvreader = csv.reader(csvfile, delimiter=',')
        next(csvreader)  # skip the header row
        for row_vector in csvreader:
            if dtype == 'int':
                matrix_data.append(list(map(int, row_vector[0:])))
            else:
                matrix_data.append(list(map(float, row_vector[0:])))
    return np.matrix(matrix_data[0:5000])
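As an aside, the same load can be written more compactly with pandas; a hypothetical equivalent (load_csv_pd is my name, not the author's), assuming the same header row and all-numeric columns:

import pandas as pd

def load_csv_pd(filename, dtype='float'):
    df = pd.read_csv(filename, header=0, dtype=float if dtype == 'float' else int)
    return np.matrix(df.values[:5000])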


# Stack the positive and negative samples together
def stack_all_sequence():
    padding_sequence_neg=load_csv('mirRNA_data/pad_neg_binary_sequence.csv','float')
    neg_number = len(padding_sequence_neg)
    padding_sequence_pos = load_csv('mirRNA_data/pad_pos_binary_sequence.csv', 'float')
    pos_number = len(padding_sequence_pos)
    y_label = np.zeros((neg_number + pos_number, 2))

    all_seq = np.row_stack((padding_sequence_neg, padding_sequence_pos))
    # print(len(all_seq))   # 10000
    # print(all_seq.shape)  # (10000, 1600)
    pos_label = np.array([1, 0])
    neg_label = np.array([0, 1])
    # negatives come first in all_seq, so label them first
    for i in range(neg_number):
        y_label[i] = neg_label
    for j in range(pos_number):
        y_label[j + neg_number] = pos_label
    return all_seq, y_label
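The two label loops can also be built in one vectorized step; a sketch assuming the same neg-then-pos row order as all_seq and 5000 samples of each class:

n_neg, n_pos = 5000, 5000                            # counts from the two CSVs
y_label = np.vstack([np.tile([0, 1], (n_neg, 1)),    # negatives first, as in all_seq
                     np.tile([1, 0], (n_pos, 1))])   # then positives
print(y_label.shape)                                 # (10000, 2)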


shuffled_x = np.zeros((100, 100, 1600))
shuffled_y = np.zeros((100, 100, 2))
# Shuffle the data; a new next_batch approach: 100 batches, 100 samples per batch
def batch_iter(x, y, batch_size=100):
    data_len = len(x)
    num_batch = int((data_len - 1) / batch_size) + 1
    indices = np.random.permutation(np.arange(data_len))

    # reorder the rows by the shuffled indices (fancy indexing)
    x_shuffle = np.asarray(x)[indices]
    y_shuffle = np.asarray(y)[indices]

    for i in range(num_batch):
        start_id = i * batch_size
        end_id = min((i + 1) * batch_size, data_len)
        shuffled_x[i] = x_shuffle[start_id:end_id]
        shuffled_y[i] = y_shuffle[start_id:end_id]
    return shuffled_x, shuffled_y    # 100 batches of shuffled samples


all_seq, y = stack_all_sequence()
x_shuffled, y_shuffled = batch_iter(all_seq, y)  # 100 batches, 100 samples per batch

# print(x_shuffled.shape)   # (100, 100, 1600)
# print(x_shuffled[0].shape)  # (100, 1600)
# print(y_shuffled.shape)   # (100, 100, 2)
# print(y_shuffled[0].shape) # (100, 2)
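To close the loop with Method 1: the same shuffle-and-batch logic can be written as a generator that yields one batch at a time instead of materializing all 100 batches up front. A sketch (batch_iter_gen, input_x, input_y are illustrative names, not from the original):

def batch_iter_gen(x, y, batch_size=100):
    x, y = np.asarray(x), np.asarray(y)
    indices = np.random.permutation(len(x))
    for start in range(0, len(x), batch_size):
        batch_idx = indices[start:start + batch_size]
        yield x[batch_idx], y[batch_idx]

# for batch_x, batch_y in batch_iter_gen(all_seq, y):
#     sess.run(train_op, feed_dict={input_x: batch_x, input_y: batch_y})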

Reposted from blog.csdn.net/Zhou_Dao/article/details/103748142