# Python语言应用Keras实现ANN模型搭建(应用在预测)

from keras import metrics
from sklearn.preprocessing import MinMaxScaler
import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense,Dropout,Bidirectional,Activation,TimeDistributed
import csv
from sklearn.model_selection import train_test_split
from pylab import*
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
import time
from matplotlib import pyplot
import math
from random import random
from sklearn.model_selection import KFold
from sklearn.pipeline import Pipeline
from sklearn.model_selection import  cross_val_score, KFold
from pylab import*
# The original line fused the import with `i=0` ("...import shufflei=0"),
# which is a syntax error; restore the two statements.
from sklearn.utils import shuffle

i = 0          # row counter, used only to skip the CSV header
j = []
data = []
X = []
indicess = []
xback = 24     # window length: 24 hourly samples per day

# Load the raw CSV (first row is a header) into a list of rows.
with open(r'D:\多云新.csv') as f:
    reader = csv.reader(f)
    for row in reader:
        if i == 0:
            # skip the header line
            i += 1
            continue
        data.append(row[:])

data = np.array(data)
print("the shape of data", np.shape(data))
m, n = np.shape(data)
# One vectorized cast replaces the original element-wise loop, which
# converted each cell to float only to store it back into a string array
# before this same whole-array cast.
data = data.astype('float64')
y = data[:, -1]
y1 = data[:, -1]
set1 = data[:, :-1]   # feature columns
set2 = data[:, -1]    # target column (last column)
def create_interval_dataset(dataset1, dataset2, xback, step=24):
    """Slice two aligned sequences into fixed-length windows.

    Parameters
    ----------
    dataset1 : array-like
        Feature rows, one row per time step.
    dataset2 : array-like
        Target values aligned with ``dataset1``.
    xback : int
        Window length (number of consecutive time steps per sample).
    step : int, optional
        Stride between window starts. Defaults to 24, the value that was
        hard-coded in the original (non-overlapping daily windows when
        ``xback == 24``).

    Returns
    -------
    (np.ndarray, np.ndarray)
        Stacked feature windows and target windows.
    """
    dataX, dataY = [], []
    # Stop at len - xback so only full windows are emitted (the window
    # starting exactly at len - xback is excluded, as in the original).
    for i in range(0, len(dataset1) - xback, step):
        dataX.append(dataset1[i:i + xback])
        dataY.append(dataset2[i:i + xback])
    return np.asarray(dataX), np.asarray(dataY)
# Build daily (24-step) windows and prepare 10-fold cross-validation.
dataX, dataY = create_interval_dataset(set1, set2, 24)
dataY = dataY.reshape(-1, 24)

from sklearn.model_selection import KFold  # duplicate import kept from original

# Per-fold error metrics, accumulated across the CV loop below.
MAPE, RMSE, MABE = [], [], []
kf = KFold(n_splits=10, shuffle=False, random_state=None)
# Train and evaluate one ANN per CV fold; log metrics and plot the fold.
for train_index, test_index in kf.split(dataX):
    print("TRAIN:", train_index, "TEST:", test_index)
    X_tr, X_te = dataX[train_index], dataX[test_index]
    y_tr, y_te = dataY[train_index], dataY[test_index]
    # Flatten each 24-step window of features into one 144-dim vector
    # (144 implies 6 feature columns per time step -- confirm against CSV).
    X_tr = np.reshape(X_tr, [-1, 144])
    X_te = np.reshape(X_te, [-1, 144])
    y_te = y_te.astype('float64')
    y_tr = y_tr.astype('float64')

    # Min-max scale inputs and targets to [0, 1]; one scaler per array so
    # each can be inverted independently afterwards.
    # NOTE(review): the test-set scalers are fit on test data (leakage);
    # kept as in the original to preserve the reported numbers.
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaler1 = MinMaxScaler(feature_range=(0, 1))
    X_tr = scaler.fit_transform(X_tr)
    X_te = scaler1.fit_transform(X_te)
    scaler2 = MinMaxScaler(feature_range=(0, 1))
    y_tr = scaler2.fit_transform(y_tr.reshape(-1, 24))
    scaler3 = MinMaxScaler(feature_range=(0, 1))
    y_te = scaler3.fit_transform(y_te.reshape(-1, 24))

    # Two-layer ANN: 144 -> 15 -> 24. The original used the Keras-1
    # `output_dim` keyword (removed in Keras 2) while already calling
    # fit() with the Keras-2 `epochs` argument; use the Keras-2 form.
    model = Sequential()
    model.add(Dense(15, input_dim=144))
    model.add(Activation('relu'))
    model.add(Dense(24))
    model.add(Activation('relu'))
    model.compile(loss='mse', optimizer='adam')
    model.fit(X_tr, y_tr, epochs=200, batch_size=200, verbose=2, shuffle=False)

    predicted = model.predict(X_te)     # test-set predictions (scaled)
    predicted1 = model.predict(X_tr)    # train-set predictions (scaled)
    predicted = np.reshape(predicted, [-1, 24])
    predicted1 = np.reshape(predicted1, [-1, 24])
    # Undo the target scaling so errors are reported in original units.
    predicted = scaler3.inverse_transform(predicted)
    predicted1 = scaler2.inverse_transform(predicted1)
    y_tr = scaler2.inverse_transform(y_tr)
    y_te = scaler3.inverse_transform(y_te)

    # Clamp small predictions to zero (presumably night-time values;
    # threshold 10 taken from the original -- confirm the unit). The
    # boolean-mask assignment replaces the original nested loops.
    predicted1[predicted1 <= 10] = 0
    predicted[predicted <= 10] = 0

    y_tr = y_tr.flatten()
    y_te = y_te.flatten()
    predicted = predicted.flatten()
    predicted1 = predicted1.flatten()

    # Indices of non-zero true values (MAPE is undefined at zero targets).
    # The original had an IndentationError here (`  xina = []`); fixed.
    fei0 = [idx for idx in range(len(y_tr)) if y_tr[idx] != 0]
    xina = [y_tr[idx] for idx in fei0]        # non-zero train truths
    xinb = [predicted1[idx] for idx in fei0]  # matching train predictions
    fei01 = [idx for idx in range(len(y_te)) if y_te[idx] != 0]
    xina1 = [y_te[idx] for idx in fei01]      # non-zero test truths
    xinb1 = [predicted[idx] for idx in fei01]  # matching test predictions

    print("prediction", predicted[:72])
    print("y_te", y_te[:72])

    # Append a fold header to the log file. The metrics below are computed
    # on the TEST split (xina1/xinb1), so the label now says 测试 instead
    # of the original's mislabeled 训练 (training).
    log_file = open("E:/十折交叉验证/ANN1多云新模型.txt", 'a')
    log_file.write("\n")
    log_file.write('——————————ANN提前一小时测试误差————————————' + '\n')
    log_file.close()  # the original leaked this handle every fold
    print("*****************测试集误差***********************")

    # NOTE(review): each sum runs over non-zero points only but is divided
    # by len(y_te) (all points, zeros included) -- kept as in the original.
    v = list(map(lambda x: abs((x[0] - x[1]) / x[0]), zip(xina1, xinb1)))
    loss = sum(v) * 100 / len(y_te)
    print("MAPE loss", loss)
    MAPE.append(loss)

    v = list(map(lambda x: pow(x[0] - x[1], 2), zip(xina1, xinb1)))
    loss = math.sqrt(sum(v) / len(y_te))
    print("the RMSE  is :", loss)
    RMSE.append(loss)

    v = list(map(lambda x: abs(x[0] - x[1]), zip(xina1, xinb1)))
    loss = sum(v) / len(y_te)
    print("the MABE of is :", loss)
    MABE.append(loss)

    # Plot the first ~3 days of truth vs. prediction for this fold.
    # The original referenced `plt`, which is never defined (only
    # `pyplot` is imported; `from pylab import *` does not bind `plt`).
    pyplot.plot(y_te[:73], 'g--', lw=2, label='ANN真实值曲线')
    pyplot.plot(predicted[:73], 'r', lw=2, label='ANN预测值曲线')
    pyplot.title('ANN', fontsize=18)
    pyplot.legend(loc=0, numpoints=1)
    leg = pyplot.gca().get_legend()
    ltext = leg.get_texts()
    pyplot.setp(ltext, fontsize='small')



# 转载自 blog.csdn.net/pwtd_huran/article/details/79729312