Predicting stock prices with a neural network

#!/usr/bin/python
# -*- coding: utf-8 -*-
# author: cxx  time: 2019/8/28 0028
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

date = np.linspace(1, 15, 15)  # 15 evenly spaced numbers as the dates for the fifteen days (1-15)
endPrice = np.array([2511, 2538, 2510, 2591, 2732, 2701, 2701, 2678, 2726, 2681, 2739, 2715, 2823, 2864, 2919])  # daily closing price (3 p.m.)
beginPrice = np.array([2438, 2500, 2534, 2512, 2594, 2743, 2697, 2695, 2678, 2722, 2674, 2744, 2717, 2832, 2877])  # daily opening price (9 a.m.)
print(date)
plt.figure()
for i in range(0, 15):
    dateOne = np.zeros([2])  # 1-D array of length 2, initialized to 0 -> x-coordinates for the day's open and close
    dateOne[0] = i
    dateOne[1] = i
    priceOne = np.zeros([2])  # the day's prices
    priceOne[0] = beginPrice[i]  # opening price of the day
    priceOne[1] = endPrice[i]    # closing price of the day
    if beginPrice[i] > endPrice[i]:
        plt.plot(dateOne, priceOne, 'g', lw=8)  # down day: green bar
    else:
        plt.plot(dateOne, priceOne, 'r', lw=8)  # up day: red bar
# A (15x1) * w1 (1x10) + b1 (1x10) = B (15x10)
# B (15x10) * w2 (10x1) + b2 (15x1) = C (15x1)
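# (Illustrative addition, not part of the original post: a quick NumPy check of the shapes
#  described in the two comments above. The zero arrays are stand-ins for the real weights;
#  this only verifies that the matrix dimensions line up.)
_A = np.zeros([15, 1])                                   # input: 15 normalized dates
_B = np.dot(_A, np.zeros([1, 10])) + np.zeros([1, 10])   # (15,1) x (1,10) + (1,10) -> (15,10)
_C = np.dot(_B, np.zeros([10, 1])) + np.zeros([15, 1])   # (15,10) x (10,1) + (15,1) -> (15,1)
assert _B.shape == (15, 10) and _C.shape == (15, 1)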
# A: input layer
dateNormal = np.zeros([15, 1])
priceNormal = np.zeros([15, 1])
for i in range(0, 15):
    dateNormal[i, 0] = i / 14.0               # normalize the date to [0, 1]
    priceNormal[i, 0] = endPrice[i] / 3000.0  # normalize the price
x = tf.placeholder(tf.float32, [None, 1])  # placeholder for the input
y = tf.placeholder(tf.float32, [None, 1])  # placeholder for the target
# B: hidden layer
w1 = tf.Variable(tf.random_uniform([1, 10], 0, 1))  # 1x10 matrix of random values in [0, 1)
b1 = tf.Variable(tf.zeros([1, 10]))
wb1 = tf.matmul(x, w1) + b1
layer1 = tf.nn.relu(wb1)  # ReLU activation function
# C: output layer
w2 = tf.Variable(tf.random_uniform([10, 1], 0, 1))
b2 = tf.Variable(tf.zeros([15, 1]))
wb2 = tf.matmul(layer1, w2) + b2
layer2 = tf.nn.relu(wb2)
loss = tf.reduce_mean(tf.square(y - layer2))  # mean squared error: y is the true value, layer2 the prediction
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  # gradient descent; the argument is the learning rate
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(0, 50):
        sess.run(train_step, feed_dict={x: dateNormal, y: priceNormal})  # train to optimize w1, w2, b1, b2
    pred = sess.run(layer2, feed_dict={x: dateNormal})
    predPrice = np.zeros([15, 1])  # final predicted prices
    for i in range(0, 15):
        predPrice[i, 0] = (pred * 3000)[i, 0]  # undo the normalization to recover the price
    plt.plot(date, predPrice, 'b', lw=1)
    plt.show()
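The script above uses the TensorFlow 1.x graph API (tf.placeholder, tf.Session), which no longer exists in TensorFlow 2.x. As a rough sketch, not from the original post, the same 1-10-1 ReLU network written against the Keras API might look like the code below, with the layer sizes, learning rate, normalization, and 50 training steps carried over. One difference worth noting: the original gives b2 the shape (15, 1), i.e. one bias per training row, whereas a Keras Dense layer uses a single scalar bias for its one output unit.

import numpy as np
import tensorflow as tf

endPrice = np.array([2511, 2538, 2510, 2591, 2732, 2701, 2701, 2678,
                     2726, 2681, 2739, 2715, 2823, 2864, 2919], dtype=np.float32)
dateNormal = (np.arange(15, dtype=np.float32) / 14.0).reshape(15, 1)  # same date normalization as above
priceNormal = (endPrice / 3000.0).reshape(15, 1)                      # same price normalization as above

# 1 input -> 10 ReLU units -> 1 ReLU output, trained with plain gradient descent (lr = 0.1)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation='relu', input_shape=(1,)),
    tf.keras.layers.Dense(1, activation='relu'),
])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1), loss='mse')
# all 15 samples fit in one batch, so each epoch is a single gradient step, like one loop iteration above
model.fit(dateNormal, priceNormal, epochs=50, batch_size=15, verbose=0)

predPrice = model.predict(dateNormal) * 3000.0  # undo the normalization to recover the price
print(predPrice.ravel())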

Result plot:

[Figure: the daily open/close bars (red for up days, green for down days) with the predicted closing-price curve in blue]

Source: www.cnblogs.com/cxxBoo/p/11443756.html