Building Your Own Neural Network with TensorFlow (Part 2)

Defining a Layer
As I understand it, the input layer feeds into hidden layer 1: hidden1 multiplies the input by W, adds b, and passes the result through an activation function. That is all one neuron does; a layer just has many neurons doing the same thing. hidden2 then takes hidden1's output as its input, and so on. So I wrote my own function that adds a single layer:

# activate_func=None defaults to a linear (identity) layer
def add_layer(inputs, in_size, out_size, activate_func=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activate_func is None:
        outputs = Wx_plus_b
    else:
        outputs = activate_func(Wx_plus_b)
    return outputs
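
Stacking more layers is then just a matter of chaining calls, each layer's output feeding the next. A minimal sketch (the extra hidden layer and its size of 10 are my own illustrative choices, not from the original post):

l1 = add_layer(xs, 1, 10, activate_func=tf.nn.relu)   # input -> hidden1
l2 = add_layer(l1, 10, 10, activate_func=tf.nn.relu)  # hidden1's output is hidden2's input
out = add_layer(l2, 10, 1, activate_func=None)        # hidden2 -> output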

To check whether this network can actually learn, I construct the data myself (a quadratic curve plus noise, see the full code below) and watch whether the loss decreases.

loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))  # [1] sums across each row (per example); [0] would sum down columns
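
To make the axis semantics concrete, here is a small NumPy check with made-up numbers (illustration only, not part of the original code):

import numpy as np

err = np.array([[0.1], [-0.2], [0.3]])    # pretend this is ys - prediction for 3 examples
per_example = np.square(err).sum(axis=1)  # [0.01, 0.04, 0.09]: squared error summed per row
batch_loss = per_example.mean()           # 0.14 / 3 ≈ 0.0467: averaged over the batch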

Then optimize with gradient descent.

train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  # learning rate 0.1
# Equivalent two-step form:
# optimizer = tf.train.GradientDescentOptimizer(0.5)
# train_step = optimizer.minimize(loss)

Then run it in a session. The full code is below:

import tensorflow as tf
import numpy as np

# activate_func=None defaults to a linear (identity) layer
def add_layer(inputs, in_size, out_size, activate_func=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activate_func is None:
        outputs = Wx_plus_b
    else:
        outputs = activate_func(Wx_plus_b)
    return outputs

x_data = np.linspace(-1, 1, 300)[:, np.newaxis]  # 300 values between -1 and 1, as a column vector
noise = np.random.normal(0, 0.05, x_data.shape)  # Gaussian noise with the same shape as x_data
y_data = np.square(x_data) - 0.5 + noise

xs = tf.placeholder(tf.float32, [None, 1])  # None lets the batch size vary
ys = tf.placeholder(tf.float32, [None, 1])

l1 = add_layer(xs, 1, 10, activate_func=tf.nn.relu)    # input layer -> hidden layer
prediction = add_layer(l1, 10, 1, activate_func=None)  # hidden layer -> output layer

loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))  # [1] sums per row; [0] would sum per column
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  # learning rate 0.1
# optimizer = tf.train.GradientDescentOptimizer(0.5)
# train_step = optimizer.minimize(loss)

init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated
sess = tf.Session()
sess.run(init)

for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))  # anything involving a placeholder must be fed
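
After training, the fitted curve can be pulled out of the same session and compared against the data. A sketch using matplotlib (my own addition, not code from the post):

import matplotlib.pyplot as plt

pred = sess.run(prediction, feed_dict={xs: x_data})  # prediction depends only on xs
plt.scatter(x_data, y_data, s=5, label='data')
plt.plot(x_data, pred, 'r-', label='fit')
plt.legend()
plt.show()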

All of this follows 莫凡's video tutorials.
Even so, building a neural network is simpler with Keras; see the sketch below.
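
For comparison, a minimal sketch of the same network in Keras, assuming the Keras 2 API of that era (SGD's lr argument was later renamed learning_rate); this is my own sketch, not code from the post:

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD

model = Sequential([
    Dense(10, activation='relu', input_dim=1),  # input -> hidden, 10 neurons
    Dense(1)                                    # hidden -> output, linear by default
])
model.compile(optimizer=SGD(lr=0.1), loss='mse')  # same loss and learning rate as above
model.fit(x_data, y_data, epochs=1000, verbose=0)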

Reposted from blog.csdn.net/weixin_41376658/article/details/79453887