TensorFlow Learning (layers)

Error message:

tensorflow.python.framework.errors_impl.InvalidArgumentError: Shape must be rank 2 but is rank 1 for 'MatMul' (op: 'MatMul') with input shapes: [?,1], [1].

Wx_plus_b = tf.matmul(inputs,Weights) + biases

The error is not in tf.matmul itself (tf.mul was renamed to tf.multiply in TensorFlow 1.0, but tf.matmul still exists and is the right op here; swapping in tf.multiply only hides the problem through broadcasting and breaks the layer's output shape). The message shows the second operand has shape [1], i.e. rank 1: tf.random_normal([in_size],[out_size]) passes [out_size] as the mean argument instead of building a 2-D shape, so Weights comes out rank 1 while tf.matmul needs both operands to be rank 2. The fix:

tf.random_normal([in_size],[out_size])  --> tf.random_normal([in_size, out_size])
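A minimal sketch of the shape difference (using in_size=1, out_size=10 from the network below):

import tensorflow as tf

w_wrong = tf.random_normal([1], [10])  # second positional arg is `mean`; result is rank 1, shape (1,)
w_right = tf.random_normal([1, 10])    # a single shape list; result is rank 2, shape (1, 10)
print(w_wrong.shape, w_right.shape)    # (1,) (1, 10)

The corrected program in full: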
import tensorflow as tf
import numpy as np

#Arguments: the layer input, the input size, the output size, and an activation function; the default None means a plain linear layer
def add_layer(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))   #random initial weights work much better than all zeros; shape is [in_size, out_size], e.g. [2, 3] means 2 rows by 3 columns
    #biases are likewise recommended not to start at zero; their shape is 1 row by out_size columns
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)   #the +0.1 goes inside tf.Variable so biases stays a trainable Variable
    Wx_plus_b = tf.matmul(inputs, Weights) + biases           #matmul is matrix multiplication; this holds the not-yet-activated value
    if activation_function is None:
        outputs = Wx_plus_b  #when the argument is None the layer is a linear function, so no activation is needed; activation functions are generally used to add non-linearity
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
#The function above adds one neural layer; now build the training data: 300 examples in the range -1 to 1
x_data = np.linspace(-1,1,300)[:,np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)  #noise so the points scatter around the curve instead of lying exactly on it; mean 0, standard deviation 0.05, same shape as x_data
y_data = np.square(x_data) - 0.5 + noise   #square is element-wise; the target curve is y = x^2 - 0.5 plus noise
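# Shape check: np.linspace(-1, 1, 300) is (300,), and [:, np.newaxis] turns it into
# a (300, 1) column vector, matching the [None, 1] placeholders below
assert x_data.shape == (300, 1) and y_data.shape == (300, 1)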

xs = tf.placeholder(tf.float32, [None, 1])  #None leaves the batch size unspecified
ys = tf.placeholder(tf.float32, [None, 1])

l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu) #hidden layer with 10 neurons
prediction = add_layer(l1, 10, 1, activation_function=None) #linear output layer: 10 inputs, 1 output

loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))#sum each example's squared error, then average over the batch
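# Note: with a single output column the reduce_sum over axis 1 is a no-op, so this
# loss equals tf.reduce_mean(tf.square(ys - prediction)); the sum-then-mean pattern
# generalizes to layers with more than one output dimension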
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)#0.1 is the learning rate (less than 1); the optimizer minimizes the loss at this rate

init = tf.global_variables_initializer()  #initialize_all_variables() is deprecated in favor of this
sess = tf.Session()
sess.run(init)

for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})  #each step feeds the whole dataset through the placeholders
    if i % 50 == 0:
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))  #the printed loss should shrink steadily
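Once training finishes, the same session can query the fitted curve on new inputs; a minimal sketch (x_test is just an illustrative name):

x_test = np.linspace(-1, 1, 5)[:, np.newaxis]        # a few fresh sample points
print(sess.run(prediction, feed_dict={xs: x_test}))  # the network's fitted y values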

Reposted from blog.csdn.net/daxuan1881/article/details/84858695