Python TensorFlow Learning Notes (continuously updated)

The first program: fitting the coefficients of a linear function with TensorFlow's gradient descent optimizer

import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow C++ info/warning logs
import tensorflow as tf

# Fabricate training data from the line y = 0.1 * x + 0.3
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# Trainable parameters: weight initialized uniformly in [-1, 1), bias initialized to 0
Weight = tf.Variable(tf.random_uniform([1], -1, 1))
biases = tf.Variable(tf.zeros([1]))

y = Weight * x_data + biases

# Mean squared error, minimized by plain gradient descent with learning rate 0.5
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()

sess=tf.Session()
sess.run(init)

for i in range(1000):
	sess.run(train)
	print(sess.run(loss))
	print(i,sess.run(Weight),sess.run(biases))
sess.close()

Fitting the three parameters of a quadratic function with gradient descent

import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf

# Fabricate training data from the parabola y = 0.1 * x^2 + 0.2 * x + 0.3
x_data = np.random.rand(100).astype(np.float32)
y_data = 0.1 * x_data * x_data + 0.2 * x_data + 0.3

# Three trainable coefficients, each initialized uniformly in [-1, 1)
a = tf.Variable(tf.random_uniform([1], -1, 1))
b = tf.Variable(tf.random_uniform([1], -1, 1))
c = tf.Variable(tf.random_uniform([1], -1, 1))

y = a * x_data * x_data + b * x_data + c

loss=tf.reduce_mean(tf.square(y-y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()

sess=tf.Session()
sess.run(init)

for i in range(1000):
	sess.run(train)
	# print(sess.run(loss))
	print(i,sess.run(a),sess.run(b),sess.run(c))
sess.close()

The example from the official TensorFlow documentation

import tensorflow as tf
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
# 100 two-dimensional samples; targets come from y = 0.1*x1 + 0.2*x2 + 0.3
x_data = np.float32(np.random.rand(2, 100))
y_data = np.dot([0.1, 0.2], x_data) + 0.3

a = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = tf.matmul(a, x_data) + b

loss=tf.reduce_mean(tf.square(y-y_data))
optimizer=tf.train.GradientDescentOptimizer(0.5)
train=optimizer.minimize(loss)

init=tf.global_variables_initializer()
with tf.Session() as sess:
	sess.run(init)
	for step in range(201):
		sess.run(train)
		print(step,sess.run(a),sess.run(b))

Usage of placeholder: it acts as an input slot whose value can change from run to run; the actual values are supplied at run time by passing a dictionary (feed_dict) to sess.run.

import tensorflow as tf
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
x=tf.placeholder(tf.float32,shape=(1024,1024))
y=tf.matmul(x,x)

with tf.Session() as sess:
	rand_array=np.random.rand(1024,1024)
	print(sess.run(y,feed_dict={x:rand_array}))
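
The shape can also be left partially unspecified with None, so the same graph accepts a different number of rows on every run. A minimal sketch of this (my own example, not from the tutorial):

import tensorflow as tf
import numpy as np

# The first dimension is None, so any number of rows can be fed at run time
x = tf.placeholder(tf.float32, shape=(None, 3))
row_sums = tf.reduce_sum(x, axis=1)

with tf.Session() as sess:
    print(sess.run(row_sums, feed_dict={x: np.random.rand(2, 3)}))  # 2 rows
    print(sess.run(row_sums, feed_dict={x: np.random.rand(5, 3)}))  # 5 rows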

A quick introduction to the constant function

tf.constant(value, dtype=None, shape=None, name='Const')
tensor = tf.constant([1, 2, 3, 4, 5, 6, 7])  # => [1 2 3 4 5 6 7]
tensor = tf.constant(-1.0, shape=[2, 3])     # => [[-1. -1. -1.] [-1. -1. -1.]]
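
Note that tf.constant only adds a node to the graph; the => values above are what the tensor evaluates to, so to actually print them you still run the tensor in a session, for example:

import tensorflow as tf

tensor = tf.constant(-1.0, shape=[2, 3])
with tf.Session() as sess:
    print(sess.run(tensor))  # [[-1. -1. -1.] [-1. -1. -1.]]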

The "Hello World" of MNIST: training a neural network to recognize handwritten digits

I haven't fully understood some of the details yet, such as the evaluation method (the cross-entropy loss); I'll come back and fill this in later. Perhaps my math background is the limiting factor.

The overall idea, though, is this: flatten each image into a matrix of pixel values between 0 and 1, matrix-multiply it by a weight matrix holding one set of feature weights per digit, score the resulting prediction against the true label with that evaluation method (cross-entropy), and then train the weights with gradient descent. That is the whole pipeline.

# input_data.py is the helper script from the official TensorFlow MNIST tutorial;
# it downloads the dataset into MNIST_data/ and provides mini-batch access
import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Each image is flattened into a 784-dimensional vector (28 x 28 pixels)
x = tf.placeholder("float", [None, 784])
# One column of weights and one bias per digit class (0-9)
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
# Softmax turns the 10 scores into class probabilities
y = tf.nn.softmax(tf.matmul(x,W) + b)
# Placeholder for the one-hot ground-truth labels
y_ = tf.placeholder("float", [None,10])
# Cross-entropy between the true and predicted distributions
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# A prediction counts as correct when the most probable class matches the label
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
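
To make the cross-entropy and the accuracy check above a little less mysterious, here is a small NumPy sketch that computes both by hand on two made-up predictions (the numbers are invented purely for illustration):

import numpy as np

# Hypothetical softmax outputs for two images, and their one-hot labels
y = np.array([[0.7, 0.2, 0.1],
              [0.1, 0.1, 0.8]])
y_ = np.array([[1, 0, 0],
               [0, 1, 0]])

# Cross-entropy: -sum of true_label * log(predicted probability)
cross_entropy = -np.sum(y_ * np.log(y))
print(cross_entropy)  # -(log 0.7 + log 0.1), about 2.66

# Accuracy: fraction of samples whose argmax prediction matches the label
accuracy = np.mean(np.argmax(y, 1) == np.argmax(y_, 1))
print(accuracy)  # 0.5: the first sample is classified correctly, the second is not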

Defining a custom fully connected layer function and training it on hand-made data

import tensorflow as tf
import numpy as np
# Define a function that adds one fully connected layer
def add_layer(inputs,in_size,out_size,activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size,out_size]))
    biases = tf.Variable(tf.zeros([1,out_size])+0.1)
    Wx_plus_b = tf.matmul(inputs,Weights)+biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
# Make some training data: y = x^2 - 0.5 plus Gaussian noise
x_data = np.linspace(-1,1,300)[:,np.newaxis]
noise = np.random.normal(0,0.05,x_data.shape)
y_data = np.square(x_data)-0.5+noise
# Set up the inputs, the two-layer network, and the loss function
xs = tf.placeholder(tf.float32,[None,1])
ys = tf.placeholder(tf.float32,[None,1])
l1 = add_layer(xs,1,10,activation_function=tf.nn.relu)
prediction = add_layer(l1,10,1)
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),
                    reduction_indices=[1]))
# Start training
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
init = tf.global_variables_initializer()
sess=tf.Session()
sess.run(init)
for i in range(1000):
    sess.run(train_step,feed_dict={xs:x_data,ys:y_data})
    if i % 10 == 0:
        print(sess.run(loss,feed_dict={xs:x_data,ys:y_data}))
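
Once the loop finishes, the same prediction node can be evaluated on new inputs through the feed dict. A small sketch that could be appended to the program above (x_test is a made-up set of query points; the target curve is x^2 - 0.5):

# Query the trained network on a few new points
x_test = np.array([[-0.5], [0.0], [0.5]], dtype=np.float32)
print(sess.run(prediction, feed_dict={xs: x_test}))
sess.close()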
    

Reposted from blog.csdn.net/Gipsy_Danger/article/details/81098494