tensorflow (2)

import tensorflow as tf
a = tf.constant([1.0,2.0],name='a')
b = tf.constant([2.0,3.0],name='b')
result = a + b
sess = tf.Session()
print(sess.run(result))
with sess.as_default():
    print(result.eval())
[ 3.  5.]
[ 3.  5.]
In [5]:
print(a.graph is tf.get_default_graph())
True
In [11]:
g1 = tf.Graph()
with g1.as_default():
    #在计算图个g1中定义变量‘V’,并设置初始值为0
    v = tf.get_variable('v',initializer=tf.zeros_initializer()(shape=[1]))
print(v)
g2 = tf.Graph()
with g2.as_default():
    #在计算图个g1中定义变量‘V’,并设置初始值为0
    v = tf.get_variable('v',initializer=tf.ones_initializer()(shape=[1]))
#在计算图g1中读取变量v的取值。
with tf.Session(graph=g1) as sess:
    tf.global_variables_initializer().run()
    with tf.variable_scope("",reuse=True):
        print(sess.run(tf.get_variable("v")))
#在计算图g2中读取变量v的取值。
with tf.Session(graph=g2) as sess:
    tf.global_variables_initializer().run()
    with tf.variable_scope("",reuse=True):
        print(sess.run(tf.get_variable("v")))
<tf.Variable 'v:0' shape=(1,) dtype=float32_ref>
[ 0.]
[ 1.]
In [32]:
w1 = tf.Variable(tf.random_normal((2,3),stddev=1,seed=1))
w2 = tf.Variable(tf.random_normal((3,1),stddev=1,seed=1))
x = tf.constant([[0.7,0.9]])
a = tf.matmul(x,w1)
y = tf.matmul(a,w2)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
print(sess.run(y))
print(sess.run(x))
sess.close()
[[ 3.95757794]]
[[ 0.69999999  0.89999998]]
In [41]:
v = tf.constant([[1.0,2.0,3.0],[4.0,5.0,6.0]])
sess = tf.Session()
with sess.as_default():
    print(tf.reduce_mean(v).eval())
3.5
In [45]:
from numpy.random import RandomState
rdm = RandomState(1)
print(rdm)
X = rdm.rand(128,2)
print(X.shape)
<mtrand.RandomState object at 0x0000001DCF8E9480>
(128, 2)
In [61]:
batch_size = 8
w1 = tf.Variable(tf.random_normal([2,3],stddev=1,seed=1))
w2 = tf.Variable(tf.random_normal([3,1],stddev=1,seed=1))

x = tf.placeholder(tf.float32,shape=(None,2),name='x_input')
y_ = tf.placeholder(tf.float32,shape=(None,1),name='y_input')

a = tf.matmul(x,w1)
y = tf.matmul(a,w2)

#定义损失函数以及反向传播算法
y = tf.sigmoid(y)
cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y,1e-10,1.0))
                                +(1-y)*tf.log(tf.clip_by_value(1-y,1e-10,1.0)))
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)

#产生数据
rdm = RandomState(1)
dataset_size = 128
X = rdm.rand(dataset_size,2)
Y = [[int(x1+x2 < 1)] for (x1,x2) in X]

#创建一个会话,来运行tensorflow
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    print(sess.run(w1))
    print(sess.run(w2))
    
    steps = 5000
    for i in range(steps):
        start = (i * batch_size) % dataset_size
        end = min(start+batch_size,dataset_size)
        
        sess.run(train_step,feed_dict={x: X[start:end],y_: Y[start:end]})
        if i % 1000 == 0:
            total_cross_entropy = sess.run(cross_entropy,feed_dict={x: X,y_: Y})
            print("After %d training step(s),cross entropy on all data is %g"%(i,total_cross_entropy))
            #print("After %d trainint step(s),cross entropy on all data is %g"%(i,total_cross_entropy))
    print(sess.run(w1))
    print(sess.run(w2))
[[-0.81131822  1.48459876  0.06532937]
 [-2.4427042   0.0992484   0.59122431]]
[[-0.81131822]
 [ 1.48459876]
 [ 0.06532937]]
After 0 training step(s),cross entropy on all data is 0.314006
After 1000 training step(s),cross entropy on all data is 0.0684551
After 2000 training step(s),cross entropy on all data is 0.033715
After 3000 training step(s),cross entropy on all data is 0.020558
After 4000 training step(s),cross entropy on all data is 0.0136867
[[-2.54865503  3.07930875  2.89517117]
 [-4.11127472  1.62590706  3.3972702 ]]
[[-2.32309365]
 [ 3.30116868]
 [ 2.4632082 ]]
In [63]:
def get_weight(shape,_lambda):
    var = tf.Variable(tf.random_normal(shape),dtype=tf.float32)
    tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(_lambda)(var))
    return var

x = tf.placeholder(tf.float32,shape=(None,2))
y_ = tf.placeholder(tf.float32,shape=(None,1))
batch_size = 8

layer_dimension = [2,10,10,10,1]
n_layers = len(layer_dimension)

cur_layer = x
in_dimension = layer_dimension[0]

for i in range(1,n_layers):
    out_dimension = layer_dimension[i]
    weight = get_weight([in_dimension,out_dimension],0.001)
    bias = tf.Variable(tf.constant(0.1,shape=[out_dimension]))
    cur_layer = tf.nn.relu(tf.matmul(cur_layer,weight) + bias)
    in_dimension = layer_dimension[i]
    
mse_loss = tf.reduce_mean(tf.square(y_ - cur_layer))

tf.add_to_collection('losses',mse_loss)

loss = tf.add_n(tf.get_collection('losses'))
In [66]:
#滑动平均模型
v1 = tf.Variable(0,dtype=tf.float32)
step = tf.Variable(0,trainable=False)

ema = tf.train.ExponentialMovingAverage(0.99,step)
print(ema)

maintain_averages_op = ema.apply([v1])

with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    
    print(sess.run([v1,ema.average(v1)]))
    
    sess.run(tf.assign(v1,5))
    sess.run(maintain_averages_op)
    print(sess.run([v1,ema.average(v1)]))
    
    sess.run(tf.assign(step,1000))
    sess.run(tf.assign(v1,10))
    sess.run([v1,ema.average(v1)])
    
    sess.run(maintain_averages_op)
    print(sess.run([v1,ema.average(v1)]))
<tensorflow.python.training.moving_averages.ExponentialMovingAverage object at 0x0000001DE14F1DA0>
name: "ExponentialMovingAverage_2"
op: "NoOp"
input: "^ExponentialMovingAverage_2/AssignMovingAvg"

[0.0, 0.0]
[5.0, 4.5]
[10.0, 4.5549998]
In [2]:
#使用tensorflow来解决MNIST手写体数字识别问题
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
#MNIST数据集相关的常数
input_node = 784   #输入层的节点数,图片的像素
output_node = 10   #输出层的节点数,0-9的类别数

#配置神经网络的参数
layer_node = 500   #隐藏层节点数
batch_size = 100   #一个训练batch中数据个数,数字越小,越接近随机梯度下降,越大,越接近梯度下降
learning_rate_base = 0.8   #基础的学习率
learning_rate_decay = 0.99  #学习率的衰减率
regularization_rate = 0.0001 #描述模型复杂度的正则化项在损失函数中的系数
training_steps = 30000   #训练轮数
moving_average_decay = 0.99  # 滑动平均衰减率

#给定神经网络的输入与所有参数,计算神经网络的前向通达的传播结果
def inference(input_tensor,avg_class,weight1,biases1,weight2,biases2):
    if avg_class == None:
        layer1 = tf.nn.relu(tf.matmul(input_tensor,weight1) + biases1)
        return tf.matmul(layer1,we ight2) + biases2
    else:
        layer1 = tf.nn.relu(tf.matmul(input_tensor,avg_class.average(weight1)) + avg_class.average(biases1))
        return tf.matmul(layer1,avg_class.average(weight2)) + avg_class.average(biases2)
 #训练模型的过程
def train(mnist):
    x = tf.placeholder(tf.float32,[None,input_node],name='x-input')
    y_ = tf.placeholder(tf.float32,[None,output_node],name='y-input')
    
    #生成隐藏层的参数
    weight1 = tf.Variable(tf.truncated_normal([input_node,layer_node],stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1,shape=[layer_node]))
    
    #生成输出层的参数
    weight2 = tf.Variable(tf.truncated_normal([layer_node,output_node],stddev=0.1))
    biases2 = tf.Variable(tf.constant(0.1,shape=[output_node]))
    
    #计算当前参数下神经网络前向通道的结果,不使用滑动平均值
    y = inference(x,None,weight1,biases1,weight2,biases2)
    #代表训练轮数的变量指定为不可训练的参数
    global_step = tf.Variable(0,trainable=False)
    #给定滑动平均衰减率与训练轮数的变量,初始化滑动平均类
    variable_averages = tf.train.ExponentialMovingAverage(moving_average_decay,global_step)
    #在所有代表神经网络参数的变量上使用滑动平均
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    #计算使用了滑动平均之后的前向传播结果
    average_y = inference(x,variable_averages,weight1,biases1,weight2,biases2)
    #计算交叉熵作为刻画预测值与真实值之间差距的损失函数
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels=tf.argmax(y_,1))
    #计算在当前batch中所有样例的交叉熵平均值
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    
    #计算L2正则化损失函数
    regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)
    #计算模型的正则化损失
    regularization = regularizer(weight1) + regularizer(weight2)
    #总损失
    loss = cross_entropy_mean + regularization
    #设置指数衰减的学习率
    learning_rate = tf.train.exponential_decay(learning_rate_base,global_step,
                                               mnist.train.num_examples / batch_size,
                                               learning_rate_decay)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)

    #在训练神经网络模型时,每过一遍数据就要通过BP更新神经网络的参数以及每个参数的滑动平均值
    with tf.control_dependencies([train_step,variable_averages_op]):
        train_op = tf.no_op(name='train')

    #检验使用了滑动平均模型的神经网络前向传播结果是否挣正确
    correct_prediction = tf.equal(tf.argmax(average_y,1),tf.argmax(y_,1))
    #计算模型的正确率
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

    #初始化会话并开始训练过程
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        #准备验证数据
        validate_feed = {x: mnist.validation.images,y_: mnist.validation.labels}
        #准备测试数据
        test_feed = {x: mnist.test.images,y_: mnist.test.labels}
        #迭代的训练神经网络
        for i in range(training_steps):
            if i % 1000 == 0:
                validate_acc = sess.run(accuracy,feed_dict=validate_feed)
                print("After %d training steps, validation accuracy using average model is %g" %(i,validate_acc))
            #产生这一轮使用的batch的训练数据,并运行训练过程
            xs, ys = mnist.train.next_batch(batch_size)
            sess.run(train_op,feed_dict={x: xs,y_: ys})
         
        #测试结束后,在测试数据上检测神经网络模型的最终正确率
        test_acc = sess.run(accuracy,feed_dict=test_feed)
        print('After %d training steps ,test accuracy using average model is %g'%(training_steps,test_acc))

#主程序入口
def main(argv=None):
    #声明处理MNIST 数据集的类,这个类在初始化时会自动下载
    mnist = input_data.read_data_sets("/tmp/data",one_hot=True)
    train(mnist)
    
#Tensorflow 提供的一个主程序入口,tf.app.run 会调用上面定义的main函数
if __name__ == '__main__':
    tf.app.run()
WARNING:tensorflow:From <ipython-input-2-75ab9cb66acf>:97: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From D:\ruanjianku\Python\anaconda\anaconda\envs\tensorflow\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
WARNING:tensorflow:From D:\ruanjianku\Python\anaconda\anaconda\envs\tensorflow\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting /tmp/data\train-images-idx3-ubyte.gz
WARNING:tensorflow:From D:\ruanjianku\Python\anaconda\anaconda\envs\tensorflow\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting /tmp/data\train-labels-idx1-ubyte.gz
WARNING:tensorflow:From D:\ruanjianku\Python\anaconda\anaconda\envs\tensorflow\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.one_hot on tensors.
Extracting /tmp/data\t10k-images-idx3-ubyte.gz
Extracting /tmp/data\t10k-labels-idx1-ubyte.gz
WARNING:tensorflow:From D:\ruanjianku\Python\anaconda\anaconda\envs\tensorflow\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
After 0 training steps, validation accuracy using average model is 0.086
After 1000 training steps, validation accuracy using average model is 0.9778
After 2000 training steps, validation accuracy using average model is 0.983
After 3000 training steps, validation accuracy using average model is 0.9822
After 4000 training steps, validation accuracy using average model is 0.9828
After 5000 training steps, validation accuracy using average model is 0.9842
After 6000 training steps, validation accuracy using average model is 0.9826
After 7000 training steps, validation accuracy using average model is 0.9844
After 8000 training steps, validation accuracy using average model is 0.9844
After 9000 training steps, validation accuracy using average model is 0.9846
After 10000 training steps, validation accuracy using average model is 0.9848
After 11000 training steps, validation accuracy using average model is 0.985
After 12000 training steps, validation accuracy using average model is 0.9854
After 13000 training steps, validation accuracy using average model is 0.986
After 14000 training steps, validation accuracy using average model is 0.9854
After 15000 training steps, validation accuracy using average model is 0.985
After 16000 training steps, validation accuracy using average model is 0.985
After 17000 training steps, validation accuracy using average model is 0.9854
After 18000 training steps, validation accuracy using average model is 0.9856
After 19000 training steps, validation accuracy using average model is 0.985
After 20000 training steps, validation accuracy using average model is 0.9854
After 21000 training steps, validation accuracy using average model is 0.9852
After 22000 training steps, validation accuracy using average model is 0.9852
After 23000 training steps, validation accuracy using average model is 0.986
After 24000 training steps, validation accuracy using average model is 0.986
After 25000 training steps, validation accuracy using average model is 0.9848
After 26000 training steps, validation accuracy using average model is 0.9864
After 27000 training steps, validation accuracy using average model is 0.9856
After 28000 training steps, validation accuracy using average model is 0.9852
After 29000 training steps, validation accuracy using average model is 0.9852
After 30000 training steps ,test accuracy using average model is 0.9841
An exception has occurred, use %tb to see the full traceback.

SystemExit
D:\ruanjianku\Python\anaconda\anaconda\envs\tensorflow\lib\site-packages\IPython\core\interactiveshell.py:2870: UserWarning: To exit: use 'exit', 'quit', or Ctrl-D.
  warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)

猜你喜欢

转载自blog.csdn.net/doulinxi115413/article/details/80700764