Machine Learning 0004: Functions That Generate Tensors in TensorFlow


Machine Learning 0004: Functions That Generate Tensors in TensorFlow


I have been learning TensorFlow recently, and there are many things I do not yet understand. One of them is how to generate tensors, which I have organized below. All of the content is in the code and its comments.

----------------


import tensorflow as tf
import numpy as np

# TensorFlow 1.x graph mode: ops are built first, then evaluated via a Session.
# Bug fix: the session variable was originally misspelled (`sex`), so every
# later `sess.run(...)` call raised NameError before anything could print.
sess = tf.Session()

# 1. tf.ones
# Generate a tensor with the given shape, dtype and name, with every value "1".
a = tf.ones(shape=[2, 3], dtype=tf.float32, name=None)
print(sess.run(a))
print('#' * 40)  # decorative divider
# [[ 1.  1.  1.]
#  [ 1.  1.  1.]]
########################################

# 2. tf.zeros
# Generate a tensor with the given shape, dtype and name, with every value "0".
a = tf.zeros(shape=[2, 3], dtype=tf.float32, name=None)
print(sess.run(a))
print('#' * 40)  # decorative divider
# [[ 0.  0.  0.]
#  [ 0.  0.  0.]]
########################################

# 3. tf.ones_like
# Generate a tensor with the same shape as `tensor`, type dtype, all values "1".
vec = [[1, 2, 3, 4], [5, 6, 7, 8]]
a = tf.ones_like(tensor=vec, dtype=tf.float32, name=None, optimize=True)
print(sess.run(a))
print('#' * 40)  # decorative divider
# [[ 1.  1.  1.  1.]
#  [ 1.  1.  1.  1.]]
########################################

# 4. tf.zeros_like
# Generate a tensor with the same shape as `tensor`, type dtype, all values "0".
a = tf.zeros_like(tensor=a, dtype=tf.float32, name=None, optimize=True)
print(sess.run(a))
print('#' * 40)  # decorative divider
# [[ 0.  0.  0.  0.]
#  [ 0.  0.  0.  0.]]
########################################

# 5. tf.fill
# Generate a tensor of shape `dims` with every element equal to `value`.
a = tf.fill(dims=[2, 3], value=3.14159, name=None)
print(sess.run(a))
print('#' * 40)  # decorative divider
# [[ 3.14159012  3.14159012  3.14159012]
#  [ 3.14159012  3.14159012  3.14159012]]
########################################

# 6. tf.constant
# Generate a constant tensor of the given shape and dtype from `value`.
# The fill rules are easiest to see from the examples: a scalar is replicated
# everywhere; a list shorter than the shape is padded by repeating its LAST
# element; more elements than the shape holds is an error.
# (1)
a = tf.constant(value=88, dtype=tf.float32, shape=[6], name=None)
print(sess.run(a))
# [ 88.  88.  88.  88.  88.  88.]
# (2)
a = tf.constant(value=[88, 99], dtype=tf.float32, shape=[6], name=None)
print(sess.run(a))
# [ 88.  99.  99.  99.  99.  99.]
# (3)
a = tf.constant(value=[88, 99, 11, 22, 33, 44], dtype=tf.float32, shape=[6], name=None)
print(sess.run(a))
# [ 88.  99.  11.  22.  33.  44.]
# (4)
# a = tf.constant(value=[88, 99, 11, 22, 33, 44, 55, 66, 77], dtype=tf.float32, shape=[6], name=None)
# print(sess.run(a))
# ValueError: Too many elements provided. Needed at most 6, but received 9  (raised immediately)
# (5)
a = tf.constant(value=88, dtype=tf.float32, shape=[2, 3], name=None)
print(sess.run(a))
# [[ 88.  88.  88.]
#  [ 88.  88.  88.]]
# (6)
a = tf.constant(value=[1, 2], dtype=tf.float32, shape=[2, 3], name=None)
print(sess.run(a))
# [[ 1.  2.  2.]
#  [ 2.  2.  2.]]
# (7)
a = tf.constant(value=[1, 2, 3], dtype=tf.float32, shape=[2, 3], name=None)
print(sess.run(a))
# [[ 1.  2.  3.]
#  [ 3.  3.  3.]]
# (8)
a = tf.constant(value=[1, 2, 3, 4, 5, 6], dtype=tf.float32, shape=[2, 3], name=None)
print(sess.run(a))
print('#' * 40)  # decorative divider
# [[ 1.  2.  3.]
#  [ 4.  5.  6.]]
########################################

# 7. tf.random_normal
# Related distribution samplers:
#   tf.truncated_normal - truncated normal; values fall in [mean - 2*stddev, mean + 2*stddev]
#   tf.random_uniform   - uniform distribution
#   tf.random_gamma     - gamma distribution
#   tf.random_poisson   - Poisson distribution
# Generate a tensor of the given shape whose entries are drawn from a normal
# distribution with the given mean and standard deviation, as dtype.
a = tf.random_normal(shape=[2, 3], mean=0, stddev=2.0, dtype=tf.float32, seed=None, name=None)
b = sess.run(a)
print(b)
print("mean=", np.mean(b, keepdims=False))  # sample mean
print("stddev=", np.std(b, keepdims=False))  # sample standard deviation
# NOTE: the output below is for reference only — it differs on every run.
# The values follow a normal distribution, so with more samples the sample
# statistics stabilize around the requested mean/stddev.
# [[-1.06130075  3.85585999  2.9854815 ]
#  [-1.93925571  2.28367829 -2.51560712]]
# mean= 0.601476
# stddev = 2.51796
a = tf.random_normal(shape=[5000], mean=0, stddev=2.0, dtype=tf.float32, seed=None, name=None)
b = sess.run(a)
print("mean=", np.mean(b, keepdims=False))  # sample mean
print("stddev=", np.std(b, keepdims=False))  # sample standard deviation
print('#' * 40)  # decorative divider
# Results of several runs with 5000 samples: the mean is close to 0 and the
# standard deviation is close to 2.0.
# mean = -0.0181304 stddev = 1.97549
# mean = 0.0371914 stddev = 2.00946
# mean = -0.0146296 stddev = 1.98888
########################################

# 8. tf.random_shuffle
# Random shuffle — only shuffles along the FIRST dimension (rows stay intact).
a = tf.random_shuffle(value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], seed=None, name=None)
b = tf.random_shuffle(value=[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], seed=None, name=None)
print(sess.run(a))
print(sess.run(b))
print('#' * 40)  # decorative divider
# [ 9  5  2  3  7  1  8 10  6  4]
# [[ 1  2  3  4  5]
#  [ 6  7  8  9 10]]
########################################

# 9. tf.random_crop
# Random crop: produce a tensor of shape `size` by cutting a randomly-placed
# contiguous region of that shape out of `value`.
a = tf.random_crop(value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], size=[4], seed=None, name=None)
print(sess.run(a))
print('#' * 40)  # decorative divider
# [6 7 8 9] — could also be [5 6 7 8], [4 5 6 7], etc.
########################################

sess.close()












You may also like

Origin http://43.154.161.224:23101/article/api/json?id=325645394&siteId=291194637