Forget TF1! TensorFlow 2 data types, tensor properties, and creation methods

TensorFlow 2

import tensorflow as tf
import numpy as np

TensorFlow data types

constant (the tensor itself is immutable; only the Python name can be rebound)

a = tf.constant(1)
print(a)
a = tf.constant(2)
print(a)
tf.Tensor(1, shape=(), dtype=int32)
tf.Tensor(2, shape=(), dtype=int32)
tf.constant(1.)
<tf.Tensor: shape=(), dtype=float32, numpy=1.0>
tf.constant(2.2, dtype=tf.double)
<tf.Tensor: shape=(), dtype=float64, numpy=2.2>
tf.constant([True, False])
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
tf.constant('hello tf')
<tf.Tensor: shape=(), dtype=string, numpy=b'hello tf'>
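A tf.constant tensor cannot be modified in place; reassigning a above simply rebinds the Python name to a new tensor. A minimal sketch (the name c is just illustrative):

c = tf.constant([1, 2, 3])
try:
    c[0] = 5  # an EagerTensor does not support item assignment
except TypeError as e:
    print(e)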

Tensor Property

device

tf.test.is_gpu_available()
WARNING:tensorflow:From <ipython-input-7-17bb7203622b>:1: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.config.list_physical_devices('GPU')` instead.

True
tf.config.list_physical_devices('GPU')
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
with tf.device('cpu'):
    a = tf.constant([1])
with tf.device('gpu'):
    b = tf.range(4)
    
print(a.device)
print(b.device)

aa = a.gpu()
print(aa.device)

bb = b.cpu()
print(bb.device)
/job:localhost/replica:0/task:0/device:CPU:0
/job:localhost/replica:0/task:0/device:GPU:0
WARNING:tensorflow:From <ipython-input-9-b0029145c942>:9: _EagerTensorBase.gpu (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.identity instead.
/job:localhost/replica:0/task:0/device:GPU:0
WARNING:tensorflow:From <ipython-input-9-b0029145c942>:12: _EagerTensorBase.cpu (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.identity instead.
/job:localhost/replica:0/task:0/device:CPU:0
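The warnings above point to tf.identity as the replacement for the deprecated .gpu()/.cpu() methods. A minimal sketch, assuming a GPU is visible:

with tf.device('GPU:0'):
    aa = tf.identity(a)  # copy the CPU tensor a onto the GPU
print(aa.device)

with tf.device('CPU:0'):
    bb = tf.identity(b)  # copy the GPU tensor b back onto the CPU
print(bb.device)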

check

x = tf.range(4)
print(x)

# tensor -> numpy
print(x.numpy())

# shape
print(x.shape)

# check the number of dimensions, method 1
print(x.ndim)

# check the number of dimensions, method 2
print(tf.rank(x))
tf.Tensor([0 1 2 3], shape=(4,), dtype=int32)
[0 1 2 3]
(4,)
1
tf.Tensor(1, shape=(), dtype=int32)

Tensor Type

a = tf.constant([1.])
b = tf.constant([True, False])
c = tf.constant('hello tf')
d = np.arange(4)
isinstance(a, tf.Tensor)
True
tf.is_tensor(b)
True
tf.is_tensor(d)
False
(a.dtype, b.dtype, c.dtype, d.dtype)
(tf.float32, tf.bool, tf.string, dtype('int32'))

Type Convert

d = np.arange(4)
# numpy -> tensor
dd = tf.convert_to_tensor(d)
isinstance(dd, tf.Tensor)
True
a = tf.constant([1.])
a
<tf.Tensor: shape=(1,), dtype=float32, numpy=array([1.], dtype=float32)>
aa = tf.cast(a, dtype=tf.int64)
aa
<tf.Tensor: shape=(1,), dtype=int64, numpy=array([1], dtype=int64)>
b = tf.constant([0, 1])
bb = tf.cast(b, dtype=tf.bool)
bb
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  True])>
tf.cast(bb, tf.int32)
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([0, 1])>

tf.Variable

a = tf.range(5)
a
<tf.Tensor: shape=(5,), dtype=int32, numpy=array([0, 1, 2, 3, 4])>
b = tf.Variable(a)
b
<tf.Variable 'Variable:0' shape=(5,) dtype=int32, numpy=array([0, 1, 2, 3, 4])>
b.dtype
tf.int32
b.name
'Variable:0'
b = tf.Variable(a, name='input_data')
b.name
'input_data:0'
b.trainable
True
isinstance(b, tf.Tensor)  # not recommended: returns False for a Variable
False
isinstance(b, tf.Variable)
True
tf.is_tensor(b)
True
b.numpy()
array([0, 1, 2, 3, 4])
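Unlike a constant tensor, a Variable can be updated in place. A minimal sketch using assign and assign_add:

b.assign([5, 4, 3, 2, 1])                  # overwrite the values in place
b.assign_add(tf.ones(5, dtype=tf.int32))   # element-wise add -> [6, 5, 4, 3, 2]
b.numpy()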

Tensor Create

1. From numpy or a Python list (see the sketch after this list)

2. tf.ones / tf.zeros / tf.fill

3. tf.random.normal / truncated_normal / uniform (covered below)
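For item 1, creating a tensor from a numpy array or a Python list is just tf.convert_to_tensor (or tf.constant); a minimal sketch:

tf.convert_to_tensor(np.ones([2, 3]))   # from a numpy array -> dtype float64
tf.convert_to_tensor([[1, 2], [3, 4]])  # from a nested Python list -> dtype int32
tf.constant([1.0, 2.0])                 # tf.constant accepts lists as well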

a = tf.zeros([2, 3, 3])
a
<tf.Tensor: shape=(2, 3, 3), dtype=float32, numpy=
array([[[0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.]],

       [[0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.]]], dtype=float32)>
b = tf.ones([2, 3, 3])
b
<tf.Tensor: shape=(2, 3, 3), dtype=float32, numpy=
array([[[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]],

       [[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]]], dtype=float32)>
c = tf.zeros_like(b)
c
<tf.Tensor: shape=(2, 3, 3), dtype=float32, numpy=
array([[[0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.]],

       [[0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.]]], dtype=float32)>
d = tf.zeros(b.shape)
d
<tf.Tensor: shape=(2, 3, 3), dtype=float32, numpy=
array([[[0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.]],

       [[0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.]]], dtype=float32)>
e = tf.ones_like(b)
e
<tf.Tensor: shape=(2, 3, 3), dtype=float32, numpy=
array([[[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]],

       [[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]]], dtype=float32)>
d = tf.fill([2, 2], 9)
d
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[9, 9],
       [9, 9]])>

Normal

tf.random.normal([2, 2], mean=0, stddev=1, seed=2333)
<tf.Tensor: shape=(2, 2), dtype=float32, numpy=
array([[-1.4627922 ,  1.7796164 ],
       [ 0.9516031 ,  0.21233863]], dtype=float32)>
# truncated_normal: the generated values follow a normal distribution with the specified mean and
# standard deviation, except that values more than 2 standard deviations from the mean are dropped and re-picked.
tf.random.truncated_normal([2, 2], mean=0, stddev=1)
<tf.Tensor: shape=(2, 2), dtype=float32, numpy=
array([[-0.80775446,  0.7858228 ],
       [-0.13292529, -0.5749178 ]], dtype=float32)>

Uniform

tf.random.uniform([2, 2], minval=0, maxval=1)
<tf.Tensor: shape=(2, 2), dtype=float32, numpy=
array([[0.36148584, 0.7097454 ],
       [0.68507016, 0.38183832]], dtype=float32)>

Random Permutation

idx = tf.range(10)
idx = tf.random.shuffle(idx)
idx 
<tf.Tensor: shape=(10,), dtype=int32, numpy=array([5, 6, 8, 3, 9, 2, 1, 0, 7, 4])>
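# shuffle the data and the labels with the same permuted idx so samples stay aligned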
a = tf.random.normal([10, 28*28])
b = tf.random.uniform([10], maxval=10, dtype=tf.int32)
b
<tf.Tensor: shape=(10,), dtype=int32, numpy=array([1, 6, 1, 8, 6, 3, 4, 4, 2, 3])>
a = tf.gather(a, idx)
tf.gather(b, idx)
<tf.Tensor: shape=(10,), dtype=int32, numpy=array([3, 4, 2, 8, 3, 1, 6, 1, 4, 6])>

Scalar

loss, accuracy

data = tf.random.uniform([4, 10])
data
<tf.Tensor: shape=(4, 10), dtype=float32, numpy=
array([[0.38977015, 0.30231702, 0.8459405 , 0.73039913, 0.45134795,
        0.09178519, 0.73915493, 0.8408618 , 0.17082012, 0.826511  ],
       [0.6355922 , 0.9059446 , 0.10259736, 0.0463959 , 0.11150622,
        0.81397057, 0.27795923, 0.44129503, 0.48229563, 0.6907159 ],
       [0.004933  , 0.3695724 , 0.8248651 , 0.6683624 , 0.11724544,
        0.48900843, 0.25385535, 0.18276036, 0.02643847, 0.5070598 ],
       [0.64495564, 0.37315404, 0.88732004, 0.01456606, 0.24429095,
        0.10366774, 0.5600475 , 0.33780813, 0.13967168, 0.15355384]],
      dtype=float32)>
label = tf.range(4)
label = tf.one_hot(label, depth=10)
label
<tf.Tensor: shape=(4, 10), dtype=float32, numpy=
array([[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
       [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
       [0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
       [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]], dtype=float32)>
loss = tf.keras.losses.mse(data, label)
loss
<tf.Tensor: shape=(4,), dtype=float32, numpy=array([0.38907123, 0.20821846, 0.12225167, 0.2854908 ], dtype=float32)>
loss = tf.reduce_mean(loss)
loss
<tf.Tensor: shape=(), dtype=float32, numpy=0.25125805>
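The heading also mentions accuracy; a minimal sketch of computing it as a scalar from the same data and label (the names pred and truth are illustrative):

pred = tf.argmax(data, axis=1)
truth = tf.argmax(label, axis=1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(pred, truth), tf.float32))
accuracy  # a single scalar in [0, 1]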

Vector

Weight, Bias

from tensorflow.keras import layers
net = layers.Dense(10)
net.build((4, 8))  # batch = 4
net.kernel  # Weight
<tf.Variable 'kernel:0' shape=(8, 10) dtype=float32, numpy=
array([[-0.4179684 , -0.06267828, -0.45666292,  0.326221  ,  0.31872034,
        -0.1694096 ,  0.5285542 ,  0.37876195, -0.3375371 , -0.5454484 ],
       [ 0.4007795 ,  0.3538822 ,  0.41432947, -0.2941674 , -0.3806044 ,
        -0.3627179 , -0.44102082, -0.17222813,  0.3040642 , -0.04760015],
       [ 0.01388901,  0.20185792,  0.04658937, -0.08713791, -0.18877614,
         0.34765035, -0.4080443 ,  0.22766227,  0.5752988 ,  0.43546546],
       [-0.13318878,  0.32705832, -0.04608393,  0.34649134, -0.46813822,
         0.35031474,  0.18447495, -0.10142335, -0.00638908,  0.08682185],
       [ 0.16860312, -0.15874386,  0.34065145,  0.2607547 , -0.07928783,
        -0.2534556 ,  0.57035196, -0.40059972,  0.41721088,  0.13721591],
       [ 0.32329136, -0.09107184, -0.0579837 , -0.1746428 ,  0.45514572,
        -0.09799445, -0.30792797,  0.01333565,  0.22302413, -0.49811006],
       [-0.16520065, -0.27170965, -0.52430135,  0.4587494 , -0.2788463 ,
        -0.54026115, -0.5256784 , -0.29682723, -0.077066  ,  0.33198535],
       [-0.30735588,  0.0420959 ,  0.29359698,  0.4672147 , -0.5655383 ,
        -0.22525173,  0.44292676, -0.22308359, -0.33041078,  0.21447903]],
      dtype=float32)>
net.bias  # Bias
<tf.Variable 'bias:0' shape=(10,) dtype=float32, numpy=array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)>

Matrix

x = tf.random.normal([4, 784])
x
<tf.Tensor: shape=(4, 784), dtype=float32, numpy=
array([[-1.2882563 , -2.5096843 , -0.51790476, ..., -0.7876267 ,
         0.33790225,  0.41124803],
       [-2.6235921 , -0.73433775,  0.32151344, ...,  0.00425905,
        -0.22845691,  0.5476811 ],
       [-0.2859343 ,  0.5680826 , -0.49395   , ..., -0.52301675,
        -1.1615732 ,  0.19866285],
       [-0.3445326 ,  1.204968  ,  1.0537173 , ...,  1.9456455 ,
         0.33143657, -1.3287663 ]], dtype=float32)>
net = layers.Dense(10)
net.build((4, 784))
net(x).shape
TensorShape([4, 10])
net.kernel.shape
TensorShape([784, 10])
net.bias.shape
TensorShape([10])
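With no activation, a Dense layer is just x @ kernel + bias; a minimal check of that claim (sketch):

out = x @ net.kernel + net.bias      # [4, 784] @ [784, 10] + [10] -> [4, 10]
tf.reduce_max(tf.abs(out - net(x)))  # should be (numerically close to) 0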

Reposted from blog.csdn.net/qq_40326280/article/details/113616400