tensorflow2.0 优势

版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/z_feng12489/article/details/89209254

相比 tensorflow 1.x

  1. 调试简单
  2. API相对清晰
  3. 容易入门
  4. 趋势:大批研究人员将转向 tensorflow 2.x
  5. session.run
  6. tf.control_dependencies
  7. tf.global_variables_initializer
  8. tf.cond, tf.while_loop
'''
tensorflow 1.x
'''
import tensorflow as tf

# TensorFlow 1.x style: first build a static computation graph, then
# execute it inside a Session. Nothing is computed at graph-building time.
a = tf.constant(7)
b = tf.constant(10)
c = tf.add(a, b)  # 'c' is a symbolic node in the graph, not the value 17 yet

# Bug fix: the class is tf.Session (capital S); tf.session() raises
# AttributeError because no such lowercase name exists in the TF1 API.
simple_session = tf.Session()
value_of_c = simple_session.run(c)  # run the graph to obtain the concrete value
print(value_of_c)
simple_session.close()  # release the session's resources explicitly


'''
now
tensorflow 2.0
'''
import tensorflow as tf

# TensorFlow 2.x executes eagerly: each op runs immediately and returns a
# concrete tensor — no graph, no Session, no .run() call required.
left = tf.constant(7.)
right = tf.constant(10.)
total = tf.add(left, right)  # already evaluated; holds 17.0 right away
print(total)

gpu 加速

import tensorflow as tf
import timeit


# Allocate the benchmark operands once, pinned to each device, so that the
# timed functions below measure only the matmul and not tensor creation.
with tf.device('/cpu:0'):
	cpu_a = tf.random.normal([10000, 1000])
	cpu_b = tf.random.normal([1000, 2000])
	print(cpu_a.device, cpu_b.device)

# NOTE(review): this assumes a GPU is actually visible to TensorFlow; on a
# CPU-only machine the '/gpu:0' placement will fail — confirm the runtime.
with tf.device('/gpu:0'):
	gpu_a = tf.random.normal([10000, 1000])
	gpu_b = tf.random.normal([1000, 2000])
	print(gpu_a.device, gpu_b.device)

def cpu_run():
    """Multiply the module-level CPU tensors, with the op pinned to /cpu:0."""
    with tf.device('/cpu:0'):
        product = tf.matmul(cpu_a, cpu_b)
    return product

def gpu_run():
    """Multiply the module-level GPU tensors, with the op pinned to /gpu:0."""
    with tf.device('/gpu:0'):
        product = tf.matmul(gpu_a, gpu_b)
    return product


# Warm-up pass: the first GPU call pays one-off costs (kernel compilation,
# memory allocator growth), so it is timed separately and then discarded.
warm_cpu = timeit.timeit(cpu_run, number=10)
warm_gpu = timeit.timeit(gpu_run, number=10)
print('warmup:', warm_cpu, warm_gpu)


# The real measurement, taken after both devices are warmed up.
cpu_time = timeit.timeit(cpu_run, number=10)
gpu_time = timeit.timeit(gpu_run, number=10)
print('run time:', cpu_time, gpu_time)
/job:localhost/replica:0/task:0/device:CPU:0 /job:localhost/replica:0/task:0/device:CPU:0
/job:localhost/replica:0/task:0/device:GPU:0 /job:localhost/replica:0/task:0/device:GPU:0
warmup: 2.1033559539999995 0.33675007299999926
run time: 2.0104764399999997 0.0006462219999985308

自动求导

import tensorflow as tf

# Scalars for y = a^2 * x + b * x + c.
x = tf.constant(1.)
a = tf.constant(2.)
b = tf.constant(3.)
c = tf.constant(4.)

with tf.GradientTape() as tape:
    # Constants are not tracked by default, so tell the tape to watch them.
    tape.watch([a, b, c])
    y = a ** 2 * x + b * x + c

# Analytically: dy/da = 2*a*x = 4, dy/db = x = 1, dy/dc = 1.
dy_da, dy_db, dy_dc = tape.gradient(y, [a, b, c])
print(dy_da, dy_db, dy_dc)
tf.Tensor(4.0, shape=(), dtype=float32) tf.Tensor(1.0, shape=(), dtype=float32) tf.Tensor(1.0, shape=(), dtype=float32)

神经网络的Layers

神经网络API

  1. tf.matmul
  2. tf.nn.conv2d
  3. tf.nn.relu
  4. tf.nn.max_pool2d
  5. tf.nn.sigmoid
  6. tf.nn.softmax
  7. layers.Dense
  8. layers.Conv2D
  9. layers.SimpleRNN
  10. layers.LSTM
  11. layers.ReLU
  12. layers.MaxPool2D

猜你喜欢

转载自blog.csdn.net/z_feng12489/article/details/89209254