tensorflow [2] Variable in detail

A Variable's primary role is to maintain persistent state for a node, such as the parameters of a deep learning model.

 

Create

tf.Variable is the most common way to create a variable

class VariableV1(Variable):
    def __init__(self,
           initial_value=None,           # initial value of the variable
           trainable=None,               # whether the variable participates in training, i.e. whether the optimizer can update it
           collections=None,
           validate_shape=True,
           caching_device=None,
           name=None,                    # variable name
           variable_def=None,
           dtype=None,                   # variable type
           expected_shape=None,
           import_scope=None,
           constraint=None,
           use_resource=None,
           synchronization=VariableSynchronization.AUTO,
           aggregation=VariableAggregation.NONE,
           shape=None):                  # variable shape
        pass

tf.Variable is an operation (op); its return value is a Variable.

d1 = tf.Variable(2)
d2 = tf.Variable(3, dtype=tf.int32, name='int')
d3 = tf.Variable(4., dtype=tf.float32, name='float')
d4 = tf.add(d1, d2)
d5 = d1 + d2
# d6 = tf.add(d1, d3)     ### operating on tensors of different dtypes is not allowed

init = tf.global_variables_initializer()         ### variables must be initialized

sess1 = tf.Session()
sess1.run(init)
print(sess1.run(d4))        # 5
print(sess1.run(d5))        # 5
# print(sess1.run(d6))
print(type(d5))             # <class 'tensorflow.python.framework.ops.Tensor'>

 

Another way to create a variable is tf.get_variable

d1 = tf.get_variable('d1', shape=[2, 3], initializer=tf.ones_initializer)
d2 = tf.get_variable('d2', shape=[3, 2], initializer=tf.zeros_initializer)
sess3 = tf.Session()
sess3.run(tf.global_variables_initializer())
print(sess3.run(d1))
# [[1. 1. 1.]
#  [1. 1. 1.]]
print(sess3.run(d2))
# [[0. 0.]
#  [0. 0.]
#  [0. 0.]]
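Beyond creation, the main point of tf.get_variable is sharing: inside a tf.variable_scope with reuse=True it retrieves an existing variable instead of failing on a duplicate name. A minimal sketch (the scope and variable names here are illustrative, not from the original post):

import tensorflow as tf

with tf.variable_scope('layer'):
    w1 = tf.get_variable('w', shape=[2], initializer=tf.ones_initializer)
with tf.variable_scope('layer', reuse=True):
    w2 = tf.get_variable('w')        # retrieves the existing variable; no new one is created
print(w1 is w2)       # True
print(w1.name)        # layer/w:0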

 

Initialization

A Variable must be initialized before it can participate in computation; there are two ways:

d1 = tf.Variable(1)
print(d1)       # <tf.Variable 'Variable:0' shape=() dtype=int32_ref>
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)                    ### method 1: run the initializer op
    print(sess.run(d1))      # 1

### method 2: call run() on the initializer directly
d2 = tf.Variable(1)
with tf.Session() as sess:
    tf.global_variables_initializer().run()

If a new variable is created after the initializer, it must be initialized again: the initializer only covers the nodes that existed before it was created.
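A minimal sketch of this pitfall (assuming TensorFlow 1.x; the variable names are illustrative): a variable created after the initializer op is not covered by it.

import tensorflow as tf

a = tf.Variable(1)
init = tf.global_variables_initializer()       ### covers only a
b = tf.Variable(2)                             ### created after init
with tf.Session() as sess:
    sess.run(init)
    # sess.run(b)       ### would raise FailedPreconditionError: b was never initialized
    sess.run(tf.global_variables_initializer())     ### a fresh initializer covers b as well
    print(sess.run(b))      # 2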

 

Properties and methods

All properties and methods of Variable are listed below:

['SaveSliceInfo', 'aggregation', 'assign', 'assign_add', 'assign_sub', 'batch_scatter_update', 'constraint', 'count_up_to', 'device', 'dtype', 'eval', 'from_proto', 'gather_nd', 'get_shape', 
'graph', 'initial_value', 'initialized_value', 'initializer', 'load', 'name', 'op', 'read_value', 'scatter_add', 'scatter_nd_add', 'scatter_nd_sub', 'scatter_nd_update', 'scatter_sub', 
'scatter_update', 'set_shape', 'shape', 'sparse_read', 'synchronization', 'to_proto', 'trainable', 'value']
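For reference, a list like the one above can be reproduced with Python's dir(), filtering out private names (a small sketch, not from the original post):

import tensorflow as tf

print([attr for attr in dir(tf.Variable) if not attr.startswith('_')])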

 

Variable name

This is the node name: if name is set explicitly when creating the Variable, that name is used; otherwise an auto-generated name with an incrementing suffix, such as Variable_1, is assigned.

d1 = tf.Variable(tf.zeros([2, 2]))
d2 = tf.Variable(2., dtype=tf.float32, name='d2')
d3 = tf.Variable(3)
print(d1)               # <tf.Variable 'Variable_1:0' shape=(2, 2) dtype=float32_ref>
print(d1.op.name)       # Variable_1
print(d2.op.name)       # d2
print(d3.op.name)       # Variable_2
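Here the auto-generated suffixes start at Variable_1, presumably because an unnamed Variable from one of the earlier snippets already occupies 'Variable' in the same graph. Note also the difference between the two name attributes: d2.name would print 'd2:0', since a Variable's name carries the output-slot suffix ':0', while op.name is the bare node name 'd2'.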

 

Memory mechanism

A Variable created with tf.Variable can, like a tensor, serve as the input and output of operations. The differences are:

1. A tensor's lifetime normally ends when the computation that uses it completes, at which point its memory is released.

2. A Variable lives permanently in memory; it is updated in step with the computation and does not disappear when the computation ends.

d1 = tf.Variable(2.)
d2 = tf.constant(42.)
print(d2)       # Tensor("Const:0", shape=(), dtype=float32)
d3 = tf.assign_add(d1, 1.)
d4 = tf.add(d2, 1.)
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for i in range(2):
        print(sess.run(d3))      # 3.0, 4.0     ### the variable's state keeps updating across iterations; its memory is not released
        print(sess.run(d4))      # 43.0, 43.0   ### the constant is not updated across iterations; its memory is released each run
    print(d1, sess.run(d1))      # <tf.Variable 'Variable:0' shape=() dtype=float32_ref> 4.0   ### the variable keeps its state after the loop ends; memory is not released
    print(d2, sess.run(d2))      # Tensor("Const:0", shape=(), dtype=float32) 42.0             ### the constant's state is reclaimed after the loop; memory is released

This property of Variable is what makes it suitable for iteratively updated model parameters; see the sketch below.
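A minimal sketch of that usage (the parameter w, the learning rate and the hand-derived gradient are illustrative assumptions, not from the original post): the Variable accumulates updates across iterations, while the loss tensor is recomputed from scratch on every run.

import tensorflow as tf

w = tf.Variable(5.0)                     # parameter, keeps its state across iterations
loss = tf.square(w - 1.0)                # loss tensor, recomputed on every run
grad = 2.0 * (w - 1.0)                   # d(loss)/dw, written out by hand
step = tf.assign_sub(w, 0.1 * grad)      # w <- w - lr * grad, updates w in place

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(3):
        sess.run(step)
        print(sess.run([w, loss]))       # w moves toward 1.0, loss shrinks each step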

 

Variable assignment

A variable cannot be assigned directly with =; there are several ways instead: tf.assign, var.assign, tf.assign_add

d1 = tf.Variable(2)
d2 = tf.Variable(3, dtype=tf.int32, name='int')
d3 = tf.Variable(4., dtype=tf.float32, name='float')
### method 1
# d4 = tf.assign(d2, d3)    ### the two variables must have the same dtype
d5 = tf.assign(d2, d1)
# d7 = tf.assign(d6, d3)    ### the variable being assigned must already exist
### method 2
d8 = d2.assign(100)
### method 3: add a number and assign
d9 = tf.assign_add(d2, 50)

with tf.Session() as sess2:
    tf.global_variables_initializer().run()
    print(sess2.run(d5))        # 2    d5 equals the newly assigned d2
    print(sess2.run(d2))        # 2    d2 itself has really changed
    print(sess2.run(d8))        # 100
    print(sess2.run(d9))        # 150
    print(sess2.run(tf.assign_add(d2, 3)))   # 153
    print(sess2.run(tf.assign(d2, 3)))       # 3
    print(sess2.run(tf.assign(d2, d1)))      # 2

 

trainable

The trainable attribute specifies whether a variable participates in training, i.e. whether the optimizer can update it, similar to requires_grad in PyTorch;

False means the variable does not participate in training; the default is True;

trainable is read-only: it takes effect only when the Variable is created and cannot be changed afterwards;

When the optimizer's minimize op is created, tf collects all trainable Variables into trainable_variables; adding or removing a variable's trainable status after that does not change trainable_variables;

 

x = tf.Variable(3.0, dtype=tf.float32, trainable=False)      ### x is not trainable and does not participate in training
y = tf.Variable(13.0, dtype=tf.float32)                      ### participates in training
train_op = tf.train.AdamOptimizer(0.01).minimize(tf.abs(y - x))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(5):
        _, xx, yy = sess.run([train_op, x, y])
        print('epoch', i, xx, yy)   # watch how x and y change

# epoch 0 3.0 12.99
# epoch 1 3.0 12.98
# epoch 2 3.0 12.969999
# epoch 3 3.0 12.959999
# epoch 4 3.0 12.949999
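Since trainable cannot be changed after creation, a common alternative is to pass var_list to minimize; the optimizer then updates only the variables in that list, even though both are trainable. A small sketch (the specific variables are illustrative):

import tensorflow as tf

x = tf.Variable(3.0)
y = tf.Variable(13.0)
train_op = tf.train.AdamOptimizer(0.01).minimize(tf.abs(y - x), var_list=[y])    # only y is handed to the optimizer
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(3):
        _, xx, yy = sess.run([train_op, x, y])
        print('epoch', i, xx, yy)    # x stays at 3.0, only y decreases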

 

tf.trainable_variables and tf.all_variables

tf.trainable_variables() returns a list of all variables that will be trained

tf.all_variables() returns a list of all variables (this name is deprecated in later TF 1.x releases in favor of tf.global_variables())

v1 = tf.Variable(0, name='v1')
v2 = tf.Variable(tf.constant(5, shape=[1], dtype=tf.float32), name='v2')
global_step = tf.Variable(6, name='global_step', trainable=False)        # declare a variable that is not trained

for ele1 in tf.trainable_variables():
    print(ele1.name)
# v1:0
# v2:0

for ele2 in tf.all_variables():
    print(ele2.name)
# v1:0
# v2:0
# global_step:0

 

Variable saving and loading

To save and load variables, create a Saver object, then call its save method to save and its restore method to load.

from tensorflow.core.protobuf import saver_pb2
class Saver(object):
    def __init__(self,
               var_list=None,
               reshape=False,
               sharded=False,
               max_to_keep=5,
               keep_checkpoint_every_n_hours=10000.0,
               name=None,
               restore_sequentially=False,
               saver_def=None,
               builder=None,
               defer_build=False,
               allow_empty=False,
               write_version=saver_pb2.SaverDef.V2,
               pad_step_number=False,
               save_relative_paths=False,
               filename=None):
        pass
    def save(self,
sess,
           save_path,
           global_step=None,
           latest_filename=None,
           meta_graph_suffix="meta",
           write_meta_graph=True,
           write_state=True,
           strip_default_attrs=False,
           save_debug_info=False):
        pass
    def restore(self, sess, save_path):
        """Restores previously saved variables."""
        pass

 

Creating the Saver object

var_list: the variables to save or load; the parameter takes several forms:

  1. The default None means all Variables
  2. list format: specify which variables; they are checkpointed under their own op names
  3. dict format: specify both the name used in the checkpoint and the variable

save method

save_path: the storage path, conventionally ending in ckpt (checkpoint)

global_step: the global step, effectively a marker; usually a number recording the stage at which the variables were saved. The number is appended to the filename; see the example below.

restore method

save_path: note that this path must include the global_step suffix that was used at save time

 

d1 = tf.Variable(1.)
d2 = tf.Variable(2., dtype=tf.float32, name='d2')
init = tf.global_variables_initializer()

### create the Saver objects
saver = tf.train.Saver()                              ### save all variables
saver1 = tf.train.Saver([d1, d2])                     ### list: specify the variables to save
saver2 = tf.train.Saver({'v1': d1, 'v2': d2})         ### dict: specify both checkpoint names and variables

with tf.Session() as sess:
    sess.run(init)
    ### save variables with the save method
    saver.save(sess, './var/all.ckpt')
    saver1.save(sess, './var/list.ckpt', global_step=0)
    print(saver2.save(sess, './var/dict.ckpt', global_step=1))       # ./var/dict.ckpt-1
    sess.run(tf.assign_add(d2, 3.))      ### change d2 after saving
    print(sess.run(d2))      # 5.0

    ### load variables 1: same saver, same sess
    saver2.restore(sess, './var/dict.ckpt-1')
    print(sess.run(d2))      # 2.0    ### the value from before the change came back, so the save succeeded

### load variables 2: same saver, different sess
with tf.Session() as sess:
    saver2.restore(sess, './var/dict.ckpt-1')
    print(sess.run(d2))      # 2.0

### load variables 3: different saver, different sess
saver2 = tf.train.Saver({'v2': d2})
with tf.Session() as sess:
    saver2.restore(sess, './var/dict.ckpt-1')
    print(sess.run(d2))      # 2.0

As these cases show, saving and loading are independent of each other: a different Saver in a different session can load what another saved.
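Loading does not even require rebuilding the graph by hand: the meta file written alongside the checkpoint can recreate it. A sketch assuming a fresh process and the same ./var paths as above:

import tensorflow as tf

saver = tf.train.import_meta_graph('./var/dict.ckpt-1.meta')    # rebuild the saved graph
with tf.Session() as sess:
    saver.restore(sess, './var/dict.ckpt-1')
    d2 = tf.get_default_graph().get_tensor_by_name('d2:0')      # look the variable up by name
    print(sess.run(d2))      # 2.0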

 

Observations on the variable-saving code above:

1. global_step is appended to the end of the filename

2. save generates four kinds of files: data, index, meta, checkpoint

  • data: stores the model parameters
  • meta: stores the computation graph
  • checkpoint: records the model storage paths; model_checkpoint_path is the path of the most recent model, and all_model_checkpoint_paths lists the paths of all retained models

3. At most the 5 most recent saves are retained (the max_to_keep=5 default)

4. No matter how many times you save, there is only ever one checkpoint file
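The checkpoint bookkeeping file can also be queried programmatically; a small sketch against the ./var directory used above:

import tensorflow as tf

print(tf.train.latest_checkpoint('./var'))       # path of the most recent save
ckpt = tf.train.get_checkpoint_state('./var')
print(ckpt.model_checkpoint_path)                # latest model path
print(ckpt.all_model_checkpoint_paths)           # paths of all retained models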

 

 

 

 

