NaN problem in TensorFlow training

Training a deep network is a process of iteratively updating its parameters. When the input data is not normalized, the forward pass can saturate: if the softmax output has already collapsed to the form [0, 0, 0, 1, 0, 0, 0, 0] while the true label is [1, 0, 0, 0, 0, 0, 0, 0], the network assigns probability 0 to the correct class. The cross-entropy then evaluates log(0), which is -inf; backpropagation pushes the weights and biases toward infinity, the values overflow, and the loss and parameters turn into NaN.
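A minimal sketch of how this failure shows up, using the same hand-rolled cross-entropy as the example further down (the hard-coded vectors are illustrative only, not from the original run):

import tensorflow as tf

# Saturated prediction: probability 0 for the true class (index 0).
pred = tf.constant([[0., 0., 0., 1., 0., 0., 0., 0.]])
label = tf.constant([[1., 0., 0., 0., 0., 0., 0., 0.]])

# The true-class term is 1 * log(0) = -inf, and the zero-label terms are
# 0 * log(0) = nan, so the whole sum evaluates to nan.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(label * tf.log(pred), axis=1))

with tf.Session() as sess:
    print(sess.run(cross_entropy))  # nan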

Solutions:

1. Normalize the input data, e.g. divide image pixel values by 255 so they fall in [0, 1];

2. For networks with many layers, apply batch normalization to each layer;

3. Initialize the weights with tf.truncated_normal([3, 3, 1, 64], mean=0.0, stddev=0.01), i.e. zero mean and a small standard deviation;

4. Use a bounded activation function such as tanh;

5. Reduce the learning rate lr. (The sketch below combines several of these fixes.)
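A minimal sketch, assuming a single 3x3 convolutional layer on 28x28 grayscale images, of how fixes 1-5 look together in TF 1.x (the placeholder name X is illustrative):

import tensorflow as tf

X = tf.placeholder(tf.float32, [None, 28, 28, 1])

# Fix 1: scale raw 0-255 pixel values into [0, 1].
X_norm = X / 255.0

# Fix 3: truncated-normal initialization with zero mean and small stddev.
Weights = tf.Variable(tf.truncated_normal([3, 3, 1, 64], mean=0.0, stddev=0.01))
biases = tf.Variable(tf.zeros([64]))
conv = tf.nn.conv2d(X_norm, Weights, strides=[1, 1, 1, 1], padding='SAME') + biases

# Fix 2: batch-normalize the pre-activation.
conv_bn = tf.layers.batch_normalization(conv, training=True)

# Fix 4: a bounded activation keeps the outputs in [-1, 1].
activated = tf.nn.tanh(conv_bn)

# Fix 5: a smaller learning rate slows the parameter updates down.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-4)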

Example:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('data', one_hot=True)

def add_layer(input_data, in_size, out_size, activation_function=None):
    # Fully connected layer; returns both the output and the weights for inspection.
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    Biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.add(tf.matmul(input_data, Weights), Biases)
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return {'outdata': outputs, 'w': Weights}

def get_accuracy(t_y):
    # Fraction of samples where the predicted class matches the one-hot label.
    global prediction
    accu = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(prediction['outdata'], 1), tf.argmax(t_y, 1)), dtype=tf.float32))
    return accu

X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

l1 = add_layer(X, 784, 1024, None)
prediction = add_layer(l1['outdata'], 1024, 10, tf.nn.softmax)
# Clip the softmax output away from exact zero so tf.log never produces -inf.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(tf.clip_by_value(prediction['outdata'], 1e-10, 1.0)), reduction_indices=[1]))

optimizer = tf.train.GradientDescentOptimizer(0.000001)  # very small learning rate (fix 5)
train = optimizer.minimize(cross_entropy)


# Untrained probe layer: compare raw logits with their softmax for debugging.
newW = tf.Variable(tf.random_normal([1024, 10]))
newOut = tf.matmul(l1['outdata'], newW)
newSoftMax = tf.nn.softmax(newOut)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(2):  # short debugging run; increase the range for real training
        X_train, y_train = mnist.train.next_batch(1)
        X_train = X_train / 255  # normalize the input into [0, 1] (fix 1)
        print(sess.run(prediction['outdata'], feed_dict={X: X_train, Y: y_train}))
        print(sess.run(newOut, feed_dict={X: X_train}))
        print(sess.run(newSoftMax, feed_dict={X: X_train}))
        print(y_train)
        sess.run(train, feed_dict={X: X_train, Y: y_train})
        if i % 100 == 0:
            accuracy = get_accuracy(mnist.test.labels)
            print(sess.run(accuracy, feed_dict={X: mnist.test.images}))
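A further safeguard, not used in the snippet above: TF 1.x provides a fused op that folds the softmax and the log into one numerically stable computation, so no explicit tf.log is needed. A minimal sketch reusing the add_layer helper:

# Build the last layer without a softmax so it emits raw logits,
# then let the fused op handle softmax + log stably.
logits = add_layer(l1['outdata'], 1024, 10, None)['outdata']
stable_cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=logits))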
