How to implement linear regression with TensorFlow

 

Environment: Anaconda

Without further ado, here is the code:

 

tensorflow TF AS Import 
Import OS 
os.environ [ 'TF_CPP_MIN_LOG_LEVEL'] = '2' 

tf.app.flags.DEFINE_integer ( "MAX_STEP", 300, "the number of steps of the training model") 
the FLAGS = tf.app.flags.FLAGS 

DEF linear_regression (): 
    '' ' 
    from the linear regression implemented 
    : return: 
    ' '' 
    . # 100. 1 prepared sample characteristic values of X, the target y_true 

    with tf.variable_scope ( "original_data"): 
        #mean is the average 
        #stddev representative of variance 
        = tf.random_normal X-(Shape = (100,1), Mean = 0, = STDDEV. 1) 

        y_true tf.matmul = (X-, [[0.8]]) + 0.7 

    # 2 linear models were established:. 
    with tf.variable_scope ( "linear_model"):  
        Weigh = tf.Variable (initial_value = tf.random_normal (Shape = (. 1,1)))
        BIAS = tf.Variable (initial_value = tf.random_normal (Shape = (. 1,1)))

        = tf.matmul y_predict (X-, Weigh) + BIAS 

    # determines loss function. 3 
    # mean square error ((y-y_repdict) ^ 2 ) / m = average error of each sample 
    with tf.variable_scope ( "Loss"): 
        error tf.reduce_mean = (tf.square (y_predict-y_true)) 

    # gradient descent optimization loss. 4: specify the learning rate 
    with tf.variable_scope ( "gd_optimizer"): 
        Optimizer = tf.train.GradientDescentOptimizer (learning_rate = 0.01) .minimize ( error) 

    # collect variable 
    tf.summary.scalar ( "error", error) 
    tf.summary.histogram ( "weights", Weigh) 
    tf.summary.histogram ( "BIAS", BIAS) 

    # merge variable 
    merge = tf.summary. merge_all ()
 
    # initialize variables
    = tf.global_variables_initializer the init () 

    # create a Saver 
    Saver = tf.train.Saver () 
    # open training session 
    with tf.Session () AS sess: 
        # initialize variables OP 
        sess.run (the init) 
        Print ( "random initialization weight is {}, paranoid as {}. "the format (weigh.eval (), bias.eval ())) 

        # Print (weigh.eval (), bias.eval ()) 
        # saver.restore (Sess,." / checkpoint / linearregression ") 
        # Print (weigh.eval (), bias.eval ()) 
        # create a file event 
        file_writer = tf.summary.FileWriter (logdir =" ./ ", Graph = sess.graph) 
        # training model 

        for in Range I (FLAGS.max_step): 
            sess.run (Optimizer) 
            Summary sess.run = (Merge) 
            file_writer.add_summary (the Summary , I) 
            Print ( "first step {} {error}, {} is a weight, is paranoid {}". format (i, error.eval (), weigh.eval (), bias.eval ())) 
            #checkpoint:The checkpoint file
            #tf.keras:h5
            # saver.save(sess,"./checkpoint/linearregression")

if __name__ == '__main__':
    linear_regression()

  Partial output of the results:

step 294 error 7.031372661003843e-06, weight [[0.7978232]], bias [[0.69850117]]
step 295 error 5.66376502320054e-06, weight [[0.7978593]], bias [[0.6985256]]
step 296 error 5.646746103593614e-06, weight [[0.7978932]], bias [[0.698556]]
step 297 error 5.33674938196782e-06, weight [[0.7979515]], bias [[0.69858944]]
step 298 error 5.233380761637818e-06, weight [[0.79799336]], bias [[0.6986183]]
step 299 error 5.024347956350539e-06, weight [[0.7980382]], bias [[0.6986382]]

  

 

You may also like

Origin www.cnblogs.com/LiuXinyu12378/p/11366803.html