4. Linear Regression

Fit a straight line y = k*x + d to 100 noisy samples generated from y = 0.1*x + 0.2, using gradient descent in TensorFlow 1.x.

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# generate 100 sample points along y = 0.1*x + 0.2 with a little Gaussian noise
x_data = np.random.rand(100)
noise = np.random.normal(0, 0.01, x_data.shape)
y_data = x_data * 0.1 + 0.2 + noise

# visualize the raw data
plt.scatter(x_data, y_data)
plt.show()
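
The data come from y = 0.1*x + 0.2, so a good fit should recover a slope near 0.1 and an intercept near 0.2. As a quick sanity check (not part of the original post), NumPy's closed-form least-squares fit gives the same answer without any training loop:

# optional check: np.polyfit returns [slope, intercept] for a degree-1 fit
slope, intercept = np.polyfit(x_data, y_data, 1)
print(slope, intercept)  # expected to be close to 0.1 and 0.2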

# build the linear model y = k*x + d
d = tf.Variable(np.random.rand(1))
k = tf.Variable(np.random.rand(1))
y = k * x_data + d

# quadratic (mean squared error) cost function
loss = tf.losses.mean_squared_error(y_data, y)
# define a gradient descent optimizer with learning rate 0.3
optimizer = tf.train.GradientDescentOptimizer(0.3)
# training op that minimizes the cost function
train = optimizer.minimize(loss)

# initialize all variables
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for i in range(201):
        sess.run(train)
        if i % 20 == 0:
            print(i, sess.run([k, d]))
    # plot the fitted line over the data
    y_pred = sess.run(y)
    plt.scatter(x_data, y_data)
    plt.plot(x_data, y_pred, 'r-', lw=3)
    plt.show()
Sample output (exact values vary with the random initialization):

0 [array([0.42558686]), array([0.07772181])]
20 [array([0.24686251]), array([0.1212207])]
40 [array([0.17103131]), array([0.16282419])]
60 [array([0.13410329]), array([0.18308412])]
80 [array([0.1161202]), array([0.19295024])]
100 [array([0.10736286]), array([0.1977548])]
120 [array([0.10309823]), array([0.20009452])]
140 [array([0.10102146]), array([0.2012339])]
160 [array([0.10001012]), array([0.20178875])]
180 [array([0.09951763]), array([0.20205895])]
200 [array([0.09927779]), array([0.20219054])]
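
The printed [k, d] pairs converge toward the slope 0.1 and intercept 0.2 that were used to generate the data. The code above targets TensorFlow 1.x; on TensorFlow 2.x, where tf.Session and tf.train.GradientDescentOptimizer no longer exist, a rough eager-mode sketch of the same fit (an adaptation, not the original post's code) could look like this:

# TensorFlow 2.x sketch of the same linear fit (assumes eager execution)
import numpy as np
import tensorflow as tf

x_data = np.random.rand(100).astype(np.float32)
noise = np.random.normal(0, 0.01, x_data.shape).astype(np.float32)
y_data = x_data * 0.1 + 0.2 + noise

k = tf.Variable(np.random.rand(1).astype(np.float32))
d = tf.Variable(np.random.rand(1).astype(np.float32))
optimizer = tf.keras.optimizers.SGD(learning_rate=0.3)

for i in range(201):
    with tf.GradientTape() as tape:
        y = k * x_data + d                            # linear model
        loss = tf.reduce_mean(tf.square(y_data - y))  # mean squared error
    grads = tape.gradient(loss, [k, d])
    optimizer.apply_gradients(zip(grads, [k, d]))
    if i % 20 == 0:
        print(i, k.numpy(), d.numpy())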
