The source code of Li Hongyi's machine learning Pokémon demo is given below; my personal understanding of the relevant code is added in the comments.
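Before reading the code, it helps to restate the quantities it computes (my own summary of the standard linear-regression setup, not text from the original demo). The model, the squared-error loss over the training examples, and its gradients are

$$y = b + w \cdot x$$

$$L(w, b) = \sum_n \left(\hat{y}^n - (b + w \cdot x^n)\right)^2$$

$$\frac{\partial L}{\partial b} = -2\sum_n \left(\hat{y}^n - b - w \cdot x^n\right), \qquad \frac{\partial L}{\partial w} = -2\sum_n \left(\hat{y}^n - b - w \cdot x^n\right) x^n$$

and the Adagrad update divides each parameter's learning rate by the square root of its accumulated squared gradients, e.g. $b \leftarrow b - \frac{\eta}{\sqrt{\sum_t (g_b^{(t)})^2}}\, g_b$. These are exactly the expressions that appear in the b_grad / w_grad and lr_b / lr_w lines below.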
import numpy as np
import matplotlib.pyplot as plt

x_data = [338., 333., 328., 207., 226., 25., 179., 60., 208., 606.]
y_data = [640., 633., 619., 393., 428., 27., 193., 66., 226., 1591.]
# Model: y_data = b + w * x_data

x = np.arange(-200, -100, 1)    # candidate bias values (100 points)
y = np.arange(-5, 5, 0.1)       # candidate weight values (100 points)
z = np.zeros((len(x), len(y)))  # loss surface: one entry per (b, w) pair, here 100 x 100
X, Y = np.meshgrid(x, y)        # grid matrices, each of shape (len(y), len(x)); not used later in this script
for i in range(len(x)):
    for j in range(len(y)):
        b = x[i]
        w = y[j]
        z[j][i] = 0
        for n in range(len(x_data)):
            # z[j][i] is the value of the loss function L when b = x[i] and w = y[j]
            z[j][i] = z[j][i] + (y_data[n] - b - w * x_data[n]) ** 2
        z[j][i] = z[j][i] / len(x_data)  # average the squared error over the training examples

# ydata = b + w * xdata
b = -120            # initial b
w = -4              # initial w
lr = 1              # learning rate
iteration = 100000  # number of iterations to run

# store initial values for plotting
b_history = [b]
w_history = [w]

lr_b = 0  # accumulators that give b and w their own adaptive learning rates
lr_w = 0

# iterations: after 100000 steps, look at the final result
for i in range(iteration):
    b_grad = 0.0  # reset b_grad to 0
    w_grad = 0.0  # reset w_grad to 0
    for n in range(len(x_data)):
        # Note: we differentiate the loss L with respect to the parameters w and b,
        # i.e. we track how w and b move along their respective axes.
        # x_data and y_data are just the data -- be sure to keep them apart!
        b_grad = b_grad - 2.0 * (y_data[n] - b - w * x_data[n]) * 1.0
        w_grad = w_grad - 2.0 * (y_data[n] - b - w * x_data[n]) * x_data[n]

    # lr_b and lr_w accumulate the squared gradients,
    # which is exactly what the Adagrad update below needs
    lr_b = lr_b + b_grad ** 2
    lr_w = lr_w + w_grad ** 2

    # update parameters with Adagrad
    # (the effective step size shrinks as the iterations accumulate)
    b = b - lr / np.sqrt(lr_b) * b_grad
    w = w - lr / np.sqrt(lr_w) * w_grad  # this reaches the minimum far more efficiently

    # store parameters for plotting
    b_history.append(b)
    w_history.append(w)

# plot the figure
plt.contourf(x, y, z, 50, alpha=0.5, cmap=plt.get_cmap('jet'))
plt.plot([-188.4], [2.67], 'x', ms=12, markeredgewidth=3, color='orange')
plt.plot(b_history, w_history, 'o-', ms=3, lw=1.5, color='black')
plt.xlim(-200, -100)
plt.ylim(-5, 5)
plt.xlabel(r'$b$', fontsize=16)
plt.ylabel(r'$w$', fontsize=16)
plt.show()
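Since the loss is an ordinary least-squares objective, the optimum marked by the orange 'x' at roughly (b, w) = (-188.4, 2.67) can be cross-checked against a closed-form fit. The snippet below is my own sanity check, not part of the original demo; it uses np.polyfit, which for degree 1 returns the coefficients [slope, intercept] minimizing the same squared error:

import numpy as np

x_data = [338., 333., 328., 207., 226., 25., 179., 60., 208., 606.]
y_data = [640., 633., 619., 393., 428., 27., 193., 66., 226., 1591.]

# Degree-1 polynomial fit: returns [w, b] for y = w*x + b
w_opt, b_opt = np.polyfit(x_data, y_data, 1)
print(b_opt, w_opt)  # should land near the orange 'x' at about (-188.4, 2.67)

As for the update rule: in the lecture this demo comes from, plain gradient descent with a single shared learning rate either overshoots along the w axis or barely moves along the b axis; the per-parameter Adagrad denominators lr_b and lr_w are what let the large lr = 1 converge.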