# Linear regression -- Python implementation (gradient descent)

import numpy as np

def computer_error_for_give_point(w, b, points):
    """Return the mean squared error of the line y = w*x + b over *points*.

    Accumulates the squared difference between each observed y and the
    value predicted by the line, then returns the average.

    Args:
        w: slope of the candidate line.
        b: intercept of the candidate line.
        points: array-like of shape (N, 2); column 0 holds x, column 1 holds y.

    Returns:
        float: mean of (w*x + b - y)**2 over all N points.
    """
    points = np.asarray(points)  # accept a plain list of [x, y] pairs too
    loss = 0.0
    for i in range(len(points)):
        x = points[i, 0]
        y = points[i, 1]
        loss += ((w * x + b) - y) ** 2
    return loss / float(len(points))

# The function below performs a single update of w and b; the training loop
# further down calls it repeatedly to refine the parameters.

def get_gradient(w_current, b_current, points, LearningRate):
    """Perform one gradient-descent step on the MSE loss.

    Computes the gradient of mean((w*x + b - y)**2) with respect to w and b,
    then moves each parameter one step against its gradient.

    Args:
        w_current: current slope.
        b_current: current intercept.
        points: array-like of shape (N, 2); column 0 is x, column 1 is y.
        LearningRate: step size for the update.

    Returns:
        tuple: (new_w, new_b) after one update.
    """
    N = len(points)
    w_gradient = 0.0
    b_gradient = 0.0
    for i in range(N):
        x = points[i, 0]
        y = points[i, 1]
        # d/dw mean((w*x + b - y)^2) contributes 2/N * (w*x + b - y) * x
        w_gradient += 2 / N * ((w_current * x + b_current) - y) * x
        # d/db mean((w*x + b - y)^2) contributes 2/N * (w*x + b - y)
        b_gradient += 2 / N * ((w_current * x + b_current) - y)
    new_w = w_current - LearningRate * w_gradient
    new_b = b_current - LearningRate * b_gradient
    return new_w, new_b  # returned as a tuple

def gradeient_descent_run(w, b, points, learn_rate, iteration):
    """Run gradient descent for *iteration* steps starting at (w, b).

    NOTE(review): the function name preserves the original (misspelled)
    public identifier so existing callers keep working.

    Args:
        w: initial slope.
        b: initial intercept.
        points: list or array of [x, y] pairs.
        learn_rate: step size passed through to get_gradient.
        iteration: number of update steps to perform.

    Returns:
        tuple: (w, b) after all iterations.
    """
    points = np.array(points)  # allow a plain list; get_gradient indexes [i, j]
    for _ in range(iteration):
        w, b = get_gradient(w, b, points, learn_rate)
    return w, b

if __name__ == "__main__":
    # The sample data lies exactly on y = x, so w should converge toward 1
    # and b toward 0 as training proceeds.
    initialize_w = 0
    initialize_b = 0
    points = [[10, 10], [9, 9], [8, 8], [7, 7], [6, 6], [5, 5], [4, 4], [3, 3], [2, 2], [1, 1]]
    w, b = gradeient_descent_run(initialize_w, initialize_b, points, 0.005, 100)
    print(w)
    print(b)

# Related reading suggestions omitted.
# Origin: www.cnblogs.com/Salted-fish-turn-over/p/11448221.html