# 第一部分 — Part 1: 1-D gradient descent demo
import sklearn.datasets as datasets
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
# Minimize f(x) = (x - 4.3)^2 + 7.5x + 6 by gradient descent on its derivative.
def f(x):
    """Objective being minimized (kept for reference; the loop only uses g)."""
    return (x - 4.3) ** 2 + 7.5 * x + 6


def g(x):
    """Analytic derivative of f: 2*(x - 4.3) + 7.5 = 2x - 1.1."""
    return 2 * x - 1.1


v_min = np.random.randint(-10, 10, size=1)[0]
v_min_last = v_min + 1  # seed the "previous" iterate so the first convergence test fails
precision = 0.0001      # stop once successive iterates differ by less than this
step = 0.01             # learning rate
max_time = 3000         # iteration cap
count = 0
print("随机生成的最小值:--------------------------", v_min)
# Loop until the iterate stops moving or the iteration budget runs out.
while np.abs(v_min - v_min_last) >= precision and count <= max_time:
    v_min_last = v_min
    v_min = v_min - g(v_min) * step
    print("-----------------------------梯度下降更新的最小值", v_min)
    count += 1
# 第二部分 — Part 2: linear regression (sklearn reference + hand-rolled model)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
# Generate a noisy line y = w*x + b and fit it with sklearn as a reference.
X = np.linspace(2.5, 12, 25)                 # 25 sample points on [2.5, 12]
w = np.random.randint(2, 10, size=1)[0]      # true slope
b = np.random.randint(-5, 5, size=1)[0]      # true intercept
noise = np.random.randn(25) * 2
y = X * w + b + noise                        # noisy targets
lr = LinearRegression(fit_intercept=True)
features = X.reshape(-1, 1)                  # sklearn expects a 2-D design matrix
lr.fit(features, y)
w_ = lr.coef_        # estimated slope
b_ = lr.intercept_   # estimated intercept
class Linear_model(object):
    """Simple linear model y = w*x + b trained by batch gradient descent on MSE."""

    def __init__(self):
        # Random starting point for both parameters.
        self.w = np.random.randn(1)[0]
        self.b = np.random.randn(1)[0]
        print(f"------------------------起始的斜率{self.w},起始的截距{self.b}")

    def model(self, x):
        """Prediction for input x."""
        return self.w * x + self.b

    def loss(self, x, y):
        """Return the gradients (dL/dw, dL/db) of the squared error at one sample.

        NOTE: despite its name this returns the gradients, not the loss value;
        the name is kept for backward compatibility.
        """
        g_w = 2 * (y - self.model(x)) * (-x)
        g_b = 2 * (y - self.model(x)) * (-1)
        return g_w, g_b

    def gradient_descend(self, g_w, g_b, step=0.01):
        """Apply one gradient-descent update with learning rate `step`."""
        self.w = self.w - g_w * step
        self.b = self.b - g_b * step
        # Fixed message: this prints the *current* parameters, not the starting ones.
        print(f"------------------------当前的斜率{self.w},当前的截距{self.b}")

    def fit(self, X, y):
        """Fit w and b to (X, y) by batch gradient descent.

        Stops when both parameters move less than `precision` between
        iterations, or after `max_count` updates.
        """
        w_last = self.w + 1
        b_last = self.b + 1
        precision = 0.00001
        max_count = 3000
        count = 0
        while True:
            # BUG FIX: the original wrote `(np.abs(self.b - b_last), precision)`,
            # a tuple that is always truthy, so b-convergence was never checked.
            if (np.abs(self.w - w_last) < precision) and (np.abs(self.b - b_last) < precision):
                break
            if count > max_count:
                break
            # BUG FIX: refresh the "previous" parameters each iteration; the
            # original never updated them, so the convergence test compared
            # against the initial values forever.
            w_last = self.w
            b_last = self.b
            g_w = 0
            g_b = 0
            size = X.shape[0]
            for xi, yi in zip(X, y):
                # Call loss() once per sample (the original called it twice).
                dw, db = self.loss(xi, yi)
                g_w += dw / size
                g_b += db / size
            self.gradient_descend(g_w, g_b)
            count += 1

    def coef_(self):
        """Fitted slope."""
        return self.w

    def intercepe(self):  # (sic) typo kept so existing callers keep working
        """Fitted intercept."""
        return self.b
# Create a model instance with random initial parameters (prints the start values).
lm = Linear_model()