Implementing Simple Linear Regression with Gradient Descent in Python

Gradient descent is a very common optimization method, and it is indispensable in deep learning with neural networks; starting directly from a deep-learning implementation, however, is fairly complex. This article uses gradient descent to fit the simplest least-squares linear regression (LSR) problem, as a foundation for further study.
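For reference, the code below implements standard batch gradient descent on the mean squared error cost. With design matrix X, target vector y, parameter vector \theta, learning rate \alpha, and m training samples, each iteration evaluates the cost and applies the update:

J(\theta) = \frac{1}{2m}\sum_{i=1}^{m}\left(x^{(i)\top}\theta - y^{(i)}\right)^{2},
\qquad
\theta \leftarrow \theta - \frac{\alpha}{m}\,X^{\top}(X\theta - y)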

import numpy as np
import matplotlib.pyplot as plt


# Toy data set: column 0 is the input feature, column 1 is the target.
x = np.array([[1,2],[2,1],[3,2.5],[4,3],
              [5,4],[6,5],[7,2.7],[8,4.5],
              [9,2]])

m, n = np.shape(x)
# Build the design matrix: the feature stays in column 0 and column 1 is
# a constant 1, so the second component of theta acts as the intercept.
x_data = np.ones((m,n))
x_data[:,:-1] = x[:,:-1]
y_data = x[:,-1]

print(x_data.shape)
print(y_data.shape)
m, n = np.shape(x_data)
theta = np.ones(n)  # initialize both parameters (slope, intercept) to 1
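To make the bias-column trick concrete, here is a quick sanity check (not part of the original script) showing the first few rows of the design matrix alongside the targets:

print(x_data[:3])   # [[1. 1.], [2. 1.], [3. 1.]] -- feature plus bias column
print(y_data[:3])   # [2.  1.  2.5]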


def batchGradientDescent(maxiter, x, y, theta, alpha):
    m = x.shape[0]                             # number of training samples
    xTrains = x.transpose()
    for i in range(maxiter):
        hypothesis = np.dot(x, theta)          # current predictions X.theta
        loss = hypothesis - y                  # residuals
        gradient = np.dot(xTrains, loss) / m   # gradient of the cost w.r.t. theta
        theta = theta - alpha * gradient       # gradient descent update
        # Mean squared error cost: 1/(2m) * sum of squared residuals.
        cost = 1.0/(2*m) * np.sum(np.square(np.dot(x, theta) - y))
        print("cost: %f" % cost)
    return theta
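As a sanity check on the gradient-descent result, this problem also has a closed-form least-squares solution. The sketch below (not part of the original post) computes it with NumPy's np.linalg.lstsq, assuming x_data and y_data are defined as above:

# Closed-form least-squares fit for comparison (a minimal sketch).
theta_exact, _, _, _ = np.linalg.lstsq(x_data, y_data, rcond=None)
print("closed-form theta:", theta_exact)
# With enough iterations and a suitable learning rate, the gradient
# descent result should approach this vector.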

# 10 iterations of gradient descent with learning rate 0.01.
result = batchGradientDescent(10, x_data, y_data, theta, 0.01)
print(result)
newy = np.dot(x_data, result)    # predictions of the fitted line
fig, ax = plt.subplots()
ax.plot(x[:,0], newy, 'k--')     # fitted regression line (dashed black)
ax.plot(x[:,0], x[:,1], 'ro')    # original data points (red dots)
plt.show()
print("final: " + result)

Reprinted from www.cnblogs.com/kidsitcn/p/9889106.html