OLS

A minimal NumPy implementation of ordinary least squares (OLS) linear regression, fitted to synthetic data by batch gradient descent.

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(1234)
x = rng.rand(100, 3)  # 100 samples, 3 features, uniform on [0, 1)

# True coefficients are (1, 5, 9); add standard-normal noise.
y = x.dot([[1], [5], [9]])
err = rng.randn(100, 1)
y = y + err

n_sample = x.shape[0]
n_feature = x.shape[1]

print(n_sample, n_feature)
plt.plot(x, y, ".b")  # scatter of each feature column against y
plt.show()


#################  Model  ################
def model(X, theta):
    # Linear prediction y_hat = X @ theta; reshape a copy instead of
    # mutating the caller's array in place.
    return X.dot(theta.reshape(n_feature, 1))
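
For reference, model computes the linear prediction

$$\hat{y} = X\theta,$$

where X is the n-by-d design matrix and theta is the d-by-1 coefficient vector.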


################  Cost function  #########
def cost(X, y, theta):
    # Halved mean squared error; compute the residual once.
    residual = model(X, theta) - y
    return (1 / (2 * n_sample)) * residual.T.dot(residual)
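
The cost function is the usual least-squares objective, a halved mean squared error:

$$J(\theta) = \frac{1}{2n}\,(X\theta - y)^\top (X\theta - y).$$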


###############  Gradients  ################
def gradient(X, y, theta):
    # Vectorized gradient of the cost: (1/n) * X^T (X theta - y).
    # Equivalent per-feature loop:
    # for j in range(n_feature):
    #     grad[j] = (1 / n_sample) * (model(X, theta) - y).T.dot(X[:, j])
    return (1.0 / n_sample) * X.T.dot(model(X, theta) - y)
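
Differentiating J gives the vectorized gradient used above,

$$\nabla J(\theta) = \frac{1}{n}\,X^\top (X\theta - y),$$

which is exactly what the commented per-feature loop computes one coordinate at a time.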


############# update_theta  ###############
def update_theta(theta, grad, sigma):
    # One gradient-descent step with learning rate sigma.
    return theta - sigma * grad
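
Each call performs one gradient-descent step with learning rate sigma:

$$\theta \leftarrow \theta - \sigma\,\nabla J(\theta).$$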


############ stop_strategy #################
def stop_strategy(J, J_new, threshold):
    # Stop once the cost improvement per step falls below the threshold.
    # (Renamed parameters so they don't shadow the cost function above.)
    return (J - J_new) < threshold
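
Training stops once the improvement per step falls below the threshold,

$$J(\theta_t) - J(\theta_{t+1}) < \varepsilon.$$

Note that a diverging step (negative improvement) also satisfies this test; taking the absolute value of the difference would distinguish convergence from divergence.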


################# OLS ######################
def OLS(X, y, sigma, threshold):
    theta = np.array([1.0, 2.0, 3.0]).reshape(n_feature, 1)  # initial guess
    counter = 0
    while True:
        J = cost(X, y, theta)
        grad = gradient(X, y, theta)
        theta_new = update_theta(theta, grad, sigma)
        J_new = cost(X, y, theta_new)
        if stop_strategy(J, J_new, threshold):
            break

        theta = theta_new
        counter += 1

    return theta, counter


theta, counter = OLS(x, y, 0.2, 0.0001)
print(theta)
print(counter)
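
As a sanity check (a sketch added here, not part of the original script), the gradient-descent estimate can be compared against NumPy's closed-form least-squares solution; both should land near the true coefficients (1, 5, 9):

# Closed-form least-squares fit on the same x and y, for comparison.
theta_ls, *_ = np.linalg.lstsq(x, y, rcond=None)
print(theta_ls)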



Reproduced from blog.csdn.net/u010016056/article/details/80727648