# 一元线性回归模型与多元线性回归模型 (simple and multiple linear regression models)

"""
《深度学习与图像识别原理实践》
"""
import numpy as np
class SimpleLinearRegressionSelf:
    """Simple (univariate) linear regression fitted by ordinary least squares.

    Model: y = a_ * x + b_, where a_ is the slope and b_ the intercept.
    """

    def __init__(self):
        """Initialize an unfitted model; coefficients are None until fit()."""
        self.a_ = None  # slope, set by fit()
        self.b_ = None  # intercept, set by fit()

    def fit(self, x_train, y_train):
        """Fit slope and intercept from 1-D training data.

        :param x_train: 1-D numpy array of feature values
        :param y_train: 1-D array of target values, same length as x_train
        :return: self (allows fluent usage)
        """
        assert x_train.ndim==1,"一元线性回归模型仅仅处理向量,而不能处理矩阵"
        assert len(x_train) == len(y_train), \
            "x_train and y_train must have the same length"
        x_mean = np.mean(x_train)
        y_mean = np.mean(y_train)
        # Vectorized least-squares solution (replaces the original Python loop):
        #   a = sum((x - mx) * (y - my)) / sum((x - mx)^2),  b = my - a * mx
        x_dev = x_train - x_mean
        self.a_ = np.sum(x_dev * (y_train - y_mean)) / np.sum(x_dev ** 2)
        self.b_ = y_mean - self.a_ * x_mean
        return self

    def predict(self, x_test_group):
        """Predict targets for an iterable of feature values; returns ndarray."""
        return np.array([self._predict(x_test) for x_test in x_test_group])

    # Backward-compatible alias: the original public API misspelled "predict".
    predicit = predict

    def _predict(self, x_test):
        """Predict one value with the fitted line y = a_*x + b_."""
        return self.a_ * x_test + self.b_

    def mean_squared_error(self, y_true, y_predict):
        """Mean squared error between true and predicted values."""
        return np.sum((y_true - y_predict) ** 2) / len(y_true)

    def r_square(self, y_true, y_predict):
        """Coefficient of determination: R^2 = 1 - MSE / Var(y_true)."""
        return 1 - self.mean_squared_error(y_true, y_predict) / np.var(y_true)


if __name__ == '__main__':
    # Demo: fit the simple regression on a tiny data set and inspect it.
    train_x = np.array([1, 2, 4, 6, 8])
    train_y = np.array([2, 5, 7, 8, 9])

    model = SimpleLinearRegressionSelf().fit(train_x, train_y)
    # Predict a single new point, then score two held-out points.
    print(model.predicit([7]))
    print(model.r_square([8, 9], model.predicit([6, 8])))

# ---- 多元线性回归模型 (multiple linear regression model) ----

import numpy as np
from numpy import linalg

class MLinearRegression:
    """Multiple linear regression solved via the normal equation.

    theta = pinv(X_b^T X_b) X_b^T y, where X_b is X with a leading ones column.
    """

    def __init__(self):
        """Initialize an unfitted model."""
        self.coef_ = None          # feature coefficients, theta[1:]
        self.interception_ = None  # intercept term, theta[0]
        self._theta = None         # full parameter vector

    def fit(self, X_train, y_train):
        """
        :param X_train: 矩阵X大写, shape (n_samples, n_features)
        :param y_train: 向量y小写, shape (n_samples,)
        :return: self
        """
        assert X_train.shape[0]==y_train.shape[0],"训练集的矩阵行数与标签的行数保持一致"
        ones = np.ones((X_train.shape[0], 1))
        # Augment X with a leading column of ones so theta[0] is the intercept.
        X_b = np.hstack((ones, X_train))
        # Use the pseudo-inverse instead of inv(): identical result when
        # X_b^T X_b is invertible, but still yields a least-squares solution
        # when it is singular (e.g. collinear features), where inv() raises.
        self._theta = linalg.pinv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)
        self.interception_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self

    def predict(self, X_predict):
        """Predict targets for X_predict of shape (n_samples, n_features)."""
        ones = np.ones((X_predict.shape[0], 1))
        X_b = np.hstack((ones, X_predict))
        return X_b.dot(self._theta)

    def mean_squared_error(self, y_true, y_predict):
        """Mean squared error between true and predicted values."""
        return np.sum((y_true - y_predict) ** 2) / len(y_true)

    def score(self, X_test, y_test):
        """R^2 score of the fitted model on a test set."""
        y_predict = self.predict(X_test)
        return 1 - self.mean_squared_error(y_test, y_predict) / np.var(y_test)



# 猜你喜欢 (blog "you may also like" footer, kept for provenance)
# 转载自 (reposted from): blog.csdn.net/qq_41251963/article/details/108470305