An Implementation of the Logistic Regression Algorithm

Logistic regression derivation
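
As a brief sketch (the standard logistic regression setup, restated from the code below): for a feature matrix $X$ whose first column is all ones, a label vector $y$, and a weight vector $w$, the model predicts

$$h = \sigma(Xw), \qquad \sigma(z) = \frac{1}{1 + e^{-z}},$$

and ascending the gradient of the log-likelihood with step size $\alpha$ gives the update implemented in gradAscent:

$$w \leftarrow w + \alpha\, X^{\top}(y - h)$$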

"""An implementation of the logistic regression algorithm"""
import numpy as np
import matplotlib.pyplot as plt

"""Load the dataset: store the feature columns in dataMat and the class labels in labelMat"""
def loadDataSet():
    dataMat = []
    labelMat = []
    fr = open('/home/jerry/文档/testset.csv')
    for line in fr.readlines():
        lineArr = line.strip().split()
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])  # 1.0 is the intercept feature x0
        labelMat.append(int(lineArr[2]))
    fr.close()
    return dataMat, labelMat
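
# Note: each line of the input file is expected to hold two whitespace-separated
# feature values followed by an integer class label (0 or 1), e.g. "1.2  3.4  0"
# (illustrative values only).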

"""Sigmoid activation function"""
def sigmoid(inX):
    return 1.0 / (1.0 + np.exp(-inX))

"""Solve for the best-fitting regression coefficients by batch gradient ascent"""
def gradAscent(dataMatIn, classLabels):
    dataMatrix = np.mat(dataMatIn)                # m x n feature matrix
    labelMat = np.mat(classLabels).transpose()    # m x 1 column vector of labels
    m, n = np.shape(dataMatrix)
    alpha = 0.001                                 # learning rate
    maxCycles = 500                               # number of iterations
    weights = np.ones((n, 1))
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)         # predicted probabilities, m x 1
        error = labelMat - h                      # labels minus predictions
        weights = weights + alpha * dataMatrix.transpose() * error
    return weights.getA()                         # return the weights as an ndarray
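
# The loop above performs maxCycles full-batch updates: every iteration multiplies
# the entire m x n data matrix by the weight vector, so the cost per iteration
# grows linearly with the number of samples.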

"""Plot the data points and the fitted decision boundary"""
def plotBestFit(weights):
    dataMat,labelMat = loadDataSet()
    dataArr = np.array(dataMat)  # convert dataMat to a NumPy array for row/column indexing
    n = np.shape(dataMat)[0]
    xcord1 = []; ycord1 = []  # positive samples (label 1)
    xcord2 = []; ycord2 = []  # negative samples (label 0)
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i,1])
            ycord1.append(dataArr[i,2])
        else:
            xcord2.append(dataArr[i,1])
            ycord2.append(dataArr[i,2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s = 20, c = 'red', marker = 's', alpha = 0.5)
    ax.scatter(xcord2, ycord2, s = 20, c = 'green', alpha = 0.5)
    x = np.arange(-3.0, 3.0, 0.1)
    y = (-weights[0] - weights[1]*x) / weights[2]  # boundary where w0 + w1*x1 + w2*x2 = 0, solved for x2
    ax.plot(x,y)
    plt.title('BestFit')
    plt.xlabel('x1')
    plt.ylabel('x2')
    plt.show()
    
if __name__ == '__main__':
    dataMat,labelMat = loadDataSet()
    weights = gradAscent(dataMat,labelMat)
    plotBestFit(weights)
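
As a small sketch of how the learned coefficients could be used to classify a new point (classifyVector is a hypothetical helper name, not part of the original post; it assumes the same [1.0, x1, x2] feature layout produced by loadDataSet):

def classifyVector(inX, weights):
    # Predicted probability that the sample belongs to class 1
    prob = 1.0 / (1.0 + np.exp(-np.dot(inX, weights.flatten())))
    return 1 if prob > 0.5 else 0

# Example usage (illustrative coordinates):
# print(classifyVector(np.array([1.0, 1.2, 3.4]), weights))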

Reposted from blog.csdn.net/jerry_kuan/article/details/83307044