Self-Study Machine Learning: Logistic Regression

Classification with logistic regression is built on the sigmoid function, which is simple: sigmoid(z) = 1/(1+exp(-z)).
The main task is determining the regression coefficients (the weights),
and this is done with optimization methods such as Newton's method or gradient descent (ascent).
Gradient ascent relies on the concepts of gradients and partial derivatives from calculus; for the mathematical details, consult a calculus textbook. Below we go straight to the code.
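For reference (this derivation is standard but not spelled out in the original post), gradient ascent maximizes the log-likelihood of the logistic model. With h = sigmoid(Xw) and labels y in {0,1}:

\ell(w) = \sum_{i=1}^{m} \left[ y_i \log h_i + (1 - y_i) \log(1 - h_i) \right]
\nabla_w \ell(w) = X^\top (y - h)
w \leftarrow w + \alpha \, X^\top (y - h)

The last line is exactly the update weights = weights + alpha*dataMatrix.transpose()*error in the code below.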

The following code implements logistic regression in Python, mainly following Machine Learning in Action:

from numpy import *  # provides exp, mat, ones, array, arange, random, ...
                     # (math.exp would fail on matrices; numpy's elementwise exp is needed)

def loadDataset():
    dataMat = []
    labelMat = []
    fr = open('testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()  # strip leading/trailing whitespace and split into fields
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])  # prepend 1.0 as the intercept term
        labelMat.append(int(lineArr[2]))  # store the label as an int, not a string
    return dataMat, labelMat
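The data-file format is not shown in the post; assuming the testSet.txt that ships with Machine Learning in Action, each line holds two feature values and a 0/1 label (the sample values below are illustrative):

# Hypothetical excerpt of testSet.txt (whitespace-separated: x1, x2, label):
# -0.017612   14.053064   0
# 1.395634    4.662541    1

dataMat, labelMat = loadDataset()
print(shape(dataMat))  # e.g. (100, 3): 100 samples, each with intercept + 2 features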

def sigmoid(inx):
    return 1.0/(1 + exp(-inx))  # the sigmoid function; numpy's exp works elementwise on vectors and matrices
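One caveat the post does not mention: for large negative inputs, exp(-inx) overflows and NumPy prints a RuntimeWarning. A minimal sketch of a numerically stabler variant (stableSigmoid and the clipping bounds are my own choices, not from the book):

def stableSigmoid(inx):
    # clip the argument so exp() cannot overflow; the result is effectively
    # unchanged because sigmoid already saturates to 0/1 well before |z| = 500
    return 1.0/(1 + exp(-clip(inx, -500, 500)))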

def gradAscent(dataMatIn, classLabels):
    dataMatrix = mat(dataMatIn)  # convert the list of lists into a NumPy matrix
    labelMat = mat(classLabels).transpose()  # convert the labels into a column vector
    labelMat = labelMat.astype('float64')
    m, n = shape(dataMatrix)
    alpha = 0.001    # step size
    maxCycles = 500  # number of iterations
    weights = ones((n, 1))  # initialize all weights to 1 (float64 by default)
    for k in range(maxCycles):
        h = sigmoid(dataMatrix*weights)  # predicted probabilities, an m x 1 column
        error = labelMat - h             # difference between labels and predictions
        weights = weights + alpha*dataMatrix.transpose()*error
        '''Although the book presents this step as merely adjusting the weights
        in the direction of the error, X^T*(y - h) is exactly the gradient of
        the log-likelihood, so this is genuine gradient ascent.'''
    return weights
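To verify that the ascent actually converges, one can track the average log-likelihood across iterations; it should increase for a small enough alpha. A minimal sketch (logLikelihood is a hypothetical helper, not from the book):

def logLikelihood(dataMatIn, classLabels, weights):
    # average Bernoulli log-likelihood of the current weights
    X = mat(dataMatIn)
    y = mat(classLabels).transpose().astype('float64')
    h = sigmoid(X*weights)
    return float(y.T*log(h) + (1 - y).T*log(1 - h))/shape(X)[0]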

Next we plot the decision boundary, i.e. the line where w0 + w1*x1 + w2*x2 = 0:

def plotBestFit(weights):
    import matplotlib.pyplot as plt
    dataMat, labelMat = loadDataset()
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []; ycord1 = []  # coordinates of class-1 points
    xcord2 = []; ycord2 = []  # coordinates of class-0 points
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])
        else:
            xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    x = arange(-3.0, 3.0, 0.1)
    y = (-weights[0] - weights[1]*x)/weights[2]  # solve w0 + w1*x + w2*y = 0 for y
    ax.plot(x, y)
    plt.xlabel('X1'); plt.ylabel('X2')
    plt.show()

dataMat, labelMat = loadDataset()
weights = gradAscent(dataMat, labelMat)
plotBestFit(weights.getA())  # getA() converts the weight matrix into a plain array for indexing
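Once the weights are learned, classifying a new point is just thresholding the sigmoid output at 0.5. A sketch (Machine Learning in Action defines a similar classifyVector helper in a later section):

def classifyVector(inX, weights):
    # inX: 1-D feature array including the leading 1.0 intercept term
    prob = sigmoid(sum(inX*weights))
    return 1.0 if prob > 0.5 else 0.0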
Below is an improved stochastic gradient ascent algorithm: each update uses a single randomly chosen sample instead of the whole dataset, and the step size alpha decays as the iterations proceed.

def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    # dataMatrix should be a plain NumPy array (e.g. array(dataMat)), not a matrix
    m, n = shape(dataMatrix)
    weights = ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))  # list() so that del works under Python 3
        for i in range(m):
            alpha = 4/(1.0 + j + i) + 0.0001  # alpha decays over time but never reaches 0
            randIndex = int(random.uniform(0, len(dataIndex)))  # pick a random not-yet-used entry
            sample = dataIndex[randIndex]  # index into the original data, not the shrinking list
            h = sigmoid(sum(dataMatrix[sample]*weights))
            error = classLabels[sample] - h
            weights = weights + alpha*error*dataMatrix[sample]
            del(dataIndex[randIndex])
    return weights
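A usage sketch for the stochastic version; note that it expects a plain NumPy array with numeric labels and returns a 1-D array, so no .getA() conversion is needed before plotting:

dataMat, labelMat = loadDataset()
weights = stocGradAscent1(array(dataMat), labelMat, numIter=150)
plotBestFit(weights)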

Reposted from blog.csdn.net/songjinxaing/article/details/79637632