SVM in Practice (4): Implementing Kernel Functions - Machine Learning

Copyright notice: when reposting, please credit the source: https://blog.csdn.net/weixin_42398658/article/details/83375864

This section is very similar to the previous one. The difference is that the previous post classified a point by taking the inner product between the support vectors and the data to be classified, whereas here a kernel function replaces that inner product. The kernel code follows Machine Learning in Action; if you understood the earlier theory well, the code is not hard to read. Rather than starting with the kernel function's definition, I suggest first reading the code that tests the kernel, working out what each parameter means, and asking why the author uses it this way. This may be hard to grasp at first, so read it a few times, and keep the earlier theory in mind as you go, because the code follows that derivation step by step. Finally, try writing it yourself. The key parts of the code below are all commented.
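
To make the substitution concrete: wherever the linear SMO computed a raw inner product x_i · x_j, the kernel version computes K(x_i, x_j) instead. Below is a minimal sketch (my own illustration, not part of the original source) of the RBF (Gaussian) kernel this post uses; note that, matching the book's code, the denominator is sigma ** 2 rather than the more common 2 * sigma ** 2:

import numpy as np

def rbf_kernel(x, z, sigma):
    # K(x, z) = exp(-||x - z||^2 / sigma^2), the book's parameterization
    diff = np.asarray(x, dtype=float) - np.asarray(z, dtype=float)
    return np.exp(-np.dot(diff, diff) / sigma ** 2)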

First, here are the results:

fullSet, iter: 8 i:97, pairs changed 0
fullSet, iter: 8 i:98, pairs changed 0
fullSet, iter: 8 i:99, pairs changed 0
iteration number: 9
there are 18 Support Vectors
the training error rate is: 0.000000
the test error rate is: 0.050000

Source code:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: 赵守风
# File name: kernelfunction.py
# Time: 2018/10/25
# Email: [email protected]
import numpy as np
from SMO_simplify import *  # provides loadDataSet, selectJrand, clipAlpha

# The function definition is a bit hard to follow on its own: the theory is familiar,
# but it can be implemented in many ways. Look first at how the function is called
# (the kernel test below) to see what each parameter means. The key calling code is
# reproduced here for reference:
'''
dataArr,labelArr = loadDataSet('testSetRBF.txt') 
b,alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, ('rbf', k1)) #C=200 important
datMat=mat(dataArr); labelMat = mat(labelArr).transpose()
svInd=nonzero(alphas.A>0)[0]
sVs=datMat[svInd] #get matrix of only support vectors
labelSV = labelMat[svInd];
for i in range(m):
        kernelEval = kernelTrans(sVs,datMat[i,:],('rbf', k1))
        predict=kernelEval.T * multiply(labelSV,alphas[svInd]) + b
        if sign(predict)!=sign(labelArr[i]): errorCount += 1
'''
# kernelEval = kernelTrans(sVs,datMat[i,:],('rbf', k1))
# From this call we can see that the first argument X is sVs: tracing it back, these are the
# training points that enter the inner product, i.e. the support vectors.
# The second argument A is datMat[i,:], a single sample to be classified: every query point
# must be evaluated (through the kernel) against all of the support vectors.
# kTup[0] selects the kernel type; kTup[1] is simply the RBF width parameter, as in
#   K = exp(K / (-1 * kTup[1] ** 2))
def kernelTrans(X, A, kTup):  # calc the kernel or transform data to a higher dimensional space
    m, n = np.shape(X)
    K = np.mat(np.zeros((m, 1)))
    if kTup[0] == 'lin':
        K = X * A.T  # linear kernel
    elif kTup[0] == 'rbf':
        for j in range(m):
            deltaRow = X[j, :] - A
            K[j] = deltaRow * deltaRow.T
        K = np.exp(K / (-1 * kTup[1] ** 2))  # divide in NumPy is element-wise not matrix like Matlab
    else:
        raise NameError('Houston We Have a Problem -- That Kernel is not recognized')
    return K
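
# A quick sanity check (toy numbers of my own, not from the book): with three support
# vectors in R^2 and one query point, kernelTrans returns a 3x1 column of kernel values:
#   sVs = np.mat([[0., 0.], [1., 0.], [0., 1.]])
#   x = np.mat([[0.5, 0.5]])
#   kernelTrans(sVs, x, ('rbf', 1.3))  # each entry is exp(-||sv - x||^2 / 1.3 ** 2)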


class optStruct:
    def __init__(self, dataMatIn, classLabels, C, toler, kTup):  # Initialize the structure with the parameters
        self.X = dataMatIn
        self.labelMat = classLabels
        self.C = C
        self.tol = toler
        self.m = np.shape(dataMatIn)[0]
        self.alphas = np.mat(np.zeros((self.m, 1)))
        self.b = 0
        self.eCache = np.mat(np.zeros((self.m, 2)))  # first column is valid flag
        self.K = np.mat(np.zeros((self.m, self.m)))
        for i in range(self.m):  # precompute the kernel matrix: column i holds K(x_j, x_i) for all j
            self.K[:, i] = kernelTrans(self.X, self.X[i, :], kTup)


def calcEk(oS, k):
    # f(x_k) = sum_i alpha_i * y_i * K(x_i, x_k) + b; with the kernel matrix precomputed
    # in optStruct, this is a single column lookup oS.K[:, k]. Ek is the error f(x_k) - y_k.
    fXk = float(np.multiply(oS.alphas, oS.labelMat).T * oS.K[:, k] + oS.b)
    Ek = fXk - float(oS.labelMat[k])
    return Ek


# Heuristic for choosing the second alpha in the inner loop
def selectJ(i, oS, Ei):  # second-choice heuristic; also computes Ej
    maxK = -1
    maxDeltaE = 0
    Ej = 0
    oS.eCache[i] = [1, Ei]  # set valid; choose the alpha that gives the maximum delta E
    validEcacheList = np.nonzero(oS.eCache[:, 0].A)[0]  # indices with a valid cached error (see the earlier posts)
    if (len(validEcacheList)) > 1:
        for k in validEcacheList:  # loop through valid Ecache values and find the one that maximizes delta E
            if k == i: continue  # don't calc for i, waste of time
            Ek = calcEk(oS, k)
            deltaE = np.abs(Ei - Ek)
            if (deltaE > maxDeltaE):
                maxK = k
                maxDeltaE = deltaE
                Ej = Ek
        return maxK, Ej
    else:  # in this case (first time around) we don't have any valid eCache values
        j = selectJrand(i, oS.m)
        Ej = calcEk(oS, j)
    return j, Ej


def updateEk(oS, k):  # after any alpha has changed update the new value in the cache
    Ek = calcEk(oS, k)
    oS.eCache[k] = [1, Ek]

# Inner loop: attempt to jointly optimize the alpha pair (i, j)
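# My summary of the standard SMO condition: innerL first checks whether alpha_i
# violates the KKT conditions within the tolerance:
#   y_i * E_i < -tol while alpha_i < C  -> the margin is violated, alpha_i can still increase
#   y_i * E_i >  tol while alpha_i > 0  -> the constraint is over-satisfied, alpha_i can decrease
# Only a violating alpha is worth optimizing.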
def innerL(i, oS):
    Ei = calcEk(oS, i)  # as before, Ei is the prediction error for the first alpha
    if ((oS.labelMat[i] * Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or (
            (oS.labelMat[i] * Ei > oS.tol) and (oS.alphas[i] > 0)):
        j, Ej = selectJ(i, oS, Ei)  # pick the second alpha via the max |Ei - Ej| heuristic
        alphaIold = oS.alphas[i].copy()
        alphaJold = oS.alphas[j].copy()
        # apply the box constraints: compute the feasible interval [L, H] for alpha_j
        if (oS.labelMat[i] != oS.labelMat[j]):
            L = max(0, oS.alphas[j] - oS.alphas[i])
            H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
        else:
            L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C, oS.alphas[j] + oS.alphas[i])
        if L == H:
            print("L==H")
            return 0
        # From here on the kernel is used: eta = 2*K12 - K11 - K22 is built from kernel
        # evaluations, where the earlier linear code used raw inner products of the data
        eta = 2.0 * oS.K[i, j] - oS.K[i, i] - oS.K[j, j]  # changed for kernel
        if eta >= 0:
            print("eta>=0")
            return 0
        # update alpha_j, then clip it to [L, H]
        oS.alphas[j] -= oS.labelMat[j] * (Ei - Ej) / eta
        oS.alphas[j] = clipAlpha(oS.alphas[j], H, L)
        updateEk(oS, j)  # update the error cache
        if (abs(oS.alphas[j] - alphaJold) < 0.00001):
            print("j not moving enough")
            return 0
        # update alpha_i by the same amount as alpha_j, but in the opposite direction
        oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] * (alphaJold - oS.alphas[j])
        updateEk(oS, i)  # update the error cache
        # Compute b; the kernel appears here as well. As you read, keep mapping each line
        # back to the earlier theory: the code follows that derivation step by step, and a
        # few passes make the logic clear.
        b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i, i] - oS.labelMat[j] * (
                    oS.alphas[j] - alphaJold) * oS.K[i, j]
        b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i, j] - oS.labelMat[j] * (
                    oS.alphas[j] - alphaJold) * oS.K[j, j]
        # If alpha_i lies strictly inside (0, C), point i sits exactly on the margin and b1
        # is exact; likewise for alpha_j and b2. Otherwise both estimates are valid, so average.
        if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]):
            oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]):
            oS.b = b2
        else:
            oS.b = (b1 + b2) / 2.0
        return 1
    else:
        return 0


# The parameters are mostly the same as in the linear version; the extra kTup argument
# selects either the linear kernel ('lin') or the RBF kernel ('rbf')
def smoP(dataMatIn, classLabels, C, toler, maxIter, kTup=('lin', 0)):  # full Platt SMO
    # kTup is passed through to the data structure and used when building the kernel matrix
    oS = optStruct(np.mat(dataMatIn), np.mat(classLabels).transpose(), C, toler, kTup)
    iter = 0
    entireSet = True
    alphaPairsChanged = 0
    while (iter < maxIter) and ((alphaPairsChanged > 0) or (entireSet)):
        alphaPairsChanged = 0
        if entireSet:  # go over all
            for i in range(oS.m):  # outer loop: sweep every alpha as the first alpha
                alphaPairsChanged += innerL(i, oS)  # inner loop: pick j maximizing |Ei - Ej|; the kernel is used inside innerL
                print("fullSet, iter: %d i:%d, pairs changed %d" % (iter, i, alphaPairsChanged))
            iter += 1
        else:  # go over non-bound (railed) alphas
            nonBoundIs = np.nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
            for i in nonBoundIs:
                alphaPairsChanged += innerL(i, oS)
                print("non-bound, iter: %d i:%d, pairs changed %d" % (iter, i, alphaPairsChanged))
            iter += 1
        if entireSet:
            entireSet = False  # toggle entire set loop
        elif (alphaPairsChanged == 0):
            entireSet = True
        print("iteration number: %d" % iter)
    return oS.b, oS.alphas

# Same as before: recover the weight vector w from the alphas
def calcWs(alphas, dataArr, classLabels):
    X = np.mat(dataArr)
    labelMat = np.mat(classLabels).transpose()
    m, n = np.shape(X)
    w = np.zeros((n, 1))
    for i in range(m):
        w += np.multiply(alphas[i] * labelMat[i], X[i, :].T)
    return w
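
# Note: with a non-linear kernel there is no explicit weight vector w in input space;
# prediction must go through the support vectors instead,
#   f(x) = sum_i alpha_i * y_i * K(sv_i, x) + b,
# which is exactly what testRbf does below. calcWs is only meaningful for the linear kernel.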


def testRbf(k1=1.3):
    dataArr, labelArr = loadDataSet('testSetRBF.txt')  # load the training data
    # train: solve for b and the alphas
    b, alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, ('rbf', k1))  # C=200 important
    datMat = np.mat(dataArr)
    labelMat = np.mat(labelArr).transpose()
    svInd = np.nonzero(alphas.A > 0)[0]
    sVs = datMat[svInd]  # get matrix of only support vectors
    labelSV = labelMat[svInd]
    print("there are %d Support Vectors" % np.shape(sVs)[0])
    m, n = np.shape(datMat)
    errorCount = 0
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], ('rbf', k1))
        predict = kernelEval.T * np.multiply(labelSV, alphas[svInd]) + b
        if np.sign(predict) != np.sign(labelArr[i]): errorCount += 1
    print("the training error rate is: %f" % (float(errorCount) / m))
    dataArr, labelArr = loadDataSet('testSetRBF2.txt')
    errorCount = 0
    datMat = np.mat(dataArr)
    labelMat = np.mat(labelArr).transpose()
    m, n = np.shape(datMat)
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], ('rbf', k1))
        predict = kernelEval.T * np.multiply(labelSV, alphas[svInd]) + b
        if np.sign(predict) != np.sign(labelArr[i]): errorCount += 1
    print("the test error rate is: %f" % (float(errorCount) / m))

