[Machine Learning] - Support Vector Machines (5): An SVM Program in Python

The Support Vector Machine series:
[Machine Learning] - Lagrange Multipliers and the KKT Conditions.
[Machine Learning] - Support Vector Machines (1): Hard-Margin SVM.
[Machine Learning] - Support Vector Machines (2): Soft-Margin SVM.
[Machine Learning] - Support Vector Machines (3): The Sequential Minimal Optimization (SMO) Algorithm.
[Machine Learning] - Support Vector Machines (4): Kernel Functions.
[Machine Learning] - Support Vector Machines (5): An SVM Program in Python.


This post is split into two parts:
(1) a from-scratch implementation of the SMO algorithm for SVM;
(2) an SVM built by calling the SVM classes in scikit-learn.

1 From-Scratch Implementation
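
The (7.xxx) markers in the code comments follow the equation numbering used in the SMO derivation referenced earlier in this series. As a quick reference, writing $K_{ij} = \langle x_i, x_j \rangle$ and $E_i = g(x_i) - y_i$, the updates the code performs for a chosen pair $(\alpha_1, \alpha_2)$ are:

$$\eta = K_{11} + K_{22} - 2K_{12}, \qquad \alpha_2^{new,unc} = \alpha_2^{old} + \frac{y_2(E_1 - E_2)}{\eta}$$

$$\alpha_2^{new} = \min\big(H, \max(L, \alpha_2^{new,unc})\big), \qquad \alpha_1^{new} = \alpha_1^{old} + y_1 y_2(\alpha_2^{old} - \alpha_2^{new})$$

$$b_1^{new} = -E_1 - y_1 K_{11}(\alpha_1^{new} - \alpha_1^{old}) - y_2 K_{21}(\alpha_2^{new} - \alpha_2^{old}) + b^{old}$$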

#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
1. 实现SVM算法
"""
import numpy as np
import matplotlib.pyplot as plt


def loadDataSet():  # load the data file
    data = list()
    labels = list()
    with open('data/testSet.txt') as f:
        lines = f.readlines()
        for line in lines:
            line = line.rstrip().split('\t')
            data.append([float(line[0]), float(line[1])])
            labels.append(float(line[-1]))
    return data, labels

def sign(x):
    # sign function: +1 if x >= 0, else -1
    if x >= 0:
        return 1
    else:
        return -1

class SVM:
    def predict(self, x):
        # plug x into the trained decision function and take the sign
        return sign(np.dot(self.weights, x) + self.b)

    def calcK(self, x1, x2):
        # compute the inner product <x1, x2> (linear kernel)
        return np.dot(x1, x2)

    def calcG(self, alpha, labels, data, dataSet, b):
        # compute g(x) = sum_j alpha_j * y_j * K(x_j, x) + b
        total = 0
        for j in range(len(alpha)):
            total += alpha[j] * labels[j] * self.calcK(data, dataSet[j])  # accumulate the kernel expansion
        return total + b

    def plotSvm(self, dataSet):
        fig = plt.figure()  # create a figure
        ax = fig.add_subplot(1, 1, 1)  # add a single subplot
        data = np.array(dataSet)
        ax.scatter(data[:, 0], data[:, 1], color='r')  # scatter plot of the samples
        plt.show()

    def train(self, dataSet, labels):   # train the SVM and return the weights and bias
        b = 0   # bias
        C = 1  # penalty parameter
        maxIter = 10  # maximum number of passes over the data
        iter = 0
        N = len(dataSet)   # number of samples (rows)
        M = len(dataSet[0])    # number of features (columns / dimensionality)
        alpha = np.zeros(N)  # Lagrange multipliers, one per constraint (to be solved)
        while iter < maxIter:
            if (iter % 50 == 0):
                print(iter)  # progress printout
            iter += 1
            for i in range(N):  # outer loop: pick alpha1
                alpha1_old = alpha[i].copy()   # alpha1 before the update (i.e. alpha_old)
                y1 = labels[i]  # y1
                x1 = dataSet[i]
                g1 = self.calcG(alpha, labels, x1, dataSet, b)

                alpha_index1 = -1    # index of an alpha1 that violates the KKT conditions
                if alpha1_old == 0 and y1 * g1 < 1:   # KKT check (7.111)
                    alpha_index1 = i
                if alpha1_old > 0 and alpha1_old < C and y1 * g1 != 1:   # (7.112)
                    alpha_index1 = i
                if alpha1_old == C and y1 * g1 > 1:   # (7.113)
                    alpha_index1 = i
                if alpha_index1 == -1:   # this sample satisfies the KKT conditions; try the next i
                    continue

                E1 = g1 - y1    # E1, used later to maximize |E1 - E2|
                alpha_index2 = -1
                if E1 > 0:  # if E1 is positive, look for the smallest E2 (and vice versa)
                    selectedE2 = np.inf  # start from positive infinity
                else:
                    selectedE2 = -np.inf  # start from negative infinity
                for j in range(N):  # inner loop: pick alpha2
                    if i != j:  # alpha2 must come from a different sample than alpha1
                        yj = labels[j]
                        gj = self.calcG(alpha, labels, dataSet[j], dataSet, b)
                        Ej = gj - yj
                        if E1 > 0:  # looking for the smallest E2
                            if Ej < selectedE2:
                                selectedE2 = Ej
                                alpha_index2 = j
                        else:
                            if Ej > selectedE2:
                                selectedE2 = Ej
                                alpha_index2 = j
                # at this point alpha2 has been selected
                if (alpha_index2 == -1):
                    continue
                E2 = selectedE2
                y2 = labels[alpha_index2]
                alpha2_old = alpha[alpha_index2].copy()
                x2 = dataSet[alpha_index2]
                if (y1 != y2):  # alpha2 must stay inside the box: L <= alpha2 <= H
                    L = np.maximum(0, alpha2_old - alpha1_old)
                    H = np.minimum(C, C + alpha2_old - alpha1_old)
                else:
                    L = np.maximum(0, alpha2_old + alpha1_old - C)
                    H = np.minimum(C, alpha2_old + alpha1_old)
                eta = self.calcK(x1, x1) + self.calcK(x2, x2) - 2 * self.calcK(x1, x2)  # eta = K11 + K22 - 2*K12
                if eta == 0:    # degenerate pair, cannot update; skip
                    continue
                alpha2_new_unc = alpha2_old + (y2 * (E1 - E2)) / eta  # unclipped update for alpha2
                if (alpha2_new_unc > H):  # clip to [L, H] (7.108)
                    alpha2_new = H
                elif (alpha2_new_unc < L):
                    alpha2_new = L
                else:
                    alpha2_new = alpha2_new_unc
                alpha1_new = alpha1_old + y1 * y2 * (alpha2_old - alpha2_new)   # (7.109)

                b1new = -E1 - y1 * self.calcK(x1, x1) * (alpha1_new - alpha1_old) - y2 * self.calcK(x2, x1) * (alpha2_new - alpha2_old) + b  # (7.115)
                b2new = -E2 - y1 * self.calcK(x1, x2) * (alpha1_new - alpha1_old) - y2 * self.calcK(x2, x2) * (alpha2_new - alpha2_old) + b  # (7.116)

                if 0 < alpha1_new < C:      # alpha1 strictly inside the box: b1new is exact
                    b = b1new
                elif 0 < alpha2_new < C:    # otherwise use b2new if alpha2 is strictly inside
                    b = b2new
                else:                       # both at the bounds: take the average
                    b = (b1new + b2new) / 2
                alpha[alpha_index1] = alpha1_new
                alpha[alpha_index2] = alpha2_new
            self.weights = np.dot(np.multiply(alpha, labels), dataSet)   # w = sum_i alpha_i * y_i * x_i
            self.b = b
        self.plotSvm(dataSet)
        return self.weights, self.b


if __name__ == '__main__':
    dataSet, labels = loadDataSet()
    svm = SVM()
    weights, b = svm.train(dataSet, labels)
    print(weights, b)

    # check whether each training sample is classified correctly
    for i, x in enumerate(dataSet[:80]):
        result = svm.predict(x)
        if (int(labels[i]) == result):
            print(True)
        else:
            print(False)
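
The script reads data/testSet.txt, where each line holds two tab-separated feature values followed by a +1/-1 label (the labels the sign-based check above expects). If you do not have that file, the short sketch below writes a compatible one; the path, class centers, and sample counts are illustrative assumptions, not part of the original post.

import os
import numpy as np

# Generate a small linearly separable dataset in the format loadDataSet() expects:
# two tab-separated features followed by a +1/-1 label on each line.
rng = np.random.default_rng(0)
pos = rng.normal(loc=[2.0, 2.0], scale=0.5, size=(50, 2))    # class +1
neg = rng.normal(loc=[-2.0, -2.0], scale=0.5, size=(50, 2))  # class -1

os.makedirs('data', exist_ok=True)
with open('data/testSet.txt', 'w') as f:
    for x in pos:
        f.write('%f\t%f\t1\n' % (x[0], x[1]))
    for x in neg:
        f.write('%f\t%f\t-1\n' % (x[0], x[1]))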

2 Using the Library

Using a support vector machine to classify handwritten digits.

from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn versions
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report


digits = load_digits()
X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target, test_size=0.25, random_state=33)
ss = StandardScaler()
X_train = ss.fit_transform(X_train)
X_test = ss.transform(X_test)

lsvc = LinearSVC()
lsvc.fit(X_train, y_train)
y_predict = lsvc.predict(X_test)

print('The Accuracy of Linear SVC is', lsvc.score(X_test, y_test))
print(classification_report(y_test, y_predict, target_names=digits.target_names.astype(str)))
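
LinearSVC fits only a linear decision function. Since Part 4 of this series covers kernel functions, a kernelized SVM can be swapped in through sklearn.svm.SVC. A minimal sketch on the same standardized split (the RBF kernel and its default hyperparameters here are illustrative choices, not part of the original post):

from sklearn.svm import SVC

# RBF-kernel SVM on the same standardized training/test split
svc = SVC(kernel='rbf', gamma='scale')
svc.fit(X_train, y_train)
print('The Accuracy of kernel SVC is', svc.score(X_test, y_test))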


Reposted from blog.csdn.net/qq_41709378/article/details/106807479