Machine Learning: Ensemble Learning


I. Problem Description

Combine SVM, KNN, and a BP neural network into an ensemble learner, and train and test it on the MNIST handwritten-digit dataset.

II. Core Idea of the Algorithm

Ensemble learning combines several weak classifiers to obtain a better overall classification result. Here, an SVM, a KNN classifier, and a BP neural network are trained independently; their predictions are combined by majority vote to produce the ensemble's final output.
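The voting rule itself is a plain majority vote; a minimal sketch (matching the three-way hard vote in ensemble_learning.py below, where a three-way disagreement falls back on the BP network's answer):

def majority_vote(pre_svm, pre_knn, pre_bp):
    """Hard vote over three predicted labels: if at least two classifiers
    agree, return the shared label; when all three disagree, fall back on
    the BP network (the strongest single classifier in this experiment)."""
    if pre_svm == pre_knn or pre_svm == pre_bp:
        return pre_svm
    if pre_knn == pre_bp:
        return pre_knn
    return pre_bp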

III. Approach

First, read the MNIST dataset and train each classifier separately. At test time, take a majority vote over the three classifiers' predictions; the winning label is the ensemble's output. The SVM is implemented with LibSVM.
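For reference, LibSVM's Python interface is driven by two calls, svm_train and svm_predict; here is a tiny sketch with made-up toy data (the import path mirrors the project layout used in svm.py below):

from libsvm.python.svmutil import svm_train, svm_predict

# Toy data: LibSVM accepts plain Python lists of labels and feature vectors.
y = [0, 1, 0, 1]
x = [[0.1, 0.2], [0.9, 0.8], [0.2, 0.1], [0.8, 0.9]]

model = svm_train(y, x)  # default parameters: C-SVC with an RBF kernel
# Returns predicted labels, (accuracy, MSE, squared correlation), decision values.
p_label, p_acc, p_val = svm_predict(y, x, model)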

IV. Code and Results

mnist_reader.py

# -*- coding: utf-8 -*-
import numpy as np
import struct
import matplotlib.pyplot as plt

# Training set image file
train_images_idx3_ubyte_file = 'mnist/train-images.idx3-ubyte'
# Training set label file
train_labels_idx1_ubyte_file = 'mnist/train-labels.idx1-ubyte'

# Test set image file
test_images_idx3_ubyte_file = 'mnist/t10k-images.idx3-ubyte'
# Test set label file
test_labels_idx1_ubyte_file = 'mnist/t10k-labels.idx1-ubyte'


def decode_idx3_ubyte(idx3_ubyte_file):
    """
    解析idx3文件的通用函数
    :param idx3_ubyte_file: idx3文件路径
    :return: 数据集
    """
    # 读取二进制数据
    bin_data = open(idx3_ubyte_file, 'rb').read()

    # Parse the header: magic number, number of images, rows per image, columns per image
    offset = 0
    fmt_header = '>iiii'
    magic_number, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, offset)
    print('magic number: %d, images: %d, image size: %d*%d' % (magic_number, num_images, num_rows, num_cols))

    # Parse the image data
    image_size = num_rows * num_cols
    offset += struct.calcsize(fmt_header)
    fmt_image = '>' + str(image_size) + 'B'
    images = np.empty((num_images, num_rows, num_cols))
    for i in range(num_images):
        if (i + 1) % 10000 == 0:
            print('parsed %d images' % (i + 1))
        images[i] = np.array(struct.unpack_from(fmt_image, bin_data, offset)).reshape((num_rows, num_cols))
        offset += struct.calcsize(fmt_image)
    return images


def decode_idx1_ubyte(idx1_ubyte_file):
    """
    解析idx1文件的通用函数
    :param idx1_ubyte_file: idx1文件路径
    :return: 数据集
    """
    # 读取二进制数据
    bin_data = open(idx1_ubyte_file, 'rb').read()

    # 解析文件头信息,依次为魔数和标签数
    offset = 0
    fmt_header = '>ii'
    magic_number, num_images = struct.unpack_from(fmt_header, bin_data, offset)
    print('魔数:%d, 图片数量: %d张' % (magic_number, num_images))

    # Parse the label data
    offset += struct.calcsize(fmt_header)
    fmt_image = '>B'
    labels = np.empty(num_images)
    for i in range(num_images):
        if (i + 1) % 10000 == 0:
            print('parsed %d labels' % (i + 1))
        labels[i] = struct.unpack_from(fmt_image, bin_data, offset)[0]
        offset += struct.calcsize(fmt_image)
    return labels


def load_train_images(idx_ubyte_file=train_images_idx3_ubyte_file):
    """
    TRAINING SET IMAGE FILE (train-images-idx3-ubyte):
    [offset] [type]          [value]          [description]
    0000     32 bit integer  0x00000803(2051) magic number
    0004     32 bit integer  60000            number of images
    0008     32 bit integer  28               number of rows
    0012     32 bit integer  28               number of columns
    0016     unsigned byte   ??               pixel
    0017     unsigned byte   ??               pixel
    ........
    xxxx     unsigned byte   ??               pixel
    Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).

    :param idx_ubyte_file: path to the idx file
    :return: n*row*col np.array, where n is the number of images
    """
    return decode_idx3_ubyte(idx_ubyte_file)


def load_train_labels(idx_ubyte_file=train_labels_idx1_ubyte_file):
    """
    TRAINING SET LABEL FILE (train-labels-idx1-ubyte):
    [offset] [type]          [value]          [description]
    0000     32 bit integer  0x00000801(2049) magic number (MSB first)
    0004     32 bit integer  60000            number of items
    0008     unsigned byte   ??               label
    0009     unsigned byte   ??               label
    ........
    xxxx     unsigned byte   ??               label
    The labels values are 0 to 9.

    :param idx_ubyte_file: path to the idx file
    :return: n*1 np.array, where n is the number of images
    """
    return decode_idx1_ubyte(idx_ubyte_file)


def load_test_images(idx_ubyte_file=test_images_idx3_ubyte_file):
    """
    TEST SET IMAGE FILE (t10k-images-idx3-ubyte):
    [offset] [type]          [value]          [description]
    0000     32 bit integer  0x00000803(2051) magic number
    0004     32 bit integer  10000            number of images
    0008     32 bit integer  28               number of rows
    0012     32 bit integer  28               number of columns
    0016     unsigned byte   ??               pixel
    0017     unsigned byte   ??               pixel
    ........
    xxxx     unsigned byte   ??               pixel
    Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).

    :param idx_ubyte_file: path to the idx file
    :return: n*row*col np.array, where n is the number of images
    """
    return decode_idx3_ubyte(idx_ubyte_file)


def load_test_labels(idx_ubyte_file=test_labels_idx1_ubyte_file):
    """
    TEST SET LABEL FILE (t10k-labels-idx1-ubyte):
    [offset] [type]          [value]          [description]
    0000     32 bit integer  0x00000801(2049) magic number (MSB first)
    0004     32 bit integer  10000            number of items
    0008     unsigned byte   ??               label
    0009     unsigned byte   ??               label
    ........
    xxxx     unsigned byte   ??               label
    The labels values are 0 to 9.

    :param idx_ubyte_file: path to the idx file
    :return: n*1 np.array, where n is the number of images
    """
    return decode_idx1_ubyte(idx_ubyte_file)


def run():
    train_images = load_train_images()
    train_labels = load_train_labels()
    # test_images = load_test_images()
    # test_labels = load_test_labels()

    # zipper = zip(train_images, train_labels)
    # for x, y in zipper:
    #     print((x, y))

    # Inspect the first ten samples and their labels to verify the data was read correctly
    num = 10  # avoid shadowing the builtin len
    for i in range(num):
        print("label\n", train_labels[i])
        print("image\n", train_images[i])
        plt.subplot((num - 1) // 5 + 1, 5, i + 1)  # integer division: subplot indices must be ints
        plt.imshow(train_images[i], cmap='gray')
        # plt.show()
    plt.show()
    print('done')


def getdataset():
    X_train = load_train_images()
    y_train = load_train_labels()
    X_test = load_test_images()
    y_test = load_test_labels()
    return X_train, y_train, X_test, y_test


def showMnist():
    y_train = load_train_labels()
    X_train = load_train_images()
    # Show a few samples from the dataset: several per class
    classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    num_classes = len(classes)
    samples_per_class = 7
    for y, cls in enumerate(classes):
        idxs = np.flatnonzero(y_train == y)
        # Pick a few samples from this class
        idxs = np.random.choice(idxs, samples_per_class, replace=False)
        for i, idx in enumerate(idxs):
            plt_idx = i * num_classes + y + 1
            plt.subplot(samples_per_class, num_classes, plt_idx)
            plt.imshow(X_train[idx].astype('uint8'))
            plt.axis('off')
            if i == 0:
                plt.title(cls)
    plt.show()


if __name__ == '__main__':
    # run()
    showMnist()

svm.py

from libsvm.python.svmutil import *
from libsvm.python.svm import *
import mnist_reader as reader
import numpy as np


class SVM(object):
    def __init__(self):
        self.X_train, self.y_train, self.X_test, self.y_test = reader.getdataset()
        self.X_train = self.X_train/256
        self.X_test = self.X_test/256
        self.y_train = self.y_train.astype(int)
        self.y_test = self.y_test.astype(int)

    def dropsomepicture(self, trainm=5000, testm=500):
        # Subsample the dataset
        num_training = trainm
        mask = range(num_training)
        self.X_train = self.X_train[mask]
        self.y_train = self.y_train[mask]

        num_test = testm
        mask = range(num_test)
        self.X_test = self.X_test[mask]
        self.y_test = self.y_test[mask]

        # Preprocessing: flatten each image into a vector
        self.X_train = np.reshape(self.X_train, (self.X_train.shape[0], -1))
        self.X_test = np.reshape(self.X_test, (self.X_test.shape[0], -1))

    def train(self):
        train_images = self.X_train.tolist()
        train_labels = self.y_train.tolist()

        self.model = svm_train(train_labels, train_images)

    def test(self):
        test_images = self.X_test.tolist()
        test_labels = self.y_test.tolist()
        p_label, p_acc, p_val = svm_predict(test_labels, test_images, self.model)
        print(p_acc)

    def predict(self, label, image, model):
        p_label, p_acc, p_val = svm_predict(label, image, model)
        return int(p_label[0])


if __name__ == '__main__':
    svm = SVM()
    # svm.dropsomepicture(5000, 10000)
    # svm.train()
    # svm.test()

    # Predict a single sample
    svm.dropsomepicture(5000, 10)
    svm.train()
    test_images = svm.X_test.tolist()
    test_labels = svm.y_test.tolist()
    svm_label = [test_labels[0]]
    svm_image = [test_images[0]]
    print(svm_label)
    print(svm.predict(svm_label, svm_image, svm.model))

knn.py

from shaomingshan.classifiers.k_nearest_neighbor import KNearestNeighbor
import numpy as np
import matplotlib.pyplot as plt
import mnist_reader as reader
import time


def time_function(f, *args):
    tic = time.time()
    f(*args)
    toc = time.time()
    return toc - tic


class KNN(object):
    def __init__(self):
        self.X_train, self.y_train, self.X_test, self.y_test = reader.getdataset()
        self.y_train = self.y_train.astype(int)
        self.y_test = self.y_test.astype(int)

    def dropsomepicture(self, trainm=5000, testm=500):
        # Subsample the dataset
        num_training = trainm
        mask = range(num_training)
        self.X_train = self.X_train[mask]
        self.y_train = self.y_train[mask]

        num_test = testm
        mask = range(num_test)
        self.X_test = self.X_test[mask]
        self.y_test = self.y_test[mask]

    def show(self):
        # Load the dataset
        X_train, y_train, X_test, y_test = self.X_train, self.y_train, self.X_test, self.y_test

        print('Training data shape: ', X_train.shape)
        print('Training labels shape: ', y_train.shape)
        print('Test data shape: ', X_test.shape)
        print('Test labels shape: ', y_test.shape)

        # Flatten each image into a vector
        X_train = np.reshape(X_train, (X_train.shape[0], -1))
        X_test = np.reshape(X_test, (X_test.shape[0], -1))
        print(X_train.shape, X_test.shape)
        # Build the KNN classifier and train it (which just memorizes the samples)
        classifier = KNearestNeighbor()
        classifier.train(X_train, y_train)

        # Fully vectorized (no-loop) distance computation, the most efficient version
        dists_two = classifier.compute_distances_no_loops(X_test)

        no_loop_time = time_function(classifier.compute_distances_no_loops, X_test)
        print('No loop version took %f seconds' % no_loop_time)

    # Cross-validation
    def num_folds_cross(self):
        X_train, y_train, X_test, y_test = self.X_train, self.y_train, self.X_test, self.y_test
        X_test = np.reshape(X_test, (X_test.shape[0], -1))
        X_train = np.reshape(X_train, (X_train.shape[0], -1))
        num_folds = 5
        k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]

        X_train_folds = []
        y_train_folds = []

        idxes = range(len(X_train))
        idx_folds = np.array_split(idxes, num_folds)
        for idx in idx_folds:
            X_train_folds.append(X_train[idx])
            y_train_folds.append(y_train[idx])

        k_to_accuracies = {}

        import sys
        classifier = KNearestNeighbor()
        Verbose = False
        for k in k_choices:
            if Verbose:
                print("processing k=%f" % k)
            else:
                sys.stdout.write('.')
            k_to_accuracies[k] = list()
            for num in range(num_folds):
                if Verbose:
                    print("processing fold#%i/%i" % (num, num_folds))

                X_cv_train = np.vstack([X_train_folds[x] for x in range(num_folds) if x != num])
                y_cv_train = np.hstack([y_train_folds[x].T for x in range(num_folds) if x != num])

                X_cv_test = X_train_folds[num]
                y_cv_test = y_train_folds[num]

                # Train the KNN classifier
                classifier.train(X_cv_train, y_cv_train)

                # Compute distances between this fold's held-out images and the training images
                dists = classifier.compute_distances_no_loops(X_cv_test)

                y_cv_test_pred = classifier.predict_labels(dists, k=k)
                # Count correct predictions and record the fold accuracy
                num_correct = np.sum(y_cv_test_pred == y_cv_test)
                k_to_accuracies[k].append(float(num_correct) / y_cv_test.shape[0])

        # Print the computed accuracies
        for k in sorted(k_to_accuracies):
            for accuracy in k_to_accuracies[k]:
                print('k = %d, accuracy = %f' % (k, accuracy))
        for k in k_choices:
            accuracies = k_to_accuracies[k]
            plt.scatter([k] * len(accuracies), accuracies)

        accuracies_mean = np.array([np.mean(v) for k, v in sorted(k_to_accuracies.items())])
        accuracies_std = np.array([np.std(v) for k, v in sorted(k_to_accuracies.items())])
        plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
        plt.title('Cross-validation on k')
        plt.xlabel('k')
        plt.ylabel('Cross-validation accuracy')
        plt.show()

    def test(self, best_k=3):
        X_train, y_train, X_test, y_test = self.X_train, self.y_train, self.X_test, self.y_test
        X_test = np.reshape(X_test, (X_test.shape[0], -1))
        X_train = np.reshape(X_train, (X_train.shape[0], -1))
        classifier = KNearestNeighbor()
        classifier.train(X_train, y_train)
        y_test_pred = classifier.predict(X_test, k=best_k)

        # Print the accuracy
        num_correct = np.sum(y_test_pred == y_test)
        accuracy = float(num_correct) / len(X_test)
        print('Got %d / %d correct => accuracy: %f' % (num_correct, len(X_test), accuracy))

    def predictone(self, image, k=5):
        X_train, y_train, X_test, y_test = self.X_train, self.y_train, self.X_test, self.y_test
        X_test = np.array([image])
        X_test = np.reshape(X_test, (X_test.shape[0], -1))
        X_train = np.reshape(X_train, (X_train.shape[0], -1))
        classifier = KNearestNeighbor()
        classifier.train(X_train, y_train)
        return classifier.predict(X_test, k)[0]

    def predictdist(self, dist, k=5):
        X_train, y_train, X_test, y_test = self.X_train, self.y_train, self.X_test, self.y_test
        X_test = np.array(dist)
        X_test = np.reshape(X_test, (X_test.shape[0], -1))
        X_train = np.reshape(X_train, (X_train.shape[0], -1))
        classifier = KNearestNeighbor()
        classifier.train(X_train, y_train)
        return classifier.predict(X_test, k)


if __name__ == '__main__':
    k = KNN()
    k.dropsomepicture(5000, 500)
    k.show()
    # k.num_folds_cross()
    k.test(1)
    # print(k.predictdist(k.X_test[0:5], 5))
    # print(k.y_test[0:5])

mnist_loader.py

"""
mnist_loader
~~~~~~~~~~~~
A library to load the MNIST image data.  For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``.  In practice, ``load_data_wrapper`` is the
function usually called by our neural network code.
"""

# Libraries
# Standard library
import pickle
import gzip

# Third-party libraries
import numpy as np


def load_data():
    """Return the MNIST data as a tuple containing the training data,
    the validation data, and the test data.
    The ``training_data`` is returned as a tuple with two entries.
    The first entry contains the actual training images.  This is a
    numpy ndarray with 50,000 entries.  Each entry is, in turn, a
    numpy ndarray with 784 values, representing the 28 * 28 = 784
    pixels in a single MNIST image.
    The second entry in the ``training_data`` tuple is a numpy ndarray
    containing 50,000 entries.  Those entries are just the digit
    values (0...9) for the corresponding images contained in the first
    entry of the tuple.
    The ``validation_data`` and ``test_data`` are similar, except
    each contains only 10,000 images.
    This is a nice data format, but for use in neural networks it's
    helpful to modify the format of the ``training_data`` a little.
    That's done in the wrapper function ``load_data_wrapper()``, see
    below.
    """
    f = gzip.open('mnist.pkl.gz', 'rb')
    training_data, validation_data, test_data = pickle.load(f, encoding='bytes')
    f.close()
    return training_data, validation_data, test_data


def load_data_wrapper():
    """Return a tuple containing ``(training_data, validation_data,
    test_data)``. Based on ``load_data``, but the format is more
    convenient for use in our implementation of neural networks.
    In particular, ``training_data`` is a list containing 50,000
    2-tuples ``(x, y)``.  ``x`` is a 784-dimensional numpy.ndarray
    containing the input image.  ``y`` is a 10-dimensional
    numpy.ndarray representing the unit vector corresponding to the
    correct digit for ``x``.
    ``validation_data`` and ``test_data`` are lists containing 10,000
    2-tuples ``(x, y)``.  In each case, ``x`` is a 784-dimensional
    numpy.ndarray containing the input image, and ``y`` is the
    corresponding classification, i.e., the digit values (integers)
    corresponding to ``x``.
    Obviously, this means we're using slightly different formats for
    the training data and the validation / test data.  These formats
    turn out to be the most convenient for use in our neural network
    code."""
    tr_d, va_d, te_d = load_data()
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    training_results = [vectorized_result(y) for y in tr_d[1]]
    training_data = zip(training_inputs, training_results)
    validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = zip(validation_inputs, va_d[1])
    test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = zip(test_inputs, te_d[1])
    return training_data, validation_data, test_data


def vectorized_result(j):
    """Return a 10-dimensional unit vector with a 1.0 in the jth
    position and zeroes elsewhere.  This is used to convert a digit
    (0...9) into a corresponding desired output from the neural
    network."""
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e

bp_network.py

import random
import numpy as np


class Network(object):

    def __init__(self, sizes):
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    def feedforward(self, a):
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a)+b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            test_data=None):
        if test_data:
            test_data = list(test_data)
            n_test = len(test_data)
        training_data = list(training_data)
        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print("Epoch {0}: {1} / {2}".format(j, self.evaluate(test_data), n_test))
            else:
                print("Epoch {0} complete".format(j))

    def update_mini_batch(self, mini_batch, eta):
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [w-(eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = self.cost_derivative(activations[-1], y) * \
            sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return nabla_b, nabla_w

    def evaluate(self, test_data):
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)

    def cost_derivative(self, output_activations, y):
        return output_activations-y


def sigmoid(z):
    return 1.0/(1.0+np.exp(-z))


def sigmoid_prime(z):
    return sigmoid(z)*(1-sigmoid(z))

ensemble_learning.py

from svm import *
from knn import *
import bp_network
import mnist_reader as reader
import mnist_loader
import numpy as np


# SVM
svm = SVM()
svm.dropsomepicture(5000, 500)
svm.train()

# KNN
k = KNN()
k.dropsomepicture(5000, 500)

# bp network
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
net = bp_network.Network([784, 30, 10])
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)

X_train, y_train, X_test, y_test = reader.getdataset()
mask = 10000
X_test = X_test[range(mask)]
y_test = y_test[range(mask)]
X_test = np.reshape(X_test, (X_test.shape[0], -1))
test_images = X_test.tolist()               # raw 0-255 pixels, matching the KNN training data
test_images_norm = (X_test / 256).tolist()  # scaled to [0, 1), matching the SVM training data
test_labels = y_test.tolist()

right = 0
for i in range(len(test_images)):
    svm_label = [test_labels[i]]
    svm_image = [test_images_norm[i]]
    pre_svm = int(svm.predict(svm_label, svm_image, svm.model))
    pre_knn = int(k.predictone(test_images[i], 1))
    # The BP network was trained on mnist_loader data, whose pixels lie in [0, 1]
    pre_bp = int(np.argmax(net.feedforward((X_test[i] / 255.0).reshape((784, 1)))))
    # Majority vote: if at least two classifiers agree, take that label;
    # otherwise fall back on the BP network, the strongest single classifier
    if pre_svm == pre_knn:
        result = pre_svm
    elif pre_svm == pre_bp:
        result = pre_svm
    elif pre_bp == pre_knn:
        result = pre_bp
    else:
        result = pre_bp

    if test_labels[i] == result:
        right += 1
        print(i, "true")
    else:
        print(i, "false")

print("Accuracy:", right/mask)

1. SVM

Accuracy: 90.86%

2. KNN

Cross-validation finds that the best value of k is 1.
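Given the k_to_accuracies dictionary built in num_folds_cross above, the best k can be read off by mean accuracy (an illustrative snippet, assuming that dictionary is in scope):

# Pick the k with the highest mean cross-validation accuracy.
best_k = max(k_to_accuracies, key=lambda k: np.mean(k_to_accuracies[k]))
print('best k =', best_k)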

Testing on 500 images gives an accuracy of 90.6%.

3. BP network

After 30 training epochs, the accuracy is 94.98%.

4. Ensemble

Accuracy: 93.76%

V. Conclusion

In this experiment, the SVM, KNN, and neural network each achieve reasonably high accuracy on their own, so a plain majority vote does not bring a significant improvement over the best single classifier.
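One natural refinement, sketched here only as an illustration (not implemented above), is to weight each classifier's vote by its standalone test accuracy instead of counting all votes equally; the weights below are the accuracies reported above:

import numpy as np

def weighted_vote(predictions, weights, num_classes=10):
    """Weighted hard vote: each classifier adds its weight to the score
    of the label it predicts; the highest-scoring label wins."""
    scores = np.zeros(num_classes)
    for label, w in zip(predictions, weights):
        scores[label] += w
    return int(np.argmax(scores))

# SVM and KNN say 3, the BP network says 5; the vote still returns 3.
print(weighted_vote([3, 3, 5], weights=[0.9086, 0.906, 0.9498]))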

 

Corrections are welcome.
