Deep Learning: Implementation of CNN

'''
Implementation of a CNN
image --> Conv --> ReLU --> Pooling --> Affine --> ReLU --> Affine --> Softmax
Parameters:
    input_dim-----dimensions of the input data: (channels, height, width)
    conv_param----hyperparameters of the convolution layer (a dict) with keys:
        filter_num----number of filters
        filter_size---size of each filter
        stride--------stride
        pad-----------padding
    hidden_size---number of neurons in the hidden (fully connected) layer
    output_size---number of neurons in the output (fully connected) layer
    weight_init_std---standard deviation used to initialize the weights
'''
import pickle
from collections import OrderedDict

import numpy as np
# The layer classes (Convolution, Relu, Pooling, Affine, SoftmaxWithLoss) and
# the numerical_gradient helper used below are assumed to come from the common
# package of the "Deep Learning from Scratch" repository, not from TensorFlow.
from common.layers import *
from common.gradient import numerical_gradient
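
# Every layer object used below exposes the same two-method interface:
# forward(x) for the forward pass and backward(dout) for backpropagation.
# As a minimal sketch (an illustration only, not the repository's exact code),
# the Relu layer is roughly:
#
#     class Relu:
#         def forward(self, x):
#             self.mask = (x <= 0)   # remember where the input was non-positive
#             out = x.copy()
#             out[self.mask] = 0     # clamp those positions to zero
#             return out
#
#         def backward(self, dout):
#             dout[self.mask] = 0    # gradient does not flow where input was <= 0
#             return dout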


class SimpleConvNet:
    def __init__(self, input_dim=(1, 28, 28),
                conv_param={'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
                hidden_size=100, output_size=10, weight_init_std=0.01):
        # The hyperparameters of the convolution layer are passed in as a dict named conv_param
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']
        input_size = input_dim[1]
        conv_output_size = (input_size - filter_size + 2*filter_pad)/filter_stride+1
        pool_output_size = int(filter_num * (conv_output_size/2) * (conv_output_size/2))
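        # Worked example with the defaults: a 1x28x28 input and 30 filters of size 5
        # with pad 0 and stride 1 give conv_output_size = (28 - 5 + 0)/1 + 1 = 24;
        # 2x2 max pooling with stride 2 halves that to 12, so
        # pool_output_size = 30 * 12 * 12 = 4320. Note that input_size = input_dim[1]
        # assumes square inputs (height == width).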

        # Initialize the weights: W1/b1 for the convolution layer,
        # W2/b2 and W3/b3 for the two fully connected (Affine) layers
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
        self.params['b1'] = np.zeros(filter_num)
        self.params['W2'] = weight_init_std * np.random.randn(pool_output_size, hidden_size)
        self.params['b2'] = np.zeros(hidden_size)
        self.params['W3'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b3'] = np.zeros(output_size)

        # Assemble the layers in the order they are applied in the forward pass
        self.layers = OrderedDict()
        self.layers['Conv1'] = Convolution(self.params['W1'],
                                           self.params['b1'],
                                           conv_param['stride'],
                                           conv_param['pad'])

        self.layers['Relu1'] = Relu()
        self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
        self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu2'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])

        self.last_layer = SoftmaxWithLoss()
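        # Note: SoftmaxWithLoss is deliberately kept out of self.layers, so
        # predict() returns raw scores; softmax and cross-entropy are applied
        # only when the loss is actually needed during training.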

    def predict(self, x):
        for layer in self.layers.values():
            x = layer.forward(x)

        return x

    def loss(self, x, t):
        """求损失函数
        参数x是输入数据、t是教师标签
        """
        y = self.predict(x)
        return self.last_layer.forward(y, t)

    def accuracy(self, x, t, batch_size=100):
        if t.ndim != 1: t = np.argmax(t, axis=1)  # convert one-hot labels to class indices

        acc = 0.0

        for i in range(int(x.shape[0] / batch_size)):
            tx = x[i * batch_size:(i + 1) * batch_size]
            tt = t[i * batch_size:(i + 1) * batch_size]
            y = self.predict(tx)
            y = np.argmax(y, axis=1)
            acc += np.sum(y == tt)

        return acc / x.shape[0]

    def numerical_gradient(self, x, t):
        """Compute gradients numerically (slow; useful for gradient-checking).
        The body is truncated in the original post; this completion follows the
        book's standard implementation for all three weight/bias pairs.
        """
        loss_w = lambda w: self.loss(x, t)

        grads = {}
        for idx in (1, 2, 3):
            grads['W' + str(idx)] = numerical_gradient(loss_w, self.params['W' + str(idx)])
            grads['b' + str(idx)] = numerical_gradient(loss_w, self.params['b' + str(idx)])

        return grads
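
# A quick smoke test (a minimal sketch: it assumes the common package from the
# book's repository is importable, and it feeds random data instead of MNIST;
# real training would add an optimizer loop):
if __name__ == '__main__':
    network = SimpleConvNet(input_dim=(1, 28, 28),
                            conv_param={'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
                            hidden_size=100, output_size=10)
    x = np.random.randn(3, 1, 28, 28)  # batch of 3 fake 28x28 grayscale images
    t = np.array([1, 4, 9])            # fake class labels
    print('loss:', network.loss(x, t))
    print('accuracy:', network.accuracy(x, t, batch_size=3))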

Source: blog.csdn.net/weixin_44575717/article/details/124172408