Educoder -- implementing forward propagation, backpropagation, and training of a neural network model

Level 1: Implement forward propagation of the neural network model

import numpy

from layers import Convolution, Relu, FullyConnected, MaxPool, SoftmaxWithLoss


 

class TinyNet:

    def __init__(self, W_conv1, b_conv1, W_conv2, b_conv2, W_fc, b_fc):

        ########## Begin ##########

        self.conv1 = Convolution(W_conv1, b_conv1, stride=1, pad=1)  

        self.relu1 = Relu()  

        self.pool1 = MaxPool(2, 2, stride=2, pad=0)

        self.conv2 = Convolution(W_conv2, b_conv2, stride=1, pad=1)  

        self.relu2 = Relu()  

        self.pool2 = MaxPool(2, 2, stride=2, pad=0)

        self.fc = FullyConnected(W_fc, b_fc)

        self.loss = SoftmaxWithLoss()

        ########## End ##########

    def forward(self, x, t):

        ########## Begin ##########

        x = self.conv1.forward(x)  

        x = self.relu1.forward(x)  

        x = self.pool1.forward(x)  

        x = self.conv2.forward(x)  

        x = self.relu2.forward(x)  

        x = self.pool2.forward(x)  

        x = self.fc.forward(x)  

        loss = self.loss.forward(x, t)

        return x, loss

        ########## End ##########
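
As a quick sanity check, the finished model can be instantiated with small random weights and run on a dummy batch. This is only a sketch: the input size (MNIST-style 1x28x28), kernel size (3x3), filter counts, and class count below are assumptions, and it presumes the Convolution and FullyConnected layers expect weights shaped (filters, channels, height, width) and (inputs, outputs) respectively; adjust them to match the layers module you are given.

import numpy as np

rng = np.random.default_rng(0)

# Assumed shapes for illustration: 1x28x28 inputs, 3x3 kernels, 10 classes.
W_conv1 = 0.01 * rng.standard_normal((8, 1, 3, 3))    # stride=1, pad=1 keeps 28x28
b_conv1 = np.zeros(8)
W_conv2 = 0.01 * rng.standard_normal((16, 8, 3, 3))   # input is 14x14 after the first 2x2 pool
b_conv2 = np.zeros(16)
W_fc = 0.01 * rng.standard_normal((16 * 7 * 7, 10))   # feature maps are 7x7 after the second pool
b_fc = np.zeros(10)

net = TinyNet(W_conv1, b_conv1, W_conv2, b_conv2, W_fc, b_fc)
x = rng.standard_normal((4, 1, 28, 28))               # batch of 4 dummy images
t = np.eye(10)[rng.integers(0, 10, size=4)]           # one-hot dummy labels
scores, loss = net.forward(x, t)
print(scores.shape, loss)                             # expect (4, 10) and a scalar loss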

Level 2: Implement backpropagation of the neural network model

import numpy

from layers import Convolution, Relu, FullyConnected, MaxPool, SoftmaxWithLoss


 

class TinyNet:

    def __init__(self, W_conv1, b_conv1, W_conv2, b_conv2, W_fc, b_fc):

        self.conv1 = Convolution(W_conv1, b_conv1, stride=1, pad=1)

        self.relu1 = Relu()

        self.pool1 = MaxPool(2, 2, stride=2, pad=0)

        self.conv2 = Convolution(W_conv2, b_conv2, stride=1, pad=1)

        self.relu2 = Relu()

        self.pool2 = MaxPool(2, 2, stride=2, pad=0)

        self.fc = FullyConnected(W_fc, b_fc)

        self.loss = SoftmaxWithLoss()


 

    def forward(self, x, t):

        x = self.conv1.forward(x)

        x = self.relu1.forward(x)

        x = self.pool1.forward(x)

        x = self.conv2.forward(x)

        x = self.relu2.forward(x)

        x = self.pool2.forward(x)

        x = self.fc.forward(x)

        loss = self.loss.forward(x, t)

        return x, loss

    def backward(self):

        ########## Begin ##########

        dx = self.loss.backward()  

        dx = self.fc.backward(dx)  

        dx = self.pool2.backward(dx)  

        dx = self.relu2.backward(dx)  

        dx = self.conv2.backward(dx)  

        dx = self.pool1.backward(dx)  

        dx = self.relu1.backward(dx)  

        dx = self.conv1.backward(dx)

        ########## End ##########

        return self.conv1.dW, self.conv1.db, self.conv2.dW, self.conv2.db, self.fc.dW, self.fc.db
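
backward() takes no arguments here because the SoftmaxWithLoss layer is assumed to cache the softmax output and the labels during the forward pass; it then starts from the loss gradient and walks the layers in exactly the reverse of the forward order, returning the parameter gradients in the same order as the constructor arguments. A minimal check (reusing net, x, and t from the Level 1 sketch above) is to confirm that every gradient has the same shape as its parameter:

_, loss = net.forward(x, t)                 # forward must run first so each layer caches its inputs
grads = net.backward()
params = (W_conv1, b_conv1, W_conv2, b_conv2, W_fc, b_fc)
for p, g in zip(params, grads):
    assert p.shape == g.shape               # each gradient mirrors its parameter's shape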

Level 3: Implement gradient descent training for the neural network

import numpy

from layers import Convolution, Relu, FullyConnected, MaxPool, SoftmaxWithLoss


 

class TinyNet:

    def __init__(self, W_conv1, b_conv1, W_conv2, b_conv2, W_fc, b_fc):

        self.conv1 = Convolution(W_conv1, b_conv1, stride=1, pad=1)

        self.relu1 = Relu()

        self.pool1 = MaxPool(2, 2, stride=2, pad=0)

        self.conv2 = Convolution(W_conv2, b_conv2, stride=1, pad=1)

        self.relu2 = Relu()

        self.pool2 = MaxPool(2, 2, stride=2, pad=0)

        self.fc = FullyConnected(W_fc, b_fc)

        self.loss = SoftmaxWithLoss()

    def forward(self, x, t):

        x = self.conv1.forward(x)

        x = self.relu1.forward(x)

        x = self.pool1.forward(x)

        x = self.conv2.forward(x)

        x = self.relu2.forward(x)

        x = self.pool2.forward(x)

        x = self.fc.forward(x)

        loss = self.loss.forward(x, t)

        return x, loss

    def backward(self):

        dx = self.loss.backward()

        dx = self.fc.backward(dx)

        dx = self.pool2.backward(dx)

        dx = self.relu2.backward(dx)

        dx = self.conv2.backward(dx)

        dx = self.pool1.backward(dx)

        dx = self.relu1.backward(dx)

        dx = self.conv1.backward(dx)

        return self.conv1.dW, self.conv1.db, self.conv2.dW, self.conv2.db, self.fc.dW, self.fc.db


 

def train_one_iter(W_conv1, b_conv1, W_conv2, b_conv2, W_fc, b_fc, x, t, learning_rate):

    network = TinyNet(W_conv1, b_conv1, W_conv2, b_conv2, W_fc, b_fc)

    out, loss = network.forward(x, t)

    dW_conv1, db_conv1, dW_conv2, db_conv2, dW_fc, db_fc = network.backward()

    ########## Begin ##########

    new_W_conv1 = W_conv1 - dW_conv1 * learning_rate  

    new_b_conv1 = b_conv1 - db_conv1 * learning_rate  

    new_W_conv2 = W_conv2 - dW_conv2 * learning_rate  

    new_b_conv2 = b_conv2 - db_conv2 * learning_rate  

    new_W_fc = W_fc - dW_fc * learning_rate

    new_b_fc = b_fc - db_fc * learning_rate

    ########## End ##########

    return new_W_conv1, new_b_conv1, new_W_conv2, new_b_conv2, new_W_fc, new_b_fc
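
The update inside train_one_iter is plain gradient descent: each parameter moves against its gradient, new_param = param - learning_rate * grad. Since the function returns the updated parameters in the same order it accepts them, a driver loop only has to thread the parameter tuple through successive calls. The loop below is a sketch on dummy data (reusing rng and the weights from the Level 1 sketch); x_train, t_train, the batch size, and the learning rate are placeholders to replace with the real Educoder dataset and hyperparameters.

# Dummy dataset: 64 random "images" with random one-hot labels.
x_train = rng.standard_normal((64, 1, 28, 28))
t_train = np.eye(10)[rng.integers(0, 10, size=64)]

params = (W_conv1, b_conv1, W_conv2, b_conv2, W_fc, b_fc)
batch_size, learning_rate = 16, 0.01

for epoch in range(5):
    for i in range(0, len(x_train), batch_size):
        x_batch = x_train[i:i + batch_size]
        t_batch = t_train[i:i + batch_size]
        # train_one_iter rebuilds TinyNet from the current parameters,
        # runs forward/backward, and returns the updated parameters.
        params = train_one_iter(*params, x_batch, t_batch, learning_rate)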


Source: blog.csdn.net/qq_57409899/article/details/124992904