Implementing the simplest 3-layer neural network in various languages

The following is the simplest 3-layer neural network code implemented in Python:

```python
import numpy as np

# Define the structure of the neural network
n_input = 2   # Number of nodes in the input layer
n_hidden = 3  # Number of nodes in the hidden layer
n_output = 1  # Number of nodes in the output layer

# Initialize the weights of the neural network with random values
weights = {
    'hidden': np.random.uniform(size=(n_input, n_hidden)),
    'output': np.random.uniform(size=(n_hidden, n_output))
}

# Define the activation function (sigmoid)
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Perform forward propagation
def forward_propagation(X):
    hidden_layer_input = np.dot(X, weights['hidden'])
    hidden_layer_output = sigmoid(hidden_layer_input)
    output_layer_input = np.dot(hidden_layer_output, weights['output'])
    output_layer_output = sigmoid(output_layer_input)
    return output_layer_output

# Sample input: three samples with two features each
X = np.array([[1, 2], [3, 4], [5, 6]])

# Perform forward propagation and print the output
output = forward_propagation(X)
print(output)
```

This simple 3-layer neural network has 2 input nodes, 3 hidden nodes, and 1 output node. During forward propagation, the input is multiplied by the hidden-layer weights (a dot product) and the result is passed through the activation function (the sigmoid here) to give the hidden-layer output. That output is then multiplied by the output-layer weights and passed through the sigmoid again to produce the final predicted value. In the code above the weights are initialized randomly, so the printed output will differ from run to run.
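To make the dot-product shapes in that description concrete, the short sketch below (an illustrative addition, not part of the original code; it assumes the `weights`, `sigmoid`, `forward_propagation`, and `X` definitions from the listing above have already been executed) traces a single sample through the same two matrix products:

```python
# Illustrative shape trace, assuming the definitions from the Python listing above
sample = np.array([[1, 2]])                        # one sample, shape (1, 2)

hidden_in  = np.dot(sample, weights['hidden'])     # (1, 2) @ (2, 3) -> (1, 3)
hidden_out = sigmoid(hidden_in)                    # sigmoid is element-wise, still (1, 3)

final_in  = np.dot(hidden_out, weights['output'])  # (1, 3) @ (3, 1) -> (1, 1)
final_out = sigmoid(final_in)                      # one predicted value in (0, 1)

print(final_out.shape)                 # (1, 1)
print(forward_propagation(X).shape)    # the full batch of 3 samples gives (3, 1)
```

Because the sigmoid squashes its input into the interval (0, 1), every prediction produced this way is a value between 0 and 1.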

The following is the simplest 3-layer neural network code implemented in C:

```c
#include <stdio.h>
#include <math.h>

#define N_INPUT  2  // Number of nodes in the input layer
#define N_HIDDEN 3  // Number of nodes in the hidden layer
#define N_OUTPUT 1  // Number of nodes in the output layer

// Weights of the neural network, at file scope so that
// forward_propagation can access them
double weights_hidden[N_INPUT][N_HIDDEN] = {
    {0.1, 0.4, 0.7},
    {0.2, 0.5, 0.8}
};

double weights_output[N_HIDDEN][N_OUTPUT] = {
    {0.6},
    {0.9},
    {0.3}
};

// Function declarations
double sigmoid(double x);
void forward_propagation(double input[N_INPUT], double hidden_output[N_HIDDEN], double output[N_OUTPUT]);

int main(void) {
    // Sample input
    double input[N_INPUT] = {1, 2};

    // Perform forward propagation and print the output
    double hidden_output[N_HIDDEN];
    double output[N_OUTPUT];
    forward_propagation(input, hidden_output, output);
    printf("%lf\n", output[0]);

    return 0;
}

// Perform forward propagation
void forward_propagation(double input[N_INPUT], double hidden_output[N_HIDDEN], double output[N_OUTPUT]) {
    // Calculate the output of the hidden layer
    for (int i = 0; i < N_HIDDEN; i++) {
        double hidden_input = 0;
        for (int j = 0; j < N_INPUT; j++) {
            hidden_input += input[j] * weights_hidden[j][i];
        }
        hidden_output[i] = sigmoid(hidden_input);
    }

    // Calculate the output of the output layer
    for (int i = 0; i < N_OUTPUT; i++) {
        double output_input = 0;
        for (int j = 0; j < N_HIDDEN; j++) {
            output_input += hidden_output[j] * weights_output[j][i];
        }
        output[i] = sigmoid(output_input);
    }
}

// Define the activation function (sigmoid)
double sigmoid(double x) {
    return 1 / (1 + exp(-x));
}
```
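Note that the weight arrays are defined at file scope here so that `forward_propagation` can reach them, and that `-lm` is needed when compiling with gcc so that `exp` from `math.h` is linked. Unlike the Python version, the C listing uses fixed example weights, so its result is reproducible. As a quick sanity check (again an illustrative sketch, not part of the original article), the same forward pass with the same fixed weights can be reproduced in a few lines of NumPy, and it should print the same value as the compiled C program:

```python
import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# The same fixed weights and sample input as in the C listing above
weights_hidden = np.array([[0.1, 0.4, 0.7],
                           [0.2, 0.5, 0.8]])      # (N_INPUT, N_HIDDEN)
weights_output = np.array([[0.6], [0.9], [0.3]])  # (N_HIDDEN, N_OUTPUT)
x = np.array([1.0, 2.0])

hidden = sigmoid(x @ weights_hidden)              # hidden-layer activations, shape (3,)
y = sigmoid(hidden @ weights_output)              # final prediction, shape (1,)
print(y[0])                                       # should match the C program's output
```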


Origin blog.csdn.net/ls1300005/article/details/131712463