Growing and shrinking a fully connected neural network (20220101)

This modern C++ program implements a fully connected neural network with any number of layers and any number of nodes per layer, trained with plain gradient descent (backpropagation). The training data are the four input patterns {1,0}, {1,1}, {0,1}, {0,0} with targets {1}, {0}, {1}, {0} (the XOR function). The program first reads the text file "s1.txt"; if that file contains the string "{2,4,3,1}", the network structure becomes {2,4,3,1}: an input layer with 2 nodes, a first hidden layer with 4 nodes, a second hidden layer with 3 nodes, and an output layer with 1 node. After training that network, the program reads a second string from the text file "s2.txt", such as "{2,4,1}", and rebuilds the network: this time the input layer has 2 nodes, the single hidden layer has 4 nodes, and the output layer has 1 node, and backpropagation training runs again.
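Before running the program, the two structure files must exist in the working directory. A minimal sketch of a separate one-off setup program that creates them (an addition, not part of the original post; the brace-and-comma format matches what readLayersFromFile below parses):

#include <fstream>

int main() {
    std::ofstream("s1.txt") << "{2,4,3,1}";  // first structure: 2-4-3-1
    std::ofstream("s2.txt") << "{2,4,1}";    // second structure: 2-4-1
    return 0;
}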

#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <cmath>
#include <random>

using namespace std;

// Sigmoid activation and its derivative
double sigmoid(double x) {
    return 1 / (1 + exp(-x));
}

// Note: takes the *output* of sigmoid, y = sigmoid(x), not x itself,
// since d/dx sigmoid(x) = y * (1 - y)
double sigmoid_derivative(double y) {
    return y * (1 - y);
}

// Fully connected neural network
class NeuralNetwork {
public:
    vector<int> layers;
    vector<vector<double>> weights;
    vector<vector<double>> outputs;

    // weights[i-1] is the weight matrix between layer i-1 and layer i,
    // flattened row-major: entry (j, l) sits at index j * layers[i-1] + l,
    // where j indexes a node in layer i and l a node in layer i-1.
    NeuralNetwork(vector<int> layers) : layers(layers) {
        random_device rd;
        mt19937 gen(rd());
        uniform_real_distribution<> dis(0, 1);

        for (int i = 1; i < layers.size(); ++i) {
            vector<double> weightLayer;
            for (int j = 0; j < layers[i - 1] * layers[i]; ++j) {
                weightLayer.push_back(dis(gen));
            }
            weights.push_back(weightLayer);
        }
    }

    // Forward pass; caches every layer's activations in `outputs` for train()
    vector<double> forward(vector<double> input) {
        outputs.clear();
        outputs.push_back(input);

        for (int i = 1; i < layers.size(); ++i) {
            vector<double> output(layers[i]);
            int k = 0;
            for (int j = 0; j < layers[i]; ++j) {
                double sum = 0;
                for (int l = 0; l < layers[i - 1]; ++l) {
                    sum += outputs[i - 1][l] * weights[i - 1][k++];
                }
                output[j] = sigmoid(sum);
            }
            outputs.push_back(output);
        }
        return outputs.back();
    }

    // Forward pass that also prints the input and each layer's activations
    vector<double> forwarOut(vector<double> input) {
        outputs.clear();
        outputs.push_back(input);

        cout << endl << "input [";
        for (int jj = 0; jj < input.size(); ++jj) {
            cout << input[jj] << (jj + 1 < input.size() ? ", " : "");
        }
        cout << "]" << endl;

        for (int i = 1; i < layers.size(); ++i) {
            vector<double> output(layers[i]);
            int k = 0;
            cout << "layer " << i << " [";
            for (int j = 0; j < layers[i]; ++j) {
                double sum = 0;
                for (int l = 0; l < layers[i - 1]; ++l) {
                    sum += outputs[i - 1][l] * weights[i - 1][k++];
                }
                output[j] = sigmoid(sum);
                cout << output[j] << (j + 1 < layers[i] ? ", " : "");
            }
            cout << "]" << endl;
            outputs.push_back(output);
        }
        return outputs.back();
    }
    //---------------------------------------------------------------------

    // One training step (stochastic gradient descent):
    // forward pass, backpropagate deltas, then update the weights
    void train(vector<double> input, vector<double> target, double lr) {
        forward(input);
        vector<vector<double>> deltas(layers.size());
        // Backpropagate: output-layer deltas come from the target error,
        // hidden-layer deltas from the layer above (the input layer needs none)
        for (int i = layers.size() - 1; i >= 1; --i) {
            deltas[i].resize(layers[i]);
            if (i == layers.size() - 1) {
                for (int j = 0; j < layers[i]; ++j) {
                    double error = target[j] - outputs[i][j];
                    deltas[i][j] = error * sigmoid_derivative(outputs[i][j]);
                }
            }
            else {
                for (int j = 0; j < layers[i]; ++j) {
                    double error = 0;
                    for (int l = 0; l < layers[i + 1]; ++l) {
                        // weight from node j (layer i) to node l (layer i+1);
                        // forward() stores it at index l * layers[i] + j
                        error += weights[i][l * layers[i] + j] * deltas[i + 1][l];
                    }
                    deltas[i][j] = error * sigmoid_derivative(outputs[i][j]);
                }
            }
        }

        for (int i = layers.size() - 1; i > 0; --i) {
            int k = 0;
            for (int j = 0; j < layers[i]; ++j) {
                for (int l = 0; l < layers[i - 1]; ++l) {
                    weights[i - 1][k++] += lr * deltas[i][j] * outputs[i - 1][l];
                }
            }
        }
    }
};
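// Sanity check (an addition, not in the original post): the network has no
// bias terms, so each layer computes sigmoid(W * previous_layer) only. With
// the trivial structure {1,1} there is a single weight w, and the forward
// pass must reduce to sigmoid(w * x):
//
//     NeuralNetwork tiny({ 1, 1 });
//     double w = tiny.weights[0][0];
//     // tiny.forward({ 2.0 })[0] == sigmoid(w * 2.0)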

// Read the layer structure (e.g. "{2,4,3,1}") from a text file
vector<int> readLayersFromFile(const string& filename) {
    ifstream file(filename);
    string str;
    if (file) {
        getline(file, str);
    }
    vector<int> layers;
    if (str.length() < 2) {
        return layers;  // missing file or malformed line: return empty
    }
    stringstream ss(str.substr(1, str.length() - 2));  // strip '{' and '}'
    string token;
    while (getline(ss, token, ',')) {
        layers.push_back(stoi(token));
    }
    return layers;
}
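// A small addition, not in the original post: fall back to a hard-coded
// structure when the file is missing or malformed, so the demo still runs
// without s1.txt / s2.txt present (readLayersFromFile above returns an
// empty vector in that case).
vector<int> readLayersOrDefault(const string& filename, const vector<int>& fallback) {
    vector<int> layers = readLayersFromFile(filename);
    return layers.size() >= 2 ? layers : fallback;  // need at least input + output
}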

int main() {
    // First pass: read the network structure from s1.txt and build the network
    vector<int> layers1 = readLayersFromFile("s1.txt");
    NeuralNetwork nn1(layers1);

    // Train on all four input patterns (XOR targets)
    vector<vector<double>> inputs1 = { {1, 0}, {1, 1}, {0, 1}, {0, 0} };
    vector<vector<double>> targets1 = { {1}, {0}, {1}, {0} };

    for (int epoch = 0; epoch < 1000; ++epoch) {
        for (int i = 0; i < inputs1.size(); ++i) {
            nn1.train(inputs1[i], targets1[i], 0.5);
        }
    }
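
    // Optional addition (not in the original post): inspect the first network
    // before it is discarded, e.g. nn1.forwarOut({ 0, 1 });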

    // Second pass: read the new structure from s2.txt and rebuild the network
    vector<int> layers2 = readLayersFromFile("s2.txt");
    NeuralNetwork nn2(layers2);

    // Train the rebuilt network on the same data
    for (int epoch = 0; epoch < 5000; ++epoch) {
        for (int i = 0; i < inputs1.size(); ++i) {
            nn2.train(inputs1[i], targets1[i], 0.5);
        }
    }

    nn2.forwarOut({ 0, 1 });

    cout << endl;
    nn2.forwarOut({ 1, 1 });

    cout << endl;
    nn2.forwarOut({ 1, 0 });

    cout << endl;
    nn2.forwarOut({ 0, 0 });

    return 0;
}
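
To check that training has actually converged, it helps to track an error measure. A minimal sketch, assuming the NeuralNetwork class above; the helper name mse is an addition, not part of the original post. It averages the squared output error over a dataset and should fall toward zero as the epochs accumulate:

// Drop into the same translation unit, below the NeuralNetwork class
double mse(NeuralNetwork& nn,
           const vector<vector<double>>& inputs,
           const vector<vector<double>>& targets) {
    double err = 0;
    int count = 0;
    for (int i = 0; i < inputs.size(); ++i) {
        vector<double> out = nn.forward(inputs[i]);
        for (int j = 0; j < out.size(); ++j) {
            double d = targets[i][j] - out[j];
            err += d * d;
            ++count;
        }
    }
    return count ? err / count : 0;
}

Called as, say, mse(nn2, inputs1, targets1) every few hundred epochs inside the training loop, it makes the effect of switching from the {2,4,3,1} structure to the {2,4,1} structure directly visible.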

Source: https://blog.csdn.net/aw344/article/details/132619932