Win7 x64 + VS2015 C++ with TensorFlow - Part 1: Simple Neural Network Training and Prediction

  This is a demo of a neural network with one hidden layer and one output layer.

  The input data is 1-dimensional. The hidden layer has 10 neurons and the output layer has 10 neurons. Parts of the code are temporary tweaks for testing, and the data is hard-coded.
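  In formula form, the forward pass implemented in the code below is

      L1 = sin(X * W1 + b1)
      prediction = sin(L1 * W2 + b2)

  where X is an N x 1 batch of inputs, W1 is 1 x 10, W2 is 10 x 10, and b1, b2 are 1 x 10 row vectors broadcast across the batch. Sin is the activation function used in this demo.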

A follow-up post will cover feeding input data dynamically. The full project is on the netdisk below; it bundles the TensorFlow lib, so it is large.

  Link: https://pan.baidu.com/s/1fwP7-D1-Usz1Ec5cY-RaYg  Password: 0hlb
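  As a preview of the dynamic-input case, here is a minimal sketch (not from the original project): copy runtime values from a std::vector into a tensorflow::Tensor and pass that Tensor in the feed map instead of a hard-coded initializer list. phX, prediction, session, and outputs refer to the names defined in the code below.

    std::vector<double> xs = { 1.0, 2.0, 3.0 };  // values known only at runtime
    tensorflow::Tensor tx(tensorflow::DT_DOUBLE,
                          tensorflow::TensorShape({ (tensorflow::int64)xs.size(), 1 }));
    for (size_t i = 0; i < xs.size(); ++i)
        tx.flat<double>()(i) = xs[i];          // fill the N x 1 tensor row by row
    // then: session.Run({ { phX, tx } }, { prediction }, &outputs);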

  
#include <fstream>
#include <utility>
#include <vector>
#include <Eigen/Core>
#include <Eigen/Dense>

#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/command_line_flags.h"

#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/gradients.h"
using namespace std;
using namespace tensorflow;
using namespace tensorflow::ops;
using tensorflow::Flag;
using tensorflow::Tensor;
using tensorflow::Status;
using tensorflow::string;
using tensorflow::int32;

void TestTrain10Value()
{
    Scope root = Scope::NewRootScope();

    // Define placeholders for the input x and the target y.
    auto phX = Placeholder(root, DT_DOUBLE);
    auto phY = Placeholder(root, DT_DOUBLE);

    // Define the hidden layer of the network.
    // Each column of the weight matrix is one neuron, so the number of
    // columns is the number of neurons: 10 neurons, each with 1-D input.
    auto varWL1 = Variable(root, { 1, 10 }, DT_DOUBLE);
    auto Weights_L1 = Assign(root, varWL1, { { 0.001, 0.002, 0.012, 0.003, 0.004, 0.001, 0.002, 0.0015, 0.0018, 0.007 } });
    // The columns of b must match the columns of W: 10 neurons need 10 biases.
    auto varBL1 = Variable(root, { 1, 10 }, DT_DOUBLE);
    //     auto biases_L1 = Assign(root, vBL1, ZerosLike(root, { 1, 10 }));
    auto biases_L1 = Assign(root, varBL1, { { 0.010, 0.011, 0.012, 0.009, 0.002, 0.003, 0.001, 0.007, 0.005, 0.009 } });
    auto matmul = MatMul(root, phX, varWL1);
    auto Wx_plus_b_L1 = Add(root, matmul, varBL1);
//    auto Wx_plus_b_L1 = Add(root, MatMul(root, phX, varWL1), varBL1);
    //    auto Wx_plus_b_L1 = Add(root, Mul(root, phX, varWL1), varBL1);
    auto L1 = Sin(root, Wx_plus_b_L1);
    // Layer 2: 10 neurons, each with a 10-dimensional weight vector.
    auto varWL2 = Variable(root, { 10, 10 }, DT_DOUBLE);
    auto Weights_L2 = Assign(root, varWL2, { { 0.001, 0.002, 0.012, 0.003, 0.004, 0.001, 0.002, 0.0015, 0.0018, 0.007 },
    { 0.001, 0.002, 0.012, 0.003, 0.004, 0.001, 0.002, 0.0015, 0.0018, 0.007 } ,
    { 0.001, -0.002, 0.012, 0.003, 0.004, 0.001, 0.002, 0.0015, 0.0018, 0.007 } ,
    { 0.001, 0.002, -0.012, 0.003, 0.004, 0.001, 0.002, 0.0015, 0.0018, 0.007 },
    { 0.001, 0.002, 0.012, -0.003, 0.004, 0.001, 0.002, 0.0015, 0.0018, 0.007 },
    { 0.001, 0.002, 0.012, 0.003,-0.004, 0.001, 0.002, 0.0015, 0.0018, 0.007 },
    { 0.001, 0.002, 0.012, 0.003, 0.004, -0.001, 0.002, 0.0015, 0.0018, 0.007 },
    { 0.001, 0.002, 0.012, 0.003, 0.004, 0.001, -0.002, 0.0015, 0.0018, 0.007 },
    { 0.001, 0.002, 0.012, 0.003, 0.004, 0.001, 0.002, -0.0015, 0.0018, 0.007 },
    { 0.001, 0.002, 0.012, 0.003, 0.004, 0.001, 0.002, 0.0015, -0.0018, 0.007 }
    });
    // The columns of b must match the columns of W.
    auto varBL2 = Variable(root, { 1, 10 }, DT_DOUBLE);
    auto biases_L2 = Assign(root, varBL2, { { 0.010, 0.011, 0.012, 0.009, 0.002, 0.003, 0.001, 0.007, 0.005, 0.009 } });
    auto Wx_plus_b_L2 = Add(root, MatMul(root, L1, varWL2), varBL2);
    auto L2 = Sin(root, Wx_plus_b_L2);
    // Define the network output layer: here it is simply L2.
    auto prediction = L2;
    // Loss: element-wise squared error between prediction and the target.
    // It is left unreduced (a matrix); ReduceMean could collapse it to a scalar.
    auto square = Square(root, Subtract(root, prediction, phY));
    auto loss = square; // ReduceMean(root, square, { 0, 1 });

    std::vector<Output> grad_outputs;
    TF_CHECK_OK(AddSymbolicGradients(root, { loss }, { varWL1, varBL1, varWL2, varBL2 }, &grad_outputs));
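    // grad_outputs[i] holds d(loss)/d(variable i), in the same order as the
    // variable list passed above: varWL1, varBL1, varWL2, varBL2.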

    // Update the weights and biases using gradient descent (learning rate 0.03).
    auto apply_w1 = ApplyGradientDescent(root, varWL1, Cast(root, 0.03, DT_DOUBLE), { grad_outputs[0] });
    auto apply_b1 = ApplyGradientDescent(root, varBL1, Cast(root, 0.03, DT_DOUBLE), { grad_outputs[1] });
    auto apply_w2 = ApplyGradientDescent(root, varWL2, Cast(root, 0.03, DT_DOUBLE), { grad_outputs[2] });
    auto apply_b2 = ApplyGradientDescent(root, varBL2, Cast(root, 0.03, DT_DOUBLE), { grad_outputs[3] });
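    // Each ApplyGradientDescent op updates its variable in place, var -= 0.03 * grad;
    // running all four ops (as in the training loop below) performs one SGD step.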
    // (In the Python API this whole block would be roughly:
    //  train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss).)
    std::vector<Tensor> outputs;
    ClientSession session(root);
    // Run the Assign ops once to initialize all four variables.
    TF_CHECK_OK(session.Run({ Weights_L1, biases_L1, Weights_L2, biases_L2 }, &outputs));
    //     std::cout << outputs[0].matrix<double>() << std::endl << outputs[1].matrix<double>() << std::endl;
    //     Note: {{ 5.0 },{ 4.0 }} is two 1-D samples; {{ 5.0, 4.0 }} is one 2-D sample.
    //     TF_CHECK_OK(session.Run({ { phX,{ { 5.0 },{ 4.0 } } } }, { Wx_plus_b_L1 }, {}, &outputs));
    //     std::cout << outputs[0].matrix<double>() << std::endl;
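    // Training loop: 20000 iterations on one fixed batch. X is ten 1-D samples
    // (1.0 through 10.0); Y is the 10x10 identity matrix, i.e. one-hot targets.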

    for (int i = 0; i < 20000; ++i) {

        TF_CHECK_OK(session.Run({ { phX,{ { 1.0 },{ 2.0 },{ 3.0 },{ 4.0 },{ 5.0 },{ 6.0 },{ 7.0 },{ 8.0 } ,{ 9.0 },{ 10.0 } } },
        { phY,{ { 1.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0 },{ 0.0 , 1.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0 },
        { 0.0 , 0.0, 1.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0 },{ 0.0 , 0.0, 0.0 , 1.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0 },
        { 0.0 , 0.0, 0.0 , 0.0, 1.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0 },{ 0.0 , 0.0, 0.0 , 0.0, 0.0 , 1.0, 0.0 , 0.0, 0.0 , 0.0 },
        { 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 1.0 , 0.0, 0.0 , 0.0 },{ 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 1.0, 0.0 , 0.0 },
        { 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 1.0 , 0.0 },{ 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 1.0 } } }
        },
        { apply_w1, apply_b1, apply_w2, apply_b2 },
            &outputs));


        if (i % 100 == 0) {
            TF_CHECK_OK(session.Run({ { phX,{ { 1.0 },{ 2.0 },{ 3.0 },{ 4.0 },{ 5.0 },{ 6.0 },{ 7.0 },{ 8.0 } ,{ 9.0 },{ 10.0 } } },
            { phY,{
                { 1.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0 },{ 0.0 , 1.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0 },
                { 0.0 , 0.0, 1.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0 },{ 0.0 , 0.0, 0.0 , 1.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0 },
                { 0.0 , 0.0, 0.0 , 0.0, 1.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0 },{ 0.0 , 0.0, 0.0 , 0.0, 0.0 , 1.0, 0.0 , 0.0, 0.0 , 0.0 },
                { 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 1.0 , 0.0, 0.0 , 0.0 },{ 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 1.0, 0.0 , 0.0 },
                { 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 1.0 , 0.0 },{ 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 0.0, 0.0 , 1.0 } } }
            },
            { loss },
                &outputs));
            std::cout << "Loss after " << i << " steps " << outputs[0].matrix<double>() << std::endl;
        }
        //std::cout << outputs[0].matrix<double>() << " " << outputs[1].matrix<double>() << std::endl;
    }
    
    // Predict on a few single inputs; each output is a 1x10 row.
    TF_CHECK_OK(session.Run({ { phX,{ { 1.8 } } } }, { prediction }, &outputs));
    cout << "DNN output: " << outputs[0].matrix<double>() << std::endl;
    TF_CHECK_OK(session.Run({ { phX,{ { 2.0 } } } }, { prediction }, &outputs));
    cout << "DNN output: " << outputs[0].matrix<double>() << std::endl;
    TF_CHECK_OK(session.Run({ { phX,{ { 3.0 } } } }, { prediction }, &outputs));
    cout << "DNN output: " << outputs[0].matrix<double>() << std::endl;
}
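The listing above has no entry point. A minimal driver, assuming the TensorFlow libs are linked as in the bundled project, could look like this; InitMain initializes TensorFlow's logging/platform layer and is declared in the already-included init_main.h:

int main(int argc, char* argv[])
{
    // Initialize TensorFlow's platform layer before building any graphs.
    tensorflow::port::InitMain(argv[0], &argc, &argv);
    TestTrain10Value();
    return 0;
}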


Reposted from blog.csdn.net/zsyddl2/article/details/80973388