Implementing Matrix Operations and Handwritten-Digit Recognition in C++

matrix.hpp

#ifndef MATRIX_HPP
#define MATRIX_HPP
#include <vector>
#include <initializer_list>
#include <iostream>
#include <ostream>
#include <functional>
#include <cstdio>

/* Conventions
1. Member variables end with a trailing "_".
2. Never write "using namespace ..." inside a header.
3. Non-primitive types are passed by (const) reference to avoid copies and improve efficiency.
4. About "empty" memory: in C, memory returned by malloc is usually uninitialized
    - the bytes behind a malloc'ed pointer hold indeterminate values
    - the same is true for memory obtained with new (for trivial types)
    - C++ containers such as std::vector or std::list, however, value-initialize
      the elements they allocate, so elements that are not explicitly set read as 0
*/
/* Requirements
1. Only 2-D matrices are represented; even vectors are stored as a Matrix.
    - only float elements are supported
2. Matrix multiplication is provided through gemm.
3. Matrix inversion must be supported.
4. A matrix can be created from a given number of rows and columns.
5. A matrix can be created with data filled from an initializer list such as {1,2,3}.
6. Ordinary addition/subtraction with a scalar is supported.
*/
// The include guard above prevents duplicate definitions.
// The matrix class:
class Matrix{
    public:
        Matrix() = default;
        // The initializer list is passed by const reference to avoid a copy.
        Matrix(int rows, int cols, const std::initializer_list<float>& data = {}){
            rows_ = rows;
            cols_ = cols;
            // Implicit conversion: this is really a std::vector assignment.
            data_ = data;
            // 1. data is empty: create without initial values
            // 2. data is not empty: create with the given values
            //    a. the element count equals rows*cols
            //    b. the element count differs from rows*cols -> invalid
            if(data_.empty()){
                data_.resize(rows * cols);
            }else{
                if((int)data_.size() != rows * cols){
                    printf("Invalid construct.\n");
                }
            }
        }
        // The trailing const marks a const member function: it does not modify members.
        int rows() const{
            return rows_;
        }
        int cols() const{
            return cols_;
        }
        const std::vector<float>& data() const{
            return data_;
        }
        // Raw pointers to the underlying storage
        const float* ptr() const{
            return data_.data();
        }
        float* ptr(){
            return data_.data();
        }
        // Read/write element access (operator overload)
        float& operator()(int ir, int ic){
            // row-major indexing
            int index = ir * cols_ + ic;
            return data_[index];
        }
        // Read-only element access (operator overload)
        const float& operator()(int ir, int ic) const{
            // data_ is contiguous in memory.
            // For a 3x3 matrix, data_ = {1,2,3,4,5,6,7,8,9} represents
            /*
            1 2 3
            4 5 6
            7 8 9
            */
            // so element (row 2, col 0) is 7.
            // Map the 2-D index back to 1-D, row-major:
            int index = ir * cols_ + ic;
            return data_[index];
        }

        // Apply a function to every element and return the result as a new matrix
        Matrix element_wise(const std::function<float(float)>& func) const{
            Matrix output(*this);
            for(int i = 0; i < output.data_.size(); ++i){
                output.data_[i] = func(output.data_[i]);
            }
            return output;
        }
        // Element-wise (Hadamard) product; use gemm() for real matrix multiplication.
        Matrix operator*(const Matrix& value) const{
            // Currently only matrices of identical size are supported for element-wise ops.
            Matrix output(*this);
            auto pleft = output.data_.data();
            auto pright = value.data_.data();
            for(int i = 0; i < output.data_.size(); ++i){
                *pleft++ *= *pright++;
            }
            return output;
        }
        Matrix operator*(float value) const{
            // Matrix output = *this; would clone the matrix and loop over it manually.
            // Instead we reuse element_wise with a C++11 lambda. [] is the capture list;
            // captures can be by value or by reference.
            return element_wise(
                [&value](float x) -> float{
                    return x * value;
                });
        }
        Matrix operator+(float value) const{
            return element_wise(
                [&value](float x) -> float{
                    return x + value;
                });
        }
        Matrix operator-(float value) const{
            return element_wise(
                [&value](float x) -> float{
                    return x - value;
                });
        }
        Matrix operator/(float value) const{
            return element_wise(
                [&value](float x) -> float{
                    return x / value;
                });
        }
        // gemm as a member function (wraps the free gemm declared below)
        Matrix gemm(const Matrix& other, bool ta, bool tb, float alpha, float beta);

    private:
        int rows_ = 0;
        int cols_ = 0;
        std::vector<float> data_;
};

// Global operator<< overload (inline, since it is defined in a header)
inline std::ostream& operator<<(std::ostream& out, const Matrix& m){
    // a const reference can only call const member functions
    printf("Matrix(%dx%d)\n", m.rows(), m.cols());
    for(int ir = 0; ir < m.rows(); ++ir){
        for(int ic = 0; ic < m.cols(); ++ic){
            std::cout << m(ir, ic) << "\t";
        }
        std::cout << "\n";
    }
    return out;
}
// Declarations only; the implementations live in matrix.cpp
Matrix operator*(float value, const Matrix& m);
// Matrix multiplication with optional transposes: C = alpha * op(A) * op(B) + beta
Matrix gemm(const Matrix& a, bool ta, const Matrix& b, bool tb, float alpha = 1.0f, float beta = 0.0f);
// Matrix inverse (square matrices only)
Matrix inverse(const Matrix& a);
#endif //MATRIX_HPP

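A minimal usage sketch of the interface above (my own example, not part of the original post); it only relies on the constructor, operator(), the scalar operators, and operator<< declared in matrix.hpp:

    #include "matrix.hpp"

    int main(){
        Matrix m(2, 3, {1, 2, 3, 4, 5, 6});   // 2x3, stored row-major
        m(1, 0) = 40;                          // write access through operator()
        Matrix n = m * 0.5f + 1.0f;            // element-wise scalar ops return new matrices
        std::cout << m << n;                   // prints "Matrix(2x3)" followed by the data
        return 0;
    }
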
matrix.cpp

#include "matrix.hpp"
#include <openblas/cblas.h>
 //将gemm函数放到类里面
 //先声明后使用
Matrix operator*(float value,const Matrix& m) {
    
    
    return m*value;
}
Matrix Matrix::gemm(const Matrix& other, bool ta,bool tb ,float alpha,float beta){
    
    
        //如果直接写gemm 就表示是当前作用域下的gemm 添加:: 表示全局作用域的gemm
        return ::gemm(*this,ta,other,tb,alpha,beta);
        
        }
Matrix gemm(const Matrix& a, bool ta, const Matrix& b, bool tb, float alpha, float beta){
    // C = alpha * op(A) * op(B) + beta * C, where op() optionally transposes:
    // AB = C, A^T B = C, A B^T = C, ...
    int ta_rows = ta ? a.cols() : a.rows();
    int ta_cols = ta ? a.rows() : a.cols();
    int tb_rows = tb ? b.cols() : b.rows();
    int tb_cols = tb ? b.rows() : b.cols();
    Matrix c(ta_rows, tb_cols);
    int m = ta_rows;
    int n = tb_cols;
    int k = ta_cols;
    // Leading dimensions: the physical stride of one row, which may differ from
    // the logical column count of op(A)/op(B).
    int lda = a.cols();
    int ldb = b.cols();
    int ldc = c.cols();
    cblas_sgemm(
        CblasRowMajor,
        ta ? CblasTrans : CblasNoTrans,
        tb ? CblasTrans : CblasNoTrans,
        m, n, k, alpha, a.ptr(), lda, b.ptr(), ldb, beta, c.ptr(), ldc
    );
    return c;
}
// Matrix inverse via LU factorization (LAPACK)
Matrix inverse(const Matrix& a){
    if(a.rows() != a.cols()){
        printf("Invalid to compute inverse matrix by %d x %d\n", a.rows(), a.cols());
        return Matrix();
    }

    Matrix output = a;
    int n = a.rows();
    int *ipiv = new int[n];

    /* LU factorization */
    int code = LAPACKE_sgetrf(LAPACK_COL_MAJOR, n, n, output.ptr(), n, ipiv);
    if(code == 0){
        /* Compute the inverse from the LU factors */
        code = LAPACKE_sgetri(LAPACK_COL_MAJOR, n, output.ptr(), n, ipiv);
    }
    delete[] ipiv;

    if(code != 0){
        printf("LAPACKE inverse matrix failed, code = %d\n", code);
        return Matrix();
    }
    return output;
}

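A quick sanity check for gemm() and inverse() (again my own sketch, relying only on what matrix.hpp declares). With ta/tb set, op(A)/op(B) are transposed, so the shapes must satisfy op(A): m x k and op(B): k x n:

    #include "matrix.hpp"

    int main(){
        Matrix A(2, 3, {1, 2, 3, 4, 5, 6});              // 2x3
        Matrix B(2, 3, {1, 0, 0, 0, 1, 0});              // 2x3
        std::cout << gemm(A, false, B, true);             // C = A * B^T -> 2x2
        Matrix M(2, 2, {4, 7, 2, 6});
        std::cout << gemm(M, false, inverse(M), false);   // should print the 2x2 identity
        return 0;
    }
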

autodiff.hpp

#ifndef AUTODIFF_HPP
#define AUTODIFF_HPP


#include <memory>
#include <vector>
#include <functional>
#include <random>
#include "matrix.hpp"

class ExpressionContainer{
public:
    virtual const char* type() = 0;
    virtual Matrix forward() = 0;
    virtual void backward(const Matrix& gradient) = 0;
    virtual bool requires_grad() const{return requires_grad_;}

protected:
    bool requires_grad_ = false;
};

class Parameter;
class Expression{
public:
    Expression(){}
    Expression(const Matrix& value);

    const char* type(){return data_ ? data_->type() : "nullptr";}
    Matrix forward();
    void backward();

    Expression view(const std::vector<int>& shape);
    Expression power();
    Expression gemm(const Expression& other);
    std::shared_ptr<ExpressionContainer> data() const{return data_;}
    std::vector<Parameter> params() const{return params_;}

protected:
    std::shared_ptr<ExpressionContainer> data_;
    std::vector<int> forward_output_shape_;
    std::vector<Parameter> params_;
};

class Parameter : public Expression{
    
    
public:
    Parameter();
    Parameter(float value);
    Parameter(const std::vector<float>& value);
    Parameter(const Matrix& value);
    const Matrix& gradient() const;
    const Matrix& value() const;
    Matrix& gradient();
    Matrix& value();
    operator Matrix&();
    operator const Matrix&()const;
    virtual bool requires_grad() const;
};

class Add : public Expression{
    
    
public:
    Add();
    Add& operator()(const Expression& left, const Expression& right);
};

class Sub : public Expression{
    
    
public:
    Sub();
    Sub& operator()(const Expression& left, const Expression& right);
};

class Multiply : public Expression{
    
    
public:
    Multiply();
    Multiply& operator()(const Expression& left, const Expression& right);
};

class View : public Expression{
    
    
public:
    View(const std::vector<int>& shape);
    View& operator()(const Expression& x);
};

class Power : public Expression{
    
    
public:
    Power();
    Power& operator()(const Expression& value);
};

class MatMul : public Expression{
    
    
public:
    MatMul();
    MatMul& operator()(const Expression& left, const Expression& right);
};

class Linear : public Expression{
    
    
public:
    Linear(int input, int output, bool bias=true);
    Linear& operator()(const Expression& x);

    Parameter& weight();
    Parameter& bias();
};

class Conv2d : public Expression{
    
    
public:
    Conv2d(int input, int output, int ksize, int stride, int padding, bool bias=true);
    Conv2d& operator()(const Expression& x);

    Parameter& weight();
    Parameter& bias();
};

class ReLU : public Expression{
    
    
public:
    ReLU();
    ReLU& operator()(const Expression& left);
};

class Sigmoid : public Expression{
    
    
public:
    Sigmoid();
    Sigmoid& operator()(const Expression& left);
};

class Log : public Expression{
    
    
public: 
    Log();
    Log& operator()(const Expression& left);
};

class SigmoidCrossEntropyLoss : public Expression{
    
    
public:
    SigmoidCrossEntropyLoss();
    SigmoidCrossEntropyLoss& operator()(const Expression& predict, const Expression& label);
};

Expression operator+(const Expression& a, const Expression& b);
Expression operator+(float a, const Expression& b);
Expression operator+(const Expression& a, float b);
Expression operator-(const Expression& a, const Expression& b);
Expression operator-(float a, const Expression& b);
Expression operator-(const Expression& a, float b);
Expression operator*(const Expression& a, const Expression& b);
Expression operator*(float a, const Expression& b);
Expression operator*(const Expression& a, float b);
std::default_random_engine& get_random_engine();
Matrix create_normal_distribution_matrix(const std::vector<int>& shape, float mean=0.0f, float stddev=1.0f);

#endif // AUTODIFF_HPP

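A small end-to-end sketch of how these pieces are meant to be wired together (my own example, not from the post; it assumes the richer Matrix interface that autodiff.cpp itself relies on, such as shape-list construction, shape() and fill_(), rather than the minimal matrix.hpp listed above):

    #include "autodiff.hpp"

    int main(){
        // logits = x * w ; loss = sigmoid cross entropy against a one-hot label
        Parameter w(create_normal_distribution_matrix({3, 1}));
        Expression x = create_normal_distribution_matrix({2, 3});   // constant input, no gradient
        Expression label = Matrix({2, 1}, {1.0f, 0.0f});            // labels for the 2 samples

        SigmoidCrossEntropyLoss lossfn;
        auto loss = lossfn(MatMul()(x, w), label);
        std::cout << loss.forward();   // scalar loss
        loss.backward();               // gradients accumulate into w
        std::cout << w.gradient();
        return 0;
    }
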
autodiff.cpp

#include "autodiff.hpp"
#include <stdio.h>
#include <string.h>
#include <cmath>
#include <random>
#include <iostream>

using namespace std;


default_random_engine& get_random_engine(){
    static default_random_engine global_random_engine;
    return global_random_engine;
}

Matrix create_normal_distribution_matrix(const vector<int>& shape, float mean, float stddev){
    normal_distribution<float> norm(mean, stddev);
    Matrix out(shape);
    auto& engine = get_random_engine();
    auto p = out.ptr();
    for(int i = 0; i < out.numel(); ++i)
        *p++ = norm(engine);
    return out;
}

class MatrixContainer : public ExpressionContainer{
public:
    MatrixContainer(const Matrix& value, bool requires_grad = false){
        value_ = value;
        requires_grad_ = requires_grad;
    }
    virtual const char* type() override{return "Matrix";}
    virtual Matrix forward() override{return value_;}
    virtual void backward(const Matrix& gradient) override{
        if(requires_grad_) gradient_ += gradient;
    }
    const Matrix& gradient() const{return gradient_;}
    const Matrix& value() const{return value_;}
    Matrix& gradient(){return gradient_;}
    Matrix& value(){return value_;}

private:
    Matrix value_;
    Matrix gradient_{{1, 1}, {0}};
};


class AddContainer : public ExpressionContainer{
    
    
public:
    void assign(const Expression& left, const Expression& right);
    virtual const char* type() override;
    virtual Matrix forward() override;
    virtual void backward(const Matrix& gradient) override;

private:
    std::shared_ptr<ExpressionContainer> left_;
    std::shared_ptr<ExpressionContainer> right_;
    int broadcast_ = 0;
};

class SubContainer : public ExpressionContainer{
    
    
public:
    void assign(const Expression& left, const Expression& right);
    virtual const char* type() override;
    virtual Matrix forward() override;
    virtual void backward(const Matrix& gradient) override;

private:
    std::shared_ptr<ExpressionContainer> left_;
    std::shared_ptr<ExpressionContainer> right_;
};

class MultiplyContainer : public ExpressionContainer{
    
    
public:
    void assign(const Expression& left, const Expression& right);
    virtual const char* type() override;
    virtual Matrix forward() override;
    virtual void backward(const Matrix& gradient) override;

private:
    Matrix left_save_;
    Matrix right_save_;
    std::shared_ptr<ExpressionContainer> left_;
    std::shared_ptr<ExpressionContainer> right_;
};

class ViewContainer : public ExpressionContainer{
    
    
public:
    ViewContainer(const vector<int>& shape);
    void assign(const Expression& value);
    virtual const char* type() override;
    virtual Matrix forward() override;
    virtual void backward(const Matrix& gradient) override;

private:
    std::vector<int> shape_, old_shape_;
    std::shared_ptr<ExpressionContainer> value_;
};

class PowerContainer : public ExpressionContainer{
    
    
public:
    void assign(const Expression& value);
    virtual const char* type() override;
    virtual Matrix forward() override;
    virtual void backward(const Matrix& gradient) override;

private:
    Matrix value_save_;
    std::shared_ptr<ExpressionContainer> value_;
};

class MatMulContainer : public ExpressionContainer{
    
    
public:
    virtual const char* type() override;
    virtual Matrix forward() override;
    virtual void backward(const Matrix& gradient) override;
    void assign(const Expression& left, const Expression& right);

private:
    Matrix left_save_;
    Matrix right_save_;
    std::shared_ptr<ExpressionContainer> left_;
    std::shared_ptr<ExpressionContainer> right_;
};

class LinearContainer : public ExpressionContainer{
    
    
public:
    LinearContainer(int input, int output, bool bias);
    virtual const char* type() override;
    virtual Matrix forward() override;
    virtual void backward(const Matrix& gradient) override;
    void assign(const Expression& x);

    Parameter& weight();
    Parameter& bias();

private:
    Matrix x_save_;
    std::shared_ptr<ExpressionContainer> x_;
    std::shared_ptr<Parameter> weight_, bias_;
    int input_ = 0;
    int output_ = 0;
    bool hasbias_ = true;
};

class Conv2dContainer : public ExpressionContainer{
    
    
public:
    Conv2dContainer(int input, int output, int ksize, int stride, int padding, bool bias);
    virtual const char* type() override;
    virtual Matrix forward() override;
    virtual void backward(const Matrix& gradient) override;
    void assign(const Expression& x);

    Parameter& weight();
    Parameter& bias();

private:
    std::vector<int> x_shape_;
    Matrix column_, output_save_, grad_save_;
    std::shared_ptr<ExpressionContainer> x_;
    std::shared_ptr<Parameter> weight_, bias_;
    int input_ = 0;
    int output_ = 0;
    int ksize_ = 0;
    int stride_ = 0;
    int padding_ = 0;
    bool hasbias_ = true;
    int oh_ = 0, ow_ = 0;
};

class ReLUContainer : public ExpressionContainer{
    
    
public:
    void assign(const Expression& left);
    virtual const char* type() override;
    virtual Matrix forward() override;
    virtual void backward(const Matrix& gradient) override;

private:
    Matrix save_forward_;
    std::shared_ptr<ExpressionContainer> left_;
};

class SigmoidContainer : public ExpressionContainer{
    
    
public:
    void assign(const Expression& left);
    virtual const char* type() override;
    virtual Matrix forward() override;
    virtual void backward(const Matrix& gradient) override;

private:
    Matrix save_forward_;
    std::shared_ptr<ExpressionContainer> left_;
};

class LogContainer : public ExpressionContainer{
    
    
public:
    void assign(const Expression& left);
    virtual const char* type() override;
    virtual Matrix forward() override;
    virtual void backward(const Matrix& gradient) override;

private:
    Matrix save_forward_;
    std::shared_ptr<ExpressionContainer> left_;
};

class SigmoidCrossEntropyLossContainer : public ExpressionContainer{
    
    
public:
    void assign(const Expression& predict, const Expression& label);
    virtual const char* type() override;
    virtual Matrix forward() override;
    virtual void backward(const Matrix& gradient) override;

private:
    Matrix save_gradient_;
    std::shared_ptr<ExpressionContainer> predict_;
    std::shared_ptr<ExpressionContainer> label_;
};



Parameter::Parameter(){
    data_.reset(new MatrixContainer(Matrix(), true));
}

Parameter::Parameter(float value){
    data_.reset(new MatrixContainer(Matrix({1, 1}, {value}), true));
}

Parameter::Parameter(const vector<float>& value){
    data_.reset(new MatrixContainer(Matrix({(int)value.size(), 1}, value), true));
}

Parameter::Parameter(const Matrix& value){
    data_.reset(new MatrixContainer(value, true));
}

bool Parameter::requires_grad() const{
    
    
    return data_->requires_grad();
}

const Matrix& Parameter::gradient() const{
    
    
    MatrixContainer* ptr = (MatrixContainer*)data_.get();
    return ptr->gradient();
}

const Matrix& Parameter::value() const{
    
    
    MatrixContainer* ptr = (MatrixContainer*)data_.get();
    return ptr->value();
}

Matrix& Parameter::gradient(){
    
    
    MatrixContainer* ptr = (MatrixContainer*)data_.get();
    return ptr->gradient();
}

Parameter::operator Matrix&(){
    
    
    MatrixContainer* ptr = (MatrixContainer*)data_.get();
    return ptr->value();
}

Parameter::operator const Matrix&()const{
    
    
    MatrixContainer* ptr = (MatrixContainer*)data_.get();
    return ptr->value();
}

Matrix& Parameter::value(){
    
    
    MatrixContainer* ptr = (MatrixContainer*)data_.get();
    return ptr->value();
}



void AddContainer::assign(const Expression& left, const Expression& right){
    
    
    left_ = left.data();
    right_ = right.data();
    requires_grad_ = left_->requires_grad() || right_->requires_grad();
}

const char* AddContainer::type(){
    
    
    return "Add";
}

Matrix AddContainer::forward(){
    auto lm = left_->forward();
    auto rm = right_->forward();
    Matrix* pa, *pb;
    tie(pa, pb, broadcast_) = Matrix::check_broadcast(&lm, &rm);
    // broadcast_ codes: 0 no broadcast, 1 right side is a column vector, 2 right side is a
    // row vector, 3 right side is a scalar; +4 when the left side is the broadcast one (4..7).
    if(pa != &lm) broadcast_ += 4;
    return lm + rm;
}

void AddContainer::backward(const Matrix& gradient){
    if(!requires_grad_) return;

    Matrix g = gradient;
    Matrix reduce = gradient;
    if(broadcast_ == 1 || broadcast_ == 5){
        // column vector: sum the gradient over columns
        reduce = g.reduce_sum_by_col();
    }else if(broadcast_ == 2 || broadcast_ == 6){
        // row vector: sum the gradient over rows
        reduce = g.reduce_sum_by_row();
    }else if(broadcast_ == 3 || broadcast_ == 7){
        // scalar: sum everything
        reduce = g.reduce_sum_all();
    }

    if(broadcast_ > 3)
        std::swap(g, reduce);
    
    if(left_->requires_grad())
        left_->backward(g);

    if(right_->requires_grad())
        right_->backward(reduce);
}

Add::Add(){
    
    
    data_.reset(new AddContainer());
}

Add& Add::operator()(const Expression& left, const Expression& right){
    
    
    std::dynamic_pointer_cast<AddContainer>(data_)->assign(left, right);
    return *this;
}



void SubContainer::assign(const Expression& left, const Expression& right){
    
    
    left_ = left.data();
    right_ = right.data();
    requires_grad_ = left_->requires_grad() || right_->requires_grad();
}

const char* SubContainer::type(){
    
    
    return "Sub";
}

Matrix SubContainer::forward(){
    
    
    return left_->forward() - right_->forward();
}

void SubContainer::backward(const Matrix& gradient){
    
    

    if(left_->requires_grad())
        left_->backward(gradient);
    
    if(right_->requires_grad())
        right_->backward(-gradient);
}

Sub::Sub(){
    
    
    data_.reset(new SubContainer());
}

Sub& Sub::operator()(const Expression& left, const Expression& right){
    
    
    std::dynamic_pointer_cast<SubContainer>(data_)->assign(left, right);
    return *this;
}



void MultiplyContainer::assign(const Expression& left, const Expression& right){
    
    
    left_ = left.data();
    right_ = right.data();
    requires_grad_ = left_->requires_grad() || right_->requires_grad();
}

const char* MultiplyContainer::type(){
    
    
    return "Multiply";
}

Matrix MultiplyContainer::forward(){
    
    
    left_save_  = left_->forward();
    right_save_ = right_->forward();
    return left_save_ * right_save_;
}

void MultiplyContainer::backward(const Matrix& gradient){
    
    

    if(left_->requires_grad())
        left_->backward(right_save_ * gradient);

    if(right_->requires_grad())
        right_->backward(left_save_ * gradient);
}

Multiply::Multiply(){
    
    
    data_.reset(new MultiplyContainer());
}

Multiply& Multiply::operator()(const Expression& left, const Expression& right){
    
    
    std::dynamic_pointer_cast<MultiplyContainer>(data_)->assign(left, right);
    return *this;
}


ViewContainer::ViewContainer(const vector<int>& shape){
    
    
    shape_ = shape;
}

void ViewContainer::assign(const Expression& value){
    
    
    value_ = value.data();
    requires_grad_ = value_->requires_grad();
}

const char* ViewContainer::type(){
    
    
    return "View";
}

Matrix ViewContainer::forward(){
    
    

    auto x = value_->forward();
    old_shape_ = x.shape();
    return x.view(shape_);
}

void ViewContainer::backward(const Matrix& gradient){
    
    

    if(!requires_grad_) return;
    value_->backward(gradient.view(old_shape_));
}

View::View(const vector<int>& shape){
    
    
    data_.reset(new ViewContainer(shape));
}

View& View::operator()(const Expression& value){
    
    
    std::dynamic_pointer_cast<ViewContainer>(data_)->assign(value);
    return *this;
}



void PowerContainer::assign(const Expression& value){
    
    
    value_ = value.data();
    requires_grad_ = value_->requires_grad();
}

const char* PowerContainer::type(){
    
    
    return "Power";
}

Matrix PowerContainer::forward(){
    
    
    value_save_ = value_->forward();
    return value_save_.power(2.0f);
}

void PowerContainer::backward(const Matrix& gradient){
    
    

    if(!requires_grad_) return;
    value_->backward(value_save_ * gradient * 2.0f);
}

Power::Power(){
    
    
    data_.reset(new PowerContainer());
}

Power& Power::operator()(const Expression& value){
    
    
    std::dynamic_pointer_cast<PowerContainer>(data_)->assign(value);
    return *this;
}



void MatMulContainer::assign(const Expression& left, const Expression& right){
    
    
    left_  = left.data();
    right_ = right.data();
    requires_grad_ = left_->requires_grad() || right_->requires_grad();
}

const char* MatMulContainer::type(){
    
    
    return "MatMul";
}

Matrix MatMulContainer::forward(){
    
    
    left_save_  = left_->forward();
    right_save_ = right_->forward();
    return left_save_.gemm(right_save_);
}

void MatMulContainer::backward(const Matrix& gradient){
    
    

    if(left_->requires_grad())
        left_->backward(gradient.gemm(right_save_, false, true));
    
    if(right_->requires_grad())
        right_->backward(left_save_.gemm(gradient, true));
}

MatMul::MatMul(){
    
    
    data_.reset(new MatMulContainer());
}

MatMul& MatMul::operator()(const Expression& left, const Expression& right){
    
    
    std::dynamic_pointer_cast<MatMulContainer>(data_)->assign(left, right);
    return *this;
}


LinearContainer::LinearContainer(int input, int output, bool bias){
    input_ = input;
    output_ = output;
    hasbias_ = bias;

    // Initialization scale ~ 1 / sqrt(fan_in + fan_out)
    float fan_in_fan_out = 1.0f / sqrt((float)(input_ + output_));
    weight_.reset(new Parameter(create_normal_distribution_matrix({input_, output_}, 0, fan_in_fan_out)));
    if(hasbias_) bias_.reset(new Parameter(Matrix({1, output_})));
}

Parameter& LinearContainer::weight(){
    
    
    return *weight_.get();
}

Parameter& LinearContainer::bias(){
    
    
    return *bias_.get();
}

void LinearContainer::assign(const Expression& x){
    
    
    x_ = x.data();
    requires_grad_ = true;
}

const char* LinearContainer::type(){
    
    
    return "Linear";
}

Matrix LinearContainer::forward(){
    
    
    x_save_  = x_->forward();
    auto x = x_save_.gemm(weight_->value());
    if(hasbias_)
        x += bias_->value();
    return x;
}

void LinearContainer::backward(const Matrix& gradient){
    
    

    if(x_->requires_grad())
        x_->backward(gradient.gemm(weight_->value(), false, true));
    
    if(weight_->requires_grad())
        weight_->data()->backward(x_save_.gemm(gradient, true));

    if(hasbias_ && bias_->requires_grad())
        bias_->data()->backward(gradient.reduce_sum_by_row());
}

Parameter& Linear::weight(){
    
    
    return std::dynamic_pointer_cast<LinearContainer>(data_)->weight();
}

Parameter& Linear::bias(){
    
    
    return std::dynamic_pointer_cast<LinearContainer>(data_)->bias();
}

Linear::Linear(int input, int output, bool bias){
    data_.reset(new LinearContainer(input, output, bias));
    params_ = {this->weight(), this->bias()};
}

Linear& Linear::operator()(const Expression& x){
    
    
    std::dynamic_pointer_cast<LinearContainer>(data_)->assign(x);
    return *this;
}


Conv2dContainer::Conv2dContainer(int input, int output, int ksize, int stride, int padding, bool bias){
    input_ = input;
    output_ = output;
    ksize_ = ksize;
    stride_ = stride;
    padding_ = padding;
    hasbias_ = bias;

    float fan_in_fan_out = 2.0f / sqrt((float)(input_ + output_));
    weight_.reset(new Parameter(create_normal_distribution_matrix({output_, input_, ksize_, ksize_}, 0, fan_in_fan_out)));
    if(hasbias_) bias_.reset(new Parameter(
        create_normal_distribution_matrix({1, output_}, 0, 1)
    ));
}

Parameter& Conv2dContainer::weight(){
    
    
    return *weight_.get();
}

Parameter& Conv2dContainer::bias(){
    
    
    return *bias_.get();
}

void Conv2dContainer::assign(const Expression& x){
    
    
    x_ = x.data();
    requires_grad_ = true;
}

const char* Conv2dContainer::type(){
    
    
    return "Conv2d";
}

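// forward() below implements convolution via im2col + gemm:
//  - for each image in the batch, every ksize x ksize patch (per input channel) is copied
//    into one column of column_, giving an (input*ksize*ksize) x (oh*ow) matrix;
//  - the weights, viewed as (output) x (input*ksize*ksize), are multiplied by that column
//    matrix with gemm, producing the (output) x (oh*ow) feature map;
//  - the per-channel bias, if present, is then added.
// Out-of-image positions (padding) are simply skipped and rely on column_ being
// zero-initialized by resize().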
Matrix Conv2dContainer::forward(){
    
    

    auto xval = x_->forward();
    x_shape_  = xval.shape();

    int xb, xc, xh, xw;
    xb = x_shape_[0];
    xc = x_shape_[1];
    xh = x_shape_[2];
    xw = x_shape_[3];

    ow_ = (x_shape_[3] + padding_ * 2 - ksize_) / stride_ + 1;
    oh_ = (x_shape_[2] + padding_ * 2 - ksize_) / stride_ + 1;
    int col_w = ow_ * oh_;
    int col_h = ksize_ * ksize_ * input_;
    column_.resize({xb, col_h, col_w});
    output_save_.resize({xb, output_, oh_, ow_});

    for(int ib = 0; ib < xb; ++ib){
    
    
        auto subcolumn = column_.reference_d0(ib);
        for(int ic = 0; ic < xc; ++ic){
    
    
            for(int oy = 0; oy < oh_; ++oy){
    
    
                for(int ox = 0; ox < ow_; ++ox){
    
    
                    int column_x = ox + oy * ow_;
                    for(int ky = 0; ky < ksize_; ++ky){
    
    
                        int column_idx =  (ky + ic * ksize_) * ksize_;
                        int iy = oy * stride_ + ky - padding_;
                        if(iy < 0 || iy >= xh) continue;

                        for(int kx = 0; kx < ksize_; ++kx){
    
    
                            int column_y = column_idx + kx;
                            int ix = ox * stride_ + kx - padding_;
                            if(ix < 0 || ix >= xw) continue;
                            subcolumn(column_y, column_x) = xval(ib, ic, iy, ix);
                        }
                    }
                }
            }
        }

        auto bout  = weight_->value().view({output_, -1}).gemm(subcolumn).view({output_, oh_, ow_});
        auto pout  = bout.ptr();
        auto osptr = output_save_.ptr(ib);

        if(hasbias_){
    
    
            auto pbias = this->bias_->value().ptr();
            for(int ot = 0; ot < output_; ++ot, ++pbias){
    
    
                for(int j = 0; j < ow_ * oh_; ++j)
                    *osptr++ = *pout++ + *pbias;
            }
        }else{
    
    
            memcpy(osptr, pout, sizeof(float) * output_ * oh_ * ow_);
        }
    }
    return output_save_;
}

void Conv2dContainer::backward(const Matrix& gradient){
    
    

    int xb, xc, xh, xw;
    xb = x_shape_[0];
    xc = x_shape_[1];
    xh = x_shape_[2];
    xw = x_shape_[3];

    if(weight_->requires_grad()){
    
    
        for(int ib = 0; ib < xb; ++ib){
    
    
            auto subcolumn = column_.reference_d0(ib);
            Matrix g_ib = gradient.reference_d0(ib).view({output_, -1});
            auto grad = g_ib.gemm(subcolumn, false, true).view(weight_->value().shape());
            weight_->data()->backward(grad);
        }
    }

    if(hasbias_ && bias_->requires_grad()){
    
    
        
        Matrix bias_grad({output_});
        auto po = bias_grad.ptr();
        auto pg = gradient.ptr();
        size_t area = gradient.count_of(2);
        for(int i = 0; i < gradient.size(0); ++i){
    
    
            for(int ic = 0; ic < gradient.size(1); ++ic){
    
    
                auto& bias_value = po[ic];
                for(int j = 0; j < area; ++j)
                    bias_value += *pg++;
            }
        }
        bias_->data()->backward(bias_grad);
    }

    if(x_->requires_grad()){
    
    

        grad_save_.resize(x_shape_);
        grad_save_.fill_zero_();
        
        auto kcol = this->weight().value().view({output_, -1});
        for(int ib = 0; ib < xb; ++ib){
    
    

            Matrix g_ib = gradient.reference_d0(ib).view({output_, -1});
            auto dcolumn = kcol.gemm(g_ib, true);

            for(int ic = 0; ic < xc; ++ic){
    
    
                for(int oy = 0; oy < oh_; ++oy){
    
    
                    for(int ox = 0; ox < ow_; ++ox){
    
    
                        int column_x = ox + oy * ow_;
                        for(int ky = 0; ky < ksize_; ++ky){
    
    
                            int column_idx =  (ky + ic * ksize_) * ksize_;
                            int iy = oy * stride_ + ky - padding_;
                            if(iy < 0 || iy >= xh) continue;

                            for(int kx = 0; kx < ksize_; ++kx){
    
    
                                int column_y = column_idx + kx;
                                int ix = ox * stride_ + kx - padding_;

                                if(ix < 0 || ix >= xw) continue;
                                grad_save_(ib, ic, iy, ix) += dcolumn(column_y, column_x);
                            }
                        }
                    }
                }
            }
        }
        x_->backward(grad_save_);
    }
}

Parameter& Conv2d::weight(){
    
    
    return std::dynamic_pointer_cast<Conv2dContainer>(data_)->weight();
}

Parameter& Conv2d::bias(){
    
    
    return std::dynamic_pointer_cast<Conv2dContainer>(data_)->bias();
}

Conv2d::Conv2d(int input, int output, int ksize, int stride, int padding, bool bias){
    data_.reset(new Conv2dContainer(input, output, ksize, stride, padding, bias));
    params_ = {this->weight(), this->bias()};
}

Conv2d& Conv2d::operator()(const Expression& x){
    
    
    std::dynamic_pointer_cast<Conv2dContainer>(data_)->assign(x);
    return *this;
}



void ReLUContainer::assign(const Expression& left){
    
    
    left_ = left.data();
    requires_grad_ = left_->requires_grad();
}

const char* ReLUContainer::type(){
    
    
    return "ReLU";
}

Matrix ReLUContainer::forward(){
    
    

    save_forward_ = left_->forward().copy();

    auto p = save_forward_.ptr();
    for(int i = 0; i < save_forward_.numel(); ++i, ++p)
        *p = std::max(0.0f, *p);
    return save_forward_;
}

void ReLUContainer::backward(const Matrix& gradient){
    
    

    if(!requires_grad_) return;
    Matrix out = gradient.copy();
    auto psave = save_forward_.ptr();
    auto pout  = out.ptr();
    for(int i = 0; i < save_forward_.numel(); ++i){
    
    
        if(psave[i] <= 0)
            pout[i] = 0;
    }
    left_->backward(out);
}

ReLU::ReLU(){
    
    
    data_.reset(new ReLUContainer());
}

ReLU& ReLU::operator()(const Expression& left){
    
    
    std::dynamic_pointer_cast<ReLUContainer>(data_)->assign(left);
    return *this;
}



void SigmoidContainer::assign(const Expression& left){
    
    
    left_ = left.data();
    requires_grad_ = left_->requires_grad();
}

Matrix SigmoidContainer::forward(){
    
    

    save_forward_ = left_->forward().copy();
    for(int i = 0; i < save_forward_.numel(); ++i){
    
    
        auto& x = save_forward_.ptr()[i];
        // if(x < 0){
    
    
        //     x = exp(x) / (1.0f + exp(x));
        // }else{
    
    
        //     x = 1.0f / (1.0f + exp(-x));
        // }
        x = 1.0f / (1.0f + exp(-x));
    }
    return save_forward_;
}

void SigmoidContainer::backward(const Matrix& gradient){
    
    

    if(!requires_grad_) return;
    Matrix out = gradient.copy();
    for(int i = 0; i < save_forward_.numel(); ++i){
    
    
        auto& x = save_forward_.ptr()[i];
        out.ptr()[i] *= x * (1 - x);
    }
    left_->backward(out);
}

const char* SigmoidContainer::type(){
    
    
    return "Sigmoid";
}

Sigmoid::Sigmoid(){
    
    
    data_.reset(new SigmoidContainer());
}

Sigmoid& Sigmoid::operator()(const Expression& left){
    
    
    std::dynamic_pointer_cast<SigmoidContainer>(data_)->assign(left);
    return *this;
}



void LogContainer::assign(const Expression& left){
    
    
    left_ = left.data();
    requires_grad_ = left_->requires_grad();
}

Matrix LogContainer::forward(){
    
    

    save_forward_ = left_->forward().copy();
    for(int i = 0; i < save_forward_.numel(); ++i){
    
    
        auto& x = save_forward_.ptr()[i];
        x = log(x);
    }
    return save_forward_;
}

void LogContainer::backward(const Matrix& gradient){
    
    

    if(!requires_grad_) return;

    Matrix out = gradient.copy();
    for(int i = 0; i < save_forward_.numel(); ++i){
    
    
        auto& x = save_forward_.ptr()[i];
        out.ptr()[i] /= x;
    }
    left_->backward(out);
}

const char* LogContainer::type(){
    
    
    return "Log";
}

Log::Log(){
    
    
    data_.reset(new LogContainer());
}

Log& Log::operator()(const Expression& left){
    
    
    std::dynamic_pointer_cast<LogContainer>(data_)->assign(left);
    return *this;
}



void SigmoidCrossEntropyLossContainer::assign(const Expression& predict, const Expression& label){
    
    
    predict_ = predict.data();
    label_ = label.data();
    requires_grad_ = predict_->requires_grad() || label_->requires_grad();
}

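// Sigmoid cross-entropy: with p = sigmoid(z) and one-hot label y,
//   loss = -( y*log(p) + (1-y)*log(1-p) ), averaged over the batch (size(0)).
// Its gradient with respect to the logit z is simply (p - y) / batch_size, which is what
// forward() caches in save_gradient_ and backward() multiplies by the incoming gradient.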
Matrix SigmoidCrossEntropyLossContainer::forward(){
    auto fprob = predict_->forward().sigmoid();
    auto flabel = label_->forward();

    if(requires_grad_)
        save_gradient_ = (fprob - flabel) * (1.0f / fprob.size(0));
    
    float eps = 1e-5;
    float sum_loss  = 0;
    auto pred_ptr   = fprob.ptr();
    auto onehot_ptr = flabel.ptr();
    int numel       = fprob.numel();
    for(int i = 0; i < numel; ++i, ++pred_ptr, ++onehot_ptr){
        auto y = *onehot_ptr;
        auto p = *pred_ptr;
        p = max(min(p, 1 - eps), eps);   // clamp to avoid log(0)
        sum_loss += -(y * log(p) + (1 - y) * log(1 - p));
    }
    return Matrix({1, 1}, {sum_loss / fprob.size(0)});
}

void SigmoidCrossEntropyLossContainer::backward(const Matrix& gradient){
    
    

    if(!requires_grad_) return;
    predict_->backward(save_gradient_ * gradient);
}

const char* SigmoidCrossEntropyLossContainer::type(){
    
    
    return "SigmoidCrossEntropyLoss";
}

SigmoidCrossEntropyLoss::SigmoidCrossEntropyLoss(){
    
    
    data_.reset(new SigmoidCrossEntropyLossContainer());
}

SigmoidCrossEntropyLoss& SigmoidCrossEntropyLoss::operator()(const Expression& predict, const Expression& label){
    
    
    std::dynamic_pointer_cast<SigmoidCrossEntropyLossContainer>(data_)->assign(predict, label);
    return *this;
}



Expression operator+(const Expression& a, const Expression& b){
    return Add()(a, b);
}

Expression operator+(float a, const Expression& b){
    return Add()(Matrix({1, 1}, {a}), b);
}

Expression operator+(const Expression& a, float b){
    return Add()(a, Matrix({1, 1}, {b}));
}

Expression operator-(const Expression& a, const Expression& b){
    return Sub()(a, b);
}

Expression operator-(float a, const Expression& b){
    return Sub()(Matrix({1, 1}, {a}), b);
}

Expression operator-(const Expression& a, float b){
    return Sub()(a, Matrix({1, 1}, {b}));
}

Expression operator*(const Expression& a, const Expression& b){
    return Multiply()(a, b);
}

Expression operator*(float a, const Expression& b){
    return Multiply()(Matrix({1, 1}, {a}), b);
}

Expression operator*(const Expression& a, float b){
    return Multiply()(a, Matrix({1, 1}, {b}));
}

Expression Expression::view(const std::vector<int>& shape){
    
    
    return View(shape)(*this);
}

Expression Expression::power(){
    
    
    return Power()(*this);
}

Expression Expression::gemm(const Expression& other){
    
    
    return MatMul()(*this, other);
}

Expression::Expression(const Matrix& value){
    
    
    data_.reset(new MatrixContainer(value, false));
}

Matrix Expression::forward(){
    
    
    auto output = data_->forward();
    forward_output_shape_ = output.shape();
    return output;
}

void Expression::backward(){
    
    

    Matrix grad(forward_output_shape_);
    grad.fill_(1);
    data_->backward(grad);
}

main.cpp

//#include <iostream>
//#include "matrix.hpp"
//int main(){
//    Matrix m1(3,3,{1,2,3,4,5,6,7,8,9});
//    Matrix m2(3,3,{1,2,3,4,5,6,7,8,9});
//    // std::cout<<m.cols()<<std::endl<<m.rows()<<std::endl;
//    // auto data = m.data();
//    // for(int i=0; i<data.size();++i){
//    //     std::cout<<data[i]<<std::endl;
//    // }
//    // m(2,0) = 123.5;
//    // std::cout<<"The data section:"<<std::endl;
//    // for(int ir = 0; ir < m.rows(); ++ir){
//    //     for(int ic = 0; ic < m.cols(); ++ic){
//    //         std::cout  << m(ir,ic) << "\t";
//    //     }
//    //     std::cout<< "\n";
//    // }
//    // The global operator<< lives in the global scope
//    std::cout<<m1*m2;
//    // std::cout<<gemm(m1,false,m2,false,1.0f,0.0f);
//    return 0;
//}

#include "autodiff.hpp"
#include <vector>
#include <string>
#include <iostream>
#include <fstream>
#include <cmath>
#include <tuple>
#include <iomanip>
#include <stdarg.h>
#include <memory.h>
#include <random>
#include <algorithm>
#include <chrono>

using namespace std;

namespace Application{
    
    

    static default_random_engine global_random_engine;

    namespace logger{
    
    

        #define INFO(...)  Application::logger::__printf(__FILE__, __LINE__, __VA_ARGS__)

        void __printf(const char* file, int line, const char* fmt, ...){
    
    

            va_list vl;
            va_start(vl, fmt);

            // ANSI color codes:
            // None   = 0,     // no color
            // Black  = 30,
            // Red    = 31,
            // Green  = 32,
            // Yellow = 33,
            // Blue   = 34,
            // Rosein = 35,    // magenta
            // Cyan   = 36,
            // White  = 37
            /* Format: \e[<color>m<text>\e[0m */
            printf("\e[32m[%s:%d]:\e[0m ", file, line);
            vprintf(fmt, vl);
            printf("\n");
        }
    };

    namespace io{
    
    
        // __attribute__((packed)) disables struct padding / byte alignment
        struct __attribute__((packed)) mnist_labels_header_t{
    
    
            unsigned int magic_number;
            unsigned int number_of_items;
        };

        struct __attribute__((packed)) mnist_images_header_t{
    
    
            unsigned int magic_number;
            unsigned int number_of_images;
            unsigned int number_of_rows;
            unsigned int number_of_columns;
        };

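        // The integers in the MNIST file headers are stored big-endian; swap the bytes
        // so they read correctly on a little-endian host.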
        unsigned int inverse_byte(unsigned int v){
    
    
            unsigned char* p = (unsigned char*)&v;
            std::swap(p[0], p[3]);
            std::swap(p[1], p[2]);
            return v;
        }

        /* Load the MNIST dataset */
        tuple<Matrix, Matrix> load_data(const string& image_file, const string& label_file){
    
    

            Matrix images, labels;
            /*
             fstream fimage(image_file, ios::binary | ios::in) creates a file stream named
             "fimage" bound to image_file, opened in binary mode (bytes are read raw, with
             no text translation) and for input (reading) only.
            */
            fstream fimage(image_file, ios::binary | ios::in);
            fstream flabel(label_file, ios::binary | ios::in);

            mnist_images_header_t images_header;
            mnist_labels_header_t labels_header;
            fimage.read((char*)&images_header, sizeof(images_header));
            flabel.read((char*)&labels_header, sizeof(labels_header));

            images_header.number_of_images = inverse_byte(images_header.number_of_images);
            labels_header.number_of_items  = inverse_byte(labels_header.number_of_items);

            images.resize({(int)images_header.number_of_images, 1, 28, 28});
            labels.resize({(int)labels_header.number_of_items, 10});

            std::vector<unsigned char> buffer(images.numel());
            fimage.read((char*)buffer.data(), buffer.size());

            for(int i = 0; i < buffer.size(); ++i)
                images.ptr()[i] = (buffer[i] / 255.0f - 0.1307f) / 0.3081f;
                //images.ptr()[i] = (buffer[i] - 127.5f) / 127.5f;

            buffer.resize(labels.size(0));
            flabel.read((char*)buffer.data(), buffer.size());
            for(int i = 0; i < buffer.size(); ++i)
                labels.ptr(i)[buffer[i]] = 1;   // onehot
            return make_tuple(images, labels);
        }

        void print_image(const float* ptr, int rows, int cols){
    
    

            for(int i = 0;i < rows * cols; ++i){
    
    

                //int pixel = ptr[i] * 127.5 + 127.5;
                int pixel = (ptr[i] * 0.3081f + 0.1307f) * 255.0f;
                if(pixel < 20)
                    printf("--- ");
                else
                    printf("%03d ", pixel);

                if((i + 1) % cols == 0)
                    printf("\n");
            }
        }

        bool save_model(const string& file, const vector<Parameter>& params){
    
    

            ofstream out(file, ios::binary | ios::out);
            if(!out.is_open()){
    
    
                INFO("Open %s failed.", file.c_str());
                return false;
            }

            unsigned int header_file[] = {0x3355FF11, (unsigned int)params.size()};
            out.write((char*)header_file, sizeof(header_file));

            for(auto& p : params){
    
    
                auto& m = p.value();
                m.save(out);
            }
            return out.good();
        }

        bool load_model(const string& file, vector<Parameter>& params){
    
    

            ifstream in(file, ios::binary | ios::in);
            if(!in.is_open()){
    
    
                INFO("Open %s failed.", file.c_str());
                return false;
            }

            unsigned int header_file[2];
            in.read((char*)header_file, sizeof(header_file));

            if(header_file[0] != 0x3355FF11){
    
    
                INFO("Invalid params file: %s", file.c_str());
                return false;
            }

            params.resize(header_file[1]);
            for(int i = 0; i < params.size(); ++i){
    
    
                auto& m = params[i].value();
                m.load(in);
            }
            return in.good();
        }
    };

    namespace tools{
    
    

        vector<int> range(int end){
    
    
            vector<int> out(end);
            for(int i = 0; i < end; ++i)
                out[i] = i;
            return out;
        }

        double timenow(){
    
    
            return chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
        }
    };

    namespace nn{
    
    

        class Model{
    
    
        public:
            Model(): 
                conv1_(1, 6, 5, 2, 2),    // 28*28*1 -> 14*14*6
                conv2_(6, 16, 3, 2, 1),   // 14*14*6 -> 7*7*16
                conv3_(16, 16, 3, 1, 0),  // 7*7*16  -> 5*5*16 = 400
                fc1_(400, 80),
                fc2_(80, 10)
            {
    
    
                params_ = {
                    conv1_.weight(), conv1_.bias(), 
                    conv2_.weight(), conv2_.bias(), 
                    conv3_.weight(), conv3_.bias(), 
                    fc1_.weight(), fc1_.bias(),
                    fc2_.weight(), fc2_.bias()
                };
            }

            Expression operator()(Expression x){
                x = relu1_(conv1_(x));
                x = relu2_(conv2_(x));
                x = relu3_(conv3_(x));
                x = x.view({-1, 400});
                x = relu4_(fc1_(x));
                x = fc2_(x);
                return x;
            }

            vector<Parameter>& params(){
    
    
                return params_;
            }

        private:
            Conv2d conv1_, conv2_, conv3_;
            Linear fc1_, fc2_;
            ReLU relu1_, relu2_, relu3_, relu4_;
            vector<Parameter> params_;
        };

        float eval_test_accuracy(const Matrix& probability, const Matrix& labels){
    
    

            int success = 0;
            for(int i = 0; i < probability.size(0); ++i){
    
    
                auto row_ptr = probability.ptr(i);
                int predict_label = std::max_element(row_ptr, row_ptr + probability.size(1)) - row_ptr;
                if(labels.ptr(i)[predict_label] == 1)
                    success++;
            }
            return success / (float)probability.size(0);
        }
    };

    namespace optimizer{
    
    

        class Optimizer{
    
    
        public:
            virtual void step() = 0;
            virtual void zero_grad(){
    
    
                for(auto& param : params_)
                    param.gradient().fill_(0);
            }

            virtual float lr() const{return lr_;}
            virtual void set_lr(float newlr){lr_ = newlr;}

        protected:
            vector<Parameter> params_;
            float lr_ = 0;
        };

        class SGDMomentum : public Optimizer{
    
    
        public:
            SGDMomentum(const vector<Parameter>& params, float lr, float momentum=0.9, float l2_regularization=0){
    
    
                params_   = params;
                lr_       = lr;
                momentum_ = momentum;
                delta_momentums_.resize(params_.size());
                l2_regularization_ = l2_regularization;
            }

            // Update each parameter from its gradient (SGD with momentum)
            void step(){
    
    

                for(int i =0 ; i < params_.size(); ++i){
    
    
                    auto& delta_momentum = delta_momentums_[i];
                    auto& param          = params_[i].value();
                    auto& grad           = params_[i].gradient();

                    if(delta_momentum.empty())
                        delta_momentum.resize(param.shape());
                    
                    delta_momentum = momentum_ * delta_momentum - lr_ * grad;
                    if(l2_regularization_ != 0)
                        delta_momentum -= l2_regularization_ * param;
                    
                    param += delta_momentum;
                }
            }
        
        private:
            vector<Matrix> delta_momentums_;
            float momentum_ = 0;
            float l2_regularization_ = 0;
        };

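        // AdamW: m and v are exponential moving averages of the gradient and of its square;
        // dividing by (1 - beta^t) corrects their bias toward zero during the first steps.
        // The l2_regularization term is applied directly to the parameter (decoupled weight
        // decay), which is what distinguishes AdamW from plain Adam.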
        class AdamW : public Optimizer{
    
    
        public:
            AdamW(const vector<Parameter>& params, float lr=1e-3, float beta1=0.9, float beta2=0.999, float l2_regularization=0){
    
    
                params_   = params;
                lr_       = lr;
                beta1_    = beta1;
                beta2_    = beta2;
                l2_regularization_ = l2_regularization;
                m_.resize(params_.size());
                v_.resize(params_.size());
            }

            void step(){
    
    
                
                t_ ++;
                for(int i = 0; i < params_.size(); ++i){
    
    
                    auto& m      = m_[i];
                    auto& v      = v_[i];
                    auto& param  = params_[i].value();
                    auto& grad   = params_[i].gradient();

                    if(m.empty())
                        m.resize(param.shape());

                    if(v.empty())
                        v.resize(param.shape());
                    
                    m = beta1_ * m + (1 - beta1_) * grad;
                    v = beta2_ * v + (1 - beta2_) * grad.power(2.0f);
                    auto mt = m / (1 - std::pow(beta1_, t_));
                    auto vt = v / (1 - std::pow(beta2_, t_));
                    param -= lr_ * mt / (vt.sqrt() + eps_) + l2_regularization_ * param;
                }
            }

        private:
            vector<Matrix> m_, v_;
            float t_ = 0;
            float eps_ = 1e-7;
            float momentum_ = 0;
            float beta1_ = 0;
            float beta2_ = 0;
            float l2_regularization_ = 0;
        };
    };
    
    int run(){
    
    

        Matrix trainimages, trainlabels;
        Matrix valimage, vallabels;
        tie(trainimages, trainlabels) = io::load_data("mnist/train-images.idx3-ubyte", "mnist/train-labels.idx1-ubyte");
        tie(valimage, vallabels)      = io::load_data("mnist/t10k-images.idx3-ubyte",  "mnist/t10k-labels.idx1-ubyte");
        
        int num_images  = trainimages.size(0);
        int num_output  = 10;
        int num_epoch   = 10;
        float lr        = 1e-2;
        int batch_size  = 128;
        float momentum  = 0.9f;
        int num_batch_per_epoch = num_images / batch_size;
        auto image_indexs       = tools::range(num_images);

        nn::Model model;
        //optimizer::SGDMomentum optim(model.params(), lr, momentum);
        optimizer::AdamW optim(model.params(), lr, 0.9, 0.99, 1e-5);

        int total_batch = 0;
        auto t0 = tools::timenow();
        SigmoidCrossEntropyLoss lossfn;
        for(int epoch = 0; epoch < num_epoch; ++epoch){
    
    

            if(epoch == 5)
                optim.set_lr(optim.lr() * 0.1);

            // Shuffle the sample indices
            std::shuffle(image_indexs.begin(), image_indexs.end(), global_random_engine);
            
            // Loop over all batches
            for(int ibatch = 0; ibatch < num_batch_per_epoch; ++ibatch, ++total_batch){

                // Forward pass
                auto x           = trainimages.slice(image_indexs, ibatch * batch_size, batch_size);
                auto y           = trainlabels.slice(image_indexs, ibatch * batch_size, batch_size);
                auto loss        = lossfn(model(x), y);
                auto lossval     = loss.forward().item();
                optim.zero_grad();
                loss.backward();
                optim.step();

                if((total_batch + 1) % 50 == 0){
    
    
                    auto t1 = tools::timenow();
                    auto batchs_time = t1 - t0;
                    t0 = t1;
                    INFO("Epoch %.2f / %d, Loss: %f, LR: %f [ %.2f ms / 50 batch ]", epoch + ibatch / (float)num_batch_per_epoch, num_epoch, lossval, optim.lr(), batchs_time);
                }
            }

            // Evaluate the model on the test set and print the accuracy
            auto test_probability = model(valimage).forward().sigmoid();
            float accuracy        = nn::eval_test_accuracy(test_probability, vallabels);
            float test_loss       = lossfn(test_probability, vallabels).forward().item();
            INFO("Test Accuracy: %.2f %%, Loss: %f", accuracy * 100, test_loss);
        }

        INFO("Save to model.bin .");
        io::save_model("model.bin", model.params());
        //io::load_model("model.bin", model.params());

        for(int i = 0; i < valimage.size(0); ++i){
    
    

            auto input            = valimage.reference_d0(i).unsqueeze();
            auto test_probability = model(input).forward().sigmoid();

            int ilabel = test_probability.argmax(0);
            float prob = test_probability(0, ilabel);

            io::print_image(input.ptr(), 28, 28);
            INFO("Predict %d, Confidence = %f", ilabel, prob);

            printf("Prass [Enter] to next.");
            getchar();
        }
        return 0;
    }
};

// void check_model(){
    
    

//     Application::nn::Model model;
//     Parameter p(create_normal_distribution_matrix({3, 1, 28, 28}));
//     auto out = model(p);
//     auto output_tensor = out.forward();
//     out.backward();

//     for(int i = 0; i < model.params().size(); ++i){
    
    
//         auto p = model.params()[i];
//         char nb[100];
//         sprintf(nb, "param%d.bin", i);
//         p.value().save(nb);

//         sprintf(nb, "param%d.grad.bin", i);
//         p.gradient().save(nb);
//     }

//     p.value().save("input.bin");
//     p.gradient().save("input.grad.bin");
//     output_tensor.save("output.bin");
// }

int main(){
    
    
    return Application::run();
}

check-grad.py

import numpy as np
import torch
import torch.nn

def dtype2int(dtype : np.dtype):
    
    if dtype == np.float32:
        return 0
    elif dtype == np.float16:
        return 1
    elif dtype == np.float64:
        return 2
    elif dtype == np.int32:
        return 3
    elif dtype == np.uint32:
        return 4
    elif dtype == np.int64:
        return 5

    assert False, f"Unsupport dtype {dtype}"


def int2dtype(itype : int):
    
    if itype == 0:
        return np.float32
    elif itype == 1:
        return np.float16
    elif itype == 2:
        return np.float64
    elif itype == 3:
        return np.int32
    elif itype == 4:
        return np.uint32
    elif itype == 5:
        return np.int64

    assert False, f"Unsupport itype {itype}"


def load_tensor(file):
            
    with open(file, "rb") as f:
        binary_data = f.read()
        
    magic_number, ndims, dtype = np.frombuffer(binary_data, np.uint32, count=3, offset=0)
    assert magic_number == 0xFCCFE2E2, f"{file} not a tensor file."
    
    dims = np.frombuffer(binary_data, np.uint32, count=ndims, offset=3 * 4)
    np_dtype = int2dtype(dtype)
    return np.frombuffer(binary_data, np_dtype, offset=(ndims + 3) * 4).reshape(*dims)


def save_tensor(file, tensor : np.ndarray):
            
    with open(file, "wb") as f:
        f.write(np.array([0xFCCFE2E2, tensor.ndim, dtype2int(tensor.dtype)] + list(tensor.shape), dtype=np.uint32).tobytes())
        f.write(tensor.tobytes())


def load_tt(file):
    t = load_tensor(file)
    return torch.tensor(t)


class Model(torch.nn.Module):
    def __init__(self):
        super().__init__()

        self.conv1 = torch.nn.Conv2d(1, 5, 3, 1, 0)
        self.conv1.weight.data[:] = load_tt("workspace/param0.bin")
        self.conv1.bias.data[:] = load_tt("workspace/param1.bin")
        self.fc1 = torch.nn.Linear(3380, 10)
        self.fc1.weight.data[:] = load_tt("workspace/param2.bin").T
        self.fc1.bias.data[:] = load_tt("workspace/param3.bin")

    def forward(self, x):
        x = torch.relu(self.conv1(x)).view(-1, 3380)
        return self.fc1(x)

# This script verifies the C++ backward pass; it requires the parameter-export code on the C++ side.

model = Model()
input = load_tensor("workspace/input.bin")
output = load_tensor("workspace/output.bin")
input_p = torch.nn.parameter.Parameter(torch.tensor(input))
output_torch = model(input_p)
output_torch.sum().backward()
print(np.abs(output_torch.detach().numpy() - output).sum(), "=======outputgrad")

input_grad = load_tensor("workspace/input.grad.bin")
print(np.abs(input_p.grad.data - input_grad).sum(), "=======input")

p0_grad = load_tensor("workspace/param0.grad.bin")
print(np.abs(model.conv1.weight.grad.data - p0_grad).max(), "=======conv1.weight.grad")

p1_grad = load_tensor("workspace/param1.grad.bin")
print(np.abs(model.conv1.bias.grad.data - p1_grad).sum())

p2_grad = load_tensor("workspace/param2.grad.bin")
print(np.abs(model.fc1.weight.grad.data - p2_grad.T).sum())

p3_grad = load_tensor("workspace/param3.grad.bin")
print(np.abs(model.fc1.bias.grad.data - p3_grad).sum())

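For reference, load_tensor/save_tensor above define the binary layout the C++ side must write: a uint32 magic number 0xFCCFE2E2, the number of dimensions, a dtype code (0 = float32), the dims as uint32, then the raw data. Matrix::save is not shown in this post; the following is only a sketch of a writer producing that layout (write_tensor is a hypothetical name, not the project's API):

    #include <fstream>
    #include <string>
    #include <vector>
    #include <cstdint>

    // Writes a float32 tensor in the layout check-grad.py's load_tensor expects.
    bool write_tensor(const std::string& file, const std::vector<int>& dims, const float* data){
        std::ofstream out(file, std::ios::binary);
        if(!out.is_open()) return false;

        uint32_t header[3] = {0xFCCFE2E2u, (uint32_t)dims.size(), 0u};  // magic, ndims, dtype 0 = float32
        out.write((const char*)header, sizeof(header));

        size_t numel = 1;
        for(int d : dims){
            uint32_t ud = (uint32_t)d;
            out.write((const char*)&ud, sizeof(ud));
            numel *= (size_t)d;
        }
        out.write((const char*)data, numel * sizeof(float));
        return out.good();
    }
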
Makefile

# 1. Variable definitions
cc        := g++
name      := pro
workdir   := workspace
srcdir    := src
objdir    := objs
stdcpp    := c++11

# Find the .cpp sources and define their dependency .mk files
cpp_srcs := $(shell find $(srcdir) -name "*.cpp")
cpp_objs := $(cpp_srcs:.cpp=.cpp.o)
cpp_objs := $(cpp_objs:$(srcdir)/%=$(objdir)/%)
cpp_mk   := $(cpp_objs:.cpp.o=.cpp.mk)

# Find the .cu sources and define their dependency .mk files
cu_srcs := $(shell find $(srcdir) -name "*.cu")
cu_objs := $(cu_srcs:.cu=.cu.o)
cu_objs := $(cu_objs:$(srcdir)/%=$(objdir)/%)
cu_mk   := $(cu_objs:.cu.o=.cu.mk)

# Libraries to link against
link_sys       := stdc++ dl openblas
link_librarys  := $(link_sys)

# Header search paths; note: no space is allowed after a trailing slash
# Write only the paths, without -I
include_paths := src eigen3.4.90 OpenBLAS0.3.17Static/include

# Library search paths; write only the paths, without -L
library_paths := OpenBLAS0.3.17Static/lib

# Join the library paths into a single string, e.g. "a b c" => "a:b:c",
# so that LD_LIBRARY_PATH can be set to a:b:c
empty := 
library_path_export := $(subst $(empty) $(empty),:,$(library_paths))

# Combine the include and library paths, automatically adding -I, -L, -l and rpath
run_paths     := $(foreach item,$(library_paths),-Wl,-rpath=$(item))
include_paths := $(foreach item,$(include_paths),-I$(item))
library_paths := $(foreach item,$(library_paths),-L$(item))
link_librarys := $(foreach item,$(link_librarys),-l$(item))

# For a different GPU, change -gencode=arch=compute_75,code=sm_75 to your card's compute
# capability (see https://developer.nvidia.com/zh-cn/cuda-gpus#compute).
# On Jetson Nano, if -m64 is not recognized, remove it; the result is unaffected.
cpp_compile_flags := -std=$(stdcpp) -w -g -O3 -m64 -fPIC -fopenmp -pthread
link_flags        := -pthread -fopenmp -Wl,-rpath='$$ORIGIN'

cpp_compile_flags += $(include_paths)
link_flags        += $(library_paths) $(link_librarys) $(run_paths)

# If a header changes, including these .mk files makes the dependent .cpp/.cu files rebuild automatically
ifneq ($(MAKECMDGOALS), clean)
-include $(cpp_mk) $(cu_mk)
endif

$(name)   : $(workdir)/$(name)

all       : $(name)
run       : $(name)
	@cd $(workdir) && ./$(name) $(run_args)

$(workdir)/$(name) : $(cpp_objs) $(cu_objs)
	@echo Link $@
	@mkdir -p $(dir $@)
	@$(cc) $^ -o $@ $(link_flags)

$(objdir)/%.cpp.o : $(srcdir)/%.cpp
	@echo Compile CXX $<
	@mkdir -p $(dir $@)
	@$(cc) -c $< -o $@ $(cpp_compile_flags)

# Generate the dependency .mk files for the cpp sources
$(objdir)/%.cpp.mk : $(srcdir)/%.cpp
	@echo Compile depends C++ $<
	@mkdir -p $(dir $@)
	@$(cc) -M $< -MF $@ -MT $(@:.cpp.mk=.cpp.o) $(cpp_compile_flags)
    
# Clean rule
clean :
	@rm -rf $(objdir) $(workdir)/$(name)

# Prevent these target names from being treated as files
.PHONY : clean run $(name)

# Export the library path so the program can find its shared libraries at run time
export LD_LIBRARY_PATH:=$(library_path_export)

Reposted from blog.csdn.net/qq_44089890/article/details/130631249