Compile MNN and deploy mnist in Ubuntu20.04 environment

Compile MNN in Ubuntu20.04 environment

Compilation environment preparation

  • cmake (version 3.10 or above is recommended)
  • protobuf (use 3.0 or above)
  • gcc (use 4.9 or above)

1.gcc installation

sudo apt update
sudo apt install build-essential

2. cmake installation

  • Download cmake from the cmake official website, selecting the Linux source version.
    Download address: cmake
  • Open the command terminal in the folder where the cmake source code is located, and unzip the file

tar -zxvf cmake-3.23.0-rc3.tar.gz

  • Enter the unzipped folder and execute

./bootstrap

If the following error occurs, you need to install libssl-dev, and then run ./bootstrap again after the installation:

sudo apt-get install libssl-dev

insert image description here

  • Compile and build cmake

make

  • install cmake

sudo make install

  • After installation, enter cmake --version to verify whether the installation is complete

3. Install protobuf

  • install dependencies

sudo apt-get install autoconf automake libtool curl make g++ unzip libffi-dev -y

  • Download protobuf source code
    Download address: protobuf
  • Unzip the compressed package

tar -zxvf protobuf-cpp-3.20.0-rc-1.tar.gz

  • Enter the decompressed folder to generate the configuration file

cd protobuf-3.20.0-rc-1/
./autogen.sh

  • Configuration Environment

./configure

  • Compile the source code

make

  • Install

sudo make install

  • Refresh dynamic library

sudo ldconfig

  • Enter protoc --version to see if the installation is successful

Compile MNN

official document

  • Download the source code of MNN and decompress it.
    Download link: MNN
  • decompress

unzip MNN-master.zip

  • Enter the unzipped folder and execute

cd MNN
./schema/generate.sh

  • Local compilation, after the compilation is completed, the dynamic library of MNN appears locally

mkdir build && cd build && cmake .. && make -j8

Android compile

  1. Download and install NDK at https://developer.android.com/ndk/downloads/, it is recommended to use the latest stable version
  2. Set NDK environment variables in .bashrc or .bash_profile, for example: export ANDROID_NDK=/Users/username/path/to/android-ndk-r14b
  3. cd /path/to/MNN
  4. ./schema/generate.sh
  5. ./tools/script/get_model.sh (optional, the model is only required for demo projects). Note that get_model.sh needs to compile the model conversion tool in advance, see here.
  6. cd project/android
  7. Compile armv7 dynamic library: mkdir build_32 && cd build_32 && ../build_32.sh
  8. Compile armv8 dynamic library: mkdir build_64 && cd build_64 && ../build_64.sh

deploy mnist

Install the opencv library

Deploying mnist also requires the opencv library, so you need to install OpenCV first.
You can install it by cloning and building the OpenCV source code as described in this blog post.
After the installation, write a simple OpenCV program to verify that it works:
create a main.cpp file, then compile and run it with g++

g++ main.cpp -o output `pkg-config --cflags --libs opencv4`
./output

#include <opencv2/highgui.hpp>
#include "opencv2/imgcodecs/legacy/constants_c.h"
#include "opencv2/imgproc/types_c.h"
#include <iostream>
// Load a test image with OpenCV and print its basic properties plus one
// pixel value, to verify that the OpenCV installation works.
// Returns 0 on success, -1 if the image cannot be loaded.
int main( int argc, char** argv ) {

    // cv::IMREAD_COLOR replaces the deprecated CV_LOAD_IMAGE_COLOR constant,
    // so the legacy constants_c.h header is no longer needed.
    cv::Mat image = cv::imread("test.jpg", cv::IMREAD_COLOR);
    if (image.empty()) {   // empty() is the idiomatic load-failure check
        std::cout <<  "Could not open or find the image" << std::endl ;
        return -1;
    }

    std::cout << "image wide: "<< image.cols << ",image high: " << image.rows << ",image channels: "<< image.channels() << std::endl;
    /* display image
    cv::namedWindow( "Display window", cv::WINDOW_AUTOSIZE );
    cv::imshow( "Display window", image );
    cv::waitKey(0);
    */

    // Clamp the sample coordinates so a small image cannot cause an
    // out-of-bounds read (the original hard-coded (250, 250)).
    size_t y = (image.rows > 250) ? 250 : static_cast<size_t>(image.rows - 1); // row
    size_t x = (image.cols > 250) ? 250 : static_cast<size_t>(image.cols - 1); // col
    int c = 2;  // channel index (BGR order, so 2 == red)

    // Access the pixel via raw row-pointer arithmetic ...
    unsigned char *row_ptr  = image.ptr<unsigned char>(y);        // head of row y
    unsigned char *data_ptr = &row_ptr[x * image.channels()];     // start of pixel (y, x)
    unsigned char data      = data_ptr[c];
    (void)data; // same value as the at() access below; kept as a pointer-arithmetic demo

    // ... and via cv::Mat::at() — both must agree.
    // unsigned char is not printable, so cast to unsigned before streaming.
    std::cout << "pixel value at y, x ,c"<<static_cast<unsigned>(image.at<cv::Vec3b>(y,x)[c]) << std::endl;
    return 0;
}

If the following situation occurs at runtime, it means the library search path is not configured correctly.
Solution: create an opencv.conf file in the /etc/ld.so.conf.d/ directory:

sudo vim /etc/ld.so.conf.d/opencv.conf

Write two lines:

/usr/local/lib
~/opencv_build/opencv/build/lib (here refers to the lib under the opencv path you installed)

Save and exit, then run sudo ldconfig to fix the problem.
Run ./output again; if it prints the correct information, the OpenCV installation succeeded.

deploy mnist

Create a c++ file, write the program according to the reasoning process of the MNN official document, the mnn model download address

#include "Backend.hpp"
#include "Interpreter.hpp"
#include "MNNDefine.h"
#include "Interpreter.hpp"
#include "Tensor.hpp"
#include <math.h>
#include <string.h>   // ::memcpy — previously relied on a transitive include
#include <opencv2/opencv.hpp>
#include <iostream>
#include <stdio.h>
using namespace MNN;
using namespace cv;

// Run a 10-class mnist MNN model on one test image and print the predicted
// digit. Returns 0 on success, -1 if the image or model cannot be loaded.
int main(void)
{
    // Paths to your own test image and mnn model file.
    std::string image_name = "test.jpg";
    const char* model_name = "mnist.mnn";
    // Scheduling / backend configuration parameters.
    int forward = MNN_FORWARD_CPU;
    // int forward = MNN_FORWARD_OPENCL;
    int precision  = 2;    // BackendConfig::PrecisionMode
    int power      = 0;    // BackendConfig::PowerMode
    int memory     = 0;    // BackendConfig::MemoryMode
    int threads    = 1;
    int INPUT_SIZE = 28;   // the mnist model expects 28x28 input

    cv::Mat raw_image = cv::imread(image_name.c_str());
    //imshow("image", raw_image);
    if (raw_image.empty()) {
        std::cout << "Could not open or find the image" << std::endl;
        return -1;
    }
    cv::Mat image;
    cv::resize(raw_image, image, cv::Size(INPUT_SIZE, INPUT_SIZE));

    // 1. Create the Interpreter from the model file on disk:
    //    static Interpreter* createFromFile(const char* file);
    std::shared_ptr<Interpreter> net(Interpreter::createFromFile(model_name));
    if (!net) {
        std::cout << "Could not load the model" << std::endl;
        return -1;
    }

    // 2. Schedule config.
    //    numThread is a concurrency hint — the actual thread count and
    //    efficiency are backend-dependent. type selects the primary backend
    //    (CPU by default); ops it does not support fall back to backupType.
    MNN::ScheduleConfig config;
    config.numThread = threads;
    config.type      = static_cast<MNNForwardType>(forward);

    // 3. Backend config: memory, power and precision preferences.
    MNN::BackendConfig backendConfig;
    backendConfig.precision = (MNN::BackendConfig::PrecisionMode)precision;
    backendConfig.power     = (MNN::BackendConfig::PowerMode) power;
    backendConfig.memory    = (MNN::BackendConfig::MemoryMode) memory;
    config.backendConfig = &backendConfig;

    // 4. Create the session, then release the model buffer (not needed
    //    after session creation).
    auto session = net->createSession(config);
    net->releaseModel();

    // Preprocessing: float32, scaled to [0, 1].
    image.convertTo(image, CV_32FC3);
    image = image / 255.0f;

    // 5. Input: wrap an NHWC (TENSORFLOW layout) host tensor.
    //    copyFromHostTensor takes care of any layout conversion (e.g. to
    //    NCHW) and of copying to the backend, so the caller only has to
    //    match the layout of the tensor it created.
    std::vector<int> dims{1, INPUT_SIZE, INPUT_SIZE, 3};
    auto nhwc_Tensor = MNN::Tensor::create<float>(dims, NULL, MNN::Tensor::TENSORFLOW);
    auto nhwc_data   = nhwc_Tensor->host<float>();
    auto nhwc_size   = nhwc_Tensor->size();
    ::memcpy(nhwc_data, image.data, nhwc_size);

    auto inputTensor = net->getSessionInput(session, nullptr);
    inputTensor->copyFromHostTensor(nhwc_Tensor);

    // 6. Run inference.
    net->runSession(session);

    // 7. Fetch the output tensor and copy it back to the host.
    std::string output_tensor_name0 = "dense1_fwd";
    MNN::Tensor *tensor_scores = net->getSessionOutput(session, output_tensor_name0.c_str());
    MNN::Tensor tensor_scores_host(tensor_scores, tensor_scores->getDimensionType());
    tensor_scores->copyToHostTensor(&tensor_scores_host);

    // Post-processing: numerically stable softmax + argmax over 10 classes.
    auto scores_dataPtr = tensor_scores_host.host<float>();

    // BUG FIX: the original accumulated the raw logits (exp_sum += val) and
    // divided raw logits by that sum — no exponential was ever applied, so
    // the "probabilities" were wrong and the argmax could even flip when the
    // logit sum was negative. Subtracting the max logit before expf() keeps
    // the computation numerically stable.
    float max_logit = scores_dataPtr[0];
    for (int i = 1; i < 10; ++i)
    {
        if (scores_dataPtr[i] > max_logit)
            max_logit = scores_dataPtr[i];
    }
    float exp_sum = 0.0f;
    for (int i = 0; i < 10; ++i)
    {
        exp_sum += expf(scores_dataPtr[i] - max_logit);
    }
    // Argmax of the softmax probabilities.
    int   idx      = 0;
    float max_prob = 0.0f;
    for (int i = 0; i < 10; ++i)
    {
        float prob = expf(scores_dataPtr[i] - max_logit) / exp_sum;
        if (prob > max_prob)
        {
            max_prob = prob;
            idx      = i;
        }
    }
    printf("the result is %d\n", idx);

    return 0;
}

Write CMakeLists.txt, taking care to replace the MNN path inside with the path where you compiled MNN yourself

cmake_minimum_required(VERSION 3.10)
project(mnist)

set(CMAKE_CXX_STANDARD 11)

find_package(OpenCV REQUIRED)

# Path to your local MNN checkout — replace with your own location.
set(MNN_DIR /home/chen/MNN)
include_directories(${MNN_DIR}/include)
include_directories(${MNN_DIR}/include/MNN)
include_directories(${MNN_DIR}/tools)
include_directories(${MNN_DIR}/tools/cpp)
include_directories(${MNN_DIR}/source)
include_directories(${MNN_DIR}/source/backend)
include_directories(${MNN_DIR}/source/core)

# The MNN dynamic library is produced in ${MNN_DIR}/build by the earlier
# "mkdir build && cd build && cmake .. && make" step.
link_directories(${MNN_DIR}/build)

add_executable(mnist main.cpp)
# Link by library name (MNN) rather than the non-portable "-lMNN" flag,
# so CMake resolves the library itself on every platform.
target_link_libraries(mnist MNN ${OpenCV_LIBS})

compile

cmake .
make

After the compilation is completed, the mnist executable file will appear.
Run ./mnist, and you can see that the handwritten digit is predicted successfully.
insert image description here

Guess you like

Origin blog.csdn.net/qq_40042726/article/details/123503544