ncnn Notes (3): Running the examples

1. Running mobilenetv2ssdlite

The models are downloaded from: https://github.com/nihui/ncnn-assets

git clone https://github.com/nihui/ncnn-assets.git

From that repository, grab the mobilenetv2_ssdlite_voc.bin and mobilenetv2_ssdlite_voc.param files.

Find a test image and run:

mobilenetv2ssdlite.exe test.jpg
8 = 0.96635 at 3.13 48.38 359.55 x 350.81

2. Running yolov3

 .\yolov3.exe 000000000575.jpg
12 = 0.81045 at 10.71 56.92 368.16 x 368.90
14 = 0.30102 at 312.06 56.70 156.94 x 239.00

The detections are quite accurate; the classifications, not so much. This is probably an issue with the model files: the order of the labels in the model does not match the label table in the program, so the results are printed with the wrong class names. In other words, both detection and classification may actually be correct, and only the printed labels are off.
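If you want to verify that theory, the examples hard-code a VOC label table and simply index it with the label value from each detection row. Below is a minimal sketch of that lookup; the class_names array mirrors the one used by the upstream ncnn examples (index 0 is background), but you should check it against the label order your model was actually trained with.

#include <stdio.h>

// VOC label table as assumed by the upstream ncnn examples (index 0 = background).
// If the model was exported with a different label order, the printed names will
// be wrong even though the boxes and scores are correct.
static const char* class_names[] = {
    "background", "aeroplane", "bicycle", "bird", "boat", "bottle",
    "bus", "car", "cat", "chair", "cow", "diningtable", "dog",
    "horse", "motorbike", "person", "pottedplant", "sheep", "sofa",
    "train", "tvmonitor"
};

static void print_detection(int label, float prob)
{
    // label comes from values[0] of a detection_out row (see the code later in this post)
    printf("%s = %.5f\n", class_names[label], prob);
}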

Counting the earlier post, ncnn Notes (2): Running the examples, that makes three examples run so far, so it is time to talk about how the code is actually used.

Using ncnn

Step 1: Look at the headers that are included

shufflenetv2.cpp

#include "net.h"

#include <algorithm>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <stdio.h>
#include <vector>

mobilenetv2ssdlite.cpp

#include "net.h"

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <stdio.h>
#include <vector>

yolov3.cpp

#include "net.h"

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <stdio.h>
#include <vector>

So to use ncnn, you only need to include a single header:

#include "net.h"

Step 2: The function interfaces used

shufflenetv2.cpp

static int detect_shufflenetv2(const cv::Mat& bgr, std::vector<float>& cls_scores)
{
    ncnn::Net shufflenetv2;

    shufflenetv2.opt.use_vulkan_compute = true;

    // https://github.com/miaow1988/ShuffleNet_V2_pytorch_caffe
    // models can be downloaded from https://github.com/miaow1988/ShuffleNet_V2_pytorch_caffe/releases
    shufflenetv2.load_param("shufflenet_v2_x0.5.param");
    shufflenetv2.load_model("shufflenet_v2_x0.5.bin");

    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR, bgr.cols, bgr.rows, 224, 224);

    const float norm_vals[3] = {1 / 255.f, 1 / 255.f, 1 / 255.f};
    in.substract_mean_normalize(0, norm_vals);

    ncnn::Extractor ex = shufflenetv2.create_extractor();

    ex.input("data", in);

    ncnn::Mat out;
    ex.extract("fc", out);

    // manually call softmax on the fc output
    // convert result into probability
    // skip if your model already has softmax operation
    {
        ncnn::Layer* softmax = ncnn::create_layer("Softmax");

        ncnn::ParamDict pd;
        softmax->load_param(pd);

        softmax->forward_inplace(out, shufflenetv2.opt);

        delete softmax;
    }

    out = out.reshape(out.w * out.h * out.c);

    cls_scores.resize(out.w);
    for (int j = 0; j < out.w; j++)
    {
        cls_scores[j] = out[j];
    }

    return 0;
}
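For reference, a minimal caller for detect_shufflenetv2 could look like the sketch below; the real example additionally sorts and prints only the top-3 scores via a print_topk helper, which is omitted here.

int main(int argc, char** argv)
{
    if (argc != 2)
    {
        fprintf(stderr, "Usage: %s [imagepath]\n", argv[0]);
        return -1;
    }

    // load the image as BGR, which is what detect_shufflenetv2 expects
    cv::Mat m = cv::imread(argv[1], 1);
    if (m.empty())
    {
        fprintf(stderr, "cv::imread %s failed\n", argv[1]);
        return -1;
    }

    std::vector<float> cls_scores;
    detect_shufflenetv2(m, cls_scores);

    // dump every class score; the upstream example keeps only the top 3
    for (size_t i = 0; i < cls_scores.size(); i++)
        fprintf(stderr, "%zu = %f\n", i, cls_scores[i]);

    return 0;
}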

mobilenetv2ssdlite.cpp

static int detect_mobilenetv2(const cv::Mat& bgr, std::vector<Object>& objects)
{
    ncnn::Net mobilenetv2;

    mobilenetv2.opt.use_vulkan_compute = true;

    // the Caffe model contains "Silence" layers that ncnn does not implement;
    // register a no-op custom layer for them so the param file loads cleanly
    // (in the full example, Noop_layer_creator comes from DEFINE_LAYER_CREATOR(Noop))
    mobilenetv2.register_custom_layer("Silence", Noop_layer_creator);

    // original pretrained model from https://github.com/chuanqi305/MobileNetv2-SSDLite
    // https://github.com/chuanqi305/MobileNetv2-SSDLite/blob/master/ssdlite/voc/deploy.prototxt
    // the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
    mobilenetv2.load_param("mobilenetv2_ssdlite_voc.param");
    mobilenetv2.load_model("mobilenetv2_ssdlite_voc.bin");

    const int target_size = 300;

    int img_w = bgr.cols;
    int img_h = bgr.rows;

    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR, bgr.cols, bgr.rows, target_size, target_size);

    const float mean_vals[3] = {127.5f, 127.5f, 127.5f};
    const float norm_vals[3] = {1.0 / 127.5, 1.0 / 127.5, 1.0 / 127.5};
    in.substract_mean_normalize(mean_vals, norm_vals);

    ncnn::Extractor ex = mobilenetv2.create_extractor();

    ex.input("data", in);

    ncnn::Mat out;
    ex.extract("detection_out", out);

    //     printf("%d %d %d\n", out.w, out.h, out.c);
    objects.clear();
    for (int i = 0; i < out.h; i++)
    {
        const float* values = out.row(i);

        Object object;
        object.label = values[0];
        object.prob = values[1];
        object.rect.x = values[2] * img_w;
        object.rect.y = values[3] * img_h;
        object.rect.width = values[4] * img_w - object.rect.x;
        object.rect.height = values[5] * img_h - object.rect.y;

        objects.push_back(object);
    }

    return 0;
}

yolov3.cpp

static int detect_yolov3(const cv::Mat& bgr, std::vector<Object>& objects)
{
    ncnn::Net yolov3;

    yolov3.opt.use_vulkan_compute = true;

    // original pretrained model from https://github.com/eric612/MobileNet-YOLO
    // param : https://drive.google.com/open?id=1V9oKHP6G6XvXZqhZbzNKL6FI_clRWdC-
    // bin : https://drive.google.com/open?id=1DBcuFCr-856z3FRQznWL_S5h-Aj3RawA
    // the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
    yolov3.load_param("mobilenetv2_yolov3.param");
    yolov3.load_model("mobilenetv2_yolov3.bin");

    const int target_size = 352;

    int img_w = bgr.cols;
    int img_h = bgr.rows;

    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR, bgr.cols, bgr.rows, target_size, target_size);

    const float mean_vals[3] = {127.5f, 127.5f, 127.5f};
    const float norm_vals[3] = {0.007843f, 0.007843f, 0.007843f};
    in.substract_mean_normalize(mean_vals, norm_vals);

    ncnn::Extractor ex = yolov3.create_extractor();

    ex.input("data", in);

    ncnn::Mat out;
    ex.extract("detection_out", out);

    //     printf("%d %d %d\n", out.w, out.h, out.c);
    objects.clear();
    for (int i = 0; i < out.h; i++)
    {
        const float* values = out.row(i);

        Object object;
        object.label = values[0];
        object.prob = values[1];
        object.rect.x = values[2] * img_w;
        object.rect.y = values[3] * img_h;
        object.rect.width = values[4] * img_w - object.rect.x;
        object.rect.height = values[5] * img_h - object.rect.y;

        objects.push_back(object);
    }

    return 0;
}

Summary of the calling pattern

1. Construct a network object xxnet

ncnn::Net xxnet;

2. Enable Vulkan compute (optional)

xxnet.opt.use_vulkan_compute = true;

3. Load the model (param and bin files)

xxnet.load_param("xxnet.param");
xxnet.load_model("xxnet.bin");

4. Preprocess the image (typically color-space conversion, resizing, mean subtraction, and normalization); the snippet below is just one example

    const int target_size = 352;

    int img_w = bgr.cols;
    int img_h = bgr.rows;

    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR, bgr.cols, bgr.rows, target_size, target_size);

    const float mean_vals[3] = {127.5f, 127.5f, 127.5f};
    const float norm_vals[3] = {0.007843f, 0.007843f, 0.007843f};
    in.substract_mean_normalize(mean_vals, norm_vals);

5. Create an extractor and run inference

ncnn::Extractor ex = xxnet.create_extractor();

ex.input("data", in);

ncnn::Mat out;
ex.extract("detection_out", out);

6. Parse and extract the results
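For the two detection examples above, detection_out is a Mat whose rows are detections of the form {label, confidence, xmin, ymin, xmax, ymax}, with coordinates normalized to 0~1. A parsing sketch, same logic as in the example code, assuming the Object struct from the examples (cv::Rect_<float> rect; int label; float prob;):

static void parse_detection_out(const ncnn::Mat& out, int img_w, int img_h,
                                std::vector<Object>& objects)
{
    objects.clear();
    for (int i = 0; i < out.h; i++)
    {
        const float* values = out.row(i); // one detection per row

        Object object;
        object.label = values[0];                               // class index
        object.prob = values[1];                                // confidence
        object.rect.x = values[2] * img_w;                      // xmin, back to pixels
        object.rect.y = values[3] * img_h;                      // ymin
        object.rect.width = values[4] * img_w - object.rect.x;  // xmax - xmin
        object.rect.height = values[5] * img_h - object.rect.y; // ymax - ymin

        objects.push_back(object);
    }
}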


Reposted from blog.csdn.net/juluwangriyue/article/details/108804971