How to use a Caffe model with TensorRT (C++)

// NOTE(review): fragment of a larger function — only the deserialize path is
// active; the Caffe-parse/build path below is commented out. `gLogger`, `ss`,
// and `plugin_factory` are declared outside this snippet.
//IHostMemory *gieModelStream {nullptr};
    //const char* prototxt = "./googlenet/test_20181010.prototxt";//argv[1];
    //const char* caffemodel = "./googlenet/lane_area_lx1890_iter_320000_20181010.caffemodel";//argv[2];
    //std::vector<std::string> output_blobnames;
    //output_blobnames.push_back(OUTPUT_BLOB_NAME_1);
    //output_blobnames.push_back(OUTPUT_BLOB_NAME_2);
    //caffeToGIEModel(prototxt, caffemodel, output_blobnames, 1, &plugin_factory, gieModelStream, true);
    //caffeToGIEModel_serialize(prototxt, caffemodel, output_blobnames, 1, &plugin_factory, gieModelStream, true, s);
    // Release any plugin layers created by the (currently disabled) build step.
    plugin_factory.destroyPlugin();
    //std::vector<std::string>().swap(output_blobnames);
    // deserialize the engine
    IRuntime* runtime = createInferRuntime(gLogger);
    //ICudaEngine* engine = runtime->deserializeCudaEngine(gieModelStream->data(), gieModelStream->size(), &plugin_factory);
    // NOTE(review): `ss` presumably holds the serialized engine bytes read from
    // disk — confirm against the caller. deserializeCudaEngine() returns
    // nullptr on failure; `engine` should be null-checked before use.
    ICudaEngine* engine = runtime->deserializeCudaEngine(ss.data(), ss.size(), &plugin_factory);
    //if (gieModelStream)
        //gieModelStream->destroy();
    // NOTE(review): runtime/engine/context are never released in this snippet —
    // verify that context->destroy(), engine->destroy(), and runtime->destroy()
    // (in that order) happen later in the enclosing function.
    IExecutionContext *context = engine->createExecutionContext();

You may also like

Reposted from www.cnblogs.com/happyamyhope/p/10931449.html