For an introduction to TensorRT, see: http://blog.csdn.net/fengbingchun/article/details/78469551

This sample serializes the TensorRT model to a local file during the build phase, and then, in the deployment phase, loads the serialized file directly to run inference. The contents of mnist_infer.cpp are as follows:

#include <iostream>
#include <string>
#include <tuple>
#include <fstream>
#include <memory>
#include <cuda_runtime_api.h>
#include <NvInfer.h>
#include <NvCaffeParser.h>
#include <opencv2/opencv.hpp>
#include "common.hpp"

// serialize the TensorRT model, then load it back for inference
namespace {

typedef std::tuple<int, int, int, std::string, std::string> DATA_INFO; // input width, input height, output size, input blob name, output blob name

int caffeToGIEModel(const std::string& deployFile,    // name for caffe prototxt
    const std::string& modelFile,                     // name for model
    const std::vector<std::string>& outputs,          // network outputs
    unsigned int maxBatchSize,                        // batch size - NB must be at least as large as the batch we want to run with
    Logger logger, const std::string& engine_file)
{
    // create the builder
    nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(logger);

    // parse the caffe model to populate the network, then set the outputs
    nvinfer1::INetworkDefinition* network = builder->createNetwork();
    nvcaffeparser1::ICaffeParser* parser = nvcaffeparser1::createCaffeParser();
    const nvcaffeparser1::IBlobNameToTensor* blobNameToTensor = parser->parse(deployFile.c_str(), modelFile.c_str(), *network, nvinfer1::DataType::kFLOAT);

    // specify which tensors are outputs
    for (auto& s : outputs)
        network->markOutput(*blobNameToTensor->find(s.c_str()));

    // Build the engine
    builder->setMaxBatchSize(maxBatchSize);
    builder->setMaxWorkspaceSize(1 << 20);
    nvinfer1::ICudaEngine* engine = builder->buildCudaEngine(*network);
    CHECK(engine != nullptr);

    // we don't need the network any more, and we can destroy the parser
    network->destroy();
    parser->destroy();

    // serialize the engine, then close everything down
    nvinfer1::IHostMemory* gieModelStream = engine->serialize(); // GIE model
    fprintf(stdout, "allocate memory size: %zu bytes\n", gieModelStream->size());
    std::ofstream outfile(engine_file.c_str(), std::ios::out | std::ios::binary);
    if (!outfile.is_open()) {
        fprintf(stderr, "fail to open file to write: %s\n", engine_file.c_str());
        return -1;
    }
    unsigned char* p = (unsigned char*)gieModelStream->data();
    outfile.write((char*)p, gieModelStream->size());
    outfile.close();

    engine->destroy();
    builder->destroy();
    if (gieModelStream) gieModelStream->destroy();
    nvcaffeparser1::shutdownProtobufLibrary();

    return 0;
}

int doInference(nvinfer1::IExecutionContext& context, const float* input, float* output, int batchSize, const DATA_INFO& info)
{
    const nvinfer1::ICudaEngine& engine = context.getEngine();
    // input and output buffer pointers that we pass to the engine - the engine requires exactly IEngine::getNbBindings()
    // of these, but in this case we know that there is exactly one input and one output.
    CHECK(engine.getNbBindings() == 2);
    void* buffers[2];

    // In order to bind the buffers, we need to know the names of the input and output tensors.
    // note that indices are guaranteed to be less than IEngine::getNbBindings()
    int inputIndex = engine.getBindingIndex(std::get<3>(info).c_str()), outputIndex = engine.getBindingIndex(std::get<4>(info).c_str());

    // create GPU buffers and a stream
    checkCudaErrors(cudaMalloc(&buffers[inputIndex], batchSize * std::get<1>(info) * std::get<0>(info) * sizeof(float)));
    checkCudaErrors(cudaMalloc(&buffers[outputIndex], batchSize * std::get<2>(info) * sizeof(float)));

    cudaStream_t stream;
    checkCudaErrors(cudaStreamCreate(&stream));

    // DMA the input to the GPU, execute the batch asynchronously, and DMA it back:
    checkCudaErrors(cudaMemcpyAsync(buffers[inputIndex], input, batchSize * std::get<1>(info) * std::get<0>(info) * sizeof(float), cudaMemcpyHostToDevice, stream));
    context.enqueue(batchSize, buffers, stream, nullptr);
    checkCudaErrors(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * std::get<2>(info) * sizeof(float), cudaMemcpyDeviceToHost, stream));
    cudaStreamSynchronize(stream);

    // release the stream and the buffers
    cudaStreamDestroy(stream);
    checkCudaErrors(cudaFree(buffers[inputIndex]));
    checkCudaErrors(cudaFree(buffers[outputIndex]));

    return 0;
}

} // namespace

int test_mnist_infer()
{
    // 1. build phase
    // stuff we know about the network and the caffe input/output blobs
    const DATA_INFO info(28, 28, 10, "data", "prob");
    const std::string deploy_file { "models/mnist.prototxt" };
    const std::string model_file { "models/mnist.caffemodel" };
    const std::string mean_file { "models/mnist_mean.binaryproto" };
    const std::string engine_file { "tensorrt_mnist.model" };

    Logger logger; // multiple instances of IRuntime and/or IBuilder must all use the same logger
    CHECK(caffeToGIEModel(deploy_file, model_file, std::vector<std::string>{ std::get<4>(info) }, 1, logger, engine_file) == 0);

    // 2. deploy phase
    // parse the mean file and subtract it from the image
    nvcaffeparser1::ICaffeParser* parser = nvcaffeparser1::createCaffeParser();
    nvcaffeparser1::IBinaryProtoBlob* meanBlob = parser->parseBinaryProto(mean_file.c_str());
    parser->destroy();

    std::ifstream in_file(engine_file.c_str(), std::ios::in | std::ios::binary);
    if (!in_file.is_open()) {
        fprintf(stderr, "fail to open file to read: %s\n", engine_file.c_str());
        return -1;
    }
    std::streampos begin, end;
    begin = in_file.tellg();
    in_file.seekg(0, std::ios::end);
    end = in_file.tellg();
    std::size_t size = end - begin;
    fprintf(stdout, "engine file size: %zu bytes\n", size);
    in_file.seekg(0, std::ios::beg);
    std::unique_ptr<unsigned char[]> engine_data(new unsigned char[size]);
    in_file.read((char*)engine_data.get(), size);
    in_file.close();

    // deserialize the engine
    nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(logger);
    nvinfer1::ICudaEngine* engine = runtime->deserializeCudaEngine((const void*)engine_data.get(), size, nullptr);
    nvinfer1::IExecutionContext* context = engine->createExecutionContext();

    const float* meanData = reinterpret_cast<const float*>(meanBlob->getData());

    const std::string image_path{ "images/digit/" };
    for (int i = 0; i < 10; ++i) {
        const std::string image_name = image_path + std::to_string(i) + ".png";
        cv::Mat mat = cv::imread(image_name, 0);
        if (!mat.data) {
            fprintf(stderr, "read image fail: %s\n", image_name.c_str());
            return -1;
        }

        cv::resize(mat, mat, cv::Size(std::get<0>(info), std::get<1>(info)));
        mat.convertTo(mat, CV_32FC1);

        float data[std::get<1>(info) * std::get<0>(info)];
        const float* p = (float*)mat.data;
        for (int j = 0; j < std::get<1>(info) * std::get<0>(info); ++j) {
            data[j] = p[j] - meanData[j];
        }

        // run inference
        float prob[std::get<2>(info)];
        doInference(*context, data, prob, 1, info);

        float val{ -1.f };
        int idx{ -1 };
        for (int t = 0; t < std::get<2>(info); ++t) {
            if (val < prob[t]) {
                val = prob[t];
                idx = t;
            }
        }
        fprintf(stdout, "expected value: %d, actual value: %d, probability: %f\n", i, idx, val);
    }

    meanBlob->destroy();

    // destroy the engine
    context->destroy();
    engine->destroy();
    runtime->destroy();

    return 0;
}
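
The file above includes common.hpp, which is not listed here. A minimal sketch of what it is assumed to provide (a Logger implementing nvinfer1::ILogger plus the CHECK/checkCudaErrors helpers used above) could look like the following; the actual header in the repository may differ:

// common.hpp -- minimal sketch of the assumed helpers (illustrative, not the repository's real header)
#ifndef COMMON_HPP_
#define COMMON_HPP_

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime_api.h>
#include <NvInfer.h>

// abort if a boolean condition fails
#define CHECK(x) \
    do { if (!(x)) { fprintf(stderr, "check failed: %s, %s:%d\n", #x, __FILE__, __LINE__); exit(EXIT_FAILURE); } } while (0)

// abort if a CUDA runtime call returns an error
#define checkCudaErrors(call) \
    do { cudaError_t err__ = (call); if (err__ != cudaSuccess) { \
        fprintf(stderr, "CUDA error: %s, %s:%d\n", cudaGetErrorString(err__), __FILE__, __LINE__); exit(EXIT_FAILURE); } } while (0)

// logger required by IBuilder/IRuntime; suppress informational messages
class Logger : public nvinfer1::ILogger {
public:
    void log(Severity severity, const char* msg) override {
        if (severity != Severity::kINFO)
            fprintf(stdout, "%s\n", msg);
    }
};

#endif // COMMON_HPP_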

The test images are as follows:

The test results are as follows; they match the results at http://blog.csdn.net/fengbingchun/article/details/78552908

The steps to build the test code are as follows (ReadMe.txt):
Building the test code in TensorRT_Test with CMake on Linux (a sketch of a possible CMakeLists.txt follows these steps):
1. Change the terminal directory to CUDA_Test/prj/linux_tensorrt_cmake, then run the following commands in order:
$ mkdir build
$ cd build
$ cmake ..
$ make (generates the TensorRT_Test executable)
$ ln -s ../../../test_data/models ./ (symlink the models directory into the build directory)
$ ln -s ../../../test_data/images ./ (symlink the images directory into the build directory)
$ ./TensorRT_Test
2. For operations that read images through OpenCV, first change the image paths in the corresponding files to a path format supported by Linux.
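
For reference, a minimal CMakeLists.txt along the lines below could drive such a build; the actual file in CUDA_Test/prj/linux_tensorrt_cmake may differ (the library names, paths, and options here are assumptions), so treat this as an illustrative sketch only:

# Illustrative CMakeLists.txt sketch (the repository's real file may differ)
cmake_minimum_required(VERSION 3.5)
project(TensorRT_Test CXX)

set(CMAKE_CXX_STANDARD 11)

# CUDA and OpenCV are required by mnist_infer.cpp
find_package(CUDA REQUIRED)
find_package(OpenCV REQUIRED)

# Adjust these paths if TensorRT headers/libraries are installed elsewhere (assumption)
include_directories(${CUDA_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} /usr/include/x86_64-linux-gnu)
link_directories(/usr/lib/x86_64-linux-gnu)

add_executable(TensorRT_Test mnist_infer.cpp)
# nvinfer provides the core runtime; the Caffe parser library is nvcaffe_parser in TensorRT 2.x (nvparsers in later releases)
target_link_libraries(TensorRT_Test nvinfer nvcaffe_parser ${CUDA_LIBRARIES} ${OpenCV_LIBS})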

GitHub: https://github.com/fengbingchun/CUDA_Test
