For an introduction to TensorRT, see: http://blog.csdn.net/fengbingchun/article/details/78469551

The following test code, adapted from the sampleMNIST.cpp file in TensorRT 2.1.2, recognizes the handwritten digits 0-9. The contents of each file are listed below:

common.hpp:

#ifndef FBC_TENSORRT_TEST_COMMON_HPP_
#define FBC_TENSORRT_TEST_COMMON_HPP_

#include <cstdio>
#include <iostream>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <NvInfer.h>

template< typename T >
static inline int check_Cuda(T result, const char * const func, const char * const file, const int line)
{
	if (result) {
		fprintf(stderr, "Error CUDA: at %s: %d, error code=%d, func: %s\n", file, line, static_cast<unsigned int>(result), func);
		cudaDeviceReset(); // Make sure we call CUDA Device Reset before exiting
		return -1;
	}

	return 0;
}

template< typename T >
static inline int check(T result, const char * const func, const char * const file, const int line)
{
	if (result) {
		fprintf(stderr, "Error: at %s: %d, error code=%d, func: %s\n", file, line, static_cast<unsigned int>(result), func);
		return -1;
	}

	return 0;
}

#define checkCudaErrors(val) check_Cuda((val), __FUNCTION__, __FILE__, __LINE__)
#define checkErrors(val) check((val), __FUNCTION__, __FILE__, __LINE__)

#define CHECK(x) { \
	if (x) {} \
	else { fprintf(stderr, "Check Failed: %s, file: %s, line: %d\n", #x, __FILE__, __LINE__); return -1; } \
}

// Logger for GIE info/warning/errors
class Logger : public nvinfer1::ILogger
{
	void log(Severity severity, const char* msg) override
	{
		// suppress info-level messages
		if (severity != Severity::kINFO)
			std::cout << msg << std::endl;
	}
};

#endif // FBC_TENSORRT_TEST_COMMON_HPP_
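A note on the helpers above: checkCudaErrors/checkErrors only log a failure (check_Cuda additionally resets the device) and execution then continues, whereas the CHECK macro expands to an if/else that returns -1 from the enclosing function, so it can only appear inside functions returning int. A minimal usage sketch follows; the function allocate_device_buffer is hypothetical and not part of the repository:

// usage sketch only; allocate_device_buffer is a hypothetical example function
#include <cuda_runtime.h>
#include "common.hpp"

static int allocate_device_buffer(void** dev_ptr, size_t bytes)
{
	// checkCudaErrors logs the CUDA error (and resets the device) on failure,
	// then execution continues on the next line
	checkCudaErrors(cudaMalloc(dev_ptr, bytes));

	// CHECK returns -1 from allocate_device_buffer itself when the condition is false
	CHECK(*dev_ptr != nullptr);

	return 0;
}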

mnist.cpp:

#include <iostream>
#include <string>
#include <tuple>
#include <vector>

#include <cuda_runtime_api.h>
#include <NvInfer.h>
#include <NvCaffeParser.h>
#include <opencv2/opencv.hpp>

#include "common.hpp"

// reference: TensorRT-2.1.2/samples/sampleMNIST/sampleMNIST.cpp

// input width, input height, output size, input blob name, output blob name
typedef std::tuple<int, int, int, std::string, std::string> DATA_INFO;

static int caffeToGIEModel(const std::string& deployFile,    // name for caffe prototxt
	const std::string& modelFile,                         // name for model
	const std::vector<std::string>& outputs,              // network outputs
	unsigned int maxBatchSize,                            // batch size - NB must be at least as large as the batch we want to run with
	nvinfer1::IHostMemory*& gieModelStream,               // output buffer for the GIE model
	Logger logger)
{
	// create the builder
	nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(logger);

	// parse the caffe model to populate the network, then set the outputs
	nvinfer1::INetworkDefinition* network = builder->createNetwork();
	nvcaffeparser1::ICaffeParser* parser = nvcaffeparser1::createCaffeParser();
	const nvcaffeparser1::IBlobNameToTensor* blobNameToTensor = parser->parse(deployFile.c_str(), modelFile.c_str(), *network, nvinfer1::DataType::kFLOAT);

	// specify which tensors are outputs
	for (auto& s : outputs)
		network->markOutput(*blobNameToTensor->find(s.c_str()));

	// Build the engine
	builder->setMaxBatchSize(maxBatchSize);
	builder->setMaxWorkspaceSize(1 << 20);
	nvinfer1::ICudaEngine* engine = builder->buildCudaEngine(*network);
	CHECK(engine != nullptr);

	// we don't need the network any more, and we can destroy the parser
	network->destroy();
	parser->destroy();

	// serialize the engine, then close everything down
	gieModelStream = engine->serialize();
	engine->destroy();
	builder->destroy();
	nvcaffeparser1::shutdownProtobufLibrary(); // Note

	return 0;
}

static int doInference(nvinfer1::IExecutionContext& context, const float* input, float* output, int batchSize, const DATA_INFO& info)
{
	const nvinfer1::ICudaEngine& engine = context.getEngine();
	// input and output buffer pointers that we pass to the engine - the engine requires exactly IEngine::getNbBindings()
	// of these, but in this case we know that there is exactly one input and one output.
	CHECK(engine.getNbBindings() == 2);
	void* buffers[2];

	// In order to bind the buffers, we need to know the names of the input and output tensors.
	// note that indices are guaranteed to be less than IEngine::getNbBindings()
	int inputIndex = engine.getBindingIndex(std::get<3>(info).c_str()), outputIndex = engine.getBindingIndex(std::get<4>(info).c_str());

	// create GPU buffers and a stream
	checkCudaErrors(cudaMalloc(&buffers[inputIndex], batchSize * std::get<1>(info) * std::get<0>(info) * sizeof(float)));
	checkCudaErrors(cudaMalloc(&buffers[outputIndex], batchSize * std::get<2>(info) * sizeof(float)));

	cudaStream_t stream;
	checkCudaErrors(cudaStreamCreate(&stream));

	// DMA the input to the GPU, execute the batch asynchronously, and DMA it back:
	checkCudaErrors(cudaMemcpyAsync(buffers[inputIndex], input, batchSize * std::get<1>(info) * std::get<0>(info) * sizeof(float), cudaMemcpyHostToDevice, stream));
	context.enqueue(batchSize, buffers, stream, nullptr);
	checkCudaErrors(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * std::get<2>(info) * sizeof(float), cudaMemcpyDeviceToHost, stream));
	cudaStreamSynchronize(stream);

	// release the stream and the buffers
	cudaStreamDestroy(stream);
	checkCudaErrors(cudaFree(buffers[inputIndex]));
	checkCudaErrors(cudaFree(buffers[outputIndex]));

	return 0;
}

int test_mnist()
{
	// stuff we know about the network and the caffe input/output blobs
	const DATA_INFO info(28, 28, 10, "data", "prob");
	const std::string deploy_file {"models/mnist.prototxt"};
	const std::string model_file {"models/mnist.caffemodel"};
	const std::string mean_file {"models/mnist_mean.binaryproto"};
	const std::vector<std::string> output_blobs_name{std::get<4>(info)};
	Logger logger; // multiple instances of IRuntime and/or IBuilder must all use the same logger

	// create a GIE model from the caffe model and serialize it to a stream
	nvinfer1::IHostMemory* gieModelStream{ nullptr };
	caffeToGIEModel(deploy_file, model_file, output_blobs_name, 1, gieModelStream, logger);

	// parse the mean file and subtract it from the image
	nvcaffeparser1::ICaffeParser* parser = nvcaffeparser1::createCaffeParser();
	nvcaffeparser1::IBinaryProtoBlob* meanBlob = parser->parseBinaryProto(mean_file.c_str());
	parser->destroy();

	// deserialize the engine
	nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(logger);
	nvinfer1::ICudaEngine* engine = runtime->deserializeCudaEngine(gieModelStream->data(), gieModelStream->size(), nullptr);
	nvinfer1::IExecutionContext* context = engine->createExecutionContext();

	const float* meanData = reinterpret_cast<const float*>(meanBlob->getData());

	const std::string image_path{ "images/digit/" };
	for (int i = 0; i < 10; ++i) {
		const std::string image_name = image_path + std::to_string(i) + ".png";
		cv::Mat mat = cv::imread(image_name, 0);
		if (!mat.data) {
			fprintf(stderr, "read image fail: %s\n", image_name.c_str());
			return -1;
		}

		cv::resize(mat, mat, cv::Size(std::get<0>(info), std::get<1>(info)));
		mat.convertTo(mat, CV_32FC1);

		float data[std::get<1>(info) * std::get<0>(info)];
		const float* p = (float*)mat.data;
		for (int j = 0; j < std::get<1>(info) * std::get<0>(info); ++j) {
			data[j] = p[j] - meanData[j];
		}

		// run inference
		float prob[std::get<2>(info)];
		doInference(*context, data, prob, 1, info);

		float val{ -1.f };
		int idx{ -1 };
		for (int t = 0; t < std::get<2>(info); ++t) {
			if (val < prob[t]) {
				val = prob[t];
				idx = t;
			}
		}

		fprintf(stdout, "expected value: %d, actual value: %d, probability: %f\n", i, idx, val);
	}

	meanBlob->destroy();
	if (gieModelStream) gieModelStream->destroy();

	// destroy the engine
	context->destroy();
	engine->destroy();
	runtime->destroy();

	return 0;
}
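mnist.cpp only defines test_mnist(); the program entry point is not reproduced in this post. A minimal main.cpp sketch that could drive it is shown below; this is an assumption for illustration, and the actual driver in the CUDA_Test repository may differ:

// main.cpp - minimal driver sketch (assumed, not the repository's actual entry point)
#include <cstdio>

int test_mnist(); // defined in mnist.cpp

int main()
{
	if (test_mnist() != 0) {
		fprintf(stderr, "run test_mnist fail\n");
		return -1;
	}

	fprintf(stdout, "run test_mnist success\n");
	return 0;
}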

The test images are as follows:

The execution results are as follows:

The steps to build the test code are as follows (ReadMe.txt):

Steps to build the test code in TensorRT_Test with CMake on Linux:
1. Change into CUDA_Test/prj/linux_tensorrt_cmake and run the following commands in order:
   $ mkdir build
   $ cd build
   $ cmake ..
   $ make                                  (generates the TensorRT_Test executable)
   $ ln -s ../../../test_data/models ./    (symlink the models directory into the build directory)
   $ ln -s ../../../test_data/images ./    (symlink the images directory into the build directory)
   $ ./TensorRT_Test
2. For code that reads images through OpenCV, first change the image paths in the corresponding files to a path format supported by Linux.

GitHub: https://github.com/fengbingchun/CUDA_Test
