For an introduction to TensorRT, see: http://blog.csdn.net/fengbingchun/article/details/78469551

The following test code, adapted from the sampleMNISTAPI.cpp file in TensorRT 2.1.2, recognizes the handwritten digits 0-9. The contents of each file are as follows:

common.hpp:

#ifndef FBC_TENSORRT_TEST_COMMON_HPP_
#define FBC_TENSORRT_TEST_COMMON_HPP_

#include <cstdio>
#include <iostream>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <NvInfer.h>

template< typename T >
static inline int check_Cuda(T result, const char * const func, const char * const file, const int line)
{
    if (result) {
        fprintf(stderr, "Error CUDA: at %s: %d, error code=%d, func: %s\n", file, line, static_cast<unsigned int>(result), func);
        cudaDeviceReset(); // make sure we call CUDA Device Reset before exiting
        return -1;
    }
    return 0;
}

template< typename T >
static inline int check(T result, const char * const func, const char * const file, const int line)
{
    if (result) {
        fprintf(stderr, "Error: at %s: %d, error code=%d, func: %s\n", file, line, static_cast<unsigned int>(result), func);
        return -1;
    }
    return 0;
}

#define checkCudaErrors(val) check_Cuda((val), __FUNCTION__, __FILE__, __LINE__)
#define checkErrors(val)     check((val), __FUNCTION__, __FILE__, __LINE__)

#define CHECK(x) { \
    if (x) {} \
    else { fprintf(stderr, "Check Failed: %s, file: %s, line: %d\n", #x, __FILE__, __LINE__); return -1; } \
}

// Logger for GIE info/warning/errors
class Logger : public nvinfer1::ILogger
{
    void log(Severity severity, const char* msg) override
    {
        // suppress info-level messages
        if (severity != Severity::kINFO)
            std::cout << msg << std::endl;
    }
};

#endif // FBC_TENSORRT_TEST_COMMON_HPP_
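Note that the two macros behave differently: checkCudaErrors reports the failure and resets the device but execution continues, while CHECK returns -1 from the enclosing function. A minimal, hypothetical usage sketch (not one of the post's files):

// demo of the common.hpp helpers (hypothetical, for illustration only)
#include <cuda_runtime.h>
#include "common.hpp"

static int demo()
{
    void* p{ nullptr };
    checkCudaErrors(cudaMalloc(&p, 1024)); // on error: prints file/line/function, then calls cudaDeviceReset()
    CHECK(p != nullptr);                   // on failure: prints "Check Failed: ..." and returns -1 from demo()
    checkCudaErrors(cudaFree(p));
    return 0;
}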

mnist_api.cpp:

#include <string>
#include <fstream>
#include <iostream>
#include <map>
#include <tuple>
#include <vector>
#include <cassert>

#include <NvInfer.h>
#include <NvCaffeParser.h>
#include <cuda_runtime_api.h>
#include <opencv2/opencv.hpp>

#include "common.hpp"

// reference: TensorRT-2.1.2/samples/sampleMNIST/sampleMNISTAPI.cpp

// input width, input height, output size, input blob name, output blob name, weight file, mean file
typedef std::tuple<int, int, int, std::string, std::string, std::string, std::string> DATA_INFO;

// Our weight files are in a very simple space-delimited format:
// [name] [type] [size] <data x size in hex>
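// An illustrative (hypothetical) layout: a decimal record count on the first line,
// then one record per tensor; for this network the file holds 8 records, e.g.
//   8
//   conv1bias 0 20 <hex0> <hex1> ... <hex19>   (type 0 = kFLOAT, size 20)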
static std::map<std::string, nvinfer1::Weights> loadWeights(const std::string& file)
{
    std::map<std::string, nvinfer1::Weights> weightMap;
    std::ifstream input(file);
    if (!input.is_open()) {
        fprintf(stderr, "Unable to load weight file: %s\n", file.c_str());
        return weightMap;
    }

    int32_t count;
    input >> count;
    if (count <= 0) {
        fprintf(stderr, "Invalid weight map file: %d\n", count);
        return weightMap;
    }

    while (count--) {
        nvinfer1::Weights wt{nvinfer1::DataType::kFLOAT, nullptr, 0};
        uint32_t type, size;
        std::string name;
        input >> name >> std::dec >> type >> size;
        wt.type = static_cast<nvinfer1::DataType>(type);

        if (wt.type == nvinfer1::DataType::kFLOAT) {
            uint32_t* val = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t) * size));
            for (uint32_t x = 0, y = size; x < y; ++x) {
                input >> std::hex >> val[x];
            }
            wt.values = val;
        } else if (wt.type == nvinfer1::DataType::kHALF) {
            uint16_t* val = reinterpret_cast<uint16_t*>(malloc(sizeof(uint16_t) * size));
            for (uint32_t x = 0, y = size; x < y; ++x) {
                input >> std::hex >> val[x];
            }
            wt.values = val;
        }

        wt.count = size;
        weightMap[name] = wt;
    }

    return weightMap;
}

// Create the engine using only the API and not any parser.
static nvinfer1::ICudaEngine* createMNISTEngine(unsigned int maxBatchSize, nvinfer1::IBuilder* builder, nvinfer1::DataType dt, const DATA_INFO& info)
{
    nvinfer1::INetworkDefinition* network = builder->createNetwork();

    // Create input of shape { 1, 1, 28, 28 } with name referenced by INPUT_BLOB_NAME
    auto data = network->addInput(std::get<3>(info).c_str(), dt, nvinfer1::DimsCHW{1, std::get<1>(info), std::get<0>(info)});
    assert(data != nullptr);

    // Create a scale layer with default power/shift and specified scale parameter;
    // the scale layer computes (input * scale + shift) ^ power element-wise.
    float scale_param = 0.0125f;
    nvinfer1::Weights power{nvinfer1::DataType::kFLOAT, nullptr, 0};
    nvinfer1::Weights shift{nvinfer1::DataType::kFLOAT, nullptr, 0};
    nvinfer1::Weights scale{nvinfer1::DataType::kFLOAT, &scale_param, 1};
    auto scale_1 = network->addScale(*data, nvinfer1::ScaleMode::kUNIFORM, shift, scale, power);
    assert(scale_1 != nullptr);

    // Add a convolution layer with 20 outputs and a 5x5 filter.
    std::map<std::string, nvinfer1::Weights> weightMap = loadWeights(std::get<5>(info));
    auto conv1 = network->addConvolution(*scale_1->getOutput(0), 20, nvinfer1::DimsHW{5, 5}, weightMap["conv1filter"], weightMap["conv1bias"]);
    assert(conv1 != nullptr);
    conv1->setStride(nvinfer1::DimsHW{1, 1});

    // Add a max pooling layer with stride of 2x2 and kernel size of 2x2.
    auto pool1 = network->addPooling(*conv1->getOutput(0), nvinfer1::PoolingType::kMAX, nvinfer1::DimsHW{2, 2});
    assert(pool1 != nullptr);
    pool1->setStride(nvinfer1::DimsHW{2, 2});

    // Add a second convolution layer with 50 outputs and a 5x5 filter.
    auto conv2 = network->addConvolution(*pool1->getOutput(0), 50, nvinfer1::DimsHW{5, 5}, weightMap["conv2filter"], weightMap["conv2bias"]);
    assert(conv2 != nullptr);
    conv2->setStride(nvinfer1::DimsHW{1, 1});

    // Add a second max pooling layer with stride of 2x2 and kernel size of 2x2.
    auto pool2 = network->addPooling(*conv2->getOutput(0), nvinfer1::PoolingType::kMAX, nvinfer1::DimsHW{2, 2});
    assert(pool2 != nullptr);
    pool2->setStride(nvinfer1::DimsHW{2, 2});

    // Add a fully connected layer with 500 outputs.
    auto ip1 = network->addFullyConnected(*pool2->getOutput(0), 500, weightMap["ip1filter"], weightMap["ip1bias"]);
    assert(ip1 != nullptr);

    // Add an activation layer using the ReLU algorithm.
    auto relu1 = network->addActivation(*ip1->getOutput(0), nvinfer1::ActivationType::kRELU);
    assert(relu1 != nullptr);

    // Add a second fully connected layer with one output per digit class.
    auto ip2 = network->addFullyConnected(*relu1->getOutput(0), std::get<2>(info), weightMap["ip2filter"], weightMap["ip2bias"]);
    assert(ip2 != nullptr);

    // Add a softmax layer to determine the probability.
    auto prob = network->addSoftMax(*ip2->getOutput(0));
    assert(prob != nullptr);
    prob->getOutput(0)->setName(std::get<4>(info).c_str());
    network->markOutput(*prob->getOutput(0));

    // Build the engine
    builder->setMaxBatchSize(maxBatchSize);
    builder->setMaxWorkspaceSize(1 << 20);
    auto engine = builder->buildCudaEngine(*network);

    // we don't need the network any more
    network->destroy();

    // Once we have built the cuda engine, we can release all of our held memory.
    for (auto& mem : weightMap) {
        free((void*)(mem.second.values));
    }

    return engine;
}

// maxBatchSize: batch size - NB must be at least as large as the batch we want to run with
static int APIToModel(unsigned int maxBatchSize, nvinfer1::IHostMemory** modelStream, Logger logger, const DATA_INFO& info)
{
    // create the builder
    nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(logger);

    // create the model to populate the network, then set the outputs and create an engine
    nvinfer1::ICudaEngine* engine = createMNISTEngine(maxBatchSize, builder, nvinfer1::DataType::kFLOAT, info);
    CHECK(engine != nullptr);

    // serialize the engine, then close everything down
    (*modelStream) = engine->serialize();
    engine->destroy();
    builder->destroy();

    return 0;
}

static int doInference(nvinfer1::IExecutionContext& context, float* input, float* output, int batchSize, const DATA_INFO& info)
{
    const nvinfer1::ICudaEngine& engine = context.getEngine();
    // input and output buffer pointers that we pass to the engine - the engine requires exactly IEngine::getNbBindings()
    // of these, but in this case we know that there is exactly one input and one output.
    CHECK(engine.getNbBindings() == 2);
    void* buffers[2];

    // In order to bind the buffers, we need to know the names of the input and output tensors.
    // note that indices are guaranteed to be less than IEngine::getNbBindings()
    int inputIndex = engine.getBindingIndex(std::get<3>(info).c_str());
    int outputIndex = engine.getBindingIndex(std::get<4>(info).c_str());

    // create GPU buffers and a stream
    checkCudaErrors(cudaMalloc(&buffers[inputIndex], batchSize * std::get<1>(info) * std::get<0>(info) * sizeof(float)));
    checkCudaErrors(cudaMalloc(&buffers[outputIndex], batchSize * std::get<2>(info) * sizeof(float)));

    cudaStream_t stream;
    checkCudaErrors(cudaStreamCreate(&stream));

    // DMA the input to the GPU, execute the batch asynchronously, and DMA it back:
    checkCudaErrors(cudaMemcpyAsync(buffers[inputIndex], input, batchSize * std::get<1>(info) * std::get<0>(info) * sizeof(float), cudaMemcpyHostToDevice, stream));
    context.enqueue(batchSize, buffers, stream, nullptr);
    checkCudaErrors(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * std::get<2>(info) * sizeof(float), cudaMemcpyDeviceToHost, stream));
    cudaStreamSynchronize(stream);

    // release the stream and the buffers
    cudaStreamDestroy(stream);
    checkCudaErrors(cudaFree(buffers[inputIndex]));
    checkCudaErrors(cudaFree(buffers[outputIndex]));

    return 0;
}

int test_mnist_api()
{
    Logger logger; // multiple instances of IRuntime and/or IBuilder must all use the same logger

    // stuff we know about the network and the caffe input/output blobs
    const DATA_INFO info(28, 28, 10, "data", "prob", "models/mnistapi.wts", "models/mnist_mean.binaryproto");

    // create a model using the API directly and serialize it to a stream
    nvinfer1::IHostMemory* modelStream{ nullptr };
    APIToModel(1, &modelStream, logger, info);

    // parse the mean file produced by caffe and subtract it from the image
    nvcaffeparser1::ICaffeParser* parser = nvcaffeparser1::createCaffeParser();
    nvcaffeparser1::IBinaryProtoBlob* meanBlob = parser->parseBinaryProto(std::get<6>(info).c_str());
    parser->destroy();
    const float* meanData = reinterpret_cast<const float*>(meanBlob->getData());

    nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(logger);
    nvinfer1::ICudaEngine* engine = runtime->deserializeCudaEngine(modelStream->data(), modelStream->size(), nullptr);
    nvinfer1::IExecutionContext* context = engine->createExecutionContext();

    const std::string image_path{ "images/digit/" };
    for (int i = 0; i < 10; ++i) {
        const std::string image_name = image_path + std::to_string(i) + ".png";
        cv::Mat mat = cv::imread(image_name, 0); // read as a single-channel gray image
        if (!mat.data) {
            fprintf(stderr, "read image fail: %s\n", image_name.c_str());
            return -1;
        }

        cv::resize(mat, mat, cv::Size(std::get<0>(info), std::get<1>(info)));
        mat.convertTo(mat, CV_32FC1);

        // subtract the caffe mean image from the input
        std::vector<float> data(std::get<1>(info) * std::get<0>(info));
        const float* p = (float*)mat.data;
        for (int j = 0; j < std::get<1>(info) * std::get<0>(info); ++j) {
            data[j] = p[j] - meanData[j];
        }

        // run inference
        std::vector<float> prob(std::get<2>(info));
        doInference(*context, data.data(), prob.data(), 1, info);

        // pick the class with the highest probability
        float val{ -1.f };
        int idx{ -1 };
        for (int t = 0; t < std::get<2>(info); ++t) {
            if (val < prob[t]) {
                val = prob[t];
                idx = t;
            }
        }

        fprintf(stdout, "expected value: %d, actual value: %d, probability: %f\n", i, idx, val);
    }

    meanBlob->destroy();
    if (modelStream) modelStream->destroy();

    // destroy the engine
    context->destroy();
    engine->destroy();
    runtime->destroy();

    return 0;
}
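The entry point that calls test_mnist_api() is not listed in the post; a minimal hypothetical driver could look like this:

// main.cpp: hypothetical entry point for the TensorRT_Test executable
#include <cstdio>

int test_mnist_api(); // defined in mnist_api.cpp

int main()
{
    int ret = test_mnist_api();
    if (ret != 0) fprintf(stderr, "test_mnist_api failed: %d\n", ret);
    return ret;
}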

The test images (images/digit/0.png through 9.png) are shown below:

The execution results are as follows (consistent with the results in http://blog.csdn.net/fengbingchun/article/details/78552908):

The build steps for the test code are as follows (ReadMe.txt):

Steps to build the test code in TensorRT_Test with CMake on Linux (a hypothetical CMakeLists.txt sketch follows these steps):
1. Change to CUDA_Test/prj/linux_tensorrt_cmake in a terminal, then run the following commands:
$ mkdir build
$ cd build
$ cmake ..
$ make                                   (generates the TensorRT_Test executable)
$ ln -s ../../../test_data/models ./     (symlink the models directory into the build directory)
$ ln -s ../../../test_data/images ./     (symlink the images directory into the build directory)
$ ./TensorRT_Test
2. For any operation that reads images through OpenCV, first change the image paths in the corresponding files to a path format supported by Linux.
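The repository ships its own CMake configuration under CUDA_Test/prj/linux_tensorrt_cmake. Purely as an illustration of what such a build file involves, a minimal sketch might look like the following; TENSORRT_ROOT and the library names are assumptions, not taken from the repository:

# hypothetical minimal CMakeLists.txt sketch, not the repository's actual file
cmake_minimum_required(VERSION 3.5)
project(TensorRT_Test)

find_package(CUDA REQUIRED)    # legacy FindCUDA module, matching the CUDA 8/TensorRT 2.x era
find_package(OpenCV REQUIRED)

set(TENSORRT_ROOT "/usr/local/TensorRT" CACHE PATH "assumed TensorRT install prefix")
include_directories(${TENSORRT_ROOT}/include ${CUDA_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS})
link_directories(${TENSORRT_ROOT}/lib)

add_executable(TensorRT_Test mnist_api.cpp main.cpp)
# nvcaffe_parser was the caffe parser library in TensorRT 2.x (renamed nvparsers later)
target_link_libraries(TensorRT_Test nvinfer nvcaffe_parser ${CUDA_LIBRARIES} ${OpenCV_LIBS})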

GitHub: https://github.com/fengbingchun/CUDA_Test
