Table of Contents

  • 1 Runtime environment
  • 2 Introduction to demo3
  • 3 Prerequisites
  • 4 Code
    • 4.1 The main function
    • 4.2 The multi-URI input functions
    • 4.3 The metadata reading and printing function

1 Runtime environment

The program runs inside the Docker image deepstream:5.0.1-20.09-triton.

2 Introduction to demo3

demo3 builds on deepstream-test1 and adds several new features:

2.1 Uses multiple sources in one pipeline (plays several video streams simultaneously)

2.2 Uses uridecodebin to accept any type of input (e.g. RTSP or file), any container format supported by GStreamer, and any codec

2.3 Configures Gst-nvstreammux to form a batch of frames and run inference on the whole batch, improving resource utilization

2.4 Extracts stream metadata, which contains useful information about the frames in the batched buffer

This post mainly walks through the important parts of the flow in order. The source code is available from NVIDIA directly; I recommend diffing the different demos against each other to see what changed — I used VS Code for the comparison.

3 Prerequisites

3.1 Core GStreamer concepts
Focus on understanding these four concepts: element, bin, pad, and bus.
3.2 The GStreamer dynamic-pipeline tutorial
This one is a must-read: once that basic tutorial makes sense, half of demo3 is already understood (see the sketch after this list).
3.3 A small note on the queue element
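
Since 3.2 carries most of the weight, here is a minimal dynamic-pipeline sketch in the spirit of the GStreamer basic tutorials. It is plain GStreamer with no DeepStream elements, and the element choices (uridecodebin, videoconvert, autovideosink) are my own for illustration. It exercises all four concepts from 3.1: elements are created from factories, the pipeline is a bin that contains them, the decoder's src pad is linked dynamically via the pad-added signal, and the bus delivers the error/EOS message the program waits on.

/* Minimal dynamic-pipeline sketch; build (assumed):
 * gcc demo.c -o demo $(pkg-config --cflags --libs gstreamer-1.0) */
#include <gst/gst.h>

/* pad-added callback: uridecodebin creates its src pad only after it has
 * inspected the stream, so we link source -> convert at runtime. */
static void
on_pad_added (GstElement * src, GstPad * new_pad, gpointer data)
{
  GstPad *sink_pad = gst_element_get_static_pad ((GstElement *) data, "sink");
  GstCaps *caps = gst_pad_get_current_caps (new_pad);
  const gchar *name =
      gst_structure_get_name (gst_caps_get_structure (caps, 0));

  /* uridecodebin also exposes audio pads; only link the video one */
  if (g_str_has_prefix (name, "video/") && !gst_pad_is_linked (sink_pad))
    gst_pad_link (new_pad, sink_pad);

  gst_caps_unref (caps);
  gst_object_unref (sink_pad);
}

int
main (int argc, char *argv[])
{
  gst_init (&argc, &argv);
  if (argc != 2) {
    g_printerr ("Usage: %s <uri>\n", argv[0]);
    return -1;
  }

  /* Elements come from factories; the pipeline itself is a bin */
  GstElement *pipeline = gst_pipeline_new ("demo-pipeline");
  GstElement *source = gst_element_factory_make ("uridecodebin", "source");
  GstElement *convert = gst_element_factory_make ("videoconvert", "convert");
  GstElement *sink = gst_element_factory_make ("autovideosink", "sink");

  g_object_set (source, "uri", argv[1], NULL);
  gst_bin_add_many (GST_BIN (pipeline), source, convert, sink, NULL);

  /* Static pads exist up front, so these two can be linked immediately */
  gst_element_link (convert, sink);

  /* The decoder src pad appears only once the stream type is known, so
   * source -> convert is linked later, inside the pad-added callback */
  g_signal_connect (source, "pad-added", G_CALLBACK (on_pad_added), convert);

  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* The bus carries messages from the elements back to the application */
  GstBus *bus = gst_element_get_bus (pipeline);
  GstMessage *msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
      GST_MESSAGE_ERROR | GST_MESSAGE_EOS);

  if (msg)
    gst_message_unref (msg);
  gst_object_unref (bus);
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (pipeline);
  return 0;
}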

4 Code

4.1 The main function

int
main (int argc, char *argv[])
{
  /* Declare the elements we will use up front */
  GMainLoop *loop = NULL;
  GstElement *pipeline = NULL, *streammux = NULL, *sink = NULL, *pgie = NULL,
      *queue1, *queue2, *queue3, *queue4, *queue5, *nvvidconv = NULL,
      *nvosd = NULL, *tiler = NULL;
  /* On the Jetson (Tegra) platform a few extra steps are needed; on x86 you
   * can simply ignore these blocks. There are several similar #ifdef blocks
   * below, so they are not called out individually. */
#ifdef PLATFORM_TEGRA
  GstElement *transform = NULL;
#endif
  GstBus *bus = NULL;
  guint bus_watch_id;
  GstPad *tiler_src_pad = NULL;
  guint i, num_sources;
  guint tiler_rows, tiler_columns;
  guint pgie_batch_size;

  /* Check input arguments */
  if (argc < 2) {
    g_printerr ("Usage: %s <uri1> [uri2] ... [uriN] \n", argv[0]);
    return -1;
  }
  /* Number of input URIs */
  num_sources = argc - 1;

  /* Standard GStreamer initialization */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* Create gstreamer elements */
  /* Create Pipeline element that will form a connection of other elements */
  pipeline = gst_pipeline_new ("dstest3-pipeline");

  /* Create nvstreammux instance to form batches from one or more sources. */
  streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");

  if (!pipeline || !streammux) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }
  gst_bin_add (GST_BIN (pipeline), streammux);

  /* For each input URI, create a source_bin, then request a pad on
   * streammux and link the two together */
  for (i = 0; i < num_sources; i++) {
    GstPad *sinkpad, *srcpad;
    gchar pad_name[16] = { };
    /* create_source_bin is our own helper; it takes the index and the URI */
    GstElement *source_bin = create_source_bin (i, argv[i + 1]);

    if (!source_bin) {
      g_printerr ("Failed to create source bin. Exiting.\n");
      return -1;
    }

    /* Add the bin to the pipeline */
    gst_bin_add (GST_BIN (pipeline), source_bin);

    /* Request a sink pad on streammux to receive this source's data */
    g_snprintf (pad_name, 15, "sink_%u", i);
    sinkpad = gst_element_get_request_pad (streammux, pad_name);
    if (!sinkpad) {
      g_printerr ("Streammux request sink pad failed. Exiting.\n");
      return -1;
    }

    /* Get the src pad of the source bin, which sends the data out */
    srcpad = gst_element_get_static_pad (source_bin, "src");
    if (!srcpad) {
      g_printerr ("Failed to get src pad of source bin. Exiting.\n");
      return -1;
    }

    /* Link the two pads together */
    if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
      g_printerr ("Failed to link source bin to stream muxer. Exiting.\n");
      return -1;
    }

    /* Release the pad references */
    gst_object_unref (srcpad);
    gst_object_unref (sinkpad);
  }
  /* At this point a source_bin exists for every URI, each linked to
   * streammux */

  /* Use nvinfer to infer on batched frame. */
  pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");

  /* Add queue elements between every two elements. A queue is an in-memory
   * buffer; with multiple URIs the data volume is large, so these act as
   * extra buffering. See
   * https://blog.csdn.net/sakulafly/article/details/21318313 for a detailed
   * explanation. */
  queue1 = gst_element_factory_make ("queue", "queue1");
  queue2 = gst_element_factory_make ("queue", "queue2");
  queue3 = gst_element_factory_make ("queue", "queue3");
  queue4 = gst_element_factory_make ("queue", "queue4");
  queue5 = gst_element_factory_make ("queue", "queue5");

  /* Use nvtiler to composite the batched frames into a 2D tiled array
   * (think of a video wall) based on the source of the frames. */
  tiler = gst_element_factory_make ("nvmultistreamtiler", "nvtiler");

  /* Use convertor to convert from NV12 to RGBA as required by nvosd */
  nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");

  /* Create OSD to draw on the converted RGBA buffer */
  nvosd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay");

  /* Finally render the osd output */
#ifdef PLATFORM_TEGRA
  transform = gst_element_factory_make ("nvegltransform", "nvegl-transform");
#endif
  sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");

  if (!pgie || !tiler || !nvvidconv || !nvosd || !sink) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

#ifdef PLATFORM_TEGRA
  if (!transform) {
    g_printerr ("One tegra element could not be created. Exiting.\n");
    return -1;
  }
#endif

  /* Set the properties of the various plugins */
  g_object_set (G_OBJECT (streammux), "batch-size", num_sources, NULL);

  g_object_set (G_OBJECT (streammux), "width", MUXER_OUTPUT_WIDTH, "height",
      MUXER_OUTPUT_HEIGHT,
      "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);

  /* Configure the nvinfer element using the nvinfer config file. */
  g_object_set (G_OBJECT (pgie),
      "config-file-path", "dstest3_pgie_config.txt", NULL);

  /* Override the batch-size set in the config file with the number of
   * sources, i.e. the number of input URIs */
  g_object_get (G_OBJECT (pgie), "batch-size", &pgie_batch_size, NULL);
  if (pgie_batch_size != num_sources) {
    g_printerr
        ("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",
        pgie_batch_size, num_sources);
    g_object_set (G_OBJECT (pgie), "batch-size", num_sources, NULL);
  }

  /* Work out how many rows and columns the tiled layout needs */
  tiler_rows = (guint) sqrt (num_sources);
  tiler_columns = (guint) ceil (1.0 * num_sources / tiler_rows);
  /* we set the tiler properties here */
  g_object_set (G_OBJECT (tiler), "rows", tiler_rows, "columns", tiler_columns,
      "width", TILED_OUTPUT_WIDTH, "height", TILED_OUTPUT_HEIGHT, NULL);

  g_object_set (G_OBJECT (nvosd), "process-mode", OSD_PROCESS_MODE,
      "display-text", OSD_DISPLAY_TEXT, NULL);

  g_object_set (G_OBJECT (sink), "qos", 0, NULL);

  /* we add a message handler: bus_call is invoked for each bus message */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* Set up the pipeline */
  /* we add all elements into the pipeline */
#ifdef PLATFORM_TEGRA
  gst_bin_add_many (GST_BIN (pipeline), queue1, pgie, queue2, tiler, queue3,
      nvvidconv, queue4, nvosd, queue5, transform, sink, NULL);
  /* we link the elements together
   * nvstreammux -> nvinfer -> nvtiler -> nvvidconv -> nvosd -> video-renderer */
  if (!gst_element_link_many (streammux, queue1, pgie, queue2, tiler, queue3,
          nvvidconv, queue4, nvosd, queue5, transform, sink, NULL)) {
    g_printerr ("Elements could not be linked. Exiting.\n");
    return -1;
  }
#else
  /* Add the remaining elements to the pipeline; the sources and streammux
   * are already linked, so only the downstream part is linked here */
  gst_bin_add_many (GST_BIN (pipeline), queue1, pgie, queue2, tiler, queue3,
      nvvidconv, queue4, nvosd, queue5, sink, NULL);
  /* we link the elements together
   * nvstreammux -> nvinfer -> nvtiler -> nvvidconv -> nvosd -> video-renderer */
  if (!gst_element_link_many (streammux, queue1, pgie, queue2, tiler, queue3,
          nvvidconv, queue4, nvosd, queue5, sink, NULL)) {
    g_printerr ("Elements could not be linked. Exiting.\n");
    return -1;
  }
#endif

  /* Lets add probe to get informed of the meta data generated, we add probe to
   * the sink pad of the osd element, since by that time, the buffer would have
   * had got all the metadata. (Note: this version actually attaches the probe
   * to the src pad of pgie, which also carries the full inference metadata;
   * tiler_src_pad_buffer_probe reads the metadata and prints it.) */
  tiler_src_pad = gst_element_get_static_pad (pgie, "src");
  if (!tiler_src_pad)
    g_print ("Unable to get src pad\n");
  else
    gst_pad_add_probe (tiler_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
        tiler_src_pad_buffer_probe, NULL, NULL);
  gst_object_unref (tiler_src_pad);

  /* Set the pipeline to "playing" state */
  g_print ("Now playing:");
  for (i = 0; i < num_sources; i++) {
    g_print (" %s,", argv[i + 1]);
  }
  g_print ("\n");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Wait till pipeline encounters an error or EOS */
  g_print ("Running...\n");
  g_main_loop_run (loop);

  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  return 0;
}
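
A quick worked example of the tiler arithmetic above: the (guint) cast truncates, so with 3 sources tiler_rows = (guint) sqrt (3) = 1 and tiler_columns = ceil (3 / 1.0) = 3, giving a 1×3 strip; with 4 sources you get a 2×2 grid, and with 5 sources a 2×3 grid with one tile left blank. To run the app, pass one URI per source; the binary name below comes from the official Makefile, and the clip path assumes the sample streams shipped inside the DeepStream 5.0 container:

./deepstream-test3-app file:///opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.mp4 file:///opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_720p.mp4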

4.2 The multi-URI input functions

static void
cb_newpad (GstElement * decodebin, GstPad * decoder_src_pad, gpointer data)
{
  g_print ("In cb_newpad\n");
  GstCaps *caps = gst_pad_get_current_caps (decoder_src_pad);
  const GstStructure *str = gst_caps_get_structure (caps, 0);
  const gchar *name = gst_structure_get_name (str);
  GstElement *source_bin = (GstElement *) data;
  GstCapsFeatures *features = gst_caps_get_features (caps, 0);

  /* Need to check if the pad created by the decodebin is for video and not
   * audio. */
  if (!strncmp (name, "video", 5)) {
    /* Link the decodebin pad only if decodebin has picked nvidia
     * decoder plugin nvdec_*. We do this by checking if the pad caps contain
     * NVMM memory features. */
    if (gst_caps_features_contains (features, GST_CAPS_FEATURES_NVMM)) {
      /* Get the source bin ghost pad */
      GstPad *bin_ghost_pad = gst_element_get_static_pad (source_bin, "src");
      if (!gst_ghost_pad_set_target (GST_GHOST_PAD (bin_ghost_pad),
              decoder_src_pad)) {
        g_printerr ("Failed to link decoder src pad to source bin ghost pad\n");
      }
      gst_object_unref (bin_ghost_pad);
    } else {
      g_printerr ("Error: Decodebin did not pick nvidia decoder plugin.\n");
    }
  }
}

static void
decodebin_child_added (GstChildProxy * child_proxy, GObject * object,
    gchar * name, gpointer user_data)
{
  g_print ("Decodebin child added: %s\n", name);
  /* Recurse so that nested decodebins also report their children */
  if (g_strrstr (name, "decodebin") == name) {
    g_signal_connect (G_OBJECT (object), "child-added",
        G_CALLBACK (decodebin_child_added), user_data);
  }
}

static GstElement *
create_source_bin (guint index, gchar * uri)
{
  /* This is where the source_bin mentioned earlier actually gets built */
  GstElement *bin = NULL, *uri_decode_bin = NULL;
  /* Build a unique name from the index via g_snprintf, then create the bin */
  gchar bin_name[16] = { };

  g_snprintf (bin_name, 15, "source-bin-%02d", index);
  /* Create a source GstBin to abstract this bin's content from the rest of the
   * pipeline */
  bin = gst_bin_new (bin_name);

  /* Source element for reading from the uri.
   * We will use decodebin and let it figure out the container format of the
   * stream and the codec and plug the appropriate demux and decode plugins. */
  uri_decode_bin = gst_element_factory_make ("uridecodebin", "uri-decode-bin");

  if (!bin || !uri_decode_bin) {
    g_printerr ("One element in source bin could not be created.\n");
    return NULL;
  }

  /* We set the input uri to the source element */
  g_object_set (G_OBJECT (uri_decode_bin), "uri", uri, NULL);

  /* Connect to the "pad-added" signal of the decodebin which generates a
   * callback once a new pad for raw data has been created by the decodebin */
  g_signal_connect (G_OBJECT (uri_decode_bin), "pad-added",
      G_CALLBACK (cb_newpad), bin);
  g_signal_connect (G_OBJECT (uri_decode_bin), "child-added",
      G_CALLBACK (decodebin_child_added), bin);

  /* Add the uri_decode_bin we just created to the source bin */
  gst_bin_add (GST_BIN (bin), uri_decode_bin);

  /* We need to create a ghost pad for the source bin which will act as a proxy
   * for the video decoder src pad. The ghost pad will not have a target right
   * now. Once the decode bin creates the video decoder and generates the
   * cb_newpad callback, we will set the ghost pad target to the video decoder
   * src pad. */
  if (!gst_element_add_pad (bin, gst_ghost_pad_new_no_target ("src",
              GST_PAD_SRC))) {
    g_printerr ("Failed to add ghost pad in source bin\n");
    return NULL;
  }

  return bin;
}
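
A side note on the ghost-pad pattern: a bin has no pads of its own, so the ghost "src" pad is what lets the rest of the pipeline link against this bin before the decoder even exists. Below is a hypothetical stripped-down harness (not part of the official demo; it assumes create_source_bin and its callbacks from above are compiled in, and that the URI decodes through an NVIDIA decoder so that cb_newpad actually sets the ghost pad target) that plays a single source bin into a fakesink:

/* Hypothetical single-source test harness; gst_init and the #includes of
 * the demo are assumed. Stop it with Ctrl+C (no bus handling for brevity). */
int
main (int argc, char *argv[])
{
  gst_init (&argc, &argv);
  if (argc != 2) {
    g_printerr ("Usage: %s <uri>\n", argv[0]);
    return -1;
  }

  GMainLoop *loop = g_main_loop_new (NULL, FALSE);
  GstElement *pipeline = gst_pipeline_new ("source-bin-test");
  GstElement *source_bin = create_source_bin (0, argv[1]);
  GstElement *sink = gst_element_factory_make ("fakesink", "sink");

  gst_bin_add_many (GST_BIN (pipeline), source_bin, sink, NULL);
  /* This link succeeds even though the ghost "src" pad has no target yet;
   * buffers start flowing once cb_newpad points it at the decoder pad. */
  gst_element_link (source_bin, sink);

  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  g_main_loop_run (loop);
  return 0;
}

The link succeeds while the ghost pad is still target-less because an untargeted ghost pad does not restrict caps yet; this is the same reason main () in demo3 can link every source_bin to streammux up front.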

4.3 The metadata reading and printing function

static GstPadProbeReturn
tiler_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
    gpointer u_data)
{
  GstBuffer *buf = (GstBuffer *) info->data;
  /* Note: these counters live outside the frame loop, so with batch-size > 1
   * they accumulate across all frames in the batch rather than per frame. */
  guint num_rects = 0;
  NvDsObjectMeta *obj_meta = NULL;
  guint vehicle_count = 0;
  guint person_count = 0;
  NvDsMetaList *l_frame = NULL;
  NvDsMetaList *l_obj = NULL;
  //NvDsDisplayMeta *display_meta = NULL;

  /* Get the batch-level metadata attached to the GstBuffer */
  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);

  /* Walk the list of frames in the batch */
  for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
    //int offset = 0;
    /* Walk the list of detected objects in this frame */
    for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;
        l_obj = l_obj->next) {
      obj_meta = (NvDsObjectMeta *) (l_obj->data);
      if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE) {
        vehicle_count++;
        num_rects++;
      }
      if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) {
        person_count++;
        num_rects++;
      }
    }
    g_print ("Frame Number = %d Number of objects = %d "
        "Vehicle Count = %d Person Count = %d\n",
        frame_meta->frame_num, num_rects, vehicle_count, person_count);
#if 0
    display_meta = nvds_acquire_display_meta_from_pool (batch_meta);
    NvOSD_TextParams *txt_params = &display_meta->text_params;
    txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN);
    offset = snprintf (txt_params->display_text, MAX_DISPLAY_LEN,
        "Person = %d ", person_count);
    offset = snprintf (txt_params->display_text + offset, MAX_DISPLAY_LEN,
        "Vehicle = %d ", vehicle_count);
    /* Now set the offsets where the string should appear */
    txt_params->x_offset = 10;
    txt_params->y_offset = 12;
    /* Font , font-color and font-size */
    txt_params->font_params.font_name = "Serif";
    txt_params->font_params.font_size = 10;
    txt_params->font_params.font_color.red = 1.0;
    txt_params->font_params.font_color.green = 1.0;
    txt_params->font_params.font_color.blue = 1.0;
    txt_params->font_params.font_color.alpha = 1.0;
    /* Text background color */
    txt_params->set_bg_clr = 1;
    txt_params->text_bg_clr.red = 0.0;
    txt_params->text_bg_clr.green = 0.0;
    txt_params->text_bg_clr.blue = 0.0;
    txt_params->text_bg_clr.alpha = 1.0;
    nvds_add_display_meta_to_frame (frame_meta, display_meta);
#endif
  }
  return GST_PAD_PROBE_OK;
}
