1. QSV Hardware Decoding Configuration on Windows

In libavcodec/codec_list.c, add:

 &ff_h264_qsv_decoder,
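For orientation, codec_list.c is the generated registry of enabled decoders; a minimal sketch of where the entry lands, assuming the list already contains the stock software decoder (the surrounding entries vary per build):

    static const AVCodec * const codec_list[] = {
        &ff_h264_decoder,      /* existing software decoder */
        &ff_h264_qsv_decoder,  /* newly added QSV decoder */
        NULL };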

Add the following to ffmpeg_generate.gni. The first four files extend the existing H.264 source list; the new ffmpeg_c_sources block then pulls in the QSV sources:

     "libavcodec/h264idct.c","libavcodec/h264qpel.c","libavcodec/startcode.c","libavcodec/h264_mp4toannexb_bsf.c",]}ffmpeg_c_sources += ["libavcodec/qsvenc_h264.c","libavcodec/qsvenc.c","libavcodec/qsv.c","libavcodec/qsvdec.c","libavcodec/qsvdec_h2645.c",]

In libavcodec/bsf_list.c, register the h264_mp4toannexb bitstream filter:

static const AVBitStreamFilter * const bitstream_filters[] = {
    &ff_h264_mp4toannexb_bsf,
    &ff_null_bsf,
    NULL };

Modify the configuration in win-msvc/x64/config.h:

#define CONFIG_H264_QSV_DECODER 1
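Depending on how the FFmpeg checkout was configured, enabling the QSV decoder usually goes together with other switches in the same generated config.h. A sketch, assuming stock FFmpeg macro names (both lines are assumptions; verify against your own generated config.h):

    #define CONFIG_QSV 1                  /* assumption: QSV subsystem switch */
    #define CONFIG_H264_MP4TOANNEXB_BSF 1 /* assumption: matches the bsf_list.c entry above */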

2. QSV Hardware Decoding Implementation

The implementation in h264_decoder_impl_ffmpeg.cc:

#include "modules/video_coding/codecs/h264/h264_decoder_impl_ffmpeg.h"#include <algorithm>
#include <limits>extern "C" {
#include "third_party/ffmpeg/libavcodec/avcodec.h"
#include "third_party/ffmpeg/libavformat/avformat.h"
#include "third_party/ffmpeg/libavutil/imgutils.h"
#include "third_party/ffmpeg/libavutil/opt.h"
}  // extern "C"#include "base/checks.h"
#include "base/criticalsection.h"
#include "base/keep_ref_until_done.h"
#include "base/logging.h"
#include "system_wrappers/include/metrics.h"
#include "libyuv/convert.h"namespace webrtc {
namespace {
#define PRINT_TIME_DECODE_DELAY 0

const AVPixelFormat kPixelFormat = AV_PIX_FMT_YUV420P;
const size_t kYPlaneIndex = 0;
const size_t kUPlaneIndex = 1;
const size_t kVPlaneIndex = 2;

// Used by histograms. Values of entries should not be changed.
enum H264DecoderImplEvent {
  kH264DecoderEventInit = 0,
  kH264DecoderEventError = 1,
  kH264DecoderEventMax = 16,
};

#if defined(WEBRTC_INITIALIZE_FFMPEG)
rtc::CriticalSection ffmpeg_init_lock;
bool ffmpeg_initialized = false;

// Called by FFmpeg to do mutex operations if initialized using
// |InitializeFFmpeg|.
int LockManagerOperation(void** lock, AVLockOp op)
    EXCLUSIVE_LOCK_FUNCTION() UNLOCK_FUNCTION() {
  switch (op) {
    case AV_LOCK_CREATE:
      *lock = new rtc::CriticalSection();
      return 0;
    case AV_LOCK_OBTAIN:
      static_cast<rtc::CriticalSection*>(*lock)->Enter();
      return 0;
    case AV_LOCK_RELEASE:
      static_cast<rtc::CriticalSection*>(*lock)->Leave();
      return 0;
    case AV_LOCK_DESTROY:
      delete static_cast<rtc::CriticalSection*>(*lock);
      *lock = nullptr;
      return 0;
  }
  RTC_NOTREACHED() << "Unrecognized AVLockOp.";
  return -1;
}

void InitializeFFmpeg() {
  LOG_F(LS_INFO);
  rtc::CritScope cs(&ffmpeg_init_lock);
  if (!ffmpeg_initialized) {
    if (av_lockmgr_register(LockManagerOperation) < 0) {
      RTC_NOTREACHED() << "av_lockmgr_register failed.";
      return;
    }
    av_register_all();
    ffmpeg_initialized = true;
  }
}
#endif  // defined(WEBRTC_INITIALIZE_FFMPEG)

}  // namespace

int H264DecoderImplFfmpeg::AVGetBuffer2(AVCodecContext* context,
                                        AVFrame* av_frame,
                                        int flags) {
  // Set in |InitDecode|.
  H264DecoderImplFfmpeg* decoder =
      static_cast<H264DecoderImplFfmpeg*>(context->opaque);
  // DCHECK values set in |InitDecode|.
  RTC_DCHECK(decoder);
  RTC_DCHECK_EQ(context->pix_fmt, kPixelFormat);
  // Necessary capability to be allowed to provide our own buffers.
  RTC_DCHECK(context->codec->capabilities & AV_CODEC_CAP_DR1);

  // |av_frame->width| and |av_frame->height| are set by FFmpeg. These are the
  // actual image's dimensions and may be different from |context->width| and
  // |context->coded_width| due to reordering.
  int width = av_frame->width;
  int height = av_frame->height;
  // See |lowres|, if used the decoder scales the image by 1/2^(lowres). This
  // has implications on which resolutions are valid, but we don't use it.
  RTC_CHECK_EQ(context->lowres, 0);
  // Adjust the |width| and |height| to values acceptable by the decoder.
  // Without this, FFmpeg may overflow the buffer. If modified, |width| and/or
  // |height| are larger than the actual image and the image has to be cropped
  // (top-left corner) after decoding to avoid visible borders to the right and
  // bottom of the actual image.
  avcodec_align_dimensions(context, &width, &height);

  RTC_CHECK_GE(width, 0);
  RTC_CHECK_GE(height, 0);
  int ret = av_image_check_size(static_cast<unsigned int>(width),
                                static_cast<unsigned int>(height), 0, nullptr);
  if (ret < 0) {
    LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height;
    decoder->ReportError();
    return ret;
  }

  // The video frame is stored in |video_frame|. |av_frame| is FFmpeg's version
  // of a video frame and will be set up to reference |video_frame|'s buffers.
  VideoFrame* video_frame = new VideoFrame();
  // FFmpeg expects the initial allocation to be zero-initialized according to
  // http://crbug.com/390941. Our pool is set up to zero-initialize new buffers.
  video_frame->set_video_frame_buffer(
      decoder->pool_.CreateBuffer(width, height));
  // DCHECK that we have a continuous buffer as is required.
  RTC_DCHECK_EQ(video_frame->buffer(kUPlane),
                video_frame->buffer(kYPlane) +
                    video_frame->allocated_size(kYPlane));
  RTC_DCHECK_EQ(video_frame->buffer(kVPlane),
                video_frame->buffer(kUPlane) +
                    video_frame->allocated_size(kUPlane));
  int total_size = video_frame->allocated_size(kYPlane) +
                   video_frame->allocated_size(kUPlane) +
                   video_frame->allocated_size(kVPlane);

  av_frame->format = context->pix_fmt;
  av_frame->reordered_opaque = context->reordered_opaque;

  // Set |av_frame| members as required by FFmpeg.
  av_frame->data[kYPlaneIndex] = video_frame->buffer(kYPlane);
  av_frame->linesize[kYPlaneIndex] = video_frame->stride(kYPlane);
  av_frame->data[kUPlaneIndex] = video_frame->buffer(kUPlane);
  av_frame->linesize[kUPlaneIndex] = video_frame->stride(kUPlane);
  av_frame->data[kVPlaneIndex] = video_frame->buffer(kVPlane);
  av_frame->linesize[kVPlaneIndex] = video_frame->stride(kVPlane);
  RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data);

  av_frame->buf[0] = av_buffer_create(av_frame->data[kYPlaneIndex],
                                      total_size,
                                      AVFreeBuffer2,
                                      static_cast<void*>(video_frame),
                                      0);
  RTC_CHECK(av_frame->buf[0]);
  return 0;
}

void H264DecoderImplFfmpeg::AVFreeBuffer2(void* opaque, uint8_t* data) {
  // The buffer pool recycles the buffer used by |video_frame| when there are
  // no more references to it. |video_frame| is a thin buffer holder and is
  // not recycled.
  VideoFrame* video_frame = static_cast<VideoFrame*>(opaque);
  delete video_frame;
}

H264DecoderImplFfmpeg::H264DecoderImplFfmpeg(bool is_hw)
    : pool_(true),
      decoded_image_callback_(nullptr),
      has_reported_init_(false),
      has_reported_error_(false),
      clock_(Clock::GetRealTimeClock()),
      isFirstFrame(true),
      is_hw_(is_hw) {
  start_time_ = clock_->TimeInMilliseconds();
}

H264DecoderImplFfmpeg::~H264DecoderImplFfmpeg() {
  Release();
  int64_t deltaTimeSec = (clock_->TimeInMilliseconds() - start_time_) / 1000;
  LOG(LS_INFO) << "discard_cnt_:" << discard_cnt_
               << ", decode_cnt_:" << decode_cnt_
               << ", idr_cnt_:" << idr_cnt_
               << ", decoded_cnt_:" << decoded_cnt_
               << ", deltaTimeSec:" << deltaTimeSec
               << ", average frame rate:"
               << (deltaTimeSec ? (decoded_cnt_ / deltaTimeSec) : decoded_cnt_);
}

void H264DecoderImplFfmpeg::PrintDecoderSettings(
    const VideoCodec* codec_settings, const AVCodecContext* codec_ctx) {
  LOG(LS_INFO) << " ";
  LOG(LS_INFO) << "#############################################################";
  LOG(LS_INFO) << "#               Decoder Parameter Setting:                  #";
  LOG(LS_INFO) << "#############################################################";
  LOG(LS_INFO) << "codec name                               :" << codec_ctx->codec->name;
  LOG(LS_INFO) << "codec type                               :" << codec_ctx->codec_type;
  LOG(LS_INFO) << "codec id                                 :" << codec_ctx->codec_id;
  LOG(LS_INFO) << "codec_settings.width                     :" << codec_settings->width;
  LOG(LS_INFO) << "codec_settings.height                    :" << codec_settings->height;
  LOG(LS_INFO) << "codec_settings.startBitrate              :" << codec_settings->startBitrate;
  LOG(LS_INFO) << "codec_settings.maxBitrate                :" << codec_settings->maxBitrate;
  LOG(LS_INFO) << "codec_settings.minBitrate                :" << codec_settings->minBitrate;
  LOG(LS_INFO) << "codec_settings.targetBitrate             :" << codec_settings->targetBitrate;
  LOG(LS_INFO) << "codec_settings.maxFramerate              :" << static_cast<int32_t>(codec_settings->maxFramerate);
  LOG(LS_INFO) << "------------------------------------------------------------ ";
  LOG(LS_INFO) << "codec_ctx.width                          :" << codec_ctx->width;
  LOG(LS_INFO) << "codec_ctx.height                         :" << codec_ctx->height;
  LOG(LS_INFO) << "codec_ctx.pix_fmt                        :" << codec_ctx->pix_fmt;
  LOG(LS_INFO) << "codec_ctx.flags                          :" << static_cast<uint32_t>(codec_ctx->flags);
  LOG(LS_INFO) << "codec_ctx.bit_rate                       :" << codec_ctx->bit_rate;
  LOG(LS_INFO) << "#############################################################";
}

int32_t H264DecoderImplFfmpeg::InitHwDecode(const VideoCodec* codec_settings) {
  AVCodec* codec = avcodec_find_decoder_by_name("h264_qsv");
  if (!codec) {
    // This is an indication that FFmpeg has not been initialized or it has not
    // been compiled/initialized with the correct set of codecs.
    LOG(LS_ERROR) << "FFmpeg H.264 HW decoder not found.";
    Release();
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  LOG(LS_INFO) << "Found decoder codec name " << codec->name;
  av_context_.reset(avcodec_alloc_context3(codec));
  if (codec_settings) {
    av_context_->coded_width = codec_settings->width;
    av_context_->coded_height = codec_settings->height;
  }
  av_context_->pix_fmt = AV_PIX_FMT_NV12;
  // Keep at most one asynchronous operation in flight inside Media SDK to
  // minimize decode latency.
  av_opt_set(av_context_->priv_data, "async_depth", "1", 0);

  int res = avcodec_open2(av_context_.get(), codec, nullptr);
  if (res < 0) {
    LOG(LS_ERROR) << "avcodec_open2 error: " << res;
    Release();
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  PrintDecoderSettings(codec_settings, av_context_.get());
  av_frame_.reset(av_frame_alloc());
  return WEBRTC_VIDEO_CODEC_OK;
}
int32_t H264DecoderImplFfmpeg::InitDecode(const VideoCodec* codec_settings,
                                          int32_t number_of_cores) {
  LOG_F(LS_INFO);
  ReportInit();
  isFirstFrame = true;
  if (codec_settings && codec_settings->codecType != kVideoCodecH264) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }

  // FFmpeg must have been initialized (with |av_lockmgr_register| and
  // |av_register_all|) before we proceed. |InitializeFFmpeg| does this, which
  // makes sense for WebRTC standalone. In other cases, such as Chromium,
  // FFmpeg is initialized externally and calling |InitializeFFmpeg| would be
  // thread-unsafe and result in FFmpeg being initialized twice, which could
  // break other FFmpeg usage. See the |rtc_initialize_ffmpeg| flag.
#if defined(WEBRTC_INITIALIZE_FFMPEG)
  // Make sure FFmpeg has been initialized. Subsequent |InitializeFFmpeg| calls
  // do nothing.
  InitializeFFmpeg();
#endif

  // Release necessary in case of re-initializing.
  int32_t ret = Release();
  if (ret != WEBRTC_VIDEO_CODEC_OK) {
    ReportError();
    return ret;
  }
  RTC_DCHECK(!av_context_);

  if (is_hw_) {
    return InitHwDecode(codec_settings);
  }

  // Initialize AVCodecContext.
  av_context_.reset(avcodec_alloc_context3(nullptr));
  av_context_->codec_type = AVMEDIA_TYPE_VIDEO;
  av_context_->codec_id = AV_CODEC_ID_H264;
  if (codec_settings) {
    av_context_->coded_width = codec_settings->width;
    av_context_->coded_height = codec_settings->height;
  }
  av_context_->pix_fmt = kPixelFormat;
  av_context_->extradata = nullptr;
  av_context_->extradata_size = 0;

  // If this is ever increased, look at |av_context_->thread_safe_callbacks|
  // and make it possible to disable the thread checker in the frame buffer
  // pool.
  av_context_->thread_count = av_cpu_count() + 1;
  av_context_->thread_type = FF_THREAD_SLICE;

  // Function used by FFmpeg to get buffers to store decoded frames in.
  av_context_->get_buffer2 = AVGetBuffer2;
  // |get_buffer2| is called with the context, there |opaque| can be used to
  // get a pointer |this|.
  av_context_->opaque = this;
  // Use ref counted frames (av_frame_unref).
  av_context_->refcounted_frames = 1;  // true

  AVCodec* codec = avcodec_find_decoder(av_context_->codec_id);
  if (!codec) {
    // This is an indication that FFmpeg has not been initialized or it has not
    // been compiled/initialized with the correct set of codecs.
    LOG(LS_ERROR) << "FFmpeg H.264 decoder not found.";
    Release();
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  int res = avcodec_open2(av_context_.get(), codec, nullptr);
  if (res < 0) {
    LOG(LS_ERROR) << "avcodec_open2 error: " << res;
    Release();
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  PrintDecoderSettings(codec_settings, av_context_.get());
  av_frame_.reset(av_frame_alloc());
  return WEBRTC_VIDEO_CODEC_OK;
}

int32_t H264DecoderImplFfmpeg::Release() {
  avcodec_close(av_context_.get());
  av_context_.reset();
  av_frame_.reset();
  return WEBRTC_VIDEO_CODEC_OK;
}

int32_t H264DecoderImplFfmpeg::RegisterDecodeCompleteCallback(
    DecodedImageCallback* callback) {
  decoded_image_callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}
int32_t H264DecoderImplFfmpeg::Decode(
    const EncodedImage& input_image,
    bool /*missing_frames*/,
    const RTPFragmentationHeader* /*fragmentation*/,
    const CodecSpecificInfo* codec_specific_info,
    int64_t /*render_time_ms*/) {
  if (!IsInitialized()) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (!decoded_image_callback_) {
    LOG(LS_WARNING) << "InitDecode() has been called, but a callback function "
                       "has not been set with RegisterDecodeCompleteCallback()";
    ReportError();
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (!input_image._buffer || !input_image._length) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (codec_specific_info &&
      codec_specific_info->codecType != kVideoCodecH264) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if ((input_image._frameType != kVideoFrameKey) && isFirstFrame) {
    LOG_F(LS_WARNING) << " first frame must be an IDR frame";
    ++discard_cnt_;
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  if (input_image._frameType == kVideoFrameKey) {
    ++idr_cnt_;
  }
  isFirstFrame = false;

#if PRINT_TIME_DECODE_DELAY
  int64_t h264_decode_start_time = clock_->TimeInMilliseconds();
#endif

  // FFmpeg requires padding due to some optimized bitstream readers reading 32
  // or 64 bits at once and could read over the end. See avcodec_decode_video2.
  RTC_CHECK_GE(input_image._size,
               input_image._length +
                   EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));
  // "If the first 23 bits of the additional bytes are not 0, then damaged MPEG
  // bitstreams could cause overread and segfault." See
  // AV_INPUT_BUFFER_PADDING_SIZE. We'll zero the entire padding just in case.
  memset(input_image._buffer + input_image._length,
         0,
         EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));

  AVPacket packet;
  av_init_packet(&packet);
  packet.data = input_image._buffer;
  if (input_image._length >
      static_cast<size_t>(std::numeric_limits<int>::max())) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  packet.size = static_cast<int>(input_image._length);
  av_context_->reordered_opaque = input_image.ntp_time_ms_ * 1000;  // ms -> us

  decode_cnt_++;
  int frame_decoded = 0;
  RTC_CHECK(av_frame_.get());
  int result = avcodec_decode_video2(av_context_.get(),
                                     av_frame_.get(),
                                     &frame_decoded,
                                     &packet);
  if (result < 0) {
    LOG(LS_ERROR) << "avcodec_decode_video2 error: " << result;
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  // |result| is number of bytes used, which should be all of them.
  if (result != packet.size) {
    LOG(LS_ERROR) << "avcodec_decode_video2 consumed " << result << " bytes "
                     "when " << packet.size << " bytes were expected.";
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  if (!frame_decoded) {
    LOG(LS_WARNING) << "avcodec_decode_video2 successful but no frame was "
                       "decoded.";
    return WEBRTC_VIDEO_CODEC_OK;
  }
  decoded_cnt_++;

#if PRINT_TIME_DECODE_DELAY
  int64_t h264_decode_end_time = clock_->TimeInMilliseconds();
  int64_t h264_decode_use_time = h264_decode_end_time - h264_decode_start_time;
  LOG(LS_INFO) << "Decode: hardware enable: " << is_hw_
               << " use_time_ms:" << h264_decode_use_time;
#endif

  if (is_hw_) {
    if (!temp_frame_) {
      temp_frame_.reset(av_frame_alloc());
      if (!temp_frame_) {
        LOG(LS_ERROR) << "Could not allocate video frame";
        return WEBRTC_VIDEO_CODEC_ERROR;
      }
      temp_frame_->format = AV_PIX_FMT_YUV420P;  // FIXED
      temp_frame_->width = av_frame_->width;
      temp_frame_->height = av_frame_->height;
      int ret = av_frame_get_buffer(temp_frame_.get(), 32);
      if (ret < 0) {
        LOG(LS_ERROR) << "Could not allocate the video frame data";
        return WEBRTC_VIDEO_CODEC_ERROR;
      }
    }
    // Convert NV12 to I420. For NV12, |av_frame_->data[kYPlane]| is the Y
    // plane and |av_frame_->data[kUPlane]| is the interleaved UV plane
    // (kYPlane == 0, kUPlane == 1). Note the destination strides reuse the
    // source linesizes, i.e. this assumes |temp_frame_|'s planes are laid out
    // with the same strides.
    int ret = libyuv::NV12ToI420(
        av_frame_->data[kYPlane], av_frame_->linesize[0],
        av_frame_->data[kUPlane], av_frame_->linesize[1],
        temp_frame_->data[kYPlane], av_frame_->linesize[0],
        temp_frame_->data[kUPlane], av_frame_->linesize[1] / 2,
        temp_frame_->data[kVPlane], av_frame_->linesize[1] / 2,
        av_frame_->width, av_frame_->height);
    LOG(LS_VERBOSE) << "Decoded Video Frame. input_image._length["
                    << input_image._length
                    << "], input_image._size[" << input_image._size
                    << "], decode number[" << decoded_cnt_
                    << "], timestamp[" << input_image._timeStamp
                    << "], temp_frame width[" << temp_frame_->width
                    << "], temp_frame height[" << temp_frame_->height
                    << "], temp_frame strideY[" << temp_frame_->linesize[0]
                    << "], temp_frame strideU[" << temp_frame_->linesize[1]
                    << "], AVFrame width[" << av_frame_->width
                    << "], AVFrame height[" << av_frame_->height
                    << "], AVFrame lines[0][" << av_frame_->linesize[0]
                    << "], AVFrame lines[1][" << av_frame_->linesize[1] << "].";

    decoded_frame_.CreateEmptyFrame(av_frame_->width, av_frame_->height,
                                    av_frame_->width, av_frame_->width / 2,
                                    av_frame_->width / 2);
    uint8_t* dst_y = decoded_frame_.buffer(kYPlane);
    uint8_t* src_y = temp_frame_->data[kYPlane];
    uint8_t* dst_u = decoded_frame_.buffer(kUPlane);
    uint8_t* src_u = temp_frame_->data[kUPlane];
    uint8_t* dst_v = decoded_frame_.buffer(kVPlane);
    uint8_t* src_v = temp_frame_->data[kVPlane];
    // Assumes |temp_frame_|'s planes are tightly packed (stride == width).
    memcpy(dst_y, src_y, av_frame_->width * av_frame_->height);
    memcpy(dst_u, src_u, av_frame_->width * av_frame_->height / 4);
    memcpy(dst_v, src_v, av_frame_->width * av_frame_->height / 4);
    decoded_frame_.set_timestamp(input_image._timeStamp);
    decoded_frame_.SetIncomingTimeMs(input_image._incomingTimeMs);
    decoded_frame_.SetFrameCnt(decode_cnt_);
    ret = decoded_image_callback_->Decoded(decoded_frame_);
    // Stop referencing it, possibly freeing |video_frame|.
    av_frame_unref(av_frame_.get());
    if (ret) {
      LOG(LS_WARNING) << "DecodedImageCallback::Decoded returned " << ret;
      return ret;
    }
    return WEBRTC_VIDEO_CODEC_OK;
  }  // end of is_hw_

  // Obtain the |video_frame| containing the decoded image.
  VideoFrame* video_frame = static_cast<VideoFrame*>(
      av_buffer_get_opaque(av_frame_->buf[0]));
  RTC_DCHECK(video_frame);
  RTC_CHECK_EQ(av_frame_->data[kYPlane], video_frame->buffer(kYPlane));
  RTC_CHECK_EQ(av_frame_->data[kUPlane], video_frame->buffer(kUPlane));
  RTC_CHECK_EQ(av_frame_->data[kVPlane], video_frame->buffer(kVPlane));
  video_frame->set_timestamp(input_image._timeStamp);
  video_frame->SetIncomingTimeMs(input_image._incomingTimeMs);

  LOG(LS_VERBOSE) << "Decoded Video Frame. input_image._length["
                  << input_image._length
                  << "], input_image._size[" << input_image._size
                  << "], decode number[" << decode_cnt_
                  << "], timestamp[" << input_image._timeStamp
                  << "], pointer["
                  << (void*)(video_frame->video_frame_buffer()->DataY())
                  << "], video frame width[" << video_frame->width()
                  << "], video frame height[" << video_frame->height()
                  << "], video frame strideY[" << video_frame->stride(kYPlane)
                  << "], video frame strideU[" << video_frame->stride(kUPlane)
                  << "], AVFrame width[" << av_frame_->width
                  << "], AVFrame height[" << av_frame_->height
                  << "], AVFrame lines[0][" << av_frame_->linesize[0]
                  << "], AVFrame lines[1][" << av_frame_->linesize[1] << "].";

  int32_t ret = 0;
  // The decoded image may be larger than what is supposed to be visible, see
  // |AVGetBuffer2|'s use of |avcodec_align_dimensions|. Crop the image
  // (top-left corner) by copying the visible region into |decoded_frame_|.
  rtc::scoped_refptr<VideoFrameBuffer> buf = video_frame->video_frame_buffer();
  if ((av_frame_->width != buf->width()) ||
      (av_frame_->height != buf->height())) {
    decoded_frame_.CreateEmptyFrame(av_frame_->width, av_frame_->height,
                                    av_frame_->width, av_frame_->width / 2,
                                    av_frame_->width / 2);
    uint8_t* dst_y = decoded_frame_.buffer(kYPlane);
    uint8_t* src_y =
        const_cast<uint8_t*>(video_frame->video_frame_buffer()->DataY());
    uint8_t* dst_u = decoded_frame_.buffer(kUPlane);
    uint8_t* src_u =
        const_cast<uint8_t*>(video_frame->video_frame_buffer()->DataU());
    uint8_t* dst_v = decoded_frame_.buffer(kVPlane);
    uint8_t* src_v =
        const_cast<uint8_t*>(video_frame->video_frame_buffer()->DataV());
    if (av_frame_->width == buf->width()) {
      // Only the height differs: the planes can be copied in one go.
      memcpy(dst_y, src_y, av_frame_->width * av_frame_->height);
      memcpy(dst_u, src_u, av_frame_->width * av_frame_->height / 4);
      memcpy(dst_v, src_v, av_frame_->width * av_frame_->height / 4);
    } else {
      // Copy row by row, skipping the source's alignment padding.
      for (int i = 0; i < av_frame_->height; i++) {
        memcpy(dst_y, src_y, av_frame_->width);
        dst_y += av_frame_->width;
        src_y += buf->width();
      }
      for (int i = 0; i < av_frame_->height / 2; i++) {
        memcpy(dst_u, src_u, av_frame_->width / 2);
        dst_u += av_frame_->width / 2;
        src_u += buf->width() / 2;
      }
      for (int i = 0; i < av_frame_->height / 2; i++) {
        memcpy(dst_v, src_v, av_frame_->width / 2);
        dst_v += av_frame_->width / 2;
        src_v += buf->width() / 2;
      }
    }
    decoded_frame_.set_timestamp(input_image._timeStamp);
    decoded_frame_.SetIncomingTimeMs(input_image._incomingTimeMs);
    decoded_frame_.SetFrameCnt(decode_cnt_);
    ret = decoded_image_callback_->Decoded(decoded_frame_);
  } else {
    // Not reached at the moment.
    LOG(LS_ERROR) << "reach error area";
    video_frame->SetFrameCnt(decode_cnt_);
    ret = decoded_image_callback_->Decoded(*video_frame);
  }
  // Stop referencing it, possibly freeing |video_frame|.
  av_frame_unref(av_frame_.get());
  video_frame = nullptr;
  if (ret) {
    LOG(LS_WARNING) << "DecodedImageCallback::Decoded returned " << ret;
    return ret;
  }
  return WEBRTC_VIDEO_CODEC_OK;
}

bool H264DecoderImplFfmpeg::IsInitialized() const {
  return av_context_ != nullptr;
}

void H264DecoderImplFfmpeg::ReportInit() {
  if (has_reported_init_)
    return;
  RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264DecoderImpl.Event",
                            kH264DecoderEventInit,
                            kH264DecoderEventMax);
  has_reported_init_ = true;
}

void H264DecoderImplFfmpeg::ReportError() {
  if (has_reported_error_)
    return;
  RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264DecoderImpl.Event",
                            kH264DecoderEventError,
                            kH264DecoderEventMax);
  has_reported_error_ = true;
}

}  // namespace webrtc
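For reference, a minimal usage sketch of the class above. Assumptions: the surrounding WebRTC glue code, a DecodedImageCallback implementation passed in as my_callback, and a filled-in EncodedImage; error handling is omitted.

#include <cstring>
#include <memory>
#include "modules/video_coding/codecs/h264/h264_decoder_impl_ffmpeg.h"

void DecodeOneFrame(webrtc::DecodedImageCallback* my_callback,
                    const webrtc::EncodedImage& encoded) {
  // Passing is_hw = true selects the h264_qsv path in InitDecode().
  std::unique_ptr<webrtc::H264DecoderImplFfmpeg> decoder(
      new webrtc::H264DecoderImplFfmpeg(/*is_hw=*/true));

  webrtc::VideoCodec settings;
  memset(&settings, 0, sizeof(settings));
  settings.codecType = webrtc::kVideoCodecH264;
  settings.width = 1280;
  settings.height = 720;

  decoder->InitDecode(&settings, /*number_of_cores=*/1);
  decoder->RegisterDecodeCompleteCallback(my_callback);

  // The first frame fed in must be an IDR frame (see Decode() above).
  decoder->Decode(encoded, /*missing_frames=*/false,
                  /*fragmentation=*/nullptr, /*codec_specific_info=*/nullptr,
                  /*render_time_ms=*/0);

  decoder->Release();
}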

3. Problem Analysis and Summary

With QSV hardware decoding, the decoder buffers two frames of video internally. At fps = 15 a frame lasts roughly 66 ms, so two buffered frames add about 130 ms of latency. That is acceptable in a player, but in RTC it makes the end-to-end delay noticeably larger. There may be tuning parameters that reduce this buffering, but we have not found them yet; the options we did use are sketched below.
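For reference, the knob the implementation above already uses, plus one more that might be worth trying. The AV_CODEC_FLAG_LOW_DELAY line is an assumption: it is a generic FFmpeg flag and has not been verified to remove the 2-frame buffering of h264_qsv.

// From InitHwDecode(): keep only one asynchronous operation in flight
// inside Media SDK so decoded frames are returned as soon as possible.
av_opt_set(av_context_->priv_data, "async_depth", "1", 0);

// Assumption: request low-delay output from FFmpeg; untested with
// h264_qsv and not confirmed to eliminate the internal 2-frame buffer.
av_context_->flags |= AV_CODEC_FLAG_LOW_DELAY;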
