WebM由Google提出,是一个开放、免费的媒体文件格式。其中Google将其拥有的VP8视频编码技术以类似BSD授权开源。WebM VP8实现了完全的免费开源与授权开放,并且,经过Google持续性的技术优化,其解码速度与开发工具显著增强,在压缩效率和性能方面的表现较发布初期显著提升。在Qt中的具体实现方法是继承QAbstractVideoSurface,然后在类Encoding中创建编码线程,最终通过信号与槽来进行数据传递。ffmpeg库自己网上搜索下载,内部包含有vp8编解码器。以下代码能实现vp8的初始化配置以及视频编码的具体过程。整个工程需要的私信我~
具体代码------cameravideosurface.h文件
#pragma once

#include <QAbstractVideoSurface>
#include <QTemporaryFile>
#include <QThread>
#include <QDebug>
#include <QTime>

#define BIT_RATE 8000000                 // Encoder target bitrate (bits/s); higher = clearer video
#define PTS_TIMES 40                     // Video of 25fps looks better in VLC, 25fps = 40ms per frame

extern "C"                               // FFmpeg is a C library
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavdevice/avdevice.h>
#include <libavutil/avutil.h>
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
}

// Worker object moved onto a dedicated QThread by CameraVideoSurface.
// It receives frames through a queued signal/slot connection so the
// (potentially slow) VP8 encode runs off the GUI thread.
class Encoding : public QObject
{
    Q_OBJECT
public:
    // All FFmpeg objects are created and owned by CameraVideoSurface;
    // this worker only borrows them, so ~Encoding() must not free them.
    Encoding(AVFormatContext *p_AVFormatContext, AVCodecContext *p_AVCodecContext,
             AVFrame *p_AVFrame, AVPacket *p_AVPacket,
             struct SwsContext *p_image_convert_ctx);
    ~Encoding();

public slots:
    // Convert one RGB32 QImage to YUV420P, encode it as VP8, and write the
    // resulting packet(s) to the output file (skipped when m_isFileClosed).
    void slot_encodeImage(QImage m_image, bool m_isFileClosed);

private:
    AVFormatContext *m_AVFormatContext = nullptr;   // borrowed muxer context
    AVCodecContext *m_AVCodecContext = nullptr;     // borrowed encoder context
    AVFrame *m_AVFrame = nullptr;                   // borrowed YUV420P scratch frame
    AVPacket *m_AVPacket = nullptr;                 // borrowed packet buffer
    struct SwsContext *image_convert_ctx = nullptr; // borrowed RGB32->YUV420P scaler
    int frame_index = 0;                            // frames encoded so far (drives pts)
};

// Video surface that receives camera frames, forwards them for display and,
// when triggered, sends them to the Encoding worker to be recorded into a
// temporary WebM (VP8) file.
class CameraVideoSurface : public QAbstractVideoSurface
{
    Q_OBJECT
public:
    CameraVideoSurface(int width = 1280, int height = 720, int frameRate = 30, bool image_reverse = false);
    ~CameraVideoSurface();

    // Finalize the WebM file (write trailer, close I/O) and copy the
    // temporary recording to correctFileName.
    void fileFinished(QString correctFileName = "No_name");

    // Called by Qt Multimedia for every captured frame.
    bool present(const QVideoFrame &frame);

    // Pixel formats this surface accepts from the camera backend.
    QList<QVideoFrame::PixelFormat> supportedPixelFormats(
        QAbstractVideoBuffer::HandleType type = QAbstractVideoBuffer::NoHandle) const;

    // Arm a single-frame capture: the next presented frame is sent to the encoder.
    void TriggerSignalTrue();

signals:
    void signal_showFrame(QImage image);                                // frame ready for display
    void signal_encodeImage(QImage m_image, bool isFileClosed = false); // frame ready for encoding

private:
    QImage m_image;
    // Members below are default-initialized to nullptr so the destructor is
    // safe even when the constructor bails out early on an FFmpeg error.
    AVFormatContext *m_AVFormatContext = nullptr;
    AVStream *m_AVStream = nullptr;
    AVCodec *m_AVCodec = nullptr;
    AVCodecContext *m_AVCodecContext = nullptr;
    AVFrame *m_AVFrame = nullptr;
    AVPacket *m_AVPacket = nullptr;
    QTemporaryFile *pTempFile = nullptr;            // recording target; copied in fileFinished()
    struct SwsContext *image_convert_ctx = nullptr;
    bool m_triggerSignal = false;                   // set by TriggerSignalTrue()
    bool isFileClosed = false;                      // true once fileFinished() has run
    int ret = 0;
    bool m_image_reverse = false;                   // horizontal mirror flag
    Encoding *m_encoding = nullptr;
    QThread workerThread;
};
具体代码------cameravideosurface.cpp文件
CameraVideoSurface::CameraVideoSurface(int width, int height, int frameRate, bool image_reverse) : QAbstractVideoSurface()
{av_register_all();                                                  //Register all codecs in FFmpegm_image_reverse = image_reverse;pTempFile = new QTemporaryFile;pTempFile->open();QString outputFile = pTempFile->fileName();ret =  avformat_alloc_output_context2(&m_AVFormatContext, nullptr, "webm", outputFile.toLocal8Bit().data());  //Allocate an AVFormatContext for Webm.if (ret != 0)return;m_AVCodec = avcodec_find_encoder(AV_CODEC_ID_VP8);                  //Find VP8 encoderif(m_AVCodec == nullptr)return;m_AVCodecContext = avcodec_alloc_context3(m_AVCodec);               //Allocate an AVCodecContext for VP8if(m_AVCodecContext == nullptr)return;m_AVCodecContext->bit_rate = BIT_RATE;                              //Encoding bitrate, affect the clarity of videom_AVCodecContext->width = width;                                    //frame width, height set by outsidem_AVCodecContext->height = height;m_AVCodecContext->frame_number = 1;                                 //do not changem_AVCodecContext->time_base.num = 1;                                //do not changem_AVCodecContext->time_base.den = frameRate;                        //set by outside(in seconds), internal timebasem_AVCodecContext->gop_size = 0;                                     //Group of picture, Interval between two I_frames, do not change otherwise you can not seek frame correctly.m_AVCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;                     //do not changem_AVCodecContext->max_b_frames = 0;                                 //Set B_frame = 0, do not changeif (m_AVFormatContext->oformat->flags & AVFMT_GLOBALHEADER)         //Some formats want stream headers to be separated.m_AVCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;av_opt_set(m_AVCodecContext->priv_data, "quality", "realtime", 0);  //VP8 must be setAVDictionary *param = nullptr;if(avcodec_open2(m_AVCodecContext, m_AVCodec, &param) < 0)          //Open encoderreturn;m_AVStream = avformat_new_stream(m_AVFormatContext, m_AVCodec);     //Create 
output streamif (m_AVStream == nullptr)return;m_AVStream->time_base = m_AVCodecContext->time_base;                // 1/frameRate  eg: 30frames per secondsm_AVStream->codec = m_AVCodecContext;if(avio_open(&m_AVFormatContext->pb, outputFile.toStdString().c_str(), AVIO_FLAG_READ_WRITE) < 0 )      //Open output filereturn;if(avformat_write_header(m_AVFormatContext, nullptr) < 0)           //Write header of package formatreturn;m_AVFrame = av_frame_alloc();m_AVFrame->width  = m_AVCodecContext->width;m_AVFrame->height = m_AVCodecContext->height;m_AVFrame->format = m_AVCodecContext->pix_fmt;m_AVFrame->pts = 0;m_AVFrame->format = AV_PIX_FMT_YUV420P;av_image_alloc(m_AVFrame->data, m_AVFrame->linesize, m_AVFrame->width, m_AVFrame->height, m_AVCodecContext->pix_fmt, 32);image_convert_ctx = sws_getContext(m_AVCodecContext->width, m_AVCodecContext->height, AV_PIX_FMT_RGB32,  //Data source of input imagem_AVCodecContext->width, m_AVCodecContext->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, nullptr, nullptr, nullptr);   //Data source of output imageif(image_convert_ctx == nullptr)return;m_AVPacket = av_packet_alloc();av_init_packet(m_AVPacket);if( m_encoding != nullptr ){delete m_encoding;m_encoding = nullptr;}m_encoding = new Encoding(m_AVFormatContext, m_AVCodecContext, m_AVFrame, m_AVPacket, image_convert_ctx);m_encoding->moveToThread(&workerThread);connect(this, SIGNAL(signal_encodeImage(QImage, bool)), m_encoding, SLOT(slot_encodeImage(QImage, bool)));workerThread.start();
}QList<QVideoFrame::PixelFormat>CameraVideoSurface::supportedPixelFormats(QAbstractVideoBuffer::HandleType type) const
{Q_UNUSED(type);QList<QVideoFrame::PixelFormat> pixelFormats;pixelFormats<<QVideoFrame::Format_RGB32;                //Add two pixel formats which I usepixelFormats<<QVideoFrame::Format_YUV420P;return pixelFormats;
}bool CameraVideoSurface::present(const QVideoFrame &frame)  //Execute when camera gets one frame. If you set cameraRate 30fps, present() will be executed every 33.33ms
{if(frame.isValid())                                     //Identifies whether a video frame is valid.{QVideoFrame cloneFrame(frame);cloneFrame.map(QAbstractVideoBuffer::ReadOnly);     //Map the frame to memoryQImage m_image(cloneFrame.bits(), cloneFrame.width(), cloneFrame.height(), QVideoFrame::imageFormatFromPixelFormat(frame.pixelFormat()));  //Form a picturem_image = m_image.mirrored(m_image_reverse, true);  //horizontal, verticalif( m_triggerSignal )          //Get the trigger signal from outside or timer{emit signal_encodeImage(m_image, isFileClosed);m_triggerSignal = false;}emit signal_showFrame(m_image);cloneFrame.unmap();return true;}elsereturn false;
}void CameraVideoSurface::fileFinished(QString correctFileName)  //QString correctFileName
{av_write_trailer(m_AVFormatContext);         //Write tail of package formatavio_closep(&m_AVFormatContext->pb);         //Close and free avio_open()if(pTempFile->open()){pTempFile->copy(correctFileName);        //Copy pTempFile's content to the file named correctFileNamedelete pTempFile;pTempFile = nullptr;}isFileClosed = true;                         //File state flag must be set
}CameraVideoSurface::~CameraVideoSurface()
{if(m_encoding != nullptr){workerThread.exit(0);delete m_encoding;m_encoding = nullptr;}avformat_close_input(&m_AVFormatContext);av_frame_free(&m_AVFrame);
}void CameraVideoSurface::TriggerSignalTrue()      //single trigger
{m_triggerSignal = true;
}Encoding::Encoding(AVFormatContext *p_AVFormatContext, AVCodecContext *p_AVCodecContext, AVFrame *p_AVFrame, AVPacket *p_AVPacket, struct SwsContext *p_image_convert_ctx)
{m_AVFormatContext = p_AVFormatContext;m_AVCodecContext = p_AVCodecContext;m_AVFrame = p_AVFrame;m_AVPacket = p_AVPacket;image_convert_ctx = p_image_convert_ctx;
}Encoding::~Encoding()
{m_AVFormatContext = nullptr;m_AVCodecContext = nullptr;m_AVFrame = nullptr;m_AVPacket = nullptr;image_convert_ctx = nullptr;
}void Encoding::slot_encodeImage(QImage m_image, bool isFileClosed)
{const uint8_t *data[AV_NUM_DATA_POINTERS] = {nullptr};data[0] = m_image.constBits();                  //Get image raw dataint linesize[AV_NUM_DATA_POINTERS] = {0};linesize[0] = m_AVCodecContext->width * 4;      //RGB32 occupies 4 bytes, do not changesws_scale(image_convert_ctx, data, linesize, 0, m_AVCodecContext->height, m_AVFrame->data, m_AVFrame->linesize);  //Do not changeframe_index++;m_AVFrame->pts = frame_index * PTS_TIMES;       //Set ptsm_AVPacket->data = nullptr;m_AVPacket->size = 0;QTime timer;timer.start();int ret = avcodec_send_frame(m_AVCodecContext, m_AVFrame);  //codec context, m_AVFrame contains the raw video frame. Supply a raw video frame to the encoder.qDebug() << "Time 1 duration: " << timer.elapsed();QTime m_timer;m_timer.start();while(ret == 0){ret = avcodec_receive_packet(m_AVCodecContext, m_AVPacket);  //codec context, m_AVPacket is the packet buffer of encoder. Read encoded data from the encoder.if( ret == AVERROR(EAGAIN) )break;else if( ret < 0 )break;if( !isFileClosed )        //If the output file has been closed, the last packet can not write into file and errorav_interleaved_write_frame(m_AVFormatContext, m_AVPacket);     //Write a packet to an output fileav_packet_unref(m_AVPacket);       //Wipe the packet}qDebug() << "Time 2 duration: " << m_timer.elapsed();
}

Qt +ffmpeg(vp8) 记录视频每一帧并生成webm文件格式相关推荐

  1. 【Qt+FFmpeg】给视频添加时间水印

    ffmpeg编解码中,给本地视频加上时间水印,并保存到本地,使用到的技术是ffmpeg中的avfilter库: 具体效果如下 yuv: mp4 本方法不适合摄像头解码,解码出来糊得不行,本地视频的话会 ...

  2. QT+FFmpeg播放音视频,且支持音视频同步。

    大概框架: 线程1:ffmpeg视频解码. 线程2:ffmpeg音频解码. 线程3:播放每一帧音频,使用QAudioOutput配合QIODevice来实现. 主线程:绘制每一帧图片,使用QOpenG ...

  3. FFmpeg —— 对mp4视频按时间剪切,生成新的mp4(附源码)

    效果     代码      注:下面qDebug()为Qt输出,使用时删除即可. char iPath[] = "../mp4.mp4";char oPath[] = " ...

  4. 利用ffmpeg将mp4视频转换成多张jpg图片

    分离视频音频流 ffmpeg -i input_file -vcodec copy -an output_file_video //分离视频流 ffmpeg -i input_file -acodec ...

  5. 查看视频的IPB帧,IPB帧编码顺序,视频的第一帧图片

    > 如何查看视频的IPB帧 Android, 如何获取视频的每一帧的信息 视频 = 图片.图像(摄像头) + 声音(麦克风) : 谷歌官方给我们的提供的api接口类:MediaMetadataR ...

  6. 假期之不务正业—— Qt+FFmpeg+百度api进行视频的语音识别

    假期之不务正业--Qt+FFmpeg+百度api进行视频的语音识别 一.前言 二.FFmpeg进行音频提取和重采样 三.对音频分段 四.百度api调用 五.Qt编程的一些补充 六.结语 一.前言 现在 ...

  7. 【Qt+FFmpeg】鼠标滚轮放大、缩小、移动——解码播放本地视频(三)

    上一期我们实现了播放.暂停.重播.倍速功能,这期来谈谈如何实现鼠标滚轮放大缩小和移动:如果还没看过上期,请移步 [Qt+FFmpeg]解码播放本地视频(一)_logani的博客-CSDN博客[Qt+F ...

  8. QT界面中实现视频帧显示的多种方法及应用

    QT界面中实现视频帧显示的多种方法及应用 (一) 引言 1.1 视频帧在QT界面中的应用场景 1.2 不同方法的性能和适用性分析 1.2.1 使用QLabel和QPixmap 1.2.2 使用QPai ...

  9. 基于FFMPEG的音视频截取(C++Qt 版)

    基于FFMPEG的音视频截取(C++Qt 版) 这篇博客是基于上篇博客的: https://blog.csdn.net/liyuanbhu/article/details/121744275 上篇博客 ...

  10. android基于ffmpeg的简单视频播发器 跳到指定帧 av_seek_frame()

    跳到指定帧,在ffmpeg使用av_seek_frame()进行跳转,这个函数只能跳到关键帧,所以对关键帧时间差距比较大的视频很尴尬,总是不能调到想要的画面 还有av_seek_frame中的时间参数 ...

最新文章

  1. 摘自ubantuer-Linux防火墙iptables学习笔记(三)iptables命令详解和举例
  2. Web服务器性能/压力测试工具http_load、webbench、ab、Siege使用教程
  3. 禁止北京地区IP访问站点
  4. 前端学习(179):表单元素
  5. Python合并Excel2007+中多个WorkSheet
  6. mysql配置参数优化提示
  7. 计算机扩容硬盘,扩容盘是什么 如何恢复真实容量【方法详解】
  8. C#——NPOI对Excel的操作、导入导出时异常处理(二)
  9. error: invalid operands to binary == (have ‘uid_t’ {aka ‘unsigned int’} and ‘kuid_t’
  10. 机器学习入门09 - 特征组合 (Feature Crosses)
  11. python粒子特效_初试PyOpenGL四 (Python+OpenGL)GPU粒子系统与基本碰撞
  12. 软考试题合格率如何?
  13. 37岁程序员被裁,120天没找到工作,无奈去小公司,结果蒙了
  14. android bmob上传图片,Bmob+Android+ECharts 实现移动端数据上传与图表展示
  15. Java实现AD域登录认证
  16. 使用python(pydicom)读取Dicom文件并且转换成png
  17. layer icon图标汇总
  18. nboot,eboot和uboot的区别
  19. 小米扫地机器人充电座指示灯不亮_扫地机器人常见问题及故障排除
  20. 关于筹码理论的一些知识

热门文章

  1. OpenCV静态编译配置基于小熊猫C++
  2. Sqlmap使用教程【超全】
  3. 论文公式居中编号右对齐方式
  4. Win10系统中破解软件的注册机被自动删除的解决方法
  5. 驱动板LVDS输出接口(发送器),液晶面板LVDS输入接口(接收器)
  6. 抖音在线无水印解析PHP源码
  7. unity相机自由移动
  8. 微信小程序 有赞UI关于Tab 标签页的坑
  9. nodejs爬虫抓取搜狗微信文章详解
  10. 好用的PDF编辑软件有哪些?这几款工具建议收藏