Generating an MP4 File with FFmpeg
When the generated MP4 file is opened in a player, the video looks like the frame shown below, and at the same time you can hear an audio tone that sounds like a ringing call.
In the generated MP4 file the audio is AAC and the video is H.264.
Because producing the H.264 stream depends on the open-source x264 library (the raw frames have to be encoded into H.264, and FFmpeg encodes H.264 through x264 by default), building the default static x264 library is usually sufficient.
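A minimal build sketch for x264, assuming its sources are already downloaded and unpacked (the install prefix is just an assumed example; adjust it for your system):

# build x264 as a static library (example prefix)
cd x264
./configure --prefix=/usr/local --enable-static
make
sudo make install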
Then compile FFmpeg with libx264 enabled and install it.
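A hedged sketch of the FFmpeg configure step, assuming x264 was installed to a standard prefix; --enable-gpl is required because libx264 is GPL-licensed, and additional flags may be needed depending on your FFmpeg version:

# configure FFmpeg with H.264 encoding via libx264, then build and install
cd ffmpeg
./configure --prefix=/usr/local --enable-gpl --enable-libx264
make
sudo make install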
The code is as follows:
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include "libavutil/mathematics.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"

#undef exit

/* 200 seconds stream duration */
#define STREAM_DURATION   200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT    PIX_FMT_YUV420P /* default pix_fmt */

static int sws_flags = SWS_BICUBIC;

/**************************************************************/
/* audio output */

static float t, tincr, tincr2;
static int16_t *samples;
static uint8_t *audio_outbuf;
static int audio_outbuf_size;
static int audio_input_frame_size;

/*
 * add an audio output stream
 */
static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    st = avformat_new_stream(oc, NULL);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }
    st->id = 1;

    c = st->codec;
    c->codec_id = codec_id;
    c->codec_type = AVMEDIA_TYPE_AUDIO;

    /* put sample parameters */
    c->sample_fmt = AV_SAMPLE_FMT_S16;
    c->bit_rate = 64000;
    c->sample_rate = 44100;
    c->channels = 2;

    // some formats want stream headers to be separate
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

static void open_audio(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVCodec *codec;

    c = st->codec;

    /* find the audio encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* init signal generator */
    t = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    audio_outbuf_size = 10000;
    audio_outbuf = av_malloc(audio_outbuf_size);

    if (c->frame_size <= 1) {
        audio_input_frame_size = audio_outbuf_size / c->channels;
        switch (st->codec->codec_id) {
        case CODEC_ID_PCM_S16LE:
        case CODEC_ID_PCM_S16BE:
        case CODEC_ID_PCM_U16LE:
        case CODEC_ID_PCM_U16BE:
            audio_input_frame_size >>= 1;
            break;
        default:
            break;
        }
    } else {
        audio_input_frame_size = c->frame_size;
    }
    samples = av_malloc(audio_input_frame_size * 2 * c->channels);
}

static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
    int j, i, v;
    int16_t *q;

    q = samples;
    for (j = 0; j < frame_size; j++) {
        v = (int)(sin(t) * 10000);
        for (i = 0; i < nb_channels; i++)
            *q++ = v;
        t += tincr;
        tincr += tincr2;
    }
}

static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt;
    av_init_packet(&pkt);

    c = st->codec;

    get_audio_frame(samples, audio_input_frame_size, c->channels);

    pkt.size = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);

    if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
        pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
    pkt.flags |= AV_PKT_FLAG_KEY;
    pkt.stream_index = st->index;
    pkt.data = audio_outbuf;

    /* write the compressed frame in the media file */
    if (av_interleaved_write_frame(oc, &pkt) != 0) {
        fprintf(stderr, "Error while writing audio frame\n");
        exit(1);
    }
}

static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    av_free(samples);
    av_free(audio_outbuf);
}

/**************************************************************/
/* video output */

static AVFrame *picture, *tmp_picture;
static uint8_t *video_outbuf;
static int frame_count, video_outbuf_size;

/* add a video output stream */
static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;
    AVCodec *codec;

    st = avformat_new_stream(oc, NULL);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }
    avcodec_get_context_defaults3(c, codec);

    c->codec_id = codec_id;

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* time base: this is the fundamental unit of time (in seconds) in terms
       of which frame timestamps are represented. for fixed-fps content,
       timebase should be 1/framerate and timestamp increments should be
       identically 1. */
    c->time_base.den = STREAM_FRAME_RATE;
    c->time_base.num = 1;
    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
    c->pix_fmt = STREAM_PIX_FMT;
    if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B frames */
        c->max_b_frames = 2;
    }
    if (c->codec_id == CODEC_ID_MPEG1VIDEO) {
        /* Needed to avoid using macroblocks in which some coeffs overflow.
           This does not happen with normal video, it just happens here as
           the motion of the chroma plane does not match the luma plane. */
        c->mb_decision = 2;
    }
    // some formats want stream headers to be separate
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    uint8_t *picture_buf;
    int size;

    picture = avcodec_alloc_frame();
    if (!picture)
        return NULL;
    size = avpicture_get_size(pix_fmt, width, height);
    picture_buf = av_malloc(size);
    if (!picture_buf) {
        av_free(picture);
        return NULL;
    }
    avpicture_fill((AVPicture *)picture, picture_buf,
                   pix_fmt, width, height);
    return picture;
}

static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodec *codec;
    AVCodecContext *c;

    c = st->codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open the codec */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        video_outbuf_size = 200000;
        video_outbuf = av_malloc(video_outbuf_size);
    }

    /* allocate the encoded raw picture */
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* if the output format is not YUV420P, then a temporary YUV420P
       picture is needed too. It is then converted to the required
       output format */
    tmp_picture = NULL;
    if (c->pix_fmt != PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}

/* prepare a dummy image */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
        }
    }

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    static struct SwsContext *img_convert_ctx;

    c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* no more frame to compress. The codec has a latency of a few
           frames if using B frames, so we get the last frames by
           passing the same picture again */
    } else {
        if (c->pix_fmt != PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
               to the codec pixel format if needed */
            if (img_convert_ctx == NULL) {
                img_convert_ctx = sws_getContext(c->width, c->height,
                                                 PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
                    fprintf(stderr, "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
            sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
                      0, c->height, picture->data, picture->linesize);
        } else {
            fill_yuv_image(picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           future for that. */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index = st->index;
        pkt.data = (uint8_t *)picture;
        pkt.size = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
            if (c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index = st->index;
            pkt.data = video_outbuf;
            pkt.size = out_size;

            /* write the compressed frame in the media file */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}

static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(picture->data[0]);
    av_free(picture);
    if (tmp_picture) {
        av_free(tmp_picture->data[0]);
        av_free(tmp_picture);
    }
    av_free(video_outbuf);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    double audio_pts, video_pts;
    int i;

    /* initialize libavcodec, and register all codecs and formats */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc) {
        return 1;
    }
    fmt = oc->oformat;

    /* add the audio and video streams using the default format codecs
       and initialize the codecs */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }
    if (fmt->audio_codec != CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }

    av_dump_format(oc, 0, filename, 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video_st)
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_st);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* write the stream header, if any */
    av_write_header(oc);

    picture->pts = 0;
    for (;;) {
        /* compute current audio and video time */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
            picture->pts++;
        }
    }

    av_write_trailer(oc);

    /* close each codec */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* free the streams */
    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE)) {
        /* close the output file */
        avio_close(oc->pb);
    }

    /* free the format context */
    av_free(oc);

    return 0;
}
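To try the program, it can be compiled against the installed FFmpeg development libraries roughly as follows; the source file name output_example.c and the output name test.mp4 are assumed examples, and the pkg-config module names assume FFmpeg's .pc files are on PKG_CONFIG_PATH:

# compile against the FFmpeg libraries and produce a test MP4
gcc output_example.c -o output_example \
    $(pkg-config --cflags --libs libavformat libavcodec libswscale libavutil) -lm
./output_example test.mp4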
References:
http://www.ylmf.net/ubuntu/tips/2010122919090_3.html
http://hi.baidu.com/ccqi0000/blog/item/d39fb21f1d5157a84aedbcb1.html