Table of Contents

  • Defining the Global Data Class
  • Defining the Data Queue
  • Defining the SDL Library Initialization
  • Defining the Audio/Video Stream Parsing Function
  • Defining the Demuxing Thread and the Video Decoding Thread
  • Defining the Audio/Video Decoding Functions
  • Event Handling in the Main Function

After a video file is demultiplexed, the video stream and the audio stream are independent and are played back independently. Because their compression methods and data formats differ, the audio output is linear during playback while the video output is not, so the time offset between the two streams grows larger and larger and the audio and video eventually fall out of sync.

To solve this problem, the playback speed of the audio or the video has to be adjusted while the file is playing so that the two streams stay synchronized. Since humans are more sensitive to sound than to pictures, frequently adjusting the audio gives a poor viewing experience, and the audio playback clock grows linearly, so the audio clock is usually taken as the reference clock and the video is synchronized to the audio.
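
The decision the player makes for every video frame can be sketched in a few lines. The following is only an illustration of the idea, not the code of this demo (the function name compute_frame_delay and its parameters are made up for the example); the real implementation appears later in video_refresh_timer:

// Minimal sketch of the audio-master sync decision (illustrative only).
static double compute_frame_delay(double video_pts, double audio_clock,
                                  double nominal_delay, double sync_threshold)
{
    double diff = video_pts - audio_clock;   // both clocks in seconds
    if (diff <= -sync_threshold)
        return 0.0;                          // video is behind the audio: show the frame immediately
    if (diff >= sync_threshold)
        return 2.0 * nominal_delay;          // video is ahead of the audio: wait longer
    return nominal_delay;                    // close enough: keep the nominal delay
}

In other words, a late video frame is displayed right away and an early one is held back, while the audio keeps playing at its natural rate.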

Here an example that synchronizes the video stream to the audio stream is used to show how audio/video synchronization is implemented. The architecture of the program is shown in the figure below:

Defining the Global Data Class

First, define the global data class used to share data between the different threads. The main structure, VideoState, records the various context parameters of the media file.

//define.h
#ifndef _DEFINE_H_
#define _DEFINE_H_

#include <stdio.h>
#include <assert.h>
#include <math.h>

#include <SDL.h>

extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavfilter/avfilter.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/avutil.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>             // needed for the av_opt_set_* calls used later
#include <libavutil/channel_layout.h>
#include <libavutil/time.h>
}

#include <iostream>

#define SDL_AUDIO_BUFFER_SIZE 1024
#define MAX_AUDIO_FRAME_SIZE 192000
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define AV_SYNC_THRESHOLD 0.01
#define AV_NOSYNC_THRESHOLD 10.0
#define FF_REFRESH_EVENT (SDL_USEREVENT)
#define FF_QUIT_EVENT (SDL_USEREVENT + 1)
#define VIDEO_PICTURE_QUEUE_SIZE 1

typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

typedef struct VideoState {
    AVFormatContext *pFormatCtx;          // demuxing context of the media file
    int              videoStreamIndex;    // video stream index
    int              audioStreamIndex;    // audio stream index
    AVStream        *audio_st;            // audio stream
    AVCodecContext  *audio_ctx;           // audio codec context
    PacketQueue      audioq;              // audio packet queue
    // audio buffer
    uint8_t          audio_buf[192000 * 3 / 2];
    unsigned int     audio_buf_size;      // buffer size
    unsigned int     audio_buf_index;     // buffer index
    AVFrame          audio_frame;         // audio frame
    AVPacket         audio_pkt;           // audio packet
    uint8_t         *audio_pkt_data;      // audio data pointer
    int              audio_pkt_size;      // audio packet size
    int              audio_hw_buf_size;
    struct SwrContext *audio_swr_ctx;     // audio resampling context

    // clocks used for audio/video synchronization
    double           audio_clock;
    double           video_clock;
    double           frame_timer;
    double           frame_last_pts;
    double           frame_last_delay;

    AVStream        *video_st;            // video stream
    AVCodecContext  *video_ctx;           // video codec context
    PacketQueue      videoq;              // video packet queue
    struct SwsContext *video_sws_ctx;     // video scaling context

    // queue of decoded video frames
    AVFrame          pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int              pictq_size;
    int              pictq_rindex;
    int              pictq_windex;
    // mutex and condition variable protecting the frame queue
    SDL_mutex       *pictq_mutex;
    SDL_cond        *pictq_cond;

    // demuxing thread
    SDL_Thread      *parse_tid;
    // video decoding thread
    SDL_Thread      *video_tid;

    // input file name
    char             filename[1024];
    // quit flag
    int              quit;

    AVFrame          wanted_frame;
    SDL_AudioSpec    wantedSpec = { 0 };
    SDL_AudioSpec    audioSpec = { 0 };
} VideoState;

SDL_mutex    *text_mutex;
SDL_Window   *win;
SDL_Renderer *renderer;
SDL_Texture  *texture;

// global player state
VideoState *g_state = NULL;

#endif

Defining the Data Queue

Define the operations on the audio/video data queues, which buffer the audio and video packets.

//datequeue.h
#ifndef _DATA_QUEUE_H_
#define _DATA_QUEUE_H_

#include "define.h"

// put a decoded video frame into the picture queue
int queue_picture(VideoState *is, AVFrame *pFrame, double pts)
{
    // wait until there is room in the queue
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->quit)
    {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->quit)
        return -1;

    AVFrame *current_frame = &is->pictq[is->pictq_windex];
    av_frame_make_writable(current_frame);
    // (re)allocate the destination buffer if it does not match the video size yet
    if (!current_frame->data[0] ||
        current_frame->width != is->video_ctx->width ||
        current_frame->height != is->video_ctx->height)
    {
        current_frame->format = AV_PIX_FMT_YUV420P;   // the SDL texture expects IYUV/YUV420P
        current_frame->width = pFrame->width;
        current_frame->height = pFrame->height;
        int ret = av_image_alloc(current_frame->data, current_frame->linesize,
                                 is->video_ctx->width, is->video_ctx->height,
                                 AV_PIX_FMT_YUV420P, 32);
        if (ret < 0 || is->quit)
        {
            return -1;
        }
    }

    if (current_frame)
    {
        current_frame->pts = pFrame->pts;
        // convert the decoded frame into the YUV420P layout used by the SDL texture
        sws_scale(is->video_sws_ctx, (uint8_t const * const *)pFrame->data,
                  pFrame->linesize, 0, is->video_ctx->height,
                  current_frame->data, current_frame->linesize);

        // advance the write index and notify the consumer of the queue
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
        {
            is->pictq_windex = 0;
        }
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}

// initialize a packet queue
void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
}

// append a packet to the queue
int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;
    AVPacket *newPkt;

    newPkt = (AVPacket *)av_mallocz_array(1, sizeof(AVPacket));
    if (av_packet_ref(newPkt, pkt) < 0)
        return -1;

    pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
    pkt1->pkt = *newPkt;
    pkt1->next = NULL;

    SDL_LockMutex(q->mutex);
    if (!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += newPkt->size;
    // wake up a consumer waiting for data
    SDL_CondSignal(q->cond);
    SDL_UnlockMutex(q->mutex);
    return 0;
}

// fetch a packet from the queue; block = 1 waits until data is available
int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);
    while (1)
    {
        pkt1 = q->first_pkt;
        if (pkt1)
        {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size;
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        }
        else if (!block)
        {
            ret = 0;
            break;
        }
        else
        {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}
#endif
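
The original program never drains the queues on exit. If cleanup is needed, a flush helper could be added along the lines of the sketch below; packet_queue_flush is not part of the original code and simply mirrors the locking pattern used above:

// Sketch of a flush helper (not in the original code): releases every
// packet still buffered in the queue and resets the bookkeeping fields.
void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *pkt, *next;

    SDL_LockMutex(q->mutex);
    for (pkt = q->first_pkt; pkt != NULL; pkt = next)
    {
        next = pkt->next;
        av_packet_unref(&pkt->pkt);
        av_freep(&pkt);
    }
    q->first_pkt = NULL;
    q->last_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}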

Defining the SDL Library Initialization

//SDL_Wraper.h
#ifndef _SDL_WRAPPER_H_
#define _SDL_WRAPPER_H_

#include "define.h"

void InitSDL()
{
    // initialize the SDL library
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))
        printf("There is something wrong with your SDL Libs. Couldn't run");

    // mutex protecting the renderer and texture
    text_mutex = SDL_CreateMutex();

    // open the audio driver
#ifdef _WIN32
    SDL_AudioInit("directsound");
#endif
}

// display the current video frame with SDL
void video_display(VideoState *is)
{
    SDL_Rect rect;
    AVFrame *vp;

    vp = &is->pictq[is->pictq_rindex];
    if (vp)
    {
        // upload the YUV planes to the texture and render it
        SDL_UpdateYUVTexture(texture, NULL,
                             vp->data[0], vp->linesize[0],
                             vp->data[1], vp->linesize[1],
                             vp->data[2], vp->linesize[2]);
        rect.x = 0;
        rect.y = 0;
        rect.w = is->video_ctx->width;
        rect.h = is->video_ctx->height;

        SDL_LockMutex(text_mutex);
        SDL_RenderClear(renderer);
        SDL_RenderCopy(renderer, texture, NULL, &rect);
        SDL_RenderPresent(renderer);
        SDL_UnlockMutex(text_mutex);
    }
}
#endif
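
For completeness, a matching teardown helper could look like the sketch below. DestroySDL is not part of the original code; it only assumes the globals declared in define.h:

// Sketch of an SDL teardown helper (not in the original code).
void DestroySDL()
{
    if (texture)    SDL_DestroyTexture(texture);
    if (renderer)   SDL_DestroyRenderer(renderer);
    if (win)        SDL_DestroyWindow(win);
    if (text_mutex) SDL_DestroyMutex(text_mutex);
    SDL_CloseAudio();
    SDL_Quit();
}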

Defining the Audio/Video Stream Parsing Function

The stream parsing function opens the decoders and stores the parameters of the audio and video streams in the global data structure.

//parser_stream.h
#ifndef _PARSER_STREAM_H_
#define _PARSER_STREAM_H_

#include "define.h"
#include "datequeue.h"
#include "callback.h"

int stream_component_open(VideoState *is, int stream_index)
{
    if (stream_index < 0 || stream_index >= (int)is->pFormatCtx->nb_streams)
    {
        return -1;
    }

    // find the decoder and allocate its context
    const AVCodec *codec = avcodec_find_decoder(is->pFormatCtx->streams[stream_index]->codecpar->codec_id);
    if (!codec)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }
    AVCodecContext *codecCtx = avcodec_alloc_context3(codec);
    if (!codecCtx)
    {
        fprintf(stderr, "new codec context failed!\n");
        return -1;
    }
    int ret = avcodec_parameters_to_context(codecCtx, is->pFormatCtx->streams[stream_index]->codecpar);
    if (ret < 0)
    {
        return -2;
    }
    if (avcodec_open2(codecCtx, codec, NULL) < 0)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }

    switch (codecCtx->codec_type)
    {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_ctx = codecCtx;

        // create the audio resampling context
        is->audio_swr_ctx = swr_alloc();
        if (is->audio_swr_ctx == NULL)
        {
            return -4;
        }
        // configure the input/output channel layout, sample rate and sample format
        av_opt_set_channel_layout(is->audio_swr_ctx, "in_channel_layout", codecCtx->channel_layout, 0);
        av_opt_set_channel_layout(is->audio_swr_ctx, "out_channel_layout", codecCtx->channel_layout, 0);
        av_opt_set_int(is->audio_swr_ctx, "in_sample_rate", codecCtx->sample_rate, 0);
        av_opt_set_int(is->audio_swr_ctx, "out_sample_rate", codecCtx->sample_rate, 0);
        av_opt_set_sample_fmt(is->audio_swr_ctx, "in_sample_fmt", codecCtx->sample_fmt, 0);
        av_opt_set_sample_fmt(is->audio_swr_ctx, "out_sample_fmt", AV_SAMPLE_FMT_FLT, 0);
        ret = swr_init(is->audio_swr_ctx);
        if (ret != 0)
        {
            return -5;
        }

        // open the audio device
        memset(&is->wantedSpec, 0, sizeof(is->wantedSpec));
        is->wantedSpec.channels = codecCtx->channels;
        is->wantedSpec.freq = codecCtx->sample_rate;
        is->wantedSpec.format = AUDIO_S16SYS;
        is->wantedSpec.silence = 0;
        is->wantedSpec.samples = SDL_AUDIO_BUFFER_SIZE;
        is->wantedSpec.userdata = codecCtx;        // the audio codec context
        is->wantedSpec.callback = audio_callback;  // callback that feeds audio data to SDL
        if (SDL_OpenAudio(&is->wantedSpec, &is->audioSpec) < 0)
        {
            printf("Failed to open audio");
            return -6;
        }

        packet_queue_init(&is->audioq);

        // describe the format the audio should be resampled to
        is->wanted_frame.format = AV_SAMPLE_FMT_S16;
        is->wanted_frame.sample_rate = is->audioSpec.freq;
        is->wanted_frame.channel_layout = av_get_default_channel_layout(is->audioSpec.channels);
        is->wanted_frame.channels = is->audioSpec.channels;

        is->audioStreamIndex = stream_index;
        is->audio_st = is->pFormatCtx->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        SDL_PauseAudio(0);
        break;

    // handle the video stream
    case AVMEDIA_TYPE_VIDEO:
        is->video_ctx = codecCtx;
        is->video_st = is->pFormatCtx->streams[stream_index];
        is->videoStreamIndex = stream_index;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->frame_last_delay = 40e-3;
        packet_queue_init(&is->videoq);
        is->video_sws_ctx = sws_getContext(is->video_ctx->width, is->video_ctx->height,
                                           is->video_ctx->pix_fmt, is->video_ctx->width,
                                           is->video_ctx->height, AV_PIX_FMT_YUV420P,
                                           SWS_BILINEAR, NULL, NULL, NULL);
        break;
    default:
        break;
    }
    return 0;
}
#endif
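
Note that av_opt_set_channel_layout and the channels/channel_layout fields used above belong to the older FFmpeg 4.x API. If you build against FFmpeg 5.1 or newer, where AVChannelLayout replaces the integer layouts, the resampler would be configured roughly as in the following sketch (an assumption-laden example, not part of the original program; codecCtx is the same codec context as above):

// Sketch for FFmpeg >= 5.1 (AVChannelLayout API); not part of the original code.
SwrContext *swr = NULL;
int err = swr_alloc_set_opts2(&swr,
                              &codecCtx->ch_layout, AV_SAMPLE_FMT_S16, codecCtx->sample_rate,   // output
                              &codecCtx->ch_layout, codecCtx->sample_fmt, codecCtx->sample_rate, // input
                              0, NULL);
if (err < 0 || swr_init(swr) < 0)
{
    fprintf(stderr, "failed to set up the resampler\n");
}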

Defining the Demuxing Thread and the Video Decoding Thread

The demuxing thread parses the video file and reads the packets into the corresponding queues. The video decoding thread converts the video packets into picture data that SDL can display.

//thread.h
#ifndef _THREAD_H_
#define _THREAD_H_
#include "define.h"
#include "datequeue.h"
#include "parser_stream.h"

double synchronize_video(VideoState *is, AVFrame *src_frame, double pts)
{
    double frame_delay;
    if (pts != 0)
    {
        is->video_clock = pts;
    }
    else
    {
        pts = is->video_clock;
    }
    // update the video clock to the time of the next frame
    frame_delay = av_q2d(is->video_ctx->time_base);
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;
    return pts;
}

int decode_video_thread(void *arg)
{
    VideoState *is = (VideoState *)arg;
    AVPacket packet;
    double pts = 0;
    AVFrame *pFrame = av_frame_alloc();

    while (1)
    {
        // take a packet from the video queue
        if (packet_queue_get(&is->videoq, &packet, 1) < 0)
        {
            continue;
        }
        int ret = avcodec_send_packet(is->video_ctx, &packet);
        ret = avcodec_receive_frame(is->video_ctx, pFrame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        {
            continue;
        }
        if (ret < 0)
        {
            continue;
        }

        pts = pFrame->pts;
        // synchronize the video clock
        pts = synchronize_video(is, pFrame, pts);
        if (queue_picture(is, pFrame, pts) < 0)
        {
            break;
        }
        av_packet_unref(&packet);
    }
    av_frame_free(&pFrame);
    return 0;
}

int demux_thread(void *arg)
{
    int ret = -1;
    VideoState *is = (VideoState *)arg;
    AVPacket packet;
    AVFrame *pFrame = NULL;

    // open the input and read the stream information
    if (avformat_open_input(&is->pFormatCtx, is->filename, NULL, NULL) != 0)
        return -1;
    if (avformat_find_stream_info(is->pFormatCtx, NULL) < 0)
        return -1;

    // find the indices of the audio and video streams
    for (unsigned int i = 0; i < is->pFormatCtx->nb_streams; i++)
    {
        if (is->pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            is->videoStreamIndex = i;
        }
        if (is->pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            is->audioStreamIndex = i;
        }
    }

    // open the audio and video stream components
    if (is->audioStreamIndex >= 0)
    {
        stream_component_open(is, is->audioStreamIndex);
    }
    if (is->videoStreamIndex >= 0)
    {
        stream_component_open(is, is->videoStreamIndex);
        // start the video decoding thread
        is->video_tid = SDL_CreateThread(decode_video_thread, "decode_video_thread", is);
    }
    if (is->videoStreamIndex < 0 || is->audioStreamIndex < 0)
    {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        return -1;
    }

    pFrame = av_frame_alloc();

    // read packets and dispatch them to the queues
    while (av_read_frame(is->pFormatCtx, &packet) >= 0)
    {
        if (packet.stream_index == is->audioStreamIndex)
        {
            packet_queue_put(&is->audioq, &packet);
        }
        else
        {
            packet_queue_put(&is->videoq, &packet);
            SDL_Delay(10);
        }
        av_packet_unref(&packet);
    }

    // clean up
    if (pFrame)
    {
        av_frame_free(&pFrame);
    }
    if (is->audio_ctx)
    {
        avcodec_close(is->audio_ctx);
    }
    SDL_Quit();
    return ret;
}
#endif
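
To make the clock update in synchronize_video concrete: for a stream whose codec time base is 1/25, each frame nominally advances the video clock by 0.04 s, and every repeated field adds another half frame. The snippet below just restates that arithmetic with assumed values:

// Illustration of the video-clock update with assumed values (25 fps stream).
AVRational time_base = { 1, 25 };                  // codec time base: 1/25 s per tick
double frame_delay = av_q2d(time_base);            // 0.04 s
int repeat_pict = 1;                               // one repeated field, as reported by the decoder
frame_delay += repeat_pict * (frame_delay * 0.5);  // 0.04 + 0.02 = 0.06 s
// video_clock += frame_delay;                     // the clock moves forward by 0.06 s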

Defining the Audio/Video Decoding Functions

The decoding functions read audio/video data from the packet queues and render it for playback.

//callback.h
#ifndef _CALL_BACK_H_
#define _CALL_BACK_H_
#include "define.h"
#include "datequeue.h"

// decode audio data from the audio packet queue into audio_buf
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size)
{
    static AVPacket pkt;
    static uint8_t *audio_pkt_data = NULL;
    static int audio_pkt_size = 0;
    static AVFrame frame;

    int len1;
    int data_size = 0;
    SwrContext *swr_ctx = NULL;

    while (1)
    {
        // decode the packet we fetched from the queue
        while (audio_pkt_size > 0)
        {
            int ret = avcodec_send_packet(aCodecCtx, &pkt);
            ret = avcodec_receive_frame(aCodecCtx, &frame);
            if (ret < 0)
            {
                // no decoded frame available from this packet
                audio_pkt_size = 0;
                break;
            }
            // advance past the consumed data
            len1 = pkt.size;
            audio_pkt_data += len1;
            audio_pkt_size -= len1;

            // fill in missing channel information
            if (frame.channels > 0 && frame.channel_layout == 0)
                frame.channel_layout = av_get_default_channel_layout(frame.channels);
            else if (frame.channels == 0 && frame.channel_layout > 0)
                frame.channels = av_get_channel_layout_nb_channels(frame.channel_layout);

            if (swr_ctx)
            {
                swr_free(&swr_ctx);
                swr_ctx = NULL;
            }
            // resample the audio to the format SDL expects
            swr_ctx = swr_alloc_set_opts(NULL,
                                         g_state->wanted_frame.channel_layout, (AVSampleFormat)g_state->wanted_frame.format, g_state->wanted_frame.sample_rate,
                                         frame.channel_layout, (AVSampleFormat)frame.format, frame.sample_rate, 0, NULL);
            if (!swr_ctx || swr_init(swr_ctx) < 0)
            {
                printf("swr_init failed\n");
            }
            int dst_nb_samples = (int)av_rescale_rnd(swr_get_delay(swr_ctx, frame.sample_rate) + frame.nb_samples,
                                                     g_state->wanted_frame.sample_rate, frame.sample_rate, AV_ROUND_INF);
            int len2 = swr_convert(swr_ctx, &audio_buf, dst_nb_samples,
                                   (const uint8_t **)frame.data, frame.nb_samples);
            if (len2 < 0)
            {
                printf("swr_convert failed\n");
            }
            data_size = g_state->wanted_frame.channels * len2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
            assert(data_size <= buf_size);

            // advance the audio clock by the duration of the decoded data
            int n = 2 * g_state->audio_ctx->channels;
            g_state->audio_clock += (double)data_size / (double)(n * g_state->audio_ctx->sample_rate);

            av_packet_unref(&pkt);
            if (swr_ctx)
            {
                swr_free(&swr_ctx);
                swr_ctx = NULL;
            }
            // return the number of bytes written to audio_buf
            return data_size;
        }

        // fetch the next packet from the audio queue
        if (packet_queue_get(&g_state->audioq, &pkt, 1) < 0)
            return -1;
        audio_pkt_data = pkt.data;
        audio_pkt_size = pkt.size;
    }
}

// SDL audio callback: fills the SDL audio buffer with decoded data
void audio_callback(void *userdata, Uint8 *stream, int len)
{
    AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
    int len1, audio_size;
    static uint8_t audio_buff[192000 * 3 / 2];

    SDL_memset(stream, 0, len);

    while (len > 0)
    {
        if (g_state->audio_buf_index >= g_state->audio_buf_size)
        {
            // the local buffer is exhausted, decode more data
            audio_size = audio_decode_frame(aCodecCtx, audio_buff, sizeof(audio_buff));
            if (audio_size < 0)
            {
                // on error, output a block of silence
                g_state->audio_buf_size = 1024 * 2 * 2;
                memset(audio_buff, 0, g_state->audio_buf_size);
            }
            else
            {
                g_state->audio_buf_size = audio_size;
            }
            g_state->audio_buf_index = 0;
        }
        // copy the decoded data into the SDL stream
        len1 = g_state->audio_buf_size - g_state->audio_buf_index;
        if (len1 > len)
            len1 = len;
        SDL_MixAudio(stream, audio_buff + g_state->audio_buf_index, len1, SDL_MIX_MAXVOLUME);
        len -= len1;
        stream += len1;
        g_state->audio_buf_index += len1;
    }
}
#endif
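
The audio clock update at the end of audio_decode_frame can be sanity-checked with a quick calculation: 16-bit stereo audio at 44100 Hz consumes 2 * 2 * 44100 = 176400 bytes per second, so a 4096-byte chunk corresponds to roughly 23 ms of playback. The snippet below restates that arithmetic with assumed values:

// Quick check of the audio-clock increment with assumed values.
int channels = 2;                 // stereo
int bytes_per_sample = 2;         // 16-bit samples
int sample_rate = 44100;
int data_size = 4096;             // bytes returned by audio_decode_frame
double increment = (double)data_size / (channels * bytes_per_sample * sample_rate);
// increment is about 0.023 s, i.e. roughly 23 ms of audio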

Event Handling in the Main Function

The main function ties all the pieces together, handles events, and periodically refreshes the video display.

//main.cpp
#include "define.h"
#include "SDL_Wraper.h"
#include "datequeue.h"
#include "parser_stream.h"
#include "callback.h"
#include "thread.h"

// current audio playback time in seconds
double get_audio_clock(VideoState *is)
{
    double pts;
    int hw_buf_size, bytes_per_sec, n;

    pts = is->audio_clock;
    hw_buf_size = is->audio_buf_size - is->audio_buf_index;
    bytes_per_sec = 0;
    n = is->audio_ctx->channels * 2;
    if (is->audio_st)
    {
        bytes_per_sec = is->audio_ctx->sample_rate * n;
    }
    if (bytes_per_sec)
    {
        // subtract the data that has been decoded but not yet played
        pts -= (double)hw_buf_size / bytes_per_sec;
    }
    return pts;
}

// timer callback that pushes a refresh event
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque;
    SDL_PushEvent(&event);
    return 0;   // 0 means the timer does not repeat
}

// schedule a refresh event after the given delay (in milliseconds)
static void schedule_refresh(VideoState *is, int delay)
{
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

void video_refresh_timer(void *userdata)
{
    VideoState *is = (VideoState *)userdata;
    AVFrame *vp;
    double delay, sync_threshold, ref_clock, diff;
    double actual_delay;

    if (is->video_st)
    {
        if (is->pictq_size == 0)
        {
            schedule_refresh(is, 1);
        }
        else
        {
            vp = &is->pictq[is->pictq_rindex];

            // delay between this frame and the previous one, in seconds
            delay = (vp->pts - is->frame_last_pts) * av_q2d(is->video_st->time_base);
            // remember pts and delay for the next refresh
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            // get the reference (audio) clock and compare it with the video pts
            ref_clock = get_audio_clock(is);
            diff = vp->pts * av_q2d(is->video_st->time_base) - ref_clock;

            /* Skip or repeat the frame. Take delay into account.
               FFPlay still doesn't "know if this is the best guess." */
            sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
            if (fabs(diff) < AV_NOSYNC_THRESHOLD)
            {
                if (diff <= -sync_threshold)
                {
                    delay = 0;           // video is behind the audio: show the frame immediately
                }
                else if (diff >= sync_threshold)
                {
                    delay = 2 * delay;   // video is ahead of the audio: double the delay
                }
            }
            is->frame_timer += delay;

            // compute the real delay before the frame should be displayed
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if (actual_delay < 0.010)
            {
                actual_delay = 0.010;
            }
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

            // display the frame
            video_display(is);

            // advance the read index and prepare the next refresh
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
            {
                is->pictq_rindex = 0;
            }
            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    }
    else
    {
        schedule_refresh(is, 100);
    }
}

int main(int argc, char *argv[])
{
    int ret = -1;
    SDL_Event event;

    if (argc < 2)
    {
        printf("Usage: command <file>\n");
        return ret;
    }

    // initialize SDL
    InitSDL();

    // allocate and initialize the global state
    g_state = (VideoState *)av_mallocz(sizeof(VideoState));
    g_state->pictq_mutex = SDL_CreateMutex();
    g_state->pictq_cond = SDL_CreateCond();
    strncpy(g_state->filename, argv[1], sizeof(g_state->filename) - 1);

    // start the demuxing thread
    g_state->parse_tid = SDL_CreateThread(demux_thread, "demux_thread", g_state);
    if (!g_state->parse_tid)
    {
        av_free(g_state);
        goto __FAIL;
    }

    // wait until the video codec context is ready
    while (!g_state->video_ctx)
    {
        SDL_Delay(10);
    }

    // create the window, renderer and texture in the main thread;
    // creating them in a worker thread would block the main event loop
    win = SDL_CreateWindow("Feifei Player",
                           SDL_WINDOWPOS_UNDEFINED,
                           SDL_WINDOWPOS_UNDEFINED,
                           g_state->video_ctx->width, g_state->video_ctx->height,
                           SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE);
    renderer = SDL_CreateRenderer(win, -1, 0);
    texture = SDL_CreateTexture(renderer,
                                SDL_PIXELFORMAT_IYUV,
                                SDL_TEXTUREACCESS_STREAMING,
                                g_state->video_ctx->width, g_state->video_ctx->height);

    schedule_refresh(g_state, 40);

    // main event loop
    while (1)
    {
        SDL_WaitEvent(&event);
        switch (event.type)
        {
        case FF_QUIT_EVENT:
        case SDL_QUIT:
            g_state->quit = 1;
            goto __QUIT;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }

__QUIT:
    ret = 0;
__FAIL:
    SDL_Quit();
    return ret;
}

With audio/video synchronization in place, the demo program is essentially the prototype of a player and can be used to play all kinds of videos. Here an mkv file is used as an example; the playback looks as follows:
