思路

基于FFmpeg写一个播放器,其实十分的简单。实际上,主要是对FFmpeg的API的封装,同时,我们需要将音视频通过主机呈现出来,所以还依赖于平台的SDL库,整体步骤和思路如下:
1. 编译用于音视频解码的FFmpeg库;
2. 编译用于音视频呈现的SDL库;
3. 编写主程序完成对音视频的整个调度过程;

编译FFmpeg库

这个步骤在《与FFmpeg的初次邂逅》(http://blog.csdn.net/ericbar/article/details/69943941)文中已经有所描述,这里为了简单,我们将FFmpeg的各库编译成静态方式,不采用动态库方式进行链接。

编译SDL库

这里SDL库我们依赖于1.2.15来进行编译,而不是最新的2.0版本;所以首先到SDL的官方网站http://www.libsdl.org/下载1.2.15版本的源代码进行傻瓜式的编译即可,这里需要注意我们配置的SDL生成库和头文件的位置。

ffmpeg@ubuntu:~/work/test$ tar xzvf SDL-1.2.15.tar.gz
ffmpeg@ubuntu:~/work/test$ cd SDL-1.2.15/
ffmpeg@ubuntu:~/work/test/SDL-1.2.15$ ./configure  --prefix=/home/ffmpeg/work/SDL-1.2.15/out
ffmpeg@ubuntu:~/work/test/SDL-1.2.15$ make

编译过程中可能会遇到如下错误,

./src/video/x11/SDL_x11sym.h:168:17: error: conflicting types for ‘_XData32’SDL_X11_SYM(int,_XData32,(Display *dpy,register long *data,unsigned len),(dpy,data,len),return)^
./src/video/x11/SDL_x11dyn.c:95:5: note: in definition of macro ‘SDL_X11_SYM’rc fn params { ret p##fn args ; }^
In file included from ./src/video/x11/SDL_x11dyn.h:34:0,from ./src/video/x11/SDL_x11dyn.c:26:
/usr/include/X11/Xlibint.h:568:12: note: previous declaration of ‘_XData32’ was hereextern int _XData32(^
build-deps:1129: recipe for target 'build/SDL_x11dyn.lo' failed
make: *** [build/SDL_x11dyn.lo] Error 1
ffmpeg@ubuntu:~/work/SDL-1.2.15$ 

请参考如下方法修改(http://blog.csdn.net/jhting/article/details/38523945),

-SDL_X11_SYM(int,_XData32,(Display *dpy,register long *data,unsigned len),(dpy,data,len),return)
+SDL_X11_SYM(int,_XData32,(Display *dpy,register _Xconst long *data,unsigned len),(dpy,data,len),return)  

xplayer播放器

暂且把我们这个播放器叫作xplayer吧。代码我们采用Makefile的方式进行管理,其目录结构如下:

下面是整个播放器的Makefile文件:

# xplayer Makefile Sample

# List Compiler Tools
CC = gcc
XX = g++
CFLAGS = -Wall -O -g

# Compile Target
TARGET = xplayer

# Include files
INCDIR = /home/ffmpeg/work/ffmpeg-3.2.4/out/include
INCDIR += /home/ffmpeg/work/ffmpeg-3.2.4
INCDIR += /home/ffmpeg/work/SDL-1.2.15/out/include
INCLUDE = $(foreach dir, $(INCDIR), -I$(dir))

# Library search paths
LIBPATH = /home/ffmpeg/work/ffmpeg-3.2.4/out/lib
LIBPATH += /home/ffmpeg/work/SDL-1.2.15/out/lib
LIBSPATH = $(foreach dir, $(LIBPATH), -L$(dir))

# needs to be in linking order
LIB = avfilter avformat avcodec swresample swscale avutil pthread z SDL dl asound
LIBS := $(foreach n,$(LIB),-l$(n))

# Pattern rules (recipes MUST be tab-indented)
%.o: %.c
	$(CC) $(INCLUDE) -c $< -o $@ $(CFLAGS)

%.o: %.cpp
	$(XX) $(INCLUDE) -c $< -o $@ $(CFLAGS)

# Source Code
SOURCES = $(wildcard *.c *.cpp)

# Objs File
OBJS = $(patsubst %.c,%.o,$(patsubst %.cpp,%.o,$(SOURCES)))

# BIN depend on
$(TARGET) : $(OBJS)
	$(XX) -o $(TARGET) $(OBJS) $(LIBS) $(LIBSPATH)
	chmod a+x $(TARGET)

# clean
clean :
	rm -rf $(OBJS)
	rm -rf $(TARGET)

下面是音频相关的audio.c的文件代码:

/*
 * Copyright (c) 2017 ericbaba
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "xplayer.h"

static AVFilterContext *in_audio_filter;   /* the first filter in the audio chain */
static AVFilterContext *out_audio_filter;  /* the last filter in the audio chain */
static AVFilterGraph *agraph;              /* audio filter graph */
static struct AudioParams audio_filter_src;

static double audio_diff_cum;       /* used for AV difference average computation */
static double audio_diff_avg_coef;  /* NOTE(review): never initialized, stays 0.0 — confirm intended */
static double audio_diff_threshold; /* NOTE(review): never initialized, stays 0.0 — confirm intended */
static int audio_diff_avg_count;
static double audio_clock;          /* pts (seconds) right after the last decoded data */
static int audio_buf_size;          /* bytes of valid decoded data in the callback buffer */
static int audio_buf_index;         /* bytes of that data already copied to the device */

/*
 * Shrink or grow the decoded sample buffer so the audio clock converges
 * towards the master clock.  samples holds 16-bit interleaved samples;
 * samples_size is its valid length in bytes.  Returns the (possibly
 * adjusted) size in bytes.
 */
static int synchronize_audio(short *samples, int samples_size)
{
    int n;
    double ref_clock;
    double diff, avg_diff;
    int wanted_size, min_size, max_size;

    ref_clock = get_master_clock();
    diff = get_audio_clock() - ref_clock;

    if (diff < AV_NOSYNC_THRESHOLD) {
        /* accumulate the diffs (exponentially weighted moving average) */
        audio_diff_cum = diff + audio_diff_avg_coef * audio_diff_cum;
        if (audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
            audio_diff_avg_count++;
        } else {
            avg_diff = audio_diff_cum * (1.0 - audio_diff_avg_coef);
            if (fabs(avg_diff) >= audio_diff_threshold) {
                n = 2 * global_context.acodec_ctx->channels; /* bytes per sample frame (S16) */
                wanted_size = samples_size + ((int)(diff * global_context.acodec_ctx->sample_rate) * n);

                /* never correct by more than SAMPLE_CORRECTION_PERCENT_MAX percent.
                 * BUG FIX: the original divided (100 +/- X) by 100 in integer
                 * arithmetic, collapsing the bounds to 0 and samples_size. */
                min_size = samples_size * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;
                max_size = samples_size * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;
                if (wanted_size < min_size) {
                    wanted_size = min_size;
                } else if (wanted_size > max_size) {
                    wanted_size = max_size;
                }

                if (wanted_size < samples_size) {
                    /* remove samples: simply truncate the buffer */
                    samples_size = wanted_size;
                } else if (wanted_size > samples_size) {
                    /* add samples by duplicating the final sample frame.
                     * BUG FIX: nb was computed as samples_size - wanted_size
                     * (negative), so the loop below never executed. */
                    uint8_t *samples_end, *q;
                    int nb;

                    nb = wanted_size - samples_size;
                    samples_end = (uint8_t *)samples + samples_size - n;
                    q = samples_end + n;
                    while (nb > 0) {
                        memcpy(q, samples_end, n);
                        q += n;
                        nb -= n;
                    }
                    samples_size = wanted_size;
                }
            }
        }
    } else {
        /* difference is TOO big: reset the averaging state */
        audio_diff_avg_count = 0;
        audio_diff_cum = 0;
    }
    return samples_size;
}

/*
 * Build the audio filter graph abuffer -> aformat -> abuffersink, which
 * converts whatever the decoder produces (audio_filter_src) into packed
 * S16 at the source sample rate for SDL.
 *
 * On success *graph, *src and *sink receive the graph and its endpoints
 * and 0 is returned; on failure a negative AVERROR code is returned and
 * the partially built graph is freed (the original leaked it).
 */
static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src, AVFilterContext **sink)
{
    AVFilterGraph *filter_graph;
    AVFilterContext *abuffer_ctx;
    AVFilter        *abuffer;
    AVFilterContext *aformat_ctx;
    AVFilter        *aformat;
    AVFilterContext *abuffersink_ctx;
    AVFilter        *abuffersink;
    char options_str[1024]; /* was uint8_t[]: these are passed to char* APIs */
    char ch_layout[64];
    int err;

    /* Create a new filtergraph, which will contain all the filters. */
    filter_graph = avfilter_graph_alloc();
    if (!filter_graph) {
        av_log(NULL, AV_LOG_ERROR, "Unable to create filter graph.\n");
        return AVERROR(ENOMEM);
    }

    /* Create the abuffer filter: it feeds decoded frames into the graph. */
    abuffer = avfilter_get_by_name("abuffer");
    if (!abuffer) {
        av_log(NULL, AV_LOG_ERROR, "Could not find the abuffer filter.\n");
        err = AVERROR_FILTER_NOT_FOUND;
        goto fail;
    }
    abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "src");
    if (!abuffer_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate the abuffer instance.\n");
        err = AVERROR(ENOMEM);
        goto fail;
    }

    /* Set the filter options through the AVOptions API. */
    av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, audio_filter_src.channel_layout);
    av_opt_set    (abuffer_ctx, "channel_layout", ch_layout,                                     AV_OPT_SEARCH_CHILDREN);
    av_opt_set    (abuffer_ctx, "sample_fmt",     av_get_sample_fmt_name(audio_filter_src.fmt), AV_OPT_SEARCH_CHILDREN);
    av_opt_set_q  (abuffer_ctx, "time_base",      (AVRational){ 1, audio_filter_src.freq },     AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(abuffer_ctx, "sample_rate",    audio_filter_src.freq,                        AV_OPT_SEARCH_CHILDREN);

    /* Options were set above, so initialize with NULL. */
    err = avfilter_init_str(abuffer_ctx, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not initialize the abuffer filter.\n");
        goto fail;
    }

    /* Create the aformat filter: it enforces the output format we want. */
    aformat = avfilter_get_by_name("aformat");
    if (!aformat) {
        av_log(NULL, AV_LOG_ERROR, "Could not find the aformat filter.\n");
        err = AVERROR_FILTER_NOT_FOUND;
        goto fail;
    }
    aformat_ctx = avfilter_graph_alloc_filter(filter_graph, aformat, "aformat");
    if (!aformat_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate the aformat instance.\n");
        err = AVERROR(ENOMEM);
        goto fail;
    }

    /* Options can also be passed as a key1=value1:key2=value2 string. */
    snprintf(options_str, sizeof(options_str),
             "sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
             av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), audio_filter_src.freq,
             (uint64_t)audio_filter_src.channel_layout);
    err = avfilter_init_str(aformat_ctx, options_str);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
        goto fail;
    }

    /* Create the abuffersink filter: it pulls filtered data out of the graph. */
    abuffersink = avfilter_get_by_name("abuffersink");
    if (!abuffersink) {
        av_log(NULL, AV_LOG_ERROR, "Could not find the abuffersink filter.\n");
        err = AVERROR_FILTER_NOT_FOUND;
        goto fail;
    }
    abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
    if (!abuffersink_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate the abuffersink instance.\n");
        err = AVERROR(ENOMEM);
        goto fail;
    }

    /* This filter takes no options. */
    err = avfilter_init_str(abuffersink_ctx, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not initialize the abuffersink instance.\n");
        goto fail;
    }

    /* Connect the filters into a linear chain. */
    err = avfilter_link(abuffer_ctx, 0, aformat_ctx, 0);
    if (err >= 0) {
        err = avfilter_link(aformat_ctx, 0, abuffersink_ctx, 0);
    }
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error connecting filters\n");
        goto fail;
    }

    /* Configure the graph. */
    err = avfilter_graph_config(filter_graph, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n");
        goto fail;
    }

    *graph = filter_graph;
    *src   = abuffer_ctx;
    *sink  = abuffersink_ctx;
    return 0;

fail:
    avfilter_graph_free(&filter_graph);
    return err;
}

/* Return channel_layout only if it matches the reported channel count. */
static inline int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
{
    if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels) {
        return channel_layout;
    } else {
        return 0;
    }
}

/*
 * Decode one audio frame from the audio packet queue into audio_buf
 * (capacity buf_size bytes) and run it through the S16 filter graph.
 * Returns the number of bytes written, or -1 when the queue reports quit.
 * Uses static state so successive calls continue the current packet.
 */
static int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size)
{
    static AVPacket pkt;
    static uint8_t *audio_pkt_data = NULL;
    static int audio_pkt_size = 0;
    static int reconfigure = 1;
    int len1, data_size;
    int got_frame;
    AVFrame *frame = NULL;
    int ret = -1;

    for (;;) {
        /* consume the current packet until it is exhausted */
        while (audio_pkt_size > 0) {
            if (NULL == frame) {
                frame = av_frame_alloc();
            }
            data_size = buf_size;
            got_frame = 0;

            /* len1 is the number of packet bytes consumed by the decoder */
            len1 = avcodec_decode_audio4(aCodecCtx, frame, &got_frame, &pkt);
            if (len1 < 0) {
                /* decode error: drop the rest of this packet and fetch a new one.
                 * BUG FIX: this check (and the consumption bookkeeping below) used
                 * to run only when a frame was produced, so a packet that yielded
                 * no frame would loop forever. */
                audio_pkt_size = 0;
                av_log(NULL, AV_LOG_ERROR, "avcodec_decode_audio4 failure. \n");
                break;
            }
            audio_pkt_data += len1;
            audio_pkt_size -= len1;

            if (!got_frame) {
                continue; /* decoder needs more data from this packet */
            }

            if (reconfigure) {
                /* first decoded frame: record the source format and build
                 * the filter graph used by every subsequent frame */
                reconfigure = 0;
                int64_t dec_channel_layout =
                    get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));
                audio_filter_src.fmt            = frame->format;
                audio_filter_src.channels       = av_frame_get_channels(frame);
                audio_filter_src.channel_layout = dec_channel_layout;
                audio_filter_src.freq           = frame->sample_rate;
                init_filter_graph(&agraph, &in_audio_filter, &out_audio_filter);
            }

            if ((ret = av_buffersrc_add_frame(in_audio_filter, frame)) < 0) {
                av_log(NULL, AV_LOG_ERROR, "av_buffersrc_add_frame :  failure. \n");
                return ret;
            }
            if ((ret = av_buffersink_get_frame(out_audio_filter, frame)) < 0) {
                av_log(NULL, AV_LOG_ERROR, "av_buffersink_get_frame :  failure. \n");
                continue; /* filter graph buffered the data; decode more */
            }

            data_size = av_samples_get_buffer_size(NULL, frame->channels,
                                                   frame->nb_samples, frame->format, 1);

            /* copy the filtered, interleaved S16 data out to the caller */
            memcpy(audio_buf, frame->data[0], data_size);

            /* advance the audio clock by the duration of this data */
            int n = 2 * global_context.acodec_ctx->channels;
            audio_clock += (double)data_size / (double)(n * global_context.acodec_ctx->sample_rate);

            av_free_packet(&pkt);
            av_frame_free(&frame);
            return data_size;
        }

        av_free_packet(&pkt);
        av_frame_free(&frame);

        /* blocking read of the next audio packet; < 0 means we are quitting */
        if (packet_queue_get(&global_context.audio_queue, &pkt, 1) < 0) {
            return -1;
        }
        audio_pkt_data = pkt.data;
        audio_pkt_size = pkt.size;

        /* a packet pts resynchronizes the audio clock */
        if (pkt.pts != AV_NOPTS_VALUE) {
            audio_clock = pkt.pts * av_q2d(global_context.astream->time_base);
        }
    }
    return ret;
}

/*
 * Current audio playback position in seconds: the clock of the last
 * decoded data minus the portion still waiting in the callback buffer.
 */
double get_audio_clock()
{
    double pts;
    int hw_buf_size, bytes_per_sec, n;

    pts = audio_clock;
    hw_buf_size = audio_buf_size - audio_buf_index; /* bytes not yet played */
    n = global_context.acodec_ctx->channels * 2;
    bytes_per_sec = global_context.acodec_ctx->sample_rate * n;
    if (bytes_per_sec) {
        pts -= (double)hw_buf_size / bytes_per_sec;
    }
    /* BUG FIX: the original returned the unadjusted audio_clock,
     * throwing away the buffer compensation computed above. */
    return pts;
}

/*
 * SDL audio callback: fill 'stream' with 'len' bytes of decoded audio.
 * Decodes new frames on demand and applies A/V sync correction.
 */
void audio_callback(void *userdata, Uint8 *stream, int len)
{
    AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
    int len1, audio_size;
    static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2 + FF_INPUT_BUFFER_PADDING_SIZE];

    while (len > 0) {
        /* all previously decoded data consumed? decode another frame */
        if (audio_buf_index >= audio_buf_size) {
            audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf));
            if (audio_size < 0) {
                /* decode produced nothing: output silence */
                audio_buf_size = (AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2 + FF_INPUT_BUFFER_PADDING_SIZE;
                audio_buf_index = 0;
                memset(audio_buf, 0, audio_buf_size);
            } else {
                /* stretch/shrink the frame to follow the master clock */
                audio_size = synchronize_audio((int16_t *)audio_buf, audio_size);
                audio_buf_size = audio_size;
                audio_buf_index = 0;
            }
        }

        /* copy as much decoded data as the device asked for */
        len1 = audio_buf_size - audio_buf_index;
        if (len1 > len) {
            len1 = len;
        }
        memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
        len -= len1;
        stream += len1;
        audio_buf_index += len1;
    }
}

下面是视频相关的文件video.c的源代码:

/*
 * Copyright (c) 2017 ericbaba
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "xplayer.h"

static double video_clock;       /* running pts of the next decoded frame */
/* BUG FIX: video_current_pts was defined twice; one definition removed. */
static double video_current_pts; /* pts of the most recently displayed frame */

/*
 * Reconcile the frame pts with the running video clock.
 * If the frame carries a pts, resync the clock to it; otherwise reuse the
 * clock value.  The clock is then advanced for repeated fields
 * (repeat_pict extends display time by half a time_base unit each).
 * Returns the pts to use for this frame.
 */
static double synchronize_video(AVFrame *src_frame, double pts)
{
    double time_base;
    double frame_delay = 0;

    if (pts != 0) {
        video_clock = pts;  /* usable pts: resync the clock */
    } else {
        pts = video_clock;  /* no pts: fall back on the running clock */
    }

    /* NOTE(review): only the repeat_pict correction is added here; the base
     * frame duration is never accumulated — confirm this is intended. */
    time_base = av_q2d(global_context.vstream->time_base);
    frame_delay += (src_frame->repeat_pict * (time_base * 0.5));
    video_clock += frame_delay;
    return pts;
}

/*
 * Convert src (src_pix_fmt, src_width x src_height) into dst's pixel
 * format at the same dimensions using swscale.
 * Returns 0 on success, -1 if no scaler context could be created.
 */
static int img_convert(AVPicture *dst, int dst_pix_fmt,
                       const AVPicture *src, int src_pix_fmt,
                       int src_width, int src_height)
{
    int w;
    int h;
    struct SwsContext *pSwsCtx;

    w = src_width;
    h = src_height;
    pSwsCtx = sws_getContext(w, h, src_pix_fmt, w, h, dst_pix_fmt,
                             SWS_BICUBIC, NULL, NULL, NULL);
    if (!pSwsCtx) {
        return -1;
    }
    sws_scale(pSwsCtx, (const uint8_t * const *)src->data, src->linesize,
              0, h, dst->data, dst->linesize);
    /* BUG FIX: the context was allocated per call and never released,
     * leaking memory on every frame. */
    sws_freeContext(pSwsCtx);
    return 0;
}

/*
 * Append a decoded frame (with its pts) to the picture queue.
 * Blocks while the queue is full; asks the event thread to (re)allocate
 * the SDL overlay when the frame size changed.
 */
static int queue_picture(AVFrame *pFrame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;
    AVPicture pict;
    SDL_Event event;

    /* wait for a free slot in the picture queue */
    SDL_LockMutex(global_context.pictq_mutex);
    while (global_context.pictq_size >= VIDEO_PICTURE_QUEUE_SIZE) {
        SDL_CondWait(global_context.pictq_cond, global_context.pictq_mutex);
    }
    SDL_UnlockMutex(global_context.pictq_mutex);

    /* windex is set to 0 initially */
    vp = &global_context.pictq[global_context.pictq_windex];

    if (!vp->bmp ||
        vp->width != global_context.vcodec_ctx->width ||
        vp->height != global_context.vcodec_ctx->height) {
        /* overlay missing or wrong size: the event thread must allocate it
         * (SDL overlay calls have to run on the thread that owns the surface) */
        vp->allocated = 0;
        event.type = FF_ALLOC_EVENT;
        SDL_PushEvent(&event);

        SDL_LockMutex(global_context.pictq_mutex);
        while (!vp->allocated) {
            SDL_CondWait(global_context.pictq_cond, global_context.pictq_mutex);
        }
        SDL_UnlockMutex(global_context.pictq_mutex);
    }

    if (vp->bmp) {
        vp->pts = pts;

        SDL_LockYUVOverlay(vp->bmp);
        dst_pix_fmt = AV_PIX_FMT_YUV420P;

        /* SDL YV12 stores planes as Y,V,U while YUV420P is Y,U,V — swap 1/2 */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];
        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

        /* Convert the image into YUV format that SDL uses */
        img_convert(&pict, dst_pix_fmt,
                    (AVPicture *)pFrame, global_context.vcodec_ctx->pix_fmt,
                    global_context.vcodec_ctx->width, global_context.vcodec_ctx->height);
        SDL_UnlockYUVOverlay(vp->bmp);

        /* advance the write index and publish the new queue size */
        if (++global_context.pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) {
            global_context.pictq_windex = 0;
        }
        SDL_LockMutex(global_context.pictq_mutex);
        global_context.pictq_size++;
        SDL_UnlockMutex(global_context.pictq_mutex);
    }
    return 0;
}

/*
 * Current video playback position: pts of the displayed frame plus the
 * wall-clock time elapsed since it was shown.
 */
double get_video_clock()
{
    double delta = (av_gettime() - global_context.video_current_pts_time) / 1000000.0;
    return video_current_pts + delta;
}

/*
 * Video decode thread: pull packets off the video queue, decode them and
 * push finished frames (with synchronized pts) onto the picture queue.
 */
int video_thread(void *arg)
{
    AVPacket pkt1;
    AVPacket *packet = &pkt1;
    int frameFinished;
    AVFrame *pFrame;
    double pts;

    pFrame = av_frame_alloc();

    for (;;) {
        if (packet_queue_get(&global_context.video_queue, packet, 1) < 0) {
            /* means we quit getting packets */
            av_log(NULL, AV_LOG_ERROR, "packet_queue_get failure . \n");
            break;
        }

        avcodec_decode_video2(global_context.vcodec_ctx, pFrame, &frameFinished, packet);

        /* Did we get a video frame? */
        if (frameFinished) {
            pts = pFrame->pkt_pts * av_q2d(global_context.vstream->time_base);
            pts = synchronize_video(pFrame, pts);
            if (queue_picture(pFrame, pts) < 0) {
                break;
            }
        }

        av_packet_unref(packet);
        av_init_packet(packet);
    }

    av_free(pFrame);
    return 0;
}

/*
 * Executed on the event thread: (re)create the SDL YUV overlay for the
 * picture-queue slot at the write index and signal the waiting decoder.
 */
void alloc_picture(void *userdata)
{
    VideoPicture *vp;

    vp = &global_context.pictq[global_context.pictq_windex];
    if (vp->bmp) {
        /* we already have one; make another, bigger/smaller */
        SDL_FreeYUVOverlay(vp->bmp);
    }

    /* Allocate a place to put our YUV image on that screen */
    vp->bmp = SDL_CreateYUVOverlay(global_context.vcodec_ctx->width,
                                   global_context.vcodec_ctx->height,
                                   SDL_YV12_OVERLAY,
                                   global_context.screen);
    vp->width = global_context.vcodec_ctx->width;
    vp->height = global_context.vcodec_ctx->height;

    SDL_LockMutex(global_context.pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(global_context.pictq_cond);
    SDL_UnlockMutex(global_context.pictq_mutex);
}

/*
 * Blit the current picture-queue frame to the screen, centered and
 * letterboxed to preserve the aspect ratio.
 */
void video_display()
{
    SDL_Rect rect;
    VideoPicture *vp;
    float aspect_ratio;
    int w, h, x, y;

    vp = &global_context.pictq[global_context.pictq_rindex];
    if (vp->bmp) {
        /* derive the display aspect ratio from the sample aspect ratio */
        if (global_context.vcodec_ctx->sample_aspect_ratio.num == 0) {
            aspect_ratio = 0;
        } else {
            aspect_ratio = av_q2d(global_context.vcodec_ctx->sample_aspect_ratio) *
                           global_context.vcodec_ctx->width / global_context.vcodec_ctx->height;
        }
        if (aspect_ratio <= 0.0) {
            aspect_ratio = (float)global_context.vcodec_ctx->width /
                           (float)global_context.vcodec_ctx->height;
        }

        /* fit to screen height first, fall back to width; & -3 keeps the
         * size a multiple of 4 for YUV alignment */
        h = global_context.screen->h;
        w = ((int)rint(h * aspect_ratio)) & -3;
        if (w > global_context.screen->w) {
            w = global_context.screen->w;
            h = ((int)rint(w / aspect_ratio)) & -3;
        }
        x = (global_context.screen->w - w) / 2;
        y = (global_context.screen->h - h) / 2;

        rect.x = x;
        rect.y = y;
        rect.w = w;
        rect.h = h;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    }
}

/*
 * Periodic refresh driven by FF_REFRESH_EVENT: decide how long to show
 * the next queued frame based on the master clock, display it, and
 * schedule the next refresh.
 */
void video_refresh_timer()
{
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    if (global_context.pictq_size == 0) {
        /* nothing decoded yet: poll again very soon */
        schedule_refresh(1);
    } else {
        vp = &global_context.pictq[global_context.pictq_rindex];

        video_current_pts = vp->pts;
        global_context.video_current_pts_time = av_gettime();

        delay = vp->pts - global_context.frame_last_pts;
        if (delay <= 0 || delay >= 1.0) {
            /* implausible pts delta: reuse the previous frame delay */
            delay = global_context.frame_last_delay;
        }
        global_context.frame_last_delay = delay;
        global_context.frame_last_pts = vp->pts;

        ref_clock = get_master_clock();
        diff = vp->pts - ref_clock;

        /* only adjust if the error is within the no-sync threshold */
        sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold) {
                /* video is behind: show the frame immediately */
                av_log(NULL, AV_LOG_ERROR, "video_refresh_timer : repeat. \n");
                delay = 0;
            } else if (diff >= sync_threshold) {
                /* video is ahead: hold the frame longer */
                av_log(NULL, AV_LOG_ERROR, "video_refresh_timer : skip. \n");
                delay = 2 * delay;
            }
        } else {
            av_log(NULL, AV_LOG_ERROR, " diff > 10 , diff = %f, vp->pts = %f , ref_clock = %f\n",
                   diff, vp->pts, ref_clock);
        }

        global_context.frame_timer += delay;
        actual_delay = global_context.frame_timer - (av_gettime() / 1000000.0);
        if (actual_delay < 0.010) {
            /* clamp: refresh no faster than ~100 fps */
            actual_delay = 0.010;
        }
        schedule_refresh((int)(actual_delay * 1000 + 0.5)); /* +0.5 rounds to nearest ms */

        video_display();

        /* advance the read index and free a slot for the decoder */
        if (++global_context.pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
            global_context.pictq_rindex = 0;
        }
        SDL_LockMutex(global_context.pictq_mutex);
        global_context.pictq_size--;
        SDL_CondSignal(global_context.pictq_cond);
        SDL_UnlockMutex(global_context.pictq_mutex);
    }
}

下面是公共函数文件util.c的源代码:

/*
 * Copyright (c) 2017 ericbaba
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "xplayer.h"

/*
 * SDL timer callback: post a FF_REFRESH_EVENT to the event thread.
 * Returning 0 cancels the timer — each refresh schedules the next one
 * itself via schedule_refresh().
 */
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;

    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque;
    SDL_PushEvent(&event);
    return 0;
}

/* Arrange for a single FF_REFRESH_EVENT after 'delay' milliseconds. */
void schedule_refresh(int delay)
{
    SDL_AddTimer(delay, sdl_refresh_timer_cb, NULL);
}

/* Zero the queue and create its mutex/condition pair. */
void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
}

/*
 * Append a packet to the tail of the queue and wake one waiting reader.
 * Returns 0 on success, -1 if the packet could not be duplicated or the
 * list node could not be allocated.
 */
int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *node;

    /* take ownership of the packet's data buffer */
    if (av_dup_packet(pkt) < 0) {
        return -1;
    }

    node = av_malloc(sizeof(AVPacketList));
    if (!node) {
        return -1;
    }
    node->pkt = *pkt;
    node->next = NULL;

    SDL_LockMutex(q->mutex);
    if (q->last_pkt) {
        q->last_pkt->next = node;
    } else {
        q->first_pkt = node;
    }
    q->last_pkt = node;
    q->nb_packets++;
    q->size += node->pkt.size;
    SDL_CondSignal(q->cond);
    SDL_UnlockMutex(q->mutex);

    return 0;
}

/*
 * Pop the packet at the head of the queue into *pkt.
 * Returns 1 when a packet was dequeued, 0 when the queue is empty and
 * block is false, and -1 when the global quit flag is set.
 * When block is true, waits on the condition until data arrives.
 */
int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    int result;
    AVPacketList *head;

    SDL_LockMutex(q->mutex);
    for (;;) {
        if (global_context.quit) {
            result = -1;
            break;
        }

        head = q->first_pkt;
        if (head) {
            /* detach the head node and fix up the tail pointer */
            q->first_pkt = head->next;
            if (!q->first_pkt) {
                q->last_pkt = NULL;
            }
            q->nb_packets--;
            q->size -= head->pkt.size;
            *pkt = head->pkt;
            av_free(head);
            result = 1;
            break;
        }

        if (!block) {
            result = 0;
            break;
        }

        SDL_CondWait(q->cond, q->mutex);
    }
    SDL_UnlockMutex(q->mutex);

    return result;
}

/* Total byte size of all packets currently queued. */
int packet_queue_size(PacketQueue *q)
{
    return q->size;
}

下面是核心的播放器控制代码xplayer.c及头文件xplayer.h源代码:

/*
 * Copyright (c) 2017 ericbaba
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <signal.h>
#include "xplayer.h"

#define TEST_FILE_AVI "/home/ffmpeg/work/ljr.avi"
#define TEST_FILE_4K "/home/ffmpeg/work/4k.mp4"
#define TEST_FILE_TS "/home/ffmpeg/work/clear.ts"
#define TEST_FILE_H265 "/home/ffmpeg/work/surfing.265"
#define TEST_FILE_1080P "/home/ffmpeg/work/1080p.avi"
#define TEST_FILE_FLV "/home/ffmpeg/work/flvtest.flv"
#define TEST_FILE_AAC "/home/ffmpeg/work/aaclc.mp4"
#define TEST_FILE_JPG "/home/ffmpeg/work/11.jpg"
#define TEST_FILE_MP3 "/home/ffmpeg/work/lkdd.mp3"
#define TEST_FILE_MLH_MP3 "/home/ffmpeg/work/mlh.mp3"
#define TEST_FILE_WAV "/home/ffmpeg/work/xpstart.wav"
#define TEST_FILE_MPG "/home/ffmpeg/work/rec.mpg"
#define TEST_FILE_RMVB "/home/ffmpeg/work/tj.rmvb"
#define TEST_FILE_RTSP "rtsp://10.18.69.232:8554/clear.ts"
#define TEST_FILE_VIDEO_DST "/home/ffmpeg/work/study/test/readframe/video.es"
#define TEST_FILE_HD_CTS "/home/ffmpeg/work/bbb_short.ffmpeg.1280x720.mp4.libx264_5000kbps_30fps.libfaac_stereo_192kbps_48000Hz.mp4"
#define TEST_FILE_HD_CTS2 "/home/ffmpeg/work/bbb_short.ffmpeg.480x360.mp4.libx264_500kbps_25fps.libfaac_stereo_128kbps_44100Hz.mp4"

/* the media file the player opens */
#define TEST_FILE_NAME TEST_FILE_AAC

#define SDL_AUDIO_BUFFER_SIZE 4096

static int av_sync_type = AV_SYNC_AUDIO_MASTER;
GlobalContext global_context;

/* SIGINT/SIGTERM handler: post a quit event, then exit immediately. */
static void sigterm_handler(int sig)
{
    SDL_Event event;

    av_log(NULL, AV_LOG_ERROR, "sigterm_handler : sig is %d \n", sig);
    event.type = FF_QUIT_EVENT;
    SDL_PushEvent(&event);
    exit(123);
}

/* The clock the other stream synchronizes against (audio by default). */
double get_master_clock()
{
    if (av_sync_type == AV_SYNC_VIDEO_MASTER) {
        return get_video_clock();
    } else if (av_sync_type == AV_SYNC_AUDIO_MASTER) {
        return get_audio_clock();
    } else {
        return get_audio_clock();
    }
}

/* Event thread: dispatches SDL user events (alloc / refresh / quit). */
int event_thread(void *arg)
{
    SDL_Event event;

    for (;;) {
        SDL_WaitEvent(&event);
        switch (event.type) {
        case FF_ALLOC_EVENT:
            alloc_picture(event.user.data1);
            break;
        case FF_QUIT_EVENT:
            global_context.quit = 1;
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        }
    }
    return 0; /* BUG FIX: non-void function had no return statement */
}

/*
 * Player entry point: open the media, find and open audio/video codecs,
 * initialize SDL output, spin up the decode/event threads, then feed the
 * demuxed packets into the per-stream queues until EOF or quit.
 */
int main(int argc, char **argv)
{
    int i;
    int err = 0;
    AVFormatContext *fmt_ctx = NULL;
    AVPacket pkt;
    int audio_stream_index = -1;
    int video_stream_index = -1;
    SDL_AudioSpec desired;
    SDL_AudioSpec spec;

    global_context.quit = 0;

    /* register INT/TERM signal handlers */
    signal(SIGINT,  sigterm_handler); /* Interrupt (ANSI).   */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */

    /* set log level */
    av_log_set_level(AV_LOG_WARNING);

    /* register all codecs, demuxers and protocols */
    avfilter_register_all();
    av_register_all();
    avformat_network_init();

    fmt_ctx = avformat_alloc_context();

    err = avformat_open_input(&fmt_ctx, TEST_FILE_NAME, NULL, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "avformat_open_input : err is %d \n", err);
        err = -1;
        goto failure;
    }

    if ((err = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "avformat_find_stream_info : err is %d \n", err);
        err = -1;
        goto failure;
    }

    /* search the first video stream */
    for (i = 0; i < fmt_ctx->nb_streams; i++) {
        if (fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_stream_index = i;
            break;
        }
    }

    /* search the first audio stream */
    for (i = 0; i < fmt_ctx->nb_streams; i++) {
        if (fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            audio_stream_index = i;
            break;
        }
    }

    /* if there is neither video nor audio, exit */
    if ((-1 == video_stream_index) && (-1 == audio_stream_index)) {
        goto failure;
    }

    /* open the video decoder */
    if (-1 != video_stream_index) {
        global_context.vcodec_ctx = fmt_ctx->streams[video_stream_index]->codec;
        global_context.vstream = fmt_ctx->streams[video_stream_index];
        global_context.vcodec = avcodec_find_decoder(global_context.vcodec_ctx->codec_id);
        if (NULL == global_context.vcodec) {
            av_log(NULL, AV_LOG_ERROR, "avcodec_find_decoder failure. \n");
            goto failure;
        }
        if (avcodec_open2(global_context.vcodec_ctx, global_context.vcodec, NULL) < 0) {
            av_log(NULL, AV_LOG_ERROR, "avcodec_open2 failure. \n");
            goto failure;
        }
    }

    /* open the audio decoder */
    if (-1 != audio_stream_index) {
        global_context.acodec_ctx = fmt_ctx->streams[audio_stream_index]->codec;
        global_context.astream = fmt_ctx->streams[audio_stream_index];
        global_context.acodec = avcodec_find_decoder(global_context.acodec_ctx->codec_id);
        if (NULL == global_context.acodec) {
            av_log(NULL, AV_LOG_ERROR, "avcodec_find_decoder failure. \n");
            err = -1;
            goto failure;
        }
        if (avcodec_open2(global_context.acodec_ctx, global_context.acodec, NULL) < 0) {
            av_log(NULL, AV_LOG_ERROR, "avcodec_open2 failure. \n");
            err = -1;
            goto failure;
        }
    }

    /* SDL init */
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        av_log(NULL, AV_LOG_ERROR, "Could not initialize SDL - %s\n", SDL_GetError());
        goto failure;
    }

    /* open the SDL video surface */
    if (-1 != video_stream_index) {
        global_context.screen = SDL_SetVideoMode(global_context.vcodec_ctx->width,
                                                 global_context.vcodec_ctx->height,
                                                 0, 0); /* SDL_NOFRAME */
        if (!global_context.screen) {
            av_log(NULL, AV_LOG_ERROR, "SDL: could not set video mode - exiting\n");
            goto failure;
        }
        global_context.pictq_mutex = SDL_CreateMutex();
        global_context.pictq_cond = SDL_CreateCond();
    }

    /* open the SDL audio device; decoding happens inside audio_callback.
     * BUG FIX: this block used to run unconditionally and dereferenced a
     * NULL acodec_ctx for video-only files. */
    if (-1 != audio_stream_index) {
        desired.freq = global_context.acodec_ctx->sample_rate;
        desired.format = AUDIO_S16SYS;
        desired.channels = global_context.acodec_ctx->channels;
        desired.silence = 0;
        desired.samples = SDL_AUDIO_BUFFER_SIZE;
        desired.callback = audio_callback;
        desired.userdata = global_context.acodec_ctx;
        if (SDL_OpenAudio(&desired, &spec) < 0) {
            av_log(NULL, AV_LOG_ERROR, "SDL_OpenAudio: %s\n", SDL_GetError());
            goto failure;
        }
        /* the player only handles S16 output; bail out otherwise */
        if (spec.format != AUDIO_S16SYS) {
            av_log(NULL, AV_LOG_ERROR, "spec.format != AUDIO_S16SYS . \n");
            goto failure;
        }
    }

    /* init frame timing state */
    global_context.frame_timer = (double)av_gettime() / 1000000.0;
    global_context.frame_last_delay = 40e-3;
    global_context.video_current_pts_time = av_gettime();

    /* create the event-dispatch thread */
    SDL_CreateThread(event_thread, NULL);

    /* init audio and video packet queues */
    packet_queue_init(&global_context.video_queue);
    packet_queue_init(&global_context.audio_queue);

    /* start the audio device (only when one was opened) */
    if (-1 != audio_stream_index) {
        SDL_PauseAudio(0);
    }

    if (-1 != video_stream_index) {
        /* create the video decode thread and kick off the display timer */
        SDL_CreateThread(video_thread, NULL);
        video_refresh_timer(0);
    }

    /* demux loop: route packets to the matching stream queue */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        if (pkt.stream_index == video_stream_index) {
            packet_queue_put(&global_context.video_queue, &pkt);
        } else if (pkt.stream_index == audio_stream_index) {
            packet_queue_put(&global_context.audio_queue, &pkt);
        } else {
            av_free_packet(&pkt);
        }
    }

    /* wait for quit */
    while (!global_context.quit) {
        SDL_Delay(100);
    }

failure:
    if (fmt_ctx) {
        avformat_close_input(&fmt_ctx);
        avformat_free_context(fmt_ctx);
    }
    avformat_network_deinit();
    return 0;
}

头文件如下:

/*
 * Copyright (c) 2017 ericbaba
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef __XPLAYER_H__
#define __XPLAYER_H__

#include "config.h"

#include "libavutil/log.h"
#include "libavutil/time.h"
#include "libavutil/samplefmt.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavcodec/internal.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"

#include "SDL/SDL.h"

#if CONFIG_AVDEVICE
#include "libavdevice/avdevice.h"
#endif
/* note: the duplicate CONFIG_AVFILTER include of libavfilter/avfilter.h
 * was removed — the header is already included unconditionally above */

/* custom SDL user events dispatched by event_thread() */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

#define VIDEO_PICTURE_QUEUE_SIZE 3

/* A/V drift below this (seconds) needs no correction */
#define AV_SYNC_THRESHOLD 0.1
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB 20
/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

#define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 /* 1 second of 48khz 32bit audio */

/* which clock drives A/V synchronization */
enum {
    AV_SYNC_AUDIO_MASTER,
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_MASTER,
};

/* thread-safe FIFO of demuxed packets (see util.c) */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;            /* total bytes of queued packet payloads */
    int abort_request;
    int serial;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

/* one slot of the decoded-picture ring buffer */
typedef struct VideoPicture {
    SDL_Overlay *bmp;    /* SDL YUV overlay holding the frame */
    int width, height;
    int allocated;       /* set by alloc_picture() on the event thread */
    double pts;
    void *opaque;
} VideoPicture;

/* source audio format used to configure the filter graph */
typedef struct AudioParams {
    int freq;
    int channels;
    int64_t channel_layout;
    enum AVSampleFormat fmt;
    int frame_size;
    int bytes_per_sec;
} AudioParams;

/* all player-wide state shared between the threads */
typedef struct GlobalContexts {
    AVCodecContext *acodec_ctx;
    AVCodecContext *vcodec_ctx;
    AVStream *vstream;
    AVStream *astream;
    AVCodec *vcodec;
    AVCodec *acodec;

    SDL_Surface *screen;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;

    PacketQueue audio_queue;
    PacketQueue video_queue;

    int pictq_size;      /* frames currently queued */
    int pictq_windex;    /* write index (decoder)   */
    int pictq_rindex;    /* read index (display)    */
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];

    int audio_buf_size;
    int audio_buf_index;

    int64_t video_current_pts_time;
    double frame_last_delay;
    double frame_last_pts;
    double frame_timer;

    int quit;            /* set to 1 to make all threads exit */
} GlobalContext;

double get_master_clock();
double get_audio_clock();
double get_video_clock();
void schedule_refresh(int delay);
void packet_queue_init(PacketQueue *q);
int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block);
int packet_queue_put(PacketQueue *q, AVPacket *pkt);
int packet_queue_size(PacketQueue *q);
void audio_callback(void *userdata, Uint8 *stream, int len);
int video_thread(void *arg);
/* previously missing prototypes (were implicitly declared at call sites);
 * video_refresh_timer/video_display are deliberately old-style (empty
 * parens) because existing callers pass an ignored argument */
void alloc_picture(void *userdata);
void video_refresh_timer();
void video_display();

extern GlobalContext global_context;

#endif /* __XPLAYER_H__ */

其中,xplayer.c中的 TEST_FILE_NAME 宏定义用于指定我们播放的文件绝对路径。
接下来,在xplayer目录中执行make即可编译,Makefile中指定了FFmpeg和SDL库及头文件的路径。

执行程序之前,有两点可能要注意:

  1. 需要先指定SDL动态库加载的位置:
export LD_LIBRARY_PATH=/home/ffmpeg/work/SDL-1.2.15/out/lib:$LD_LIBRARY_PATH
  2. Ubuntu系统需要安装声音开发包libasound-dev,否则运行时可能会报错:
sudo apt-get install libasound-dev

下面执行程序,即可看到Ubuntu窗口呈现解码后的视频,并且可以听到播放的声音了。

ffmpeg@ubuntu:~/work/xplayer$ ./xplayer

播放后的视频界面如下:

是不是很简单呢,大家都可以试试。

基于FFmpeg和SDL1.2的极简播放器实现相关推荐

  1. 基于ffmpeg和libvlc的视频剪辑、播放器

    以前研究的时候,写过一个简单的基于VLC的视频播放器.后来因为各种项目,有时为了方便测试,等各种原因,陆续加了一些功能,现在集成了视频播放.视频加减速.视频剪切,视频合并(增加中)等功能在一起.有时候 ...

  2. 从零开始仿写一个抖音App——基于FFmpeg的极简视频播放器

    本文首发于微信公众号--世界上有意思的事,搬运转载请注明出处,否则将追究版权责任.微信号:a1018998632,交流qq群:859640274 1.从零开始仿写一个抖音app--开始 4.从零开始仿 ...

  3. 最简单的基于FFmpeg的移动端例子:Android 推流器

    ===================================================== 最简单的基于FFmpeg的移动端例子系列文章列表: 最简单的基于FFmpeg的移动端例子:A ...

  4. php项目网页音乐播放器插件,基于HTML5 canvas和Web Audio的音频播放器插件

    wavesurfer.js是一款基于HTML5 canvas和Web Audio的音频播放器插件.通过wavesurfer.js你可以使用它来制作各种HTML5音频播放器,它可以在各种支持 Web A ...

  5. 基于51单片机的7键电子琴音乐播放器proteus

    本设计仅供参考 基于51单片机的7键电子琴音乐播放器proteus(仿真+源码+原理图) 原理图:Altium Designer 仿真图proteus 7.8 程序编译器:keil 4/keil 5 ...

  6. [附源码]计算机毕业设计Python+uniapp基于微信小程序平台开发的音乐播放器f0rrr(程序+lw+远程部署)

    [附源码]计算机毕业设计Python+uniapp基于微信小程序平台开发的音乐播放器f0rrr(程序+lw+远程部署) 该项目含有源码.文档.程序.数据库.配套开发软件.软件安装教程 项目运行环境配置 ...

  7. 基于Linux内核的纯手工极简系统研究

    原文地址:http://shajunxing.tpddns.cn:8888/web/blog/2018-04-06-基于Linux内核的纯手工极简系统研究/index.html 问提由来 Linux操 ...

  8. 利用FFmpeg和OpenGL ES 实现 3D 全景播放器

    前言 我们已经利用 FFmpeg + OpenGLES + OpenSLES 实现了一个多媒体播放器,本文将基于此播放器实现一个酷炫的 3D 全景播放器. 全景播放器原理 全景视频是由多台摄像机在一个 ...

  9. 学习笔记:在WIN11及UBUNTU平台下的基于Tkinter、pydub、pyaudio的音乐播放器

    目录 一.总述 二.文件结构 三.打包方式 四.使用pydub进行音乐播放 main2.py all_music.py show2.py 五.pydub使用中遇到的一些问题 六.新的尝试--pyaud ...

最新文章

  1. 【Java集合框架】ArrayList类方法简明解析(举例说明)
  2. atitti.atiNav 手机导航组件的设计
  3. MFC消息映射的定义
  4. 2.15 Python 中的广播-深度学习-Stanford吴恩达教授
  5. 牛客题霸 SQL5 查找所有员工的last_name和first_name以及对应部门编号dept_no
  6. MySQL系列(一) MySQL体系结构概述
  7. Pandas index详解
  8. Apache目录介绍
  9. android armv7 libmp3lame.so,lame支持armv6 armv7 i386 armv7s arm64
  10. Linux卸载驱动方法
  11. bootstrap table表格 设置背景颜色 设置字体颜色cellStyle 显示隐藏列(更换工具栏图标) 固定列 导出(兼容全部导出时(all)) 调整列宽 日期格式化1970-01-01bug
  12. “免费代理IP” 又双叒叕来了,这次无限量、更稳定。(附带使用教程)
  13. 1467 A. Wizard of Orz
  14. hive sql union all的性能优化
  15. 美团运维SRE+运维开发一面面经汇总
  16. JDK8的Stream操作你还不会用吗?
  17. Win10任务栏全透明化(TranslucentTB)
  18. Java虚拟机这一块 —— JVM 调优和深入了解性能优化
  19. 三字经全文(此版本是读诵最多的)
  20. 《Python数据分析与挖掘实战》学习笔记——电力漏窃电用户自动识别

热门文章

  1. java拉兹猜想的编程_Java数据结构及算法实例:考拉兹猜想 Collatz Conjecture
  2. 创维酷开电视能换成android系统,创维酷开电视刷机(酷开系统升级)步骤
  3. 计算机视觉算法探究:OpenCV CLAHE 插值算法详解
  4. Navicat使用跳板机连接mysql
  5. IDEA生成sql逆向工程
  6. 合肥工业大学宣城校区计算机大赛,初赛公示_2020年全国高校计算机能力挑战赛...
  7. 如何规划和管理自己的职业生涯?
  8. 操作系统实验2——高响应比调度算法
  9. [个人笔记]EME Solver自学笔记---参照lumerical官网视频
  10. 政府应急指挥调度管理系统软件解决方案