I recently spent a few days learning how to decode audio and video with FFmpeg. There are plenty of tutorials online, but they are rather scattered, and it took a while to work out the overall decoding flow. This post records an example that uses FFmpeg to decode a video file and play both the picture and the sound; it does not do any audio/video synchronization.

Here is the code:

#include <iostream>
#include <opencv2/opencv.hpp>

#ifdef __cplusplus
extern "C"{
#endif

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>

#ifdef __cplusplus
}
#endif

#include <alsa/asoundlib.h>

#define FALSE 0
#define TRUE 1

snd_pcm_t *handle;
snd_pcm_hw_params_t *params;
snd_pcm_uframes_t frames = 1024;                               // frames per period
snd_pcm_access_t access_mode = SND_PCM_ACCESS_RW_INTERLEAVED;  // access mode: interleaved
snd_pcm_format_t format = SND_PCM_FORMAT_S16_LE;               // sample format: 16-bit, little-endian
int channel = 2;
unsigned int simple_rate = 44100;                              // sample rate
//int dir;                                                     // deviation between the device rate and the requested rate
char *buffer = NULL;                                           // buffer data
int size;                                                      // buffer size
bool have_data = false;
bool end_mark = false;

const char *in_filename  = "HelloVenus.mp4";                        // input URL (mov/mkv/ts/mp4/h264)
const char *out_filename = "rtmp://123.206.23.239:1935/wstv/home";  // output URL (rtmp://123.206.23.239:1935/wstv/home or rtp://233.233.233.233:6666)

bool YUV2RGB(uchar* pYuvBuf, int nWidth, int nHeight, int channels, uchar* pRgbBuf)
{
#define PIXELSIZE nWidth * nHeightconst int Table_fv1[256] = { -180, -179, -177, -176, -174, -173, -172, -170, -169, -167, -166, -165, -163, -162, -160, -159, -158, -156, -155, -153, -152, -151, -149, -148, -146, -145, -144, -142, -141, -139, -138, -137, -135, -134, -132, -131, -130, -128, -127, -125, -124, -123, -121, -120, -118, -117, -115, -114, -113, -111, -110, -108, -107, -106, -104, -103, -101, -100, -99, -97, -96, -94, -93, -92, -90, -89, -87, -86, -85, -83, -82, -80, -79, -78, -76, -75, -73, -72, -71, -69, -68, -66, -65, -64, -62, -61, -59, -58, -57, -55, -54, -52, -51, -50, -48, -47, -45, -44, -43, -41, -40, -38, -37, -36, -34, -33, -31, -30, -29, -27, -26, -24, -23, -22, -20, -19, -17, -16, -15, -13, -12, -10, -9, -8, -6, -5, -3, -2, 0, 1, 2, 4, 5, 7, 8, 9, 11, 12, 14, 15, 16, 18, 19, 21, 22, 23, 25, 26, 28, 29, 30, 32, 33, 35, 36, 37, 39, 40, 42, 43, 44, 46, 47, 49, 50, 51, 53, 54, 56, 57, 58, 60, 61, 63, 64, 65, 67, 68, 70, 71, 72, 74, 75, 77, 78, 79, 81, 82, 84, 85, 86, 88, 89, 91, 92, 93, 95, 96, 98, 99, 100, 102, 103, 105, 106, 107, 109, 110, 112, 113, 114, 116, 117, 119, 120, 122, 123, 124, 126, 127, 129, 130, 131, 133, 134, 136, 137, 138, 140, 141, 143, 144, 145, 147, 148, 150, 151, 152, 154, 155, 157, 158, 159, 161, 162, 164, 165, 166, 168, 169, 171, 172, 173, 175, 176, 178 };const int Table_fv2[256] = { -92, -91, -91, -90, -89, -88, -88, -87, -86, -86, -85, -84, -83, -83, -82, -81, -81, -80, -79, -78, -78, -77, -76, -76, -75, -74, -73, -73, -72, -71, -71, -70, -69, -68, -68, -67, -66, -66, -65, -64, -63, -63, -62, -61, -61, -60, -59, -58, -58, -57, -56, -56, -55, -54, -53, -53, -52, -51, -51, -50, -49, -48, -48, -47, -46, -46, -45, -44, -43, -43, -42, -41, -41, -40, -39, -38, -38, -37, -36, -36, -35, -34, -33, -33, -32, -31, -31, -30, -29, -28, -28, -27, -26, -26, -25, -24, -23, -23, -22, -21, -21, -20, -19, -18, -18, -17, -16, -16, -15, -14, -13, -13, -12, -11, -11, -10, -9, -8, -8, -7, -6, -6, -5, -4, -3, -3, -2, -1, 0, 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 12, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24, 25, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 32, 33, 34, 35, 35, 36, 37, 37, 38, 39, 40, 40, 41, 42, 42, 43, 44, 45, 45, 46, 47, 47, 48, 49, 50, 50, 51, 52, 52, 53, 54, 55, 55, 56, 57, 57, 58, 59, 60, 60, 61, 62, 62, 63, 64, 65, 65, 66, 67, 67, 68, 69, 70, 70, 71, 72, 72, 73, 74, 75, 75, 76, 77, 77, 78, 79, 80, 80, 81, 82, 82, 83, 84, 85, 85, 86, 87, 87, 88, 89, 90, 90 };const int Table_fu1[256] = { -44, -44, -44, -43, -43, -43, -42, -42, -42, -41, -41, -41, -40, -40, -40, -39, -39, -39, -38, -38, -38, -37, -37, -37, -36, -36, -36, -35, -35, -35, -34, -34, -33, -33, -33, -32, -32, -32, -31, -31, -31, -30, -30, -30, -29, -29, -29, -28, -28, -28, -27, -27, -27, -26, -26, -26, -25, -25, -25, -24, -24, -24, -23, -23, -22, -22, -22, -21, -21, -21, -20, -20, -20, -19, -19, -19, -18, -18, -18, -17, -17, -17, -16, -16, -16, -15, -15, -15, -14, -14, -14, -13, -13, -13, -12, -12, -11, -11, -11, -10, -10, -10, -9, -9, -9, -8, -8, -8, -7, -7, -7, -6, -6, -6, -5, -5, -5, -4, -4, -4, -3, -3, -3, -2, -2, -2, -1, -1, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 15, 16, 16, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 20, 20, 20, 21, 21, 22, 22, 22, 23, 23, 23, 24, 24, 24, 25, 25, 25, 26, 26, 26, 27, 27, 27, 28, 28, 28, 29, 29, 29, 30, 30, 30, 31, 31, 31, 32, 32, 33, 33, 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37, 37, 37, 38, 38, 38, 39, 39, 39, 
40, 40, 40, 41, 41, 41, 42, 42, 42, 43, 43 };const int Table_fu2[256] = { -227, -226, -224, -222, -220, -219, -217, -215, -213, -212, -210, -208, -206, -204, -203, -201, -199, -197, -196, -194, -192, -190, -188, -187, -185, -183, -181, -180, -178, -176, -174, -173, -171, -169, -167, -165, -164, -162, -160, -158, -157, -155, -153, -151, -149, -148, -146, -144, -142, -141, -139, -137, -135, -134, -132, -130, -128, -126, -125, -123, -121, -119, -118, -116, -114, -112, -110, -109, -107, -105, -103, -102, -100, -98, -96, -94, -93, -91, -89, -87, -86, -84, -82, -80, -79, -77, -75, -73, -71, -70, -68, -66, -64, -63, -61, -59, -57, -55, -54, -52, -50, -48, -47, -45, -43, -41, -40, -38, -36, -34, -32, -31, -29, -27, -25, -24, -22, -20, -18, -16, -15, -13, -11, -9, -8, -6, -4, -2, 0, 1, 3, 5, 7, 8, 10, 12, 14, 15, 17, 19, 21, 23, 24, 26, 28, 30, 31, 33, 35, 37, 39, 40, 42, 44, 46, 47, 49, 51, 53, 54, 56, 58, 60, 62, 63, 65, 67, 69, 70, 72, 74, 76, 78, 79, 81, 83, 85, 86, 88, 90, 92, 93, 95, 97, 99, 101, 102, 104, 106, 108, 109, 111, 113, 115, 117, 118, 120, 122, 124, 125, 127, 129, 131, 133, 134, 136, 138, 140, 141, 143, 145, 147, 148, 150, 152, 154, 156, 157, 159, 161, 163, 164, 166, 168, 170, 172, 173, 175, 177, 179, 180, 182, 184, 186, 187, 189, 191, 193, 195, 196, 198, 200, 202, 203, 205, 207, 209, 211, 212, 214, 216, 218, 219, 221, 223, 225 };int len = channels * nWidth * nHeight;if (!pYuvBuf || !pRgbBuf)return false;const long nYLen = long(PIXELSIZE);const int nHfWidth = (nWidth >> 1);if (nYLen<1 || nHfWidth<1)return false;// Y dataunsigned char* yData = pYuvBuf;// v dataunsigned char* vData = &yData[nYLen];// u dataunsigned char* uData = &vData[nYLen >> 2];if (!uData || !vData)return false;int rgb[3];int i, j, m, n, x, y, pu, pv, py, rdif, invgdif, bdif;m = -nWidth;n = -nHfWidth;bool addhalf = true;for (y = 0; y<nHeight; y++) {m += nWidth;if (addhalf) {n += nHfWidth;addhalf = false;}else {addhalf = true;}for (x = 0; x<nWidth; x++) {i = m + x;j = n + (x >> 1);py = yData[i];// search tables to get rdif invgdif and bidifrdif = Table_fv1[vData[j]];    // fv1invgdif = Table_fu1[uData[j]] + Table_fv2[vData[j]]; // fu1+fv2bdif = Table_fu2[uData[j]]; // fu2rgb[0] = py + rdif;    // Rrgb[1] = py - invgdif; // Grgb[2] = py + bdif;    // Bj = nYLen - nWidth - m + x;i = (j << 1) + j;// copy this pixel to rgb datafor (j = 0; j<3; j++){if (rgb[j] >= 0 && rgb[j] <= 255) {pRgbBuf[i + j] = rgb[j];}else {pRgbBuf[i + j] = (rgb[j] < 0) ? 0 : 255;}}}}return true;
}

void AVFrame2Img(AVFrame *pFrame, cv::Mat& img)
{
    int frameHeight = pFrame->height;
    int frameWidth = pFrame->width;
    int channels = 3;
    // allocate the output image
    img = cv::Mat::zeros(frameHeight, frameWidth, CV_8UC3);
    // flip the image vertically
    pFrame->data[0] += pFrame->linesize[0] * (frameHeight - 1);
    pFrame->linesize[0] *= -1;
    pFrame->data[1] += pFrame->linesize[1] * (frameHeight / 2 - 1);
    pFrame->linesize[1] *= -1;
    pFrame->data[2] += pFrame->linesize[2] * (frameHeight / 2 - 1);
    pFrame->linesize[2] *= -1;
    // buffer that holds the YUV data
    uchar* pDecodedBuffer = (uchar*)malloc(frameHeight * frameWidth * sizeof(uchar) * channels);
    // copy the yuv420p data out of the AVFrame into the buffer
    int i, j, k;
    // Y plane
    for (i = 0; i < frameHeight; i++)
    {
        memcpy(pDecodedBuffer + frameWidth * i,
               pFrame->data[0] + pFrame->linesize[0] * i,
               frameWidth);
    }
    // U plane
    for (j = 0; j < frameHeight / 2; j++)
    {
        memcpy(pDecodedBuffer + frameWidth * i + frameWidth / 2 * j,
               pFrame->data[1] + pFrame->linesize[1] * j,
               frameWidth / 2);
    }
    // V plane
    for (k = 0; k < frameHeight / 2; k++)
    {
        memcpy(pDecodedBuffer + frameWidth * i + frameWidth / 2 * j + frameWidth / 2 * k,
               pFrame->data[2] + pFrame->linesize[2] * k,
               frameWidth / 2);
    }
    // convert the buffered yuv420p data to RGB
    YUV2RGB(pDecodedBuffer, frameWidth, frameHeight, channels, img.data);
    // release the buffer
    free(pDecodedBuffer);
}

int ffmpeg_decode()
{
    // 1. register all codecs and formats
    av_register_all();

    // 2.1 open the input file
    AVFormatContext* avFormatContext = avformat_alloc_context();
    int ret = avformat_open_input(&avFormatContext, in_filename, NULL, NULL); // read the file header
    if (ret != 0)
    {
        printf("Couldn't open input file.\n");
        return -1;
    }
    // 2.2 retrieve the stream information
    ret = avformat_find_stream_info(avFormatContext, NULL);
    if (ret < 0)
    {
        printf("Couldn't get input file information.\n");
        return -1;
    }
    // 2.3 dump detailed information about the file
    av_dump_format(avFormatContext, -1, in_filename, 0);

    // 3. find the stream indexes
    int videoStream_index = -1; // video stream index
    int audioStream_index = -1; // audio stream index
    for (int i = 0; i < avFormatContext->nb_streams; i++)
    {
        if (avFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            videoStream_index = i;
        }
        if (avFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            audioStream_index = i;
        }
    }
    if (videoStream_index == -1 || audioStream_index == -1)
    {
        printf("Couldn't get input file videoStream or audioStream!\n");
        return -1;
    }

    // 3.1 video codec context and decoder
    AVCodecContext* videoCodecContext = avFormatContext->streams[videoStream_index]->codec;
    AVCodec* videoCodec = avcodec_find_decoder(videoCodecContext->codec_id); // find the decoder for this codec id
    if (videoCodec == NULL)
    {
        printf("Couldn't get AVCodec\n");
        return -1;
    }
    if (avcodec_open2(videoCodecContext, videoCodec, NULL) < 0) // open the decoder
    {
        printf("Couldn't open AVCodec\n");
        return -1;
    }

    // 3.2 audio codec context and decoder
    AVCodecContext* audioCodecContext = avFormatContext->streams[audioStream_index]->codec;
    AVCodec* audioCodec = avcodec_find_decoder(audioCodecContext->codec_id); // find the decoder for this codec id
    if (audioCodec == NULL)
    {
        printf("Couldn't get AVCodec\n");
        return -1;
    }
    if (avcodec_open2(audioCodecContext, audioCodec, NULL) < 0) // open the decoder
    {
        printf("Couldn't open AVCodec\n");
        return -1;
    }

    simple_rate = audioCodecContext->sample_rate; // audio sample rate
    printf("simple_rate=%d\n", simple_rate);
    channel = audioCodecContext->channels;        // audio channel count
    printf("channel=%d\n", channel);
    switch (audioCodecContext->sample_fmt)        // sample format
    {
    case AV_SAMPLE_FMT_U8:
        format = SND_PCM_FORMAT_U8;               // 8 bit
        printf("sample_fmt=%d\n", SND_PCM_FORMAT_U8);
        break;
    case AV_SAMPLE_FMT_S16:
        format = SND_PCM_FORMAT_S16_LE;           // 16 bit
        printf("sample_fmt=%d\n", SND_PCM_FORMAT_S16_LE);
        break;
    }

    /* 1. open the PCM device */
    int rc = snd_pcm_open(&handle, "default", SND_PCM_STREAM_PLAYBACK, 0);
    if (rc < 0)
    {
        printf("open device failed\n");
        return FALSE;
    }

    /* 2. allocate a hardware parameter object and fill it with default values */
    snd_pcm_hw_params_alloca(&params);
    snd_pcm_hw_params_any(handle, params);

    /* set the hardware parameters */
    snd_pcm_hw_params_set_access(handle, params, access_mode);             /* interleaved mode */
    snd_pcm_hw_params_set_format(handle, params, format);                  /* signed 16-bit little-endian format */
    snd_pcm_hw_params_set_channels(handle, params, channel);               /* two channels (stereo) */
    snd_pcm_hw_params_set_rate_near(handle, params, &simple_rate, NULL);   /* 44100 Hz sampling rate (CD quality) */
    snd_pcm_hw_params_set_period_size_near(handle, params, &frames, NULL); /* frames per period */
    rc = snd_pcm_hw_params(handle, params);                                /* write the parameters to the driver */
    if (rc < 0)
    {
        printf("unable to set hw parameters: %s\n", snd_strerror(rc));
        return FALSE;
    }
    /* get the actual period size (in frames) */
    snd_pcm_hw_params_get_period_size(params, &frames, NULL);
    printf("frames = %ld\n", frames);

    // 4.1 start reading packets
    AVPacket *avPacket = (AVPacket*)av_malloc(sizeof(AVPacket)); // allocate the packet
    AVFrame *videoFrame = av_frame_alloc();                      // allocate the video frame
    AVFrame *audioFrame = av_frame_alloc();                      // allocate the audio frame

    unsigned int buffer_size = 2 * 2 * 44100; // 2 bytes * 2 channels * 44100 samples
    uint8_t *out_buffer = (uint8_t*)av_malloc(buffer_size);

    /* resample the decoded audio */
    SwrContext *swrCtx = swr_alloc();
    enum AVSampleFormat in_sample_fmt = audioCodecContext->sample_fmt;  // input sample format
    enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;             // output sample format: 16-bit PCM
    int in_sample_rate = audioCodecContext->sample_rate;                // input sample rate
    int out_sample_rate = 44100;                                        // output sample rate
    uint64_t in_ch_layout = audioCodecContext->channel_layout;          // input channel layout
    uint64_t out_ch_layout = AV_CH_LAYOUT_STEREO;                       // output channel layout
    swr_alloc_set_opts(swrCtx,
                       out_ch_layout,   // output channel layout
                       out_sample_fmt,  // output sample format
                       out_sample_rate, // output sample rate
                       in_ch_layout,    // input channel layout
                       in_sample_fmt,   // input sample format
                       in_sample_rate,  // input sample rate
                       0,               // logging level offset
                       NULL);           // parent logging context, can be NULL
    swr_init(swrCtx);

    // create the image conversion context
    SwsContext *rgbSwsContext = sws_getContext(videoCodecContext->width,   // source width
                                               videoCodecContext->height,  // source height
                                               videoCodecContext->pix_fmt, // source pixel format
                                               640,                        // destination width
                                               480,                        // destination height
                                               AV_PIX_FMT_BGR24,           // destination pixel format
                                               SWS_BICUBIC,                // scaling algorithm
                                               NULL,                       // source filter
                                               NULL,                       // destination filter
                                               NULL);                      // extra parameters

    // buffer for the converted image
    AVFrame* rgbFrame = av_frame_alloc();
    int rgbBuffer_size = avpicture_get_size(AV_PIX_FMT_BGR24, videoCodecContext->width, videoCodecContext->height);
    uint8_t* rgbBuffer = (uint8_t*)av_malloc(rgbBuffer_size * sizeof(uint8_t));
    avpicture_fill((AVPicture *)rgbFrame, rgbBuffer, AV_PIX_FMT_BGR24, videoCodecContext->width, videoCodecContext->height);

    while (av_read_frame(avFormatContext, avPacket) >= 0)
    {
        if (avPacket->stream_index == videoStream_index)
        {
            int got_picture_ptr = 0; // non-zero when a frame was decoded (0 means no frame)
            avcodec_decode_video2(videoCodecContext, videoFrame, &got_picture_ptr, avPacket); // decode a video frame
            if (0 != got_picture_ptr)
            {
                // sws_scale(rgbSwsContext,                              // conversion context
                //           (uint8_t const * const *)videoFrame->data,  // source image data
                //           videoFrame->linesize,                       // source line sizes (strides)
                //           0,                                          // first row to process
                //           videoFrame->height,                         // source image height
                //           rgbFrame->data,                             // destination image data
                //           rgbFrame->linesize);                        // destination line sizes (strides)

                cv::Mat mat = cv::Mat(cv::Size(640, 480), CV_8UC3);
                AVFrame2Img(videoFrame, mat);
                cv::imshow("picture", mat);
                cv::waitKey(15);
                std::cout << "video frame decoded successfully: " << videoFrame->format << std::endl;
            }
        }
        if (avPacket->stream_index == audioStream_index)
        {
            int got_frame_ptr = 0; // non-zero when a frame was decoded
            avcodec_decode_audio4(audioCodecContext, audioFrame, &got_frame_ptr, avPacket); // decode an audio frame
            if (0 != got_frame_ptr)
            {
                swr_convert(swrCtx,                             // resampling context
                            &out_buffer,                        // output buffer
                            buffer_size,                        // output buffer capacity
                            (const uint8_t**)audioFrame->data,  // input buffer
                            audioFrame->nb_samples);            // number of input samples
                /* feed the resampled audio to the sound card */
                rc = snd_pcm_writei(handle, out_buffer, audioFrame->nb_samples);
                if (rc == -EPIPE)
                {
                    printf("underrun occurred\n"); /* EPIPE means underrun */
                    snd_pcm_prepare(handle);
                }
                else if (rc < 0)
                {
                    printf("error from writei: %s\n", snd_strerror(rc));
                }
                printf("audio frame decoded successfully: frames=%ld -- nb_samples=%d\n", frames, audioFrame->nb_samples);
            }
        }
    }

    av_free(rgbBuffer);              // free the RGB buffer
    av_frame_free(&rgbFrame);        // free the RGB frame
    sws_freeContext(rgbSwsContext);  // free the image conversion context
    av_free(out_buffer);             // free the audio resampling buffer
    swr_free(&swrCtx);               // free the resampling context

    // close the PCM device
    snd_pcm_drain(handle);
    snd_pcm_close(handle);

    av_frame_free(&audioFrame);              // free the audio frame
    av_frame_free(&videoFrame);              // free the video frame
    av_packet_free(&avPacket);               // free the packet
    avcodec_close(videoCodecContext);        // close the video decoder
    avcodec_close(audioCodecContext);        // close the audio decoder
    avformat_close_input(&avFormatContext);  // close the input file

    return 0;
}

int main()
{
    ffmpeg_decode();
}

This example was written with Qt Creator, so the relevant shared libraries have to be added to the .pro file:

# link the ALSA library
LIBS += -lasound

# FFmpeg headers and libraries
INCLUDEPATH += /usr/local/ffmpeg/include
LIBS += -L/usr/local/ffmpeg/lib/ -lswscale -lswresample -lavutil -lavformat -lavfilter -lavdevice -lavcodec

# OpenCV headers and libraries
INCLUDEPATH += /usr/local/include
LIBS += -L/usr/local/lib/ -lopencv_calib3d -lopencv_core -lopencv_features2d -lopencv_flann -lopencv_highgui -lopencv_imgcodecs -lopencv_imgproc -lopencv_ml -lopencv_objdetect -lopencv_photo -lopencv_shape -lopencv_stitching -lopencv_superres -lopencv_videoio -lopencv_video -lopencv_videostab

Problems I ran into:

At first I did not know how to play the audio, so I learned how to play raw PCM data with ALSA and used a standalone PCM file to check that the ALSA code itself was correct.
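For reference, here is a rough sketch of that kind of standalone ALSA test. It assumes a raw PCM file in S16_LE, stereo, 44100 Hz; the helper name play_pcm_file, the error handling and the buffer sizing are illustrative choices, not the exact test code.

#include <alsa/asoundlib.h>
#include <stdio.h>
#include <stdlib.h>

int play_pcm_file(const char *path)
{
    snd_pcm_t *pcm = NULL;
    snd_pcm_hw_params_t *hw = NULL;
    snd_pcm_uframes_t period_frames = 1024;
    unsigned int rate = 44100;

    if (snd_pcm_open(&pcm, "default", SND_PCM_STREAM_PLAYBACK, 0) < 0)
        return -1;

    // configure the device: interleaved access, S16_LE, stereo, 44100 Hz
    snd_pcm_hw_params_alloca(&hw);
    snd_pcm_hw_params_any(pcm, hw);
    snd_pcm_hw_params_set_access(pcm, hw, SND_PCM_ACCESS_RW_INTERLEAVED);
    snd_pcm_hw_params_set_format(pcm, hw, SND_PCM_FORMAT_S16_LE);
    snd_pcm_hw_params_set_channels(pcm, hw, 2);
    snd_pcm_hw_params_set_rate_near(pcm, hw, &rate, NULL);
    snd_pcm_hw_params_set_period_size_near(pcm, hw, &period_frames, NULL);
    if (snd_pcm_hw_params(pcm, hw) < 0) {
        snd_pcm_close(pcm);
        return -1;
    }
    snd_pcm_hw_params_get_period_size(hw, &period_frames, NULL);

    // one period of interleaved S16 stereo = frames * 2 channels * 2 bytes
    const size_t chunk_bytes = period_frames * 2 * 2;
    char *chunk = (char *)malloc(chunk_bytes);

    FILE *fp = fopen(path, "rb");
    if (fp) {
        size_t n;
        while ((n = fread(chunk, 1, chunk_bytes, fp)) > 0) {
            snd_pcm_sframes_t rc = snd_pcm_writei(pcm, chunk, n / 4); // bytes -> frames
            if (rc == -EPIPE)              // underrun: recover and keep writing
                snd_pcm_prepare(pcm);
        }
        fclose(fp);
    }

    free(chunk);
    snd_pcm_drain(pcm);
    snd_pcm_close(pcm);
    return 0;
}

Once something like this plays the test file cleanly, the same snd_pcm_* setup can be reused inside ffmpeg_decode() above.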

I then decoded a WAV file with FFmpeg and played it through ALSA, which worked fine, so I went straight to a real video file. After decoding the video file, however, the audio was nothing but noise. The reason is that the sample format of the audio stream decoded from the video file does not match the format the ALSA device was configured with; the decoded audio has to be resampled first and only then written to the sound card, after which playback is normal.
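A minimal sketch of just that fix, pulled out of the full listing above (the helper name make_resampler is mine; the S16 / stereo / 44100 Hz target matches what the ALSA device is configured for):

extern "C" {
#include <libavcodec/avcodec.h>
#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
}

// Build a resampler that converts whatever the decoder outputs
// (e.g. planar float for AAC) into packed S16, stereo, 44100 Hz.
static SwrContext *make_resampler(const AVCodecContext *audioCtx)
{
    SwrContext *swr = swr_alloc_set_opts(NULL,
                                         AV_CH_LAYOUT_STEREO,       // output layout, matches the ALSA setup
                                         AV_SAMPLE_FMT_S16,         // output format, matches SND_PCM_FORMAT_S16_LE
                                         44100,                     // output rate, matches the rate given to ALSA
                                         audioCtx->channel_layout,  // input layout from the decoder
                                         audioCtx->sample_fmt,      // input format from the decoder
                                         audioCtx->sample_rate,     // input rate from the decoder
                                         0, NULL);
    swr_init(swr);
    return swr;
}

// Per decoded audio frame, roughly:
//   int n = swr_convert(swr, &out_buffer, out_capacity_in_samples,
//                       (const uint8_t **)audioFrame->data, audioFrame->nb_samples);
//   snd_pcm_writei(handle, out_buffer, n);

One detail worth noting: snd_pcm_writei() is best given the sample count actually returned by swr_convert(), since it can differ from nb_samples when the input and output sample rates differ.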
