Contents

  • Demuxing the video
  • Demuxing the background music
  • Wrapping the audio filter
  • Adding the background music

In short-video apps such as Douyin and Kuaishou, background music is often added to make a video more engaging. We can do the same thing with the FFmpeg libraries, and this post walks through how. The overall workflow is as follows:

First we demux the original video to obtain its video stream and audio stream. Next we demux the music file to obtain its audio stream. A mixing filter then blends the original audio stream with the background-music stream, and finally the mixed audio is muxed back together with the original video stream, giving us a video with background music.
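
Under the hood, the mixing step is driven by an FFmpeg audio filter-graph description. The one built later in this post resamples both inputs, lowers the music volume, and mixes them with amix; with example values filled in (44.1 kHz output, music at 50% volume, both numbers are only illustrative here), it would look roughly like this:

//Example filter-graph description (values are illustrative; the real string is built at runtime)
//[in1] is the video's own audio, [in2] is the background music
const char *filter_descr =
    "[in1]aresample=44100[a1];"
    "[in2]aresample=44100,volume=0.5[a2];"
    "[a1][a2]amix=inputs=2[out]";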

Demuxing the video

First, demux the original video to obtain its format context and the corresponding audio and video streams.

//Open a video file for demuxing
//@1 file path  @2 format context  @3 audio decoder context  @4 video decoder context
//@5 audio stream  @6 video stream
int openVideoFile(const char *file, AVFormatContext *&formatContext, AVCodecContext *&audioContext,
                  AVCodecContext *&videoContext, AVStream *&audioStream, AVStream *&videoStream) {
    int ret = avformat_open_input(&formatContext, file, nullptr, nullptr);
    if (ret < 0) {
        return -1;
    }
    ret = avformat_find_stream_info(formatContext, nullptr);
    if (ret < 0) {
        return -1;
    }
    for (unsigned int j = 0; j < formatContext->nb_streams; ++j) {
        if (formatContext->streams[j]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            //open a decoder for the video stream
            videoStream = formatContext->streams[j];
            AVCodec *codec = avcodec_find_decoder(videoStream->codecpar->codec_id);
            videoContext = avcodec_alloc_context3(codec);
            avcodec_parameters_to_context(videoContext, videoStream->codecpar);
            avcodec_open2(videoContext, codec, nullptr);
        } else if (formatContext->streams[j]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            //open a decoder for the audio stream
            audioStream = formatContext->streams[j];
            AVCodec *codec = avcodec_find_decoder(audioStream->codecpar->codec_id);
            audioContext = avcodec_alloc_context3(codec);
            avcodec_parameters_to_context(audioContext, audioStream->codecpar);
            avcodec_open2(audioContext, codec, nullptr);
        }
        if (videoStream && audioStream) break;
    }
    if (!videoStream || !audioStream) {
        return -1;
    }
    return 0;
}
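
As a quick sanity check, openVideoFile can be exercised on its own. The sketch below is not part of the original program; it assumes the function above and <cstdio> are visible, and the file name is just a placeholder:

//Minimal usage sketch for openVideoFile (illustrative only)
int testOpenVideo() {
    AVFormatContext *fmt = nullptr;
    AVCodecContext *audioCtx = nullptr;
    AVCodecContext *videoCtx = nullptr;
    AVStream *audioSt = nullptr;
    AVStream *videoSt = nullptr;
    if (openVideoFile("input.mp4", fmt, audioCtx, videoCtx, audioSt, videoSt) < 0) {
        printf("failed to open input.mp4\n");
        return -1;
    }
    printf("video %dx%d, audio %d Hz\n", videoCtx->width, videoCtx->height, audioCtx->sample_rate);
    //release everything once we are done with the streams
    avcodec_free_context(&audioCtx);
    avcodec_free_context(&videoCtx);
    avformat_close_input(&fmt);
    return 0;
}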

Demuxing the background music

Once the video file has been demuxed, we demux the background-music file to get its audio stream, which will later be mixed with the audio stream from the video.

//Open an audio file (the background music) for demuxing
//@1 file path  @2 format context  @3 audio decoder context  @4 audio stream
int openAudioFile(const char *file, AVFormatContext *&formatContext, AVCodecContext *&audioContext,
                  AVStream *&audioStream) {
    int ret = avformat_open_input(&formatContext, file, nullptr, nullptr);
    if (ret < 0) {
        return -1;
    }
    ret = avformat_find_stream_info(formatContext, nullptr);
    if (ret < 0) {
        return -1;
    }
    for (unsigned int j = 0; j < formatContext->nb_streams; ++j) {
        if (formatContext->streams[j]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            //open a decoder for the first audio stream
            audioStream = formatContext->streams[j];
            AVCodec *codec = avcodec_find_decoder(audioStream->codecpar->codec_id);
            audioContext = avcodec_alloc_context3(codec);
            avcodec_parameters_to_context(audioContext, audioStream->codecpar);
            avcodec_open2(audioContext, codec, nullptr);
            break;
        }
    }
    if (!audioStream) {
        return -1;
    }
    return 0;
}
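
The music file will usually have a different sample rate, sample format, or channel layout than the video's audio track, which is why the filter graph later resamples both inputs before mixing. A small illustrative helper (not from the original code; it needs <libavutil/samplefmt.h> and <cstdio>) can make the two configurations visible:

//Print both audio configurations so you can see what the filter graph has to reconcile (illustrative)
void dumpAudioConfigs(const AVCodecContext *videoAudio, const AVCodecContext *bgmAudio) {
    printf("video audio: %d Hz, %s, %d channels\n", videoAudio->sample_rate,
           av_get_sample_fmt_name(videoAudio->sample_fmt), videoAudio->channels);
    printf("bgm audio:   %d Hz, %s, %d channels\n", bgmAudio->sample_rate,
           av_get_sample_fmt_name(bgmAudio->sample_fmt), bgmAudio->channels);
}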

Wrapping the audio filter

To make the audio filters easier to work with, we wrap the filter operations in a small class:

//audio_filter.h
#ifndef VIDEOBOX_AUDIO_FILTER_H
#define VIDEOBOX_AUDIO_FILTER_H

extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
}

struct AudioConfig
{
    //sample format
    AVSampleFormat format = AV_SAMPLE_FMT_NONE;
    //sample rate
    int sample_rate = 0;
    //channel layout
    uint64_t ch_layout = AV_CH_LAYOUT_STEREO;
    //time base
    AVRational timebase = { 1, 1 };

    AudioConfig(AVSampleFormat format, int sample_rate, uint64_t ch_layout, AVRational timebase) {
        this->format = format;
        this->sample_rate = sample_rate;
        this->ch_layout = ch_layout;
        this->timebase = timebase;
    }
};

class AudioFilter
{
protected:
    //sink (output) filter context
    AVFilterContext *buffersink_ctx = nullptr;
    //first input buffer filter context
    AVFilterContext *buffersrc1_ctx = nullptr;
    //second input buffer filter context
    AVFilterContext *buffersrc2_ctx = nullptr;
    //filter graph
    AVFilterGraph *filter_graph = nullptr;
    //filter description
    const char *description = nullptr;

public:
    AudioFilter() = default;

    //Create an audio filter graph with two inputs
    //@param filter_descr filter description
    //@param inConfig1    configuration of input 1
    //@param inConfig2    configuration of input 2
    //@param outConfig    configuration of the output
    //@return 0 on success, otherwise an error
    int create(const char *filter_descr, AudioConfig *inConfig1, AudioConfig *inConfig2, AudioConfig *outConfig);

    //Create an audio filter graph with a single input
    //@param filter_descr filter description
    //@param inConfig     configuration of the input
    //@param outConfig    configuration of the output
    //@return 0 on success, otherwise an error
    int create(const char *filter_descr, AudioConfig *inConfig, AudioConfig *outConfig);

    //Dump the filter graph for debugging
    void dumpGraph();

    //Run both inputs through the graph and fetch one output frame
    //@param input1 first input frame
    //@param input2 second input frame
    //@param result output frame
    //@return 0 on success, otherwise an error
    int filter(AVFrame *input1, AVFrame *input2, AVFrame *result);

    //Feed a frame to input 1
    int addInput1(AVFrame *input);
    //Feed a frame to input 2
    int addInput2(AVFrame *input);
    //Pull a filtered frame from the sink
    int getFrame(AVFrame *result);
    //Release the filter graph
    void destroy();
};

#endif
//audio_filter.cpp
#include "audio_filter.h"

int AudioFilter::create(const char *filter_descr, AudioConfig *inConfig1,
                        AudioConfig *inConfig2, AudioConfig *outConfig) {
    this->description = filter_descr;
    char args[512];
    int ret = 0;
    //buffer source and sink filters
    const AVFilter *buffersrc = avfilter_get_by_name("abuffer");
    const AVFilter *buffersink = avfilter_get_by_name("abuffersink");
    AVFilterInOut *output = avfilter_inout_alloc();
    AVFilterInOut *inputs[2];
    inputs[0] = avfilter_inout_alloc();
    inputs[1] = avfilter_inout_alloc();
    char ch_layout[128];
    int nb_channels = 0;
    int pix_fmts[] = { outConfig->format, AV_SAMPLE_FMT_NONE };

    //allocate the filter graph
    filter_graph = avfilter_graph_alloc();
    if (!inputs[0] || !inputs[1] || !output || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    //channel layout of input 1
    nb_channels = av_get_channel_layout_nb_channels(inConfig1->ch_layout);
    av_get_channel_layout_string(ch_layout, sizeof(ch_layout), nb_channels, inConfig1->ch_layout);
    //configure buffer source 1
    snprintf(args, sizeof(args),
             "sample_rate=%d:sample_fmt=%d:channel_layout=%s:channels=%d:time_base=%d/%d",
             inConfig1->sample_rate, inConfig1->format, ch_layout, nb_channels,
             inConfig1->timebase.num, inConfig1->timebase.den);
    ret = avfilter_graph_create_filter(&buffersrc1_ctx, buffersrc, "in1",
                                       args, nullptr, filter_graph);
    if (ret < 0) {
        goto end;
    }

    //configure buffer source 2
    nb_channels = av_get_channel_layout_nb_channels(inConfig2->ch_layout);
    av_get_channel_layout_string(ch_layout, sizeof(ch_layout), nb_channels, inConfig2->ch_layout);
    snprintf(args, sizeof(args),
             "sample_rate=%d:sample_fmt=%d:channel_layout=%s:channels=%d:time_base=%d/%d",
             inConfig2->sample_rate, inConfig2->format, ch_layout, nb_channels,
             inConfig2->timebase.num, inConfig2->timebase.den);
    ret = avfilter_graph_create_filter(&buffersrc2_ctx, buffersrc, "in2",
                                       args, nullptr, filter_graph);
    if (ret < 0) {
        goto end;
    }

    //create the sink
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                       nullptr, nullptr, filter_graph);
    if (ret < 0) {
        goto end;
    }
    ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", pix_fmts,
                              AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        goto end;
    }

    inputs[0]->name = av_strdup("in1");
    inputs[0]->filter_ctx = buffersrc1_ctx;
    inputs[0]->pad_idx = 0;
    inputs[0]->next = inputs[1];

    inputs[1]->name = av_strdup("in2");
    inputs[1]->filter_ctx = buffersrc2_ctx;
    inputs[1]->pad_idx = 0;
    inputs[1]->next = nullptr;

    output->name = av_strdup("out");
    output->filter_ctx = buffersink_ctx;
    output->pad_idx = 0;
    output->next = nullptr;

    //note: the in/out lists passed to avfilter_graph_parse_ptr are named from the
    //point of view of the graph description, so our sources go into the "inputs"
    //list and our sink into the "outputs" list
    avfilter_graph_set_auto_convert(filter_graph, AVFILTER_AUTO_CONVERT_NONE);
    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
                                        &output, inputs, nullptr)) < 0) {
        goto end;
    }
    //finalize the graph
    if ((ret = avfilter_graph_config(filter_graph, nullptr)) < 0) {
        goto end;
    }

end:
    avfilter_inout_free(inputs);
    avfilter_inout_free(&output);
    return ret;
}

int AudioFilter::create(const char *filter_descr, AudioConfig *inConfig, AudioConfig *outConfig)
{
    this->description = filter_descr;
    char args[512];
    int ret = 0;
    const AVFilter *buffersrc = avfilter_get_by_name("abuffer");
    const AVFilter *buffersink = avfilter_get_by_name("abuffersink");
    AVFilterInOut *output = avfilter_inout_alloc();
    AVFilterInOut *input = avfilter_inout_alloc();
    char ch_layout[128];
    int nb_channels = 0;
    int pix_fmts[] = { outConfig->format, AV_SAMPLE_FMT_NONE };

    filter_graph = avfilter_graph_alloc();
    if (!input || !output || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    //configure the buffer source and the sink
    nb_channels = av_get_channel_layout_nb_channels(inConfig->ch_layout);
    av_get_channel_layout_string(ch_layout, sizeof(ch_layout), nb_channels, inConfig->ch_layout);
    snprintf(args, sizeof(args),
             "sample_rate=%d:sample_fmt=%d:channel_layout=%s:channels=%d:time_base=%d/%d",
             inConfig->sample_rate, inConfig->format, ch_layout, nb_channels,
             inConfig->timebase.num, inConfig->timebase.den);
    ret = avfilter_graph_create_filter(&buffersrc1_ctx, buffersrc, "in1",
                                       args, nullptr, filter_graph);
    if (ret < 0) {
        goto end;
    }
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                       nullptr, nullptr, filter_graph);
    if (ret < 0) {
        goto end;
    }
    ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", pix_fmts,
                              AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        goto end;
    }

    input->name = av_strdup("in");
    input->filter_ctx = buffersrc1_ctx;
    input->pad_idx = 0;
    input->next = nullptr;

    output->name = av_strdup("out");
    output->filter_ctx = buffersink_ctx;
    output->pad_idx = 0;
    output->next = nullptr;

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
                                        &output, &input, nullptr)) < 0) {
        goto end;
    }
    if ((ret = avfilter_graph_config(filter_graph, nullptr)) < 0) {
        goto end;
    }

end:
    avfilter_inout_free(&input);
    avfilter_inout_free(&output);
    return ret;
}

void AudioFilter::dumpGraph() {
    printf("%s\n%s", description, avfilter_graph_dump(filter_graph, nullptr));
}

void AudioFilter::destroy() {
    if (filter_graph)
        avfilter_graph_free(&filter_graph);
    filter_graph = nullptr;
}

int AudioFilter::filter(AVFrame *input1, AVFrame *input2, AVFrame *result)
{
    int ret = av_buffersrc_add_frame_flags(buffersrc1_ctx, input1, AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret < 0) {
        return ret;
    }
    ret = av_buffersrc_add_frame_flags(buffersrc2_ctx, input2, AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret < 0) {
        return ret;
    }
    return av_buffersink_get_samples(buffersink_ctx, result, result->nb_samples);
}

int AudioFilter::getFrame(AVFrame *result) {
    if (filter_graph != nullptr) {
        return av_buffersink_get_samples(buffersink_ctx, result, result->nb_samples);
    }
    return -1;
}

int AudioFilter::addInput1(AVFrame *input) {
    if (filter_graph != nullptr) {
        return av_buffersrc_add_frame_flags(buffersrc1_ctx, input, AV_BUFFERSRC_FLAG_KEEP_REF);
    }
    return -1;
}

int AudioFilter::addInput2(AVFrame *input) {
    if (filter_graph != nullptr) {
        return av_buffersrc_add_frame_flags(buffersrc2_ctx, input, AV_BUFFERSRC_FLAG_KEEP_REF);
    }
    return -1;
}
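
Here is a hedged sketch of how the AudioFilter class above might be used on its own: two 44.1 kHz stereo FLTP inputs are mixed, with the second one reduced to 30% volume. The sample parameters and the 0.3 volume are example values, not taken from the original post:

//Illustrative standalone use of AudioFilter (example parameters)
AudioConfig inCfg1(AV_SAMPLE_FMT_FLTP, 44100, AV_CH_LAYOUT_STEREO, AVRational{ 1, 44100 });
AudioConfig inCfg2(AV_SAMPLE_FMT_FLTP, 44100, AV_CH_LAYOUT_STEREO, AVRational{ 1, 44100 });
AudioConfig outCfg(AV_SAMPLE_FMT_FLTP, 44100, AV_CH_LAYOUT_STEREO, AVRational{ 1, 44100 });
AudioFilter mixer;
//[in1], [in2] and [out] match the pad names registered inside create()
int ret = mixer.create("[in1]volume=1.0[a1];[in2]volume=0.3[a2];[a1][a2]amix=inputs=2[out]",
                       &inCfg1, &inCfg2, &outCfg);
if (ret == 0) {
    mixer.dumpGraph();    //print the resulting graph for debugging
    //per frame: mixer.addInput1(f1); mixer.addInput2(f2); mixer.getFrame(mixed);
}
mixer.destroy();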

Adding the background music

With everything above in place, we can add the background music to the video. When specifying the background music, make sure to turn its volume down so that it does not drown out the original audio. The code for adding the background music is shown below:

//Add background music to a video
//@1 output file path  @2 input video path  @3 background music path  @4 background music volume
int add_bgm_to_video(const char *output_filename, const char *input_filename, const char *bgm_filename, float bgm_volume)
{
    int ret = 0;
    //format and codec contexts
    AVFormatContext *outFmtContext = nullptr;
    AVFormatContext *inFmtContext = nullptr;
    AVFormatContext *bgmFmtContext = nullptr;
    AVCodecContext *inAudioContext = nullptr;
    AVCodecContext *inVideoContext = nullptr;
    AVCodecContext *outAudioContext = nullptr;
    AVCodecContext *bgmAudioContext = nullptr;
    AudioFilter filter;
    //audio/video streams
    AVStream *inAudioStream = nullptr;
    AVStream *inVideoStream = nullptr;
    AVStream *outAudioStream = nullptr;
    AVStream *outVideoStream = nullptr;
    AVStream *bgmAudioStream = nullptr;
    AVCodec *audioCodec = nullptr;

    //open the input video and get its contexts
    ret = openVideoFile(input_filename, inFmtContext, inAudioContext, inVideoContext, inAudioStream, inVideoStream);
    if (ret < 0) return ret;
    //open the background music and get its contexts
    ret = openAudioFile(bgm_filename, bgmFmtContext, bgmAudioContext, bgmAudioStream);
    if (ret < 0) return ret;
    //allocate the output context
    ret = avformat_alloc_output_context2(&outFmtContext, nullptr, nullptr, output_filename);
    audioCodec = avcodec_find_encoder(inAudioStream->codecpar->codec_id);

    //create the output video stream; its packets are copied, no re-encoding
    outVideoStream = avformat_new_stream(outFmtContext, nullptr);
    if (!outVideoStream) {
        return -1;
    }
    outVideoStream->id = outFmtContext->nb_streams - 1;
    ret = avcodec_parameters_copy(outVideoStream->codecpar, inVideoStream->codecpar);
    if (ret < 0) {
        return -1;
    }
    outVideoStream->codecpar->codec_tag = 0;

    //create the output audio stream; the mixed audio has to be re-encoded
    outAudioStream = avformat_new_stream(outFmtContext, audioCodec);
    if (!outAudioStream) {
        return -1;
    }
    outAudioStream->id = outFmtContext->nb_streams - 1;
    //set the audio encoder parameters
    outAudioContext = avcodec_alloc_context3(audioCodec);
    avcodec_parameters_to_context(outAudioContext, inAudioStream->codecpar);
    outAudioContext->codec_type = inAudioContext->codec_type;
    outAudioContext->codec_id = inAudioContext->codec_id;
    outAudioContext->sample_fmt = inAudioContext->sample_fmt;
    outAudioContext->sample_rate = inAudioContext->sample_rate;
    outAudioContext->bit_rate = inAudioContext->bit_rate;
    outAudioContext->channel_layout = inAudioContext->channel_layout;
    outAudioContext->channels = inAudioContext->channels;
    outAudioContext->time_base = AVRational{ 1, outAudioContext->sample_rate };
    outAudioContext->flags |= AV_CODEC_FLAG_LOW_DELAY;
    outAudioStream->time_base = outAudioContext->time_base;
    if (outFmtContext->oformat->flags & AVFMT_GLOBALHEADER) {
        outAudioContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    //open the audio encoder
    ret = avcodec_open2(outAudioContext, audioCodec, nullptr);
    if (ret < 0) {
        return -1;
    }
    ret = avcodec_parameters_from_context(outAudioStream->codecpar, outAudioContext);
    if (ret < 0) {
        return -1;
    }

    //copy the metadata
    av_dict_copy(&outFmtContext->metadata, inFmtContext->metadata, 0);
    av_dict_copy(&outVideoStream->metadata, inVideoStream->metadata, 0);
    av_dict_copy(&outAudioStream->metadata, inAudioStream->metadata, 0);

    //input/output configurations for the filter graph
    AudioConfig inputConfig{ inAudioContext->sample_fmt, inAudioContext->sample_rate,
                             inAudioContext->channel_layout, inAudioContext->time_base };
    AudioConfig bgmConfig{ bgmAudioContext->sample_fmt, bgmAudioContext->sample_rate,
                           bgmAudioContext->channel_layout, bgmAudioContext->time_base };
    AudioConfig outputConfig{ outAudioContext->sample_fmt, outAudioContext->sample_rate,
                              outAudioContext->channel_layout, outAudioContext->time_base };

    //the filter graph resamples both inputs and adjusts the music volume
    char filter_description[256];
    char ch_layout[128];
    av_get_channel_layout_string(ch_layout, 128,
                                 av_get_channel_layout_nb_channels(outAudioContext->channel_layout),
                                 outAudioContext->channel_layout);
    snprintf(filter_description, sizeof(filter_description),
             "[in1]aresample=%d[a1];[in2]aresample=%d,volume=volume=%f[a2];[a1][a2]amix[out]",
             outAudioContext->sample_rate, outAudioContext->sample_rate, bgm_volume);
    filter.create(filter_description, &inputConfig, &bgmConfig, &outputConfig);
    filter.dumpGraph();

    if (!(outFmtContext->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&outFmtContext->pb, output_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            return -1;
        }
    }
    //write the file header
    ret = avformat_write_header(outFmtContext, nullptr);
    if (ret < 0) {
        return -1;
    }

    AVFrame *inputFrame = av_frame_alloc();
    AVFrame *bgmFrame = av_frame_alloc();
    AVFrame *mixFrame = av_frame_alloc();
    do {
        AVPacket packet{ nullptr };
        av_init_packet(&packet);
        ret = av_read_frame(inFmtContext, &packet);
        if (ret == AVERROR_EOF) {
            break;
        } else if (ret < 0) {
            break;
        }
        if (packet.flags & AV_PKT_FLAG_DISCARD) continue;

        if (packet.stream_index == inVideoStream->index) {
            //video packets are remuxed directly
            packet.stream_index = outVideoStream->index;
            av_packet_rescale_ts(&packet, inVideoStream->time_base, outVideoStream->time_base);
            packet.duration = av_rescale_q(packet.duration, inVideoStream->time_base, outVideoStream->time_base);
            packet.pos = -1;
            ret = av_interleaved_write_frame(outFmtContext, &packet);
        } else if (packet.stream_index == inAudioStream->index) {
            packet.stream_index = outAudioStream->index;
            av_packet_rescale_ts(&packet, inAudioStream->time_base, outAudioStream->time_base);
            //decode one frame of the original audio
            ret = avcodec_send_packet(inAudioContext, &packet);
            ret = avcodec_receive_frame(inAudioContext, inputFrame);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                continue;
            } else if (ret < 0) {
                return -1;
            }
            filter.addInput1(inputFrame);

            //decode one frame of the background music, looping it on EOF
decode:
            int got_bgm = 0;
            while (true) {
                AVPacket bgmPacket{ nullptr };
                av_init_packet(&bgmPacket);
                ret = av_read_frame(bgmFmtContext, &bgmPacket);
                if (ret == AVERROR_EOF) {
                    av_seek_frame(bgmFmtContext, bgmAudioStream->index, 0, 0);
                    continue;
                } else if (ret != 0) {
                    break;
                }
                if (bgmPacket.stream_index == bgmAudioStream->index) {
                    avcodec_send_packet(bgmAudioContext, &bgmPacket);
                    ret = avcodec_receive_frame(bgmAudioContext, bgmFrame);
                    if (ret == 0) {
                        got_bgm = 1;
                        break;
                    }
                }
            }
            //feed the music frame and pull the mixed frame
            filter.addInput2(bgmFrame);
            int got_mix = 0;
            if (got_bgm) {
                ret = filter.getFrame(mixFrame);
                got_mix = ret == 0;
            }
            if (!got_mix) {
                goto decode;
            }
            mixFrame->pts = inputFrame->pts;
            av_frame_unref(inputFrame);
            av_frame_unref(bgmFrame);
            avcodec_send_frame(outAudioContext, mixFrame);

            //write the mixed audio packets to the output file
encode:
            AVPacket mixPacket{ nullptr };
            ret = avcodec_receive_packet(outAudioContext, &mixPacket);
            if (ret == 0) {
                mixPacket.stream_index = outAudioStream->index;
                ret = av_interleaved_write_frame(outFmtContext, &mixPacket);
                goto encode;
            } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                //nothing to write yet
            } else {
                return -1;
            }
        }
    } while (true);

    filter.destroy();
    av_write_trailer(outFmtContext);
    if (!(outFmtContext->oformat->flags & AVFMT_NOFILE)) {
        avio_closep(&outFmtContext->pb);
    }

    //release everything that was allocated
    av_frame_free(&inputFrame);
    av_frame_free(&bgmFrame);
    av_frame_free(&mixFrame);
    avformat_free_context(outFmtContext);
    avformat_free_context(inFmtContext);
    avformat_free_context(bgmFmtContext);
    avcodec_free_context(&inAudioContext);
    avcodec_free_context(&inVideoContext);
    avcodec_free_context(&bgmAudioContext);
    avcodec_free_context(&outAudioContext);
    return 0;
}

int main(int argc, char *argv[])
{
    if (argc != 4) {
        return -1;
    }
    std::string video_input = std::string(argv[1]);    //input video path
    std::string bgm_input = std::string(argv[2]);      //background music path
    std::string video_output = std::string(argv[3]);   //output video path
    add_bgm_to_video(video_output.c_str(), video_input.c_str(), bgm_input.c_str(), 0.5);
}
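
The 0.5 passed from main above is the background-music volume. If the music still overpowers the original audio, add_bgm_to_video can simply be called with a smaller value; the file names below are placeholders:

//Hypothetical direct call: background music at 30% volume (paths are examples)
int ret = add_bgm_to_video("output.mp4", "input.mp4", "bgm.mp3", 0.3f);
if (ret != 0) {
    printf("failed to add background music\n");
}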
