ffmpeg学习(10)音视频文件muxer(1)封装格式转换 中介绍了媒体文件的封装格式转换,ffmpeg学习(11)音视频文件muxer(2)多输入混流 中介绍了音视频的混流,本文介绍基于ffmpeg的转码,将一种视频格式(编码格式、封装格式)转换为另一种视频格式,该过程先解码、再编码,以下图为例说明转码流程。

输入视频的封装格式是MP4,视频编码标准是H.264,音频编码标准是AAC;输出视频的封装格式是AVI,视频编码标准是MPEG4,音频编码标准是MP3。首先从输入视频中分离出视频码流和音频压缩码流,然后分别将视频码流和音频码流进行解码,获取到非压缩的像素数据/音频采样数据,接着将非压缩的像素数据/音频采样数据重新进行编码,获得重新编码后的视频码流和音频码流,最后将视频码流和音频码流重新封装成一个文件。

在转码过程中,解码后的非压缩的像素数据/音频采样数据可能需要进行图像变换/重采样才能再送入编码器中进行编码。按如下两种方式分别介绍:

  • 1、使用sws_scale()、swr_convert()函数
    先对解码后的非压缩数据进行转换,再进行编码。转换功能单一,结构流程简单,但是转换代码复杂。
  • 2、使用AVFilterGraph
    可根据输入输出的要求创建一个AVFilterGraph,可以实现复杂功能。对解码的每一帧数据进行filter,再将处理的结果进行编码。新加入的AVFilterGraph创建初始化复杂,但是其使用方式简单。

前文 ffmpeg学习(12)音视频转码(1)使用sws、swr 已介绍第一种方式,本文介绍第二种方式流程图如下


在流程图中,转换部分代码从使用sws_scale/swr_convert换成了filter处理的函数,有关libavfilter库使用,见文章 ffmpeg学习 libavfilter使用。

示例代码

代码结构和前文一致,音频未做任何改变,视频在画面左上角叠加了原画面缩放4倍的结果。

/* Transcoding example: decoded (uncompressed) frames are run through an
 * AVFilterGraph before being re-encoded and remuxed. */
#include <stdio.h>

#ifdef __cplusplus
extern "C" {
#endif

#include "libavformat/avformat.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavutil/opt.h"

#ifdef __cplusplus
}
#endif

/* Input and output container contexts. */
static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;

/* Per-stream filter graph plus its source/sink endpoints. */
typedef struct FilteringContext {
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext *filter_ctx; /* array, one element per input stream */

/* Decoder/encoder context pair, kept together for easy parameter passing. */
typedef struct StreamContext {
    AVCodecContext *dec_ctx;
    AVCodecContext *enc_ctx;
} StreamContext;
static StreamContext *stream_ctx; /* array, one element per input stream */

/* Open the input file, probe its streams and open a decoder for every
 * audio/video stream (other streams keep an unopened context).
 * Returns 0 on success, a negative AVERROR code otherwise. */
static int open_input_file(const char *filename)
{
    int ret;
    ifmt_ctx = NULL;
    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file %s\n", filename);
        return ret;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }
    stream_ctx = (StreamContext *)av_mallocz_array(ifmt_ctx->nb_streams, sizeof(StreamContext));
    if (!stream_ctx) {
        return AVERROR(ENOMEM);
    }
    for (unsigned int i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *stream = ifmt_ctx->streams[i];
        AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
        if (!dec) {
            av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
            return AVERROR_DECODER_NOT_FOUND;
        }
        AVCodecContext *codec_ctx = avcodec_alloc_context3(dec);
        if (!codec_ctx) {
            av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
            return AVERROR(ENOMEM);
        }
        ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
                   "for stream #%u\n", i);
            return ret;
        }
        /* Reencode video & audio and remux subtitles etc. */
        if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
                codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
            else {
                /* Silences "Could not update timestamps for skipped samples"
                 * warnings from the AAC decoder. */
                codec_ctx->pkt_timebase = stream->time_base;
            }
            /* Open decoder */
            if ((ret = avcodec_open2(codec_ctx, dec, NULL)) < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
                return ret;
            }
        }
        stream_ctx[i].dec_ctx = codec_ctx;
    }
    av_dump_format(ifmt_ctx, 0, filename, 0);
    return 0;
}static int open_output_file(const char *filename)
{int ret;ofmt_ctx = NULL;avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);if(!ofmt_ctx) {av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");return AVERROR_UNKNOWN;}for(unsigned int i = 0; i < ifmt_ctx->nb_streams; i++) {AVStream *in_stream = ifmt_ctx->streams[i];AVStream *out_stream = avformat_new_stream(ofmt_ctx, NULL);if(!out_stream) {av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");return AVERROR_UNKNOWN;}AVCodecContext *dec_ctx = stream_ctx[i].dec_ctx;if(dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {AVCodec *encoder = avcodec_find_encoder(dec_ctx->codec_id);if(!encoder) {av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");return AVERROR_INVALIDDATA;}AVCodecContext *enc_ctx = avcodec_alloc_context3(encoder);if(!enc_ctx) {av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");return AVERROR(ENOMEM);}/* encoder parameters */if(dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {enc_ctx->height = dec_ctx->height;enc_ctx->width = dec_ctx->width;enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;/* take first format from list of supported formats */enc_ctx->pix_fmt = encoder->sample_fmts ? encoder->pix_fmts[0] : dec_ctx->pix_fmt;/* video time_base can be set to whatever is handy and supported by encoder */enc_ctx->time_base = av_inv_q(dec_ctx->framerate);}else{ // AVMEDIA_TYPE_AUDIOenc_ctx->sample_rate = dec_ctx->sample_rate;enc_ctx->channel_layout = dec_ctx->channel_layout;enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);/* take first format from list of supported formats */enc_ctx->sample_fmt = encoder->sample_fmts ? 
encoder->sample_fmts[0] : enc_ctx->sample_fmt;enc_ctx->time_base = AVRational{1, enc_ctx->sample_rate };}                /* Third parameter can be used to pass settings to encoder */if((ret = avcodec_open2(enc_ctx, encoder, NULL)) < 0){av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);return ret;}if((ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx)) < 0){av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);return ret;}if(ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;out_stream->time_base = enc_ctx->time_base;stream_ctx[i].enc_ctx = enc_ctx; // 音视频}else if(dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN){av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);return AVERROR_INVALIDDATA;}else {/* if this stream must be remuxed */ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);if(ret < 0) {av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);return ret;}out_stream->time_base = in_stream->time_base;}}av_dump_format(ofmt_ctx, 0, filename, 1);if(!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {if((ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE)) < 0) {av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);return ret;}}/* init muxer, write output file header */if((ret = avformat_write_header(ofmt_ctx, NULL)) < 0) {av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");return ret;}
}

/* Build one filter graph described by filter_spec for a single stream.
 * The graph is fed through a buffer/abuffer source configured from the
 * decoder's output format and drained through a buffersink/abuffersink
 * constrained to the encoder's accepted formats. On success fills fctx.
 * Returns 0 on success, a negative AVERROR code otherwise. */
static int init_filter(FilteringContext *fctx, AVCodecContext *dec_ctx,
                       AVCodecContext *enc_ctx, const char *filter_spec)
{
    int ret = 0;
    char args[512];
    AVFilterGraph *filter_graph = avfilter_graph_alloc();
    AVFilterContext *buffersrc_ctx = NULL;
    AVFilterContext *buffersink_ctx = NULL;
    const AVFilter *buffersrc = NULL;
    const AVFilter *buffersink = NULL;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        buffersrc = avfilter_get_by_name("buffer");
        buffersink = avfilter_get_by_name("buffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "video filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        /* buffer filter: describes the decoded frames that will be fed in */
        snprintf(args, sizeof(args),
                 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                 dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                 dec_ctx->time_base.num, dec_ctx->time_base.den,
                 dec_ctx->sample_aspect_ratio.num,
                 dec_ctx->sample_aspect_ratio.den);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }
        /* buffersink filter */
        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }
        /* constrain the sink to the pixel format the encoder accepts */
        ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
                             (uint8_t *)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
                             AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        buffersrc = avfilter_get_by_name("abuffer");
        buffersink = avfilter_get_by_name("abuffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "audio filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        /* abuffer filter */
        if (!dec_ctx->channel_layout)
            dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
        /* BUG FIX: channel_layout is uint64_t; passing it to "%x" (which
         * expects unsigned int) is undefined behavior and truncates the
         * layout mask. Print it as unsigned long long instead. */
        snprintf(args, sizeof(args),
                 "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%llx",
                 dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
                 av_get_sample_fmt_name(dec_ctx->sample_fmt),
                 (unsigned long long)dec_ctx->channel_layout);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
            goto end;
        }
        /* abuffersink filter: constrained to the encoder's audio formats */
        AVABufferSinkParams *abuffersink_params = av_abuffersink_params_alloc();
        abuffersink_params->sample_fmts = &enc_ctx->sample_fmt;
        abuffersink_params->channel_layouts = (int64_t *)&enc_ctx->channel_layout;
        abuffersink_params->sample_rates = &enc_ctx->sample_rate;
        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, abuffersink_params, filter_graph);
        /* BUG FIX: the params struct is only read during filter creation and
         * must be freed by the caller; the original leaked it. */
        av_free(abuffersink_params);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
            goto end;
        }
    } else {
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;
    if (!outputs->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec, &inputs, &outputs, NULL)) < 0)
        goto end;
    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;
    /* Fill FilteringContext */
    fctx->buffersrc_ctx = buffersrc_ctx;
    fctx->buffersink_ctx = buffersink_ctx;
    fctx->filter_graph = filter_graph;
end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return ret;
}static int init_filters(void)
{const char *filter_spec;unsigned int i;int ret;filter_ctx = (FilteringContext*)av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));if(!filter_ctx)return AVERROR(ENOMEM);for(i = 0; i < ifmt_ctx->nb_streams; i++) {filter_ctx[i].buffersrc_ctx = NULL;filter_ctx[i].buffersink_ctx = NULL;filter_ctx[i].filter_graph = NULL;if(!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO|| ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))continue;if(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {//filter_spec = "null"; /* passthrough (dummy) filter for video *///filter_spec = "hflip";filter_spec = "split [main][tmp]; [tmp]scale=w=iw/4:h=ih/4, hflip [flip]; [main][flip] overlay=W/8:H/8";}else {filter_spec = "anull"; /* passthrough (dummy) filter for audio *///filter_spec = "aresample=24000"; //filter_spec = "volume=volume=1.1";}ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx, stream_ctx[i].enc_ctx, filter_spec);if(ret)return ret;}return 0;
}

/* Send one frame to the stream's encoder (frame == NULL flushes it) and mux
 * every packet the encoder produces, rescaling timestamps from the encoder
 * time base to the output stream time base.
 * Returns 0 on success / normal completion, a negative AVERROR on error. */
static int encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
    AVPacket *enc_pkt = av_packet_alloc();
    int ret;
    if (!enc_pkt)
        return AVERROR(ENOMEM);
    ret = avcodec_send_frame(stream_ctx[stream_index].enc_ctx, frame);
    while (ret >= 0) {
        ret = avcodec_receive_packet(stream_ctx[stream_index].enc_ctx, enc_pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            /* Not an error: EAGAIN means the encoder needs more input, EOF
             * means it is fully flushed. BUG FIX: the original returned here,
             * leaking enc_pkt and reporting a negative code for normal
             * completion. */
            ret = 0;
            break;
        } else if (ret < 0) {
            /* BUG FIX: was exit(1), which skipped all cleanup; report the
             * error to the caller instead. */
            av_log(NULL, AV_LOG_ERROR, "Error during encoding\n");
            break;
        }
        /* prepare packet for muxing */
        enc_pkt->stream_index = stream_index;
        /* rescale timestamps from encoder to output stream time base */
        av_packet_rescale_ts(enc_pkt,
                             stream_ctx[stream_index].enc_ctx->time_base,
                             ofmt_ctx->streams[stream_index]->time_base);
        /* mux encoded frame */
        ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt);
        av_packet_unref(enc_pkt);
    }
    av_packet_free(&enc_pkt);
    return ret;
}

/* Push one decoded frame into the stream's filter graph (frame == NULL
 * signals EOF to the buffer source, flushing the graph), then encode and mux
 * every filtered frame pulled from the sink.
 * Returns 0 on success / normal completion, a negative AVERROR on error. */
int trans_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
    int ret;
    AVFrame *filt_frame;
    ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
        return ret;
    }
    /* pull filtered frames from the filtergraph */
    while (1) {
        filt_frame = av_frame_alloc();
        if (!filt_frame) {
            ret = AVERROR(ENOMEM);
            break;
        }
        ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx, filt_frame);
        if (ret < 0) {
            /* EAGAIN: no more frames for now; EOF: graph fully flushed.
             * Both mean normal completion, so rewrite the code to 0. */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            av_frame_free(&filt_frame);
            break;
        }
        filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
        ret = encode_write_frame(filt_frame, stream_index);
        /* BUG FIX: the original broke out on encode error without freeing
         * filt_frame, leaking one frame per failure. */
        av_frame_free(&filt_frame);
        if (ret < 0)
            break;
    }
    return ret;
}int main()
{const char* input_file = "../files/Titanic.mp4";const char* output_file = "Titanic.avi";int ret;if((ret = open_input_file(input_file)) < 0) {exit(0);}if((ret = open_output_file(output_file)) < 0) {exit(0);}if((ret = init_filters()) < 0) {exit(0);}AVPacket *pkt = av_packet_alloc();AVFrame *frame = av_frame_alloc();unsigned int stream_index;AVMediaType media_type;uint64_t frame_idx = 0;while(1) {if((ret = av_read_frame(ifmt_ctx, pkt)) < 0)break;stream_index = pkt->stream_index;media_type = ifmt_ctx->streams[stream_index]->codecpar->codec_type;// 输入容器、流的时间戳转换av_packet_rescale_ts(pkt,ifmt_ctx->streams[stream_index]->time_base,stream_ctx[stream_index].dec_ctx->time_base);// 解码ret = avcodec_send_packet(stream_ctx[stream_index].dec_ctx, pkt);if(pkt->pts == AV_NOPTS_VALUE) {printf("========\n");}while(ret >= 0) {ret = avcodec_receive_frame(stream_ctx[stream_index].dec_ctx, frame);if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {break;}else if(ret < 0) {av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame the decoder\n");goto end;}if(media_type == AVMEDIA_TYPE_VIDEO) {frame_idx++;frame->pts = frame_idx;}// 编码、保存//encode_write_frame(frame, stream_index);// 转换,编码、保存trans_encode_write_frame(frame, stream_index);av_frame_unref(frame);}av_packet_unref(pkt);}// flushfor(unsigned int i = 0; i < ifmt_ctx->nb_streams; i++) {// flush decoderret = avcodec_send_packet(stream_ctx[i].dec_ctx, NULL);while(ret >= 0) {ret = avcodec_receive_frame(stream_ctx[i].dec_ctx, frame);if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {break;}else if(ret < 0) {av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame the decoder\n");goto end;}if(media_type == AVMEDIA_TYPE_VIDEO) {frame_idx++;frame->pts = frame_idx;}// 编码、保存//encode_write_frame(frame, i);// 转换,编码、保存trans_encode_write_frame(frame, i);av_frame_unref(frame);}// flush encoderencode_write_frame(NULL, i);}av_write_trailer(ofmt_ctx);
end:av_packet_free(&pkt);av_frame_free(&frame);for(unsigned int i = 0; i < ifmt_ctx->nb_streams; i++) {avcodec_free_context(&stream_ctx[i].dec_ctx);if(ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)avcodec_free_context(&stream_ctx[i].enc_ctx);if(filter_ctx && filter_ctx[i].filter_graph)avfilter_graph_free(&filter_ctx[i].filter_graph);}av_free(filter_ctx);av_free(stream_ctx);avformat_close_input(&ifmt_ctx);if(ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))avio_closep(&ofmt_ctx->pb);avformat_free_context(ofmt_ctx);return 0;
}

本文filter使用在解码和编码中间,编解码的非压缩编码数据参数相同,代码流程:

  • 1、 创建输入、输出AVFormatContext、AVCodecContext
  • 2、 根据编码器参数,创建filter graph,加入video filters
  • 3、 解码,filtering,编码
  • 4、 释放资源

处理后的视频展示结果如下

未完待续:

我们这里filters不能修改音、视频的输出数据格式,是因为编码器设置之后再创建filter graph,就要求filter graph处理的结果要符合编码器输入要求。

应该以filter graph的输出结果来设置编码器的参数,保证编码输出的视频就是filters处理结果。filter graph的输出参数可以通过 buffersink的inputs[0]对象AVFilterLink获取,如本文示例中,信息如下

ffmpeg学习(13)音视频转码(2)使用filter相关推荐

  1. FFmpeg学习(音视频理论知识)

    文章目录 1. 音视频理论知识 1.1 基本概念 1.1.1 音视频必备的基本概念 常用的视频封装格式 常用的视频编码器 常用的音频编程器: 视频流 裸数据YUV 1.1.2 音视频常见处理 采集 处 ...

  2. 使用FFmpeg命令实现音视频转码的备忘录

    FFmpeg是比较经典,也是比较流行的多媒体工具库(demux,mux, decode,encode等等),支持格式也比较丰富,也比较实用. 做多媒体开发的经常都需要使用到各种格式的音视频对自己的开发 ...

  3. ffmpeg学习1 音视频基本概念

    https://blog.csdn.net/caofengtao1314/article/details/107220572

  4. 音视频转码FFmpeg

    前言 音视频转码主要指这样的概念: 容器格式的转换,比如MP4转换为MOV 容器中音视频数据编码方式转换,比如H264编码转换成MPEG4编码,MP3换为AAC 音视频码率的转换,比如4M的视频码率降 ...

  5. 基于ffmpeg实现音视频转码

    一.背景 偶然的机会接触了ffmpeg,当时是从B站下载的视频转移到笔记本上看.使用b站手机客户端下载的视频格式为m4s的两个文件(video.m4s和audio.m4s),需要转成普通播放器支持的m ...

  6. android音视频工程师,音视频学习 (十三) Android 中通过 FFmpeg 命令对音视频编辑处理(已开源)...

    ## 音视频学习 (十三) Android 中通过 FFmpeg 命令对音视频编辑处理(已开源) ## 视音频编辑器 ## 前言 有时候我们想对音视频进行加工处理,比如视频编辑.添加字幕.裁剪等功能处 ...

  7. JAVA调用FFmpeg实现音视频转码加水印功能

    目录 目录 写在前面 MAVEN引用 获取音视频基本信息 音频转码成Mp3格式 视频转码成Mp4格式 视频转码成Mp4并添加文字水印 视频转码成Mp4并添加图片水印 测试代码 写在前面 如今各大云厂商 ...

  8. 音视频编解码流程与如何使用 FFMPEG 命令进行音视频处理

    一.前言 FFMPEG 是特别强大的专门用于处理音视频的开源库.你既可以使用它的 API 对音视频进行处理,也可以使用它提供的工具,如 ffmpeg, ffplay, ffprobe,来编辑你的音视频 ...

  9. FFmpeg 工具:音视频开发都用它,快@你兄弟来看丨音视频工具

    (本文基本逻辑:ffmpeg 常用命令介绍 → ffplay 常用命令介绍 → ffprobe 常用命令介绍) 从事音视频开发的程序员几乎都应该知道或使用过 FFmpeg.FFmpeg 是一个开源软件 ...

最新文章

  1. banana pi BPI-R1 原理图下载地址:
  2. Python编程:Tkinter图形界面设计(1)
  3. 搜狗浏览器挡住了任务栏,上下显示不全
  4. Hadoop完全分布式集群安装Hbase
  5. 2021年5月CCAA注册审核员考试认证通用基础真题
  6. 公元纪年法(儒略历-格里高历)转儒略日
  7. Python实战项目:代码秒抢红包详解
  8. mac上安装R和RStudio
  9. 计算机主机散热器,如何自制计算机散热器
  10. win7计算机无法连接投影仪,win7系统无法连接投影仪的解决方法
  11. 联想电脑重装系统总结
  12. 南宁西乡塘区的计算机中专学校,南宁市西乡塘区中专学校
  13. 软件接口测试是什么?怎么测?
  14. [原创]京东技术解密读书笔记
  15. 2016年川师大软件工程本科生博客地址列表
  16. 多阶段渐进式图像恢复 | 去雨、去噪、去模糊 | 有效教程(附源码)|【CVPR 2021】
  17. 细说新中式实木家具的完美逆袭之路
  18. 全网招募P图高手!阿里巴巴持续训练鉴假AI
  19. Windows更新+中间人=远程命令执行
  20. springboot核酸检查预约系统毕业设计毕设作品开题报告开题答辩PPT

热门文章

  1. javascript+css学生信息表综合案例(附详细代码)
  2. 编写高质量代码:改善Java程序的151个建议 --[106~117]
  3. kmplayer绿色版遇到问题'We strongle recommand you...'
  4. AI黑白照片上色系列-藏在英国伦敦图书馆黑白上色,从未发表的100多年前的中国影像
  5. python解压版使用_【Python】使用 zipfile 解压含有中文文件名的 zip 文件
  6. 【Linux-Windows】通过浏览器批量下载图像数据
  7. 看小企业如何玩转大数据? 智慧商贸添助力
  8. COM服务器应用程序本地激活权限错误解决方法
  9. hadoop-3.0.0集群搭建
  10. 关于前端相关的知识点