Qt client demo: real-time RTSP audio stream playback without audible noise

EasyDarwin is used as the streaming server.

The stream is pushed to the EasyDarwin server directly with FFmpeg, taking the audio from the computer's microphone. The FFmpeg command is:
ffmpeg -f dshow -i audio="麦克风 (Realtek® Audio)" -codec:a aac -ac 2 -ar 16000 -f rtsp rtsp://10.1.3.170:554/3_a.sdp
Replace the device name with your own microphone; details on how to push a stream are easy to find online.

The client uses FFmpeg to pull the stream and decode it into raw PCM audio; the RTSP stream is received and the audio data extracted in a worker thread. The code, playvoiceplayer.cpp, is as follows:

#include "playvoiceplayer.h"#include <QDebug>PlayVoicePlayer::PlayVoicePlayer(QObject *parent) : QThread(parent)
{}void PlayVoicePlayer::startPlay(QString url)
{qDebug() << "Video2PCM::startPlay()";playUrl = url;unGetStream = true;this->start();
}void PlayVoicePlayer::run()
{qDebug() << "Video2PCM::run():"<<playUrl;isStart = true;AVFormatContext     *pFormatCtx = NULL;AVCodecContext      *pCodecCtx = NULL;AVCodec              *pCodec = NULL;AVPacket            packet;AVFrame              *pAudioFrame = NULL;uint8_t                *buffer = NULL;struct SwrContext   *audio_convert_ctx = NULL;int                  got_picture;int                 audioIndex;int                 out_buffer_size;av_register_all();if (avformat_open_input(&pFormatCtx, playUrl.toStdString().data(), NULL, NULL) != 0){emit getPcmStreamStop();qDebug()<< " Video2PCM Couldn't open an input stream.";return;}pFormatCtx->probesize = 5 *1024;   //使用1000*1024 延时大概是2秒开始开始播放1920*1080使用这个参数暂时没发新崩溃的情况pFormatCtx->max_analyze_duration = 1 * AV_TIME_BASE;if (avformat_find_stream_info(pFormatCtx, NULL) < 0){emit getPcmStreamStop();qDebug()<< "Video2PCM Couldn't find stream information.";return;}audioIndex = -1;for (int i = 0; i < pFormatCtx->nb_streams; i++){if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO){audioIndex = i;break;}}if (audioIndex == -1){emit getPcmStreamStop();qDebug()<< "Video2PCM Couldn't find a audio stream.";return;}pCodecCtx = pFormatCtx->streams[audioIndex]->codec;pCodec = avcodec_find_decoder(pCodecCtx->codec_id);if (pCodec == NULL) printf("Codec not found.\n");if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0){emit getPcmStreamStop();qDebug()<< "Video2PCM Could not open codec.";return;}pAudioFrame = av_frame_alloc();if (pAudioFrame == NULL){emit getPcmStreamStop();qDebug()<< "Video2PCM Could not alloc AVFrame";return;}//音频输出参数uint64_t out_channel_layout = AV_CH_LAYOUT_STEREO;//声道格式AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S32;//采样格式int out_nb_samples = pCodecCtx->frame_size;//nb_samples: AAC-1024 MP3-1152//   int out_sample_rate = 44100;//采样率int out_sample_rate = 16000;//采样率int out_nb_channels = av_get_channel_layout_nb_channels(out_channel_layout);//根据声道格式返回声道个数out_buffer_size = av_samples_get_buffer_size(NULL, out_nb_channels, out_nb_samples, out_sample_fmt, 1);buffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE);audio_convert_ctx = swr_alloc();if (audio_convert_ctx == NULL){{emit getPcmStreamStop();qDebug()<< " Video2PCM Could not allocate SwrContext";return;}}swr_alloc_set_opts(audio_convert_ctx, out_channel_layout, out_sample_fmt,out_sample_rate,pCodecCtx->channel_layout, pCodecCtx->sample_fmt, pCodecCtx->sample_rate, 0, NULL);swr_init(audio_convert_ctx);int    index = 0;//计数器while (isStart){if(av_read_frame(pFormatCtx, &packet)<0){emit getPcmStreamStop();break;}if (packet.stream_index == audioIndex) {if (avcodec_decode_audio4(pCodecCtx, pAudioFrame, &got_picture, &packet) < 0) {qDebug() <<("Error in decoding audio frame.\n");emit getPcmStreamStop();break;}if (got_picture) {//   int dst_nb_samples = av_rescale_rnd(swr_get_delay(audio_convert_ctx, pAudioFrame->sample_rate) + pAudioFrame->nb_samples, pAudioFrame->sample_rate, pAudioFrame->sample_rate, AVRounding(1));swr_convert(audio_convert_ctx, &buffer, MAX_AUDIO_FRAME_SIZE, (const uint8_t **)pAudioFrame->data, pAudioFrame->nb_samples);if(unGetStream == true){qDebug() << "Video2PCM unGetStream";unGetStream =false;emit getAudioStream();}//                printf("index:%5d\t pts:%lld\t packet size:%d\n", index, packet.pts, packet.size);//Write PCM//                fwrite(buffer, 1, out_buffer_size, fp_pcm);emit decodePCM(packet.pts, QByteArray((char*)buffer, out_buffer_size));index++;}}av_free_packet(&packet);}qDebug() << "Video2PCM 
close1";swr_free(&audio_convert_ctx);av_free(buffer);av_frame_free(&pAudioFrame);avcodec_close(pCodecCtx);avformat_close_input(&pFormatCtx);isStart= false;}

The corresponding playvoiceplayer.h file:

#ifndef PLAYVOICEPLAYER_H
#define PLAYVOICEPLAYER_H

#include <QObject>
#include <QThread>

#ifdef _WINDOWS
extern "C"
{
#include "libavcodec\avcodec.h"
#include "libavformat\avformat.h"
#include "libswresample\swresample.h"
};
#else
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswresample/swresample.h"
};
#endif

#include <QAudioFormat>
#include <QAudioOutput>

#define MAX_AUDIO_FRAME_SIZE 192000

class PlayVoicePlayer : public QThread
{
    Q_OBJECT
public:
    explicit PlayVoicePlayer(QObject *parent = nullptr);
    void startPlay(QString url);

private:
    bool isStart = true;
    QString playUrl;
    bool unGetStream;

signals:
    void getPcmStreamStop();
    void getAudioStream();
    void decodePCM(qint64 pts, const QByteArray& pcm);

protected:
    void run();
};

#endif // PLAYVOICEPLAYER_H

The PCM data is handed to the playback side via decodePCM(packet.pts, QByteArray((char*)buffer, out_buffer_size));. On the playback side, a QRingBuffer is used to buffer the received audio stream. QRingBuffer is not a public library class, so it has to be added to the project yourself. A QSemaphore-based buffer was not used here because it produced audible noise during playback, so QSemaphore was abandoned; anyone interested is welcome to try QSemaphore again. The qringbuffer.cpp code is as follows:

#include "qringbuffer.h"
#include <string.h>

char *QRingBuffer::readPointerAtPosition(qint64 pos, qint64 &length)
{
    if (pos >= 0)
    {
        pos += head;
        for (int i = 0; i < buffers.size(); ++i)
        {
            length = (i == tailBuffer ? tail : buffers[i].size());
            if (length > pos)
            {
                length -= pos;
                return buffers[i].data() + pos;
            }
            pos -= length;
        }
    }
    length = 0;
    return 0;
}

void QRingBuffer::free(qint64 bytes)
{
    Q_ASSERT(bytes <= bufferSize);
    while (bytes > 0)
    {
        const qint64 blockSize = buffers.first().size() - head;
        if (tailBuffer == 0 || blockSize > bytes)
        {
            if (bufferSize <= bytes)
            {
                if (buffers.first().size() <= basicBlockSize)
                {
                    bufferSize = 0;
                    head = tail = 0;
                }
                else
                {
                    clear();
                }
            }
            else
            {
                Q_ASSERT(bytes < MaxByteArraySize);
                head += int(bytes);
                bufferSize -= bytes;
            }
            return;
        }
        bufferSize -= blockSize;
        bytes -= blockSize;
        buffers.removeFirst();
        --tailBuffer;
        head = 0;
    }
}

char *QRingBuffer::reserve(qint64 bytes)
{
    if (bytes <= 0 || bytes >= MaxByteArraySize)
        return 0;
    if (buffers.isEmpty())
    {
        buffers.append(QByteArray());
        buffers.first().resize(qMax(basicBlockSize, int(bytes)));
    }
    else
    {
        const qint64 newSize = bytes + tail;
        // If the new size exceeds the data held by the last buffer, the last buffer must be reallocated
        if (newSize > buffers.last().size())
        {
            // When the conditions below hold, shrink the last buffer to the size of its current data,
            // then allocate a new buffer and reset its data end position tail to 0
            if (newSize > buffers.last().capacity() && (tail >= basicBlockSize
                || newSize >= MaxByteArraySize))
            {
                buffers.last().resize(tail);
                buffers.append(QByteArray());
                ++tailBuffer;
                tail = 0;
            }
            // Grow the last buffer
            buffers.last().resize(qMax(basicBlockSize, tail + int(bytes)));
        }
    }
    char *writePtr = buffers.last().data() + tail;
    bufferSize += bytes;
    Q_ASSERT(bytes < MaxByteArraySize);
    tail += int(bytes);
    return writePtr;
}

char *QRingBuffer::reserveFront(qint64 bytes)
{
    if (bytes <= 0 || bytes >= MaxByteArraySize)
        return 0;
    if (head < bytes)
    {
        if (buffers.isEmpty())
        {
            buffers.append(QByteArray());
        }
        else
        {
            buffers.first().remove(0, head);
            if (tailBuffer == 0)
                tail -= head;
        }
        head = qMax(basicBlockSize, int(bytes));
        if (bufferSize == 0)
        {
            tail = head;
        }
        else
        {
            buffers.prepend(QByteArray());
            ++tailBuffer;
        }
        buffers.first().resize(head);
    }
    head -= int(bytes);
    bufferSize += bytes;
    return buffers.first().data() + head;
}

void QRingBuffer::chop(qint64 length)
{
    Q_ASSERT(length <= bufferSize);
    while (length > 0)
    {
        if (tailBuffer == 0 || tail > length)
        {
            if (bufferSize <= length)
            {
                if (buffers.first().size() <= basicBlockSize)
                {
                    bufferSize = 0;
                    head = tail = 0;
                }
                else
                {
                    clear();
                }
            }
            else
            {
                Q_ASSERT(length < MaxByteArraySize);
                tail -= int(length);
                bufferSize -= length;
            }
            return;
        }
        bufferSize -= tail;
        length -= tail;
        buffers.removeLast();
        --tailBuffer;
        tail = buffers.last().size();
    }
}

void QRingBuffer::clear()
{
    if (buffers.isEmpty())
        return;
    buffers.erase(buffers.begin() + 1, buffers.end());
    buffers.first().clear();
    head = tail = 0;
    tailBuffer = 0;
    bufferSize = 0;
}

qint64 QRingBuffer::indexOf(char c, qint64 maxLength, qint64 pos)
{
    if (maxLength <= 0 || pos < 0)
        return -1;
    qint64 index = -(pos + head);
    for (int i = 0; i < buffers.size(); ++i)
    {
        qint64 nextBlockIndex = qMin(index + (i == tailBuffer ? tail : buffers[i].size()),
                                     maxLength);
        if (nextBlockIndex > 0)
        {
            const char *ptr = buffers[i].data();
            if (index < 0)
            {
                ptr -= index;
                index = 0;
            }
            const char *findPtr = reinterpret_cast<const char *>(memchr(ptr, c,
                                                                        nextBlockIndex - index));
            if (findPtr)
                return qint64(findPtr - ptr) + index + pos;
            if (nextBlockIndex == maxLength)
                return -1;
        }
        index = nextBlockIndex;
    }
    return -1;
}

qint64 QRingBuffer::read(char *data, qint64 maxLength)
{
    const qint64 bytesToRead = qMin(bufferSize, maxLength);
    qint64 readSoFar = 0;
    while (readSoFar < bytesToRead)
    {
        const qint64 bytesToReadFromThisBlock = qMin(bytesToRead - readSoFar,
                                                     nextDataBlockSize());
        if (data)
            memcpy(data + readSoFar, readPointer(), bytesToReadFromThisBlock);
        readSoFar += bytesToReadFromThisBlock;
        free(bytesToReadFromThisBlock);
    }
    return readSoFar;
}

QByteArray QRingBuffer::read()
{
    if (bufferSize == 0)
        return QByteArray();
    QByteArray qba(buffers.takeFirst());
    // Avoid unnecessary allocations when the QByteArray is resized later
    qba.reserve(0);
    if (tailBuffer == 0)
    {
        qba.resize(tail);
        tail = 0;
    }
    else
    {
        --tailBuffer;
    }
    qba.remove(0, head);
    head = 0;
    bufferSize -= qba.size();
    return qba;
}

qint64 QRingBuffer::peek(char *data, qint64 maxLength, qint64 pos)
{
    qint64 readSoFar = 0;
    if (pos >= 0)
    {
        pos += head;
        for (int i = 0; readSoFar < maxLength && i < buffers.size(); ++i)
        {
            qint64 blockLength = (i == tailBuffer ? tail : buffers[i].size());
            if (pos < blockLength)
            {
                blockLength = qMin(blockLength - pos, maxLength - readSoFar);
                memcpy(data + readSoFar, buffers[i].data() + pos, blockLength);
                readSoFar += blockLength;
                pos = 0;
            }
            else
            {
                pos -= blockLength;
            }
        }
    }
    return readSoFar;
}

void QRingBuffer::append(const char *data, qint64 size)
{
    char *writePointer = reserve(size);
    if (size == 1)
        *writePointer = *data;
    else if (size)
        ::memcpy(writePointer, data, size);
}

void QRingBuffer::append(const QByteArray &qba)
{
    if (tail == 0)
    {
        if (buffers.isEmpty())
            buffers.append(qba);
        else
            buffers.last() = qba;
    }
    else
    {
        buffers.last().resize(tail);
        buffers.append(qba);
        ++tailBuffer;
    }
    tail = qba.size();
    bufferSize += tail;
}

qint64 QRingBuffer::readLine(char *data, qint64 maxLength)
{
    if (!data || --maxLength <= 0)
        return -1;
    qint64 i = indexOf('\n', maxLength);
    i = read(data, i >= 0 ? (i + 1) : maxLength);
    data[i] = '\0';
    return i;
}

The corresponding qringbuffer.h file:

#ifndef QRINGBUFFER_P_H
#define QRINGBUFFER_P_H

#include <QByteArray>
#include <QList>
#include <limits>   // std::numeric_limits

#ifndef QRINGBUFFER_CHUNKSIZE
#define QRINGBUFFER_CHUNKSIZE 4096
#endif

enum
{
    // 1 GB - 1 byte
    MaxAllocSize = (1 << (std::numeric_limits<int>::digits - 1)) - 1
};

enum
{
    // 1 GB - 1 - 16 bytes
    MaxByteArraySize = MaxAllocSize - 16
};

class QRingBuffer
{
public:
    // Allocate QRINGBUFFER_CHUNKSIZE-sized blocks by default
    QRingBuffer(int growth = QRINGBUFFER_CHUNKSIZE) :
        head(0), tail(0), tailBuffer(0), basicBlockSize(growth), bufferSize(0) { }

    ~QRingBuffer() { }

    // Get a pointer to the given position inside the ring buffer;
    // length outputs the number of bytes from that position to the end of the block
    char *readPointerAtPosition(qint64 pos, qint64 &length);

    // Reserve space at the tail, returning a pointer to the new space
    char *reserve(qint64 bytes);

    // Reserve space at the head, returning a pointer to the new space
    char *reserveFront(qint64 bytes);

    // Truncate the buffer to pos bytes
    void truncate(qint64 pos)
    {
        if (pos < bufferSize)
            chop(bufferSize - pos);
    }

    // Whether the buffers hold no data
    bool isEmpty()
    {
        return bufferSize == 0;
    }

    // Read one character from the head and return it as an int
    int getChar()
    {
        if (isEmpty())
            return -1;
        char c = *readPointer();
        free(1);
        return int(uchar(c));
    }

    // Append one character at the tail
    void putChar(char c)
    {
        char *ptr = reserve(1);
        *ptr = c;
    }

    // Prepend one character at the head
    void ungetChar(char c)
    {
        if (head > 0) {
            --head;
            buffers.first()[head] = c;
            ++bufferSize;
        } else {
            char *ptr = reserveFront(1);
            *ptr = c;
        }
    }

    // Clear the buffer
    void clear();

    // Read up to maxLength bytes into data; if the buffers hold less than maxLength,
    // read everything. Returns the number of bytes read.
    qint64 read(char *data, qint64 maxLength);

    // Take the first buffer in buffers
    QByteArray read();

    // Copy up to maxLength bytes starting at pos into data.
    // Returns the number of bytes actually copied.
    qint64 peek(char *data, qint64 maxLength, qint64 pos = 0);

    // Extend the last buffer
    void append(const char *data, qint64 size);

    // Append a new buffer at the end
    void append(const QByteArray &qba);

    // Release length bytes from the head; normally used together with reserve
    qint64 skip(qint64 length)
    {
        qint64 bytesToSkip = qMin(length, bufferSize);
        free(bytesToSkip);
        return bytesToSkip;
    }

    // Release length bytes from the tail; normally used together with reserve
    void chop(qint64 length);

    // Read one line, including its terminating '\n'
    qint64 readLine(char *data, qint64 maxLength);

    bool canReadLine()
    {
        return indexOf('\n', bufferSize) >= 0;
    }

private:
    // Size of the next data block:
    // if only one buffer is left, the size of the data in the last buffer;
    // otherwise the size of the data in the first buffer.
    qint64 nextDataBlockSize()
    {
        return (tailBuffer == 0 ? tail : buffers.first().size()) - head;
    }

    // Pointer to the first valid byte in the buffer
    char *readPointer()
    {
        return bufferSize == 0 ? Q_NULLPTR : (buffers.first().data() + head);
    }

    qint64 indexOf(char c, qint64 maxLength, qint64 pos = 0);

    // Release bytes from the head
    void free(qint64 bytes);

private:
    QList<QByteArray> buffers;
    // Start position of the data in the first buffer and end position of the data in the last buffer
    int head, tail;
    // Equals buffers.size() - 1; 0 means only one buffer is left
    int tailBuffer;
    // Initial allocation size
    int basicBlockSize;
    // Total size of the data held in buffers
    qint64 bufferSize;
};

#endif // QRINGBUFFER_P_H
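
In this demo only the append() and read() methods of QRingBuffer are actually exercised. As a minimal usage sketch (the function names onDecodedPcm and fillDeviceBuffer are illustrative only; the real wiring lives in MyDevice below), the pattern is: the decoder thread appends PCM chunks, and the audio side drains exactly as many bytes as the device asks for:

#include "qringbuffer.h"
#include <QByteArray>
#include <QMutex>

static QRingBuffer pcmBuffer;
static QMutex pcmMutex;

// Producer side, called from the decoder thread.
void onDecodedPcm(const QByteArray &pcm)
{
    pcmMutex.lock();
    pcmBuffer.append(pcm);                       // stored as one more chunk at the tail
    pcmMutex.unlock();
}

// Consumer side, called when the audio output needs data.
qint64 fillDeviceBuffer(char *dst, qint64 maxlen)
{
    pcmMutex.lock();
    qint64 got = pcmBuffer.read(dst, maxlen);    // returns what is available, never blocks
    pcmMutex.unlock();
    return got;
}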

In addition there is a MyDevice class, which inherits from QIODevice. Its job is this: when the audio output needs data, it calls back into readData(), and that is where the data has to be supplied. Note that you cannot simply push data into the QIODevice as it arrives; writing to the device immediately on reception causes audible noise, so the device should only hand over as many bytes as the audio output actually asks for. The mydevice.cpp code is as follows:

#include "mydevice.h"#include <QDebug>
#include "qringbuffer.h"#include <QMutex>QRingBuffer ringBuffer;QMutex audioMutex;MyDevice::MyDevice(void)
{this->open(QIODevice::ReadWrite); // 为了解决QIODevice::read (QIODevice): device not open.
}MyDevice::~MyDevice()
{this->close();
}
// data为声卡的数据缓冲区地址, maxlen为声卡缓冲区最大能存放的字节数.
qint64 MyDevice::readData(char *data, qint64 maxlen)
{int getNum=0;qDebug()<< "maxlen="<<maxlen;if(maxlen != 0){audioMutex.lock();getNum= ringBuffer.read(data,maxlen);audioMutex.unlock();static long readbufdata=0;readbufdata+= getNum;qDebug()<< "音频接收到的readbufdata="<<readbufdata;}static long readData = 0;readData += maxlen;qDebug()<< "音频需要数据共计read data"<<readData;qDebug()<< "getNum="<<getNum;return getNum;}qint64 MyDevice::writeData(const char *data, qint64 len)
{return len;
}void MyDevice::setData(QByteArray audiodata)
{audioMutex.lock();ringBuffer.append(audiodata);audioMutex.unlock();}
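
One detail worth knowing: when the ring buffer briefly runs dry, readData() above returns fewer bytes than maxlen, which QAudioOutput may treat as an underrun and which can become audible as clicks. A common mitigation, not part of the original demo, is to pad the shortfall with silence and always satisfy the request. A sketch (requires <cstring> for memset):

// Hypothetical variant of MyDevice::readData() that pads with silence on underrun.
qint64 MyDevice::readData(char *data, qint64 maxlen)
{
    audioMutex.lock();
    qint64 got = ringBuffer.read(data, maxlen);
    audioMutex.unlock();

    if (got < maxlen)
        memset(data + got, 0, maxlen - got);   // 0 is silence for signed 16/32-bit PCM

    return maxlen;                             // always deliver what the device asked for
}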

The corresponding mydevice.h file:

#ifndef MYDEVICE_H
#define MYDEVICE_H

#include <QIODevice>
#include <QQueue>
#include <QMutex>
#include <QWaitCondition>
#include "QSemaphore"

class MyDevice : public QIODevice
{
private:

public:
    MyDevice();                                      // object that carries the PCM data
    ~MyDevice();
    void setData(QByteArray setData);
    qint64 readData(char *data, qint64 maxlen);      // reimplemented virtual function
    qint64 writeData(const char *data, qint64 len);  // pure virtual, must be implemented
};

#endif // MYDEVICE_H

What remains is the main window, which connects to the decoded audio stream and feeds it to the sound card:

#include "mainwindow.h"
#include "ui_mainwindow.h"
#include "playvoiceplayer.h"
#include <QAudioFormat>
#include <QAudioOutput>
#include <QFile>
#include <QDebug>
#include <qringbuffer.h>
#include "mydevice.h"MainWindow::MainWindow(QWidget *parent): QMainWindow(parent), ui(new Ui::MainWindow)
{ui->setupUi(this);//    QFile inputFile;//        inputFile.setFileName("test.pcm");//        inputFile.open(QIODevice::ReadOnly);//设置采样格式
//            QAudioFormat audioFormat;
//            //设置采样率
//            audioFormat.setSampleRate(44100);
//            //设置通道数
//            audioFormat.setChannelCount(2);
//            //设置采样大小,一般为8位或16位
//            audioFormat.setSampleSize(16);
//            //设置编码方式
//            audioFormat.setCodec("audio/pcm");
//            //设置字节序
//            audioFormat.setByteOrder(QAudioFormat::LittleEndian);
//            //设置样本数据类型
//            audioFormat.setSampleType(QAudioFormat::UnSignedInt);//        QAudioOutput *audio = new QAudioOutput( audioFormat, 0);//        audio->start(&inputFile);fmt.setSampleRate(16000);fmt.setChannelCount(2);fmt.setSampleSize(32);fmt.setByteOrder(QAudioFormat::LittleEndian); //设置字节序fmt.setCodec("audio/pcm");fmt.setSampleType(QAudioFormat::SignedInt); //设置样本数据类型audioOutput = new QAudioOutput(fmt);//   streamOut = audioOutput->start();//   int size = audioOutput->periodSize();//   qDebug()<< "size ="<<size;PlayVoicePlayer *voicePlay  = new PlayVoicePlayer();connect(voicePlay,&PlayVoicePlayer::decodePCM,this,[=](qint64 pts, const QByteArray& pcm){static int beginflag= false;if(beginflag== false){beginflag= true;dev = new MyDevice();audioOutput->start(dev);connect(dev,&QIODevice::readyRead,this,[=](){qDebug()<< "readOver";});}static long getdata =0;getdata += pcm.size();qDebug()<< "get data"<<pcm.size();qDebug()<< "get data counter"<<getdata;dev->setData(pcm);// streamOut->write(pcm);QFile file("test.pcm");file.open(QIODevice::WriteOnly | QIODevice::Append);file.write(pcm);file.close();});voicePlay->startPlay("rtsp://localhost/3_a.sdp");//    QFile *inputFile= new QFile("test.pcm");//    inputFile->open(QIODevice::ReadOnly);//    audioOutput->start(inputFile);}MainWindow::~MainWindow()
{delete ui;
}
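
Before relying on the hard-coded format (16000 Hz, 2 channels, 32-bit signed PCM, matching AV_SAMPLE_FMT_S32 and the resampler settings in PlayVoicePlayer), it is worth checking that the default output device actually supports it, since an unsupported format is another common source of distorted playback. A small optional check with Qt's QAudioDeviceInfo, not in the original demo (requires #include <QAudioDeviceInfo>):

// Optional format check before constructing the QAudioOutput.
QAudioDeviceInfo info = QAudioDeviceInfo::defaultOutputDevice();
if (!info.isFormatSupported(fmt))
{
    qDebug() << "Requested audio format not supported, falling back to nearest format";
    fmt = info.nearestFormat(fmt);   // note: the PCM produced by swr_convert must then match this format
}
audioOutput = new QAudioOutput(fmt);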

Note: this program still has some bugs; for example, reconnection after the stream drops is not handled and will be polished when time permits (a sketch of one possible reconnect hook follows below). This is just one way to solve the problem; it could also be implemented with SDL, but here the audio stream is played back directly through Qt's audio classes rather than SDL.
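
A minimal reconnect hook could hang off the getPcmStreamStop() signal emitted by the decoder thread, for example by retrying after a short delay. This is only a sketch (it assumes PlayVoicePlayer::run() has fully exited before startPlay() is called again, and it needs #include <QTimer>); it is not part of the demo:

// Hypothetical reconnect handling, added next to the decodePCM connection in MainWindow.
connect(voicePlay, &PlayVoicePlayer::getPcmStreamStop, this, [=]()
{
    qDebug() << "stream stopped, retrying in 3 seconds";
    QTimer::singleShot(3000, this, [=]()
    {
        if (!voicePlay->isRunning())   // QThread::isRunning()
            voicePlay->startPlay("rtsp://localhost/3_a.sdp");
    });
});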

The source code can be downloaded here:
https://download.csdn.net/download/heguobo111/20545333

Note: the project was built with MSVC2017 64-bit. If you need a different build environment, replace the FFmpeg libraries yourself accordingly.
