注:问号以及未注释部分 会在x265-1.9版本内更新

/*****************************************************************************
* Copyright (C) 2013 x265 project
*
* Authors: Chung Shin Yee <shinyee@multicorewareinc.com>
*          Min Chen <chenm003@163.com>
*          Steve Borho <steve@borho.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at license @ x265.com.
*****************************************************************************/
#include "common.h"
#include "frame.h"
#include "framedata.h"
#include "wavefront.h"
#include "param.h"
#include "encoder.h"
#include "frameencoder.h"
#include "common.h"
#include "slicetype.h"
#include "nal.h"
namespace X265_NS {
void weightAnalyse(Slice& slice, Frame& frame, x265_param& param);
/** 函数功能       : 构造FrameEncoder并初始化为0
/*  调用范围       : 只在Encoder::create()函数中被调用
**/
FrameEncoder::FrameEncoder()
{
m_prevOutputTime = x265_mdate();
m_isFrameEncoder = true;
m_threadActive = true;
m_slicetypeWaitTime = 0;
m_activeWorkerCount = 0;
m_completionCount = 0;
m_bAllRowsStop = false;
m_vbvResetTriggerRow = -1;
m_outStreams = NULL;
m_substreamSizes = NULL;
m_nr = NULL;
m_tld = NULL;
m_rows = NULL;
m_top = NULL;
m_param = NULL;
m_frame = NULL;
m_cuGeoms = NULL;
m_ctuGeomMap = NULL;
m_localTldIdx = 0;
memset(&m_rce, 0, sizeof(RateControlEntry));
}
void FrameEncoder::destroy()
{
if (m_pool)
{
if (!m_jpId)
{
int numTLD = m_pool->m_numWorkers;
if (!m_param->bEnableWavefront)
numTLD += m_pool->m_numProviders;
for (int i = 0; i < numTLD; i++)
m_tld[i].destroy();
delete [] m_tld;
}
}
else
{
m_tld->destroy();
delete m_tld;
}
delete[] m_rows;
delete[] m_outStreams;
X265_FREE(m_cuGeoms);
X265_FREE(m_ctuGeomMap);
X265_FREE(m_substreamSizes);
X265_FREE(m_nr);
m_frameFilter.destroy();
if (m_param->bEmitHRDSEI || !!m_param->interlaceMode)
{
delete m_rce.picTimingSEI;
delete m_rce.hrdTiming;
}
}
/** 函数功能       : 初始化FrameEncoder
/*  调用范围       : 只在Encoder::create()函数中被调用
* \参数 top        : 上层encoder类
* \参数 numRows    : 一帧的CTU行数
* \参数 numCols    : 一帧的CTU列数
*   返回值         : 成功返回ture 失败返回 false
**/
bool FrameEncoder::init(Encoder *top, int numRows, int numCols)
{
m_top = top; //获取Encoder类
m_param = top->m_param;//配置参数
m_numRows = numRows;//一帧的CTU行数
m_numCols = numCols;//一帧的CTU列数
m_filterRowDelay = (m_param->bEnableSAO && m_param->bSaoNonDeblocked) ?
2 : (m_param->bEnableSAO || m_param->bEnableLoopFilter ? 1 : 0);//?????
m_filterRowDelayCus = m_filterRowDelay * numCols;//???
m_rows = new CTURow[m_numRows];//申请空间
bool ok = !!m_numRows;
/* determine full motion search range */
int range  = m_param->searchRange;       /* fpel search */
range += !!(m_param->searchMethod < 2);  /* diamond/hex range check lag */
range += NTAPS_LUMA / 2;                 /* subpel filter half-length */
range += 2 + MotionEstimate::hpelIterationCount(m_param->subpelRefine) / 2; /* subpel refine steps */
m_refLagRows = 1 + ((range + g_maxCUSize - 1) / g_maxCUSize);
// NOTE: 2 times of numRows because both Encoder and Filter in same queue
if (!WaveFront::init(m_numRows * 2))//初始化WaveFront
{
x265_log(m_param, X265_LOG_ERROR, "unable to initialize wavefront queue\n");
m_pool = NULL;
}
m_frameFilter.init(top, this, numRows);//???
// initialize HRD parameters of SPS
if (m_param->bEmitHRDSEI || !!m_param->interlaceMode)
{
m_rce.picTimingSEI = new SEIPictureTiming;
m_rce.hrdTiming = new HRDTiming;
ok &= m_rce.picTimingSEI && m_rce.hrdTiming;
}
if (m_param->noiseReductionIntra || m_param->noiseReductionInter)
m_nr = X265_MALLOC(NoiseReduction, 1);
if (m_nr)
memset(m_nr, 0, sizeof(NoiseReduction));
else
m_param->noiseReductionIntra = m_param->noiseReductionInter = 0;
return ok;
}
/* Generate a complete list of unique geom sets for the current picture dimensions */
/** 函数功能       : 计算CU所以情况的几何信息
/*  调用范围       : 只在FrameEncoder::startCompressFrame函数中被调用
*   返回值         : 成功返回ture 失败返回 false
**/
bool FrameEncoder::initializeGeoms()
{
/* Geoms only vary between CTUs in the presence of picture edges */
int maxCUSize = m_param->maxCUSize;//最大CTU
int minCUSize = m_param->minCUSize;//最小CTU
int heightRem = m_param->sourceHeight & (maxCUSize - 1);//高度不够CTU的余数
int widthRem = m_param->sourceWidth & (maxCUSize - 1);//宽度不够CTU的余数
int allocGeoms = 1; // body    //存储的个数:分别为:CTU中全部有像素值  CTU右边不够像素值   CTU下边不够像素值  CTU右边和下边不够像素值
if (heightRem && widthRem)
allocGeoms = 4; // body, right, bottom, corner
else if (heightRem || widthRem)
allocGeoms = 2; // body, right or bottom
m_ctuGeomMap = X265_MALLOC(uint32_t, m_numRows * m_numCols);
m_cuGeoms = X265_MALLOC(CUGeom, allocGeoms * CUGeom::MAX_GEOMS);
if (!m_cuGeoms || !m_ctuGeomMap)
return false;
// body
CUData::calcCTUGeoms(maxCUSize, maxCUSize, maxCUSize, minCUSize, m_cuGeoms);//申请 CTU中全部有像素值 部分
memset(m_ctuGeomMap, 0, sizeof(uint32_t) * m_numRows * m_numCols);
if (allocGeoms == 1)
return true;
int countGeoms = 1;
if (widthRem)
{
// right
CUData::calcCTUGeoms(widthRem, maxCUSize, maxCUSize, minCUSize, m_cuGeoms + countGeoms * CUGeom::MAX_GEOMS); //申请 CTU右边不够像素值 部分
for (uint32_t i = 0; i < m_numRows; i++)
{
uint32_t ctuAddr = m_numCols * (i + 1) - 1;
m_ctuGeomMap[ctuAddr] = countGeoms * CUGeom::MAX_GEOMS;
}
countGeoms++;
}
if (heightRem)
{
// bottom
CUData::calcCTUGeoms(maxCUSize, heightRem, maxCUSize, minCUSize, m_cuGeoms + countGeoms * CUGeom::MAX_GEOMS);//申请 CTU下边不够像素值 部分
for (uint32_t i = 0; i < m_numCols; i++)
{
uint32_t ctuAddr = m_numCols * (m_numRows - 1) + i;
m_ctuGeomMap[ctuAddr] = countGeoms * CUGeom::MAX_GEOMS;
}
countGeoms++;
if (widthRem)
{
// corner
CUData::calcCTUGeoms(widthRem, heightRem, maxCUSize, minCUSize, m_cuGeoms + countGeoms * CUGeom::MAX_GEOMS);//申请 CTU右边和下边不够像素值 部分
uint32_t ctuAddr = m_numCols * m_numRows - 1;
m_ctuGeomMap[ctuAddr] = countGeoms * CUGeom::MAX_GEOMS;
countGeoms++;
}
X265_CHECK(countGeoms == allocGeoms, "geometry match check failure\n");
}
return true;
}
/** 函数功能       : 触发compressframe()进行编码
/*  调用范围       : 只在Encoder::encode函数中被调用
* \参数 curFrame   : 待编码帧
*   返回值         : 成功返回true 异常返回false
**/
bool FrameEncoder::startCompressFrame(Frame* curFrame)
{
m_slicetypeWaitTime = x265_mdate() - m_prevOutputTime;//计算从上一帧编码完毕到开始编码新一帧的等待时间
m_frame = curFrame;//设置当前处理的帧
m_param = curFrame->m_param;//获取配置参数
m_sliceType = curFrame->m_lowres.sliceType;//获取当前的帧类型
curFrame->m_encData->m_frameEncoderID = m_jpId;//获取当前的job id
curFrame->m_encData->m_jobProvider = this;//让当前帧获取当前所在线程的位置
curFrame->m_encData->m_slice->m_mref = m_mref;//获取参考帧信息
if (!m_cuGeoms)//编码器只执行framethread个数,申请所有情况CU的几何信息
{
if (!initializeGeoms())//计算CU所以情况的几何信息
return false;
}
m_enable.trigger();//当前帧的帧类型信息 已经确定完毕 触发准备编码
return true;
}
/** 函数功能       : ??触发compressframe()进行编码
/*  调用范围       : 线程循环触发,encoder->create() frame start开始 (frameThread配置多少,起几个线程)
*   返回值         : null
**/
void FrameEncoder::threadMain()
{
THREAD_NAME("Frame", m_jpId);
//本函数会被执行配置的framethread次数,因为起了framethread个线程进行compressframe
if (m_pool)
{
m_pool->setCurrentThreadAffinity();//设置线程间能够在不同的核运行,而不会同时占用同一个核
/* the first FE on each NUMA node is responsible for allocating thread
* local data for all worker threads in that pool. If WPP is disabled, then
* each FE also needs a TLD instance */
if (!m_jpId) //只在m_jpid = 0 时才会进入,也就说在framethread个线程中只有第一个才会进入 (因为这次初始化为把所有并行的analysis都初始化)
{
int numTLD = m_pool->m_numWorkers; //获取当前机器核数  单机4核测试是4
if (!m_param->bEnableWavefront)   //如果关闭WPP
numTLD += m_pool->m_numProviders;
m_tld = new ThreadLocalData[numTLD];//申请并行空间
for (int i = 0; i < numTLD; i++)
{
m_tld[i].analysis.initSearch(*m_param, m_top->m_scalingList);//???
m_tld[i].analysis.create(m_tld);//申请模式决策内存 初始化相关CU内存
}
//将所有的Encoder的m_tld都指向第一个m_tld
for (int i = 0; i < m_pool->m_numProviders; i++) //遍历所有线程任务
{
if (m_pool->m_jpTable[i]->m_isFrameEncoder) //如果当前 是frameEcnoder 不是lookachead /* ugh; over-allocation and other issues here */
{
FrameEncoder *peer = dynamic_cast<FrameEncoder*>(m_pool->m_jpTable[i]);//强制转换类型
peer->m_tld = m_tld;//公用同一ThreadLocalData buffer
}
}
}
if (m_param->bEnableWavefront)
m_localTldIdx = -1; //??? cause exception if used
else
m_localTldIdx = m_pool->m_numWorkers + m_jpId;//?????
}
else//不使用并行,直接单线程初始化
{
m_tld = new ThreadLocalData;
m_tld->analysis.initSearch(*m_param, m_top->m_scalingList);//???
m_tld->analysis.create(NULL);//申请模式决策内存 初始化相关CU内存
m_localTldIdx = 0;//?????
}
m_done.trigger();     /* signal that thread is initialized */
m_enable.wait();      /* Encoder::encode() triggers this event */
//m_done在encoder.create中先进行wait 每完成一帧即compressFrame()之后才会触发
//以上两个是一个PV原语,m_enable表示准备开始编码,在startCompressFrame函数中触发在compressFrame之后wait
//m_done表示编码完成,在getEncodedPicture之前wait,在compressFrame之后触发
while (m_threadActive)//循环执行任务,知道encoder->stop 中设置结束
{
compressFrame();//编码视频
m_done.trigger();//编码完毕触发完成 /* FrameEncoder::getEncodedPicture() blocks for this event */
m_enable.wait();//等待下一帧是否准备
}
}
/** 函数功能             : 分析加权信息
/*  调用范围             : 只在WorkerThread::threadMain()函数中被调用
* \返回                  : null * */
void FrameEncoder::WeightAnalysis::processTasks(int /* workerThreadId */)
{
Frame* frame = master.m_frame;//获取当前分析帧
weightAnalyse(*frame->m_encData->m_slice, *frame, *master.m_param);//分析加权信息
}
/** 函数功能       : ??触发compressframe()进行编码
/*  调用范围       : 只在FrameEncoder::threadMain()函数中被调用
*   返回值         : null
**/
void FrameEncoder::compressFrame()
{
ProfileScopeEvent(frameThread);
//功能:????
//    1. 初始化数据
//    2. NAL??????
//    3. 参考帧加权分析
//    4. 配置参考帧信息
//    5. 计算估计当前帧应用的量化参数
//    6.
m_startCompressTime = x265_mdate();//获取开始编码的时间点
m_totalActiveWorkerCount = 0;//统计m_activeWorkerCount和(经过CTU压缩后统计 进入帧前初始化为0)
m_activeWorkerCountSamples = 0;//当前帧已经编码分析完毕的CTU个数
m_totalWorkerElapsedTime = 0;//初始化所有CTU编码滤波占用的时间
m_totalNoWorkerTime = 0;//初始化当前帧编码占用的时间
m_countRowBlocks = 0;//计数正在运行的CTU行因为上一行没有完成完毕而强制退出的个数 在帧编码前 初始化为0
m_allRowsAvailableTime = 0;//初始化当前帧所有CTU行准备好的时间点(此处无意义)
m_stallStartTime = 0;//初始化正在进行编码的rows个数为0时的时间点(此处无意义)
m_completionCount = 0;//???
m_bAllRowsStop = false;//是否将所有CTU的编码停止  在每帧进入前初始化为false 在CTU编码决策中需要重新编码时将置为true
m_vbvResetTriggerRow = -1;//需要重新编码的CTU行号 每帧开始编码前初始化为-1
m_SSDY = m_SSDU = m_SSDV = 0;//???
m_ssim = 0;//???
m_ssimCnt = 0;//???
memset(&(m_frame->m_encData->m_frameStats), 0, sizeof(m_frame->m_encData->m_frameStats));//将当前帧的统计信息初始化为0
/* Emit access unit delimiter unless this is the first frame and the user is
* not repeating headers (since AUD is supposed to be the first NAL in the access
* unit) */
Slice* slice = m_frame->m_encData->m_slice;//获取当前slice
if (m_param->bEnableAccessUnitDelimiters && (m_frame->m_poc || m_param->bRepeatHeaders))//????
{
m_bs.resetBits();
m_entropyCoder.setBitstream(&m_bs);
m_entropyCoder.codeAUD(*slice);
m_bs.writeByteAlignment();
m_nalList.serialize(NAL_UNIT_ACCESS_UNIT_DELIMITER, m_bs);
}
if (m_frame->m_lowres.bKeyframe && m_param->bRepeatHeaders)//????
m_top->getStreamHeaders(m_nalList, m_entropyCoder, m_bs);
// Weighted Prediction parameters estimation.
bool bUseWeightP = slice->m_sliceType == P_SLICE && slice->m_pps->bUseWeightPred;    //当前是否应用P帧加权预测
bool bUseWeightB = slice->m_sliceType == B_SLICE && slice->m_pps->bUseWeightedBiPred;//当前是否应用B帧加权预测
if (bUseWeightP || bUseWeightB)
{
#if DETAILED_CU_STATS
m_cuStats.countWeightAnalyze++;
ScopedElapsedTime time(m_cuStats.weightAnalyzeTime);
#endif
WeightAnalysis wa(*this);//用于多线程 加权分析
if (m_pool && wa.tryBondPeers(*this, 1))//从当前job中拥有核并且sleep状态的核可以触发多线程,如果没有可用核则在当前线程中完成进入else
/* use an idle worker for weight analysis */
wa.waitForExit();//一直等待到任务全部完成,这里等待的是核释放,内核释放了任务也就完成了
else
weightAnalyse(*slice, *m_frame, *m_param);//分析加权信息(每个list的第一帧分析加权与否,其它不加权)
}
else
slice->disableWeights();//关闭当前帧的加权预测
// Generate motion references
//配置参考帧信息
int numPredDir = slice->isInterP() ? 1 : slice->isInterB() ? 2 : 0;//获取当前有几个list
for (int l = 0; l < numPredDir; l++) //遍历两个list
{
for (int ref = 0; ref < slice->m_numRefIdx[l]; ref++)//遍历当前list的所有参考帧
{
WeightParam *w = NULL;
if ((bUseWeightP || bUseWeightB) && slice->m_weightPredTable[l][ref][0].bPresentFlag)//如果当前应用加权预测,并且前面分析加权优
w = slice->m_weightPredTable[l][ref];//获取加权参数
m_mref[l][ref].init(slice->m_refPicList[l][ref]->m_reconPic, w, *m_param);//获取参考帧信息,申请加权帧内存
}
}
/* Get the QP for this frame from rate control. This call may block until
* frames ahead of it in encode order have called rateControlEnd() */
int qp = m_top->m_rateControl->rateControlStart(m_frame, &m_rce, m_top);//计算估计当前帧应用的量化参数???
m_rce.newQp = qp;//获取当前估计的量化参数
/* Clip slice QP to 0-51 spec range before encoding */
slice->m_sliceQp = x265_clip3(-QP_BD_OFFSET, QP_MAX_SPEC, qp);//获取当前估计的量化参数
m_initSliceContext.resetEntropy(*slice);//????
m_frameFilter.start(m_frame, m_initSliceContext, qp);//????
/* ensure all rows are blocked prior to initializing row CTU counters */
WaveFront::clearEnabledRowMask();//将当前WPPmap全部初始化为不可执行
/* reset entropy coders */
m_entropyCoder.load(m_initSliceContext);//????
for (uint32_t i = 0; i < m_numRows; i++)
m_rows[i].init(m_initSliceContext);//???
uint32_t numSubstreams = m_param->bEnableWavefront ? slice->m_sps->numCuInHeight : 1;//一帧并行的流数(CTU行数)
if (!m_outStreams)//申请相关空间
{
m_outStreams = new Bitstream[numSubstreams];//申请空间
m_substreamSizes = X265_MALLOC(uint32_t, numSubstreams);//申请空间
if (!m_param->bEnableSAO)//???
for (uint32_t i = 0; i < numSubstreams; i++)
m_rows[i].rowGoOnCoder.setBitstream(&m_outStreams[i]);
}
else
for (uint32_t i = 0; i < numSubstreams; i++)//???
m_outStreams[i].resetBits();
int prevBPSEI = m_rce.encodeOrder ? m_top->m_lastBPSEI : 0;//???
if (m_frame->m_lowres.bKeyframe)//?????
{
if (m_param->bEmitHRDSEI)
{
SEIBufferingPeriod* bpSei = &m_top->m_rateControl->m_bufPeriodSEI;
// since the temporal layer HRD is not ready, we assumed it is fixed
bpSei->m_auCpbRemovalDelayDelta = 1;
bpSei->m_cpbDelayOffset = 0;
bpSei->m_dpbDelayOffset = 0;
// hrdFullness() calculates the initial CPB removal delay and offset
m_top->m_rateControl->hrdFullness(bpSei);
m_bs.resetBits();
bpSei->write(m_bs, *slice->m_sps);
m_bs.writeByteAlignment();
m_nalList.serialize(NAL_UNIT_PREFIX_SEI, m_bs);
m_top->m_lastBPSEI = m_rce.encodeOrder;
}
}
if (m_param->bEmitHRDSEI || !!m_param->interlaceMode)//??????
{
SEIPictureTiming *sei = m_rce.picTimingSEI;
const VUI *vui = &slice->m_sps->vuiParameters;
const HRDInfo *hrd = &vui->hrdParameters;
int poc = slice->m_poc;
if (vui->frameFieldInfoPresentFlag)
{
if (m_param->interlaceMode == 2)
sei->m_picStruct = (poc & 1) ? 1 /* top */ : 2 /* bottom */;
else if (m_param->interlaceMode == 1)
sei->m_picStruct = (poc & 1) ? 2 /* bottom */ : 1 /* top */;
else
sei->m_picStruct = 0;
sei->m_sourceScanType = 0;
sei->m_duplicateFlag = false;
}
if (vui->hrdParametersPresentFlag)
{
// The m_aucpbremoval delay specifies how many clock ticks the
// access unit associated with the picture timing SEI message has to
// wait after removal of the access unit with the most recent
// buffering period SEI message
sei->m_auCpbRemovalDelay = X265_MIN(X265_MAX(1, m_rce.encodeOrder - prevBPSEI), (1 << hrd->cpbRemovalDelayLength));
sei->m_picDpbOutputDelay = slice->m_sps->numReorderPics + poc - m_rce.encodeOrder;
}
m_bs.resetBits();
sei->write(m_bs, *slice->m_sps);
m_bs.writeByteAlignment();
m_nalList.serialize(NAL_UNIT_PREFIX_SEI, m_bs);
}
/* CQP and CRF (without capped VBV) doesn't use mid-frame statistics to
* tune RateControl parameters for other frames.
* Hence, for these modes, update m_startEndOrder and unlock RC for previous threads waiting in
* RateControlEnd here, after the slicecontexts are initialized. For the rest - ABR
* and VBV, unlock only after rateControlUpdateStats of this frame is called */
if (m_param->rc.rateControlMode != X265_RC_ABR && !m_top->m_rateControl->m_isVbv)//因为m_startEndOrder 在rateControlUpdateStats中只对ABR或者VBV模式更新  在此更新为了配合RC的线程控制
{
m_top->m_rateControl->m_startEndOrder.incr();//更新计数
if (m_rce.encodeOrder < m_param->frameNumThreads - 1)//刚启动时多更新一次
m_top->m_rateControl->m_startEndOrder.incr(); // faked rateControlEnd calls for negative frames
}
/* Analyze CTU rows, most of the hard work is done here.  Frame is
* compressed in a wave-front pattern if WPP is enabled. Row based loop
* filters runs behind the CTU compression and reconstruction */
m_rows[0].active = true;//触发第一个CTU行
if (m_param->bEnableWavefront)//如果应用WPP
{
for (uint32_t row = 0; row < m_numRows; row++)//遍历所有CTU行
{
// block until all reference frames have reconstructed the rows we need
for (int l = 0; l < numPredDir; l++)//当前list的个数
{
for (int ref = 0; ref < slice->m_numRefIdx[l]; ref++)//当前list 中ref的个数
{
Frame *refpic = slice->m_refPicList[l][ref];//获取参考帧
uint32_t reconRowCount = refpic->m_reconRowCount.get();
while ((reconRowCount != m_numRows) && (reconRowCount < row + m_refLagRows))
reconRowCount = refpic->m_reconRowCount.waitForChange(reconRowCount);
if ((bUseWeightP || bUseWeightB) && m_mref[l][ref].isWeighted)
m_mref[l][ref].applyWeight(row + m_refLagRows, m_numRows);
}
}
enableRowEncoder(row); //当前外部参考块(如参考帧对应的参考块)准备完毕 将当前row对应位置的map置为1 标记可以执行/* clear external dependency for this row */
if (!row)//如果是第一行
{
m_row0WaitTime = x265_mdate();//获取当前帧开始编码的的时间点  用于计算当前帧的编码时间
enqueueRowEncoder(0); //当前内部参考块  准备完毕 将当前row对应位置的map置为1 标记可以执行/* clear internal dependency, start wavefront */
}
tryWakeOne();//CTU行准备好并触发wpp, 在findjob中运行
}
m_allRowsAvailableTime = x265_mdate();//当前帧所有CTU行准备好的时间点
tryWakeOne(); //多触发一次/* ensure one thread is active or help-wanted flag is set prior to blocking */
static const int block_ms = 250;//超时时间
while (m_completionEvent.timedWait(block_ms))//每250ms触发一次 保证全部CTU行都能够执行  (如果m_completionEvent 在某一位置触发,则会造成不超时,循环退出)
tryWakeOne();//触发
}
else//??????
{
for (uint32_t i = 0; i < m_numRows + m_filterRowDelay; i++)
{
// compress
if (i < m_numRows)
{
// block until all reference frames have reconstructed the rows we need
for (int l = 0; l < numPredDir; l++)
{
int list = l;
for (int ref = 0; ref < slice->m_numRefIdx[list]; ref++)
{
Frame *refpic = slice->m_refPicList[list][ref];
uint32_t reconRowCount = refpic->m_reconRowCount.get();
while ((reconRowCount != m_numRows) && (reconRowCount < i + m_refLagRows))
reconRowCount = refpic->m_reconRowCount.waitForChange(reconRowCount);
if ((bUseWeightP || bUseWeightB) && m_mref[l][ref].isWeighted)
m_mref[list][ref].applyWeight(i + m_refLagRows, m_numRows);
}
}
if (!i)
m_row0WaitTime = x265_mdate();
else if (i == m_numRows - 1)
m_allRowsAvailableTime = x265_mdate();
processRowEncoder(i, m_tld[m_localTldIdx]);
}
// filter
if (i >= m_filterRowDelay)
m_frameFilter.processRow(i - m_filterRowDelay);
}
}
if (m_param->rc.bStatWrite)
{
int totalI = 0, totalP = 0, totalSkip = 0;
// accumulate intra,inter,skip cu count per frame for 2 pass
for (uint32_t i = 0; i < m_numRows; i++)
{
m_frame->m_encData->m_frameStats.mvBits    += m_rows[i].rowStats.mvBits;
m_frame->m_encData->m_frameStats.coeffBits += m_rows[i].rowStats.coeffBits;
m_frame->m_encData->m_frameStats.miscBits  += m_rows[i].rowStats.miscBits;
totalI                                     += m_rows[i].rowStats.intra8x8Cnt;
totalP                                     += m_rows[i].rowStats.inter8x8Cnt;
totalSkip                                  += m_rows[i].rowStats.skip8x8Cnt;
}
int totalCuCount = totalI + totalP + totalSkip;
m_frame->m_encData->m_frameStats.percent8x8Intra = (double)totalI / totalCuCount;
m_frame->m_encData->m_frameStats.percent8x8Inter = (double)totalP / totalCuCount;
m_frame->m_encData->m_frameStats.percent8x8Skip  = (double)totalSkip / totalCuCount;
}
for (uint32_t i = 0; i < m_numRows; i++)
{
m_frame->m_encData->m_frameStats.cntIntraNxN      += m_rows[i].rowStats.cntIntraNxN;
m_frame->m_encData->m_frameStats.totalCu          += m_rows[i].rowStats.totalCu;
m_frame->m_encData->m_frameStats.totalCtu         += m_rows[i].rowStats.totalCtu;
m_frame->m_encData->m_frameStats.lumaDistortion   += m_rows[i].rowStats.lumaDistortion;
m_frame->m_encData->m_frameStats.chromaDistortion += m_rows[i].rowStats.chromaDistortion;
m_frame->m_encData->m_frameStats.psyEnergy        += m_rows[i].rowStats.psyEnergy;
m_frame->m_encData->m_frameStats.lumaLevel        += m_rows[i].rowStats.lumaLevel;
if (m_rows[i].rowStats.maxLumaLevel > m_frame->m_encData->m_frameStats.maxLumaLevel)
m_frame->m_encData->m_frameStats.maxLumaLevel = m_rows[i].rowStats.maxLumaLevel;
for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
{
m_frame->m_encData->m_frameStats.cntSkipCu[depth] += m_rows[i].rowStats.cntSkipCu[depth];
m_frame->m_encData->m_frameStats.cntMergeCu[depth] += m_rows[i].rowStats.cntMergeCu[depth];
for (int m = 0; m < INTER_MODES; m++)
m_frame->m_encData->m_frameStats.cuInterDistribution[depth][m] += m_rows[i].rowStats.cuInterDistribution[depth][m];
for (int n = 0; n < INTRA_MODES; n++)
m_frame->m_encData->m_frameStats.cuIntraDistribution[depth][n] += m_rows[i].rowStats.cuIntraDistribution[depth][n];
}
}
m_frame->m_encData->m_frameStats.avgLumaDistortion   = (double)(m_frame->m_encData->m_frameStats.lumaDistortion) / m_frame->m_encData->m_frameStats.totalCtu;
m_frame->m_encData->m_frameStats.avgChromaDistortion = (double)(m_frame->m_encData->m_frameStats.chromaDistortion) / m_frame->m_encData->m_frameStats.totalCtu;
m_frame->m_encData->m_frameStats.avgPsyEnergy        = (double)(m_frame->m_encData->m_frameStats.psyEnergy) / m_frame->m_encData->m_frameStats.totalCtu;
m_frame->m_encData->m_frameStats.avgLumaLevel        = m_frame->m_encData->m_frameStats.lumaLevel / m_frame->m_encData->m_frameStats.totalCtu;
m_frame->m_encData->m_frameStats.percentIntraNxN     = (double)(m_frame->m_encData->m_frameStats.cntIntraNxN * 100) / m_frame->m_encData->m_frameStats.totalCu;
for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
{
m_frame->m_encData->m_frameStats.percentSkipCu[depth]  = (double)(m_frame->m_encData->m_frameStats.cntSkipCu[depth] * 100) / m_frame->m_encData->m_frameStats.totalCu;
m_frame->m_encData->m_frameStats.percentMergeCu[depth] = (double)(m_frame->m_encData->m_frameStats.cntMergeCu[depth] * 100) / m_frame->m_encData->m_frameStats.totalCu;
for (int n = 0; n < INTRA_MODES; n++)
m_frame->m_encData->m_frameStats.percentIntraDistribution[depth][n] = (double)(m_frame->m_encData->m_frameStats.cuIntraDistribution[depth][n] * 100) / m_frame->m_encData->m_frameStats.totalCu;
uint64_t cuInterRectCnt = 0; // sum of Nx2N, 2NxN counts
cuInterRectCnt += m_frame->m_encData->m_frameStats.cuInterDistribution[depth][1] + m_frame->m_encData->m_frameStats.cuInterDistribution[depth][2];
m_frame->m_encData->m_frameStats.percentInterDistribution[depth][0] = (double)(m_frame->m_encData->m_frameStats.cuInterDistribution[depth][0] * 100) / m_frame->m_encData->m_frameStats.totalCu;
m_frame->m_encData->m_frameStats.percentInterDistribution[depth][1] = (double)(cuInterRectCnt * 100) / m_frame->m_encData->m_frameStats.totalCu;
m_frame->m_encData->m_frameStats.percentInterDistribution[depth][2] = (double)(m_frame->m_encData->m_frameStats.cuInterDistribution[depth][3] * 100) / m_frame->m_encData->m_frameStats.totalCu;
}
m_bs.resetBits();
m_entropyCoder.load(m_initSliceContext);
m_entropyCoder.setBitstream(&m_bs);
m_entropyCoder.codeSliceHeader(*slice, *m_frame->m_encData);
// finish encode of each CTU row, only required when SAO is enabled
if (m_param->bEnableSAO)//???
encodeSlice();
// serialize each row, record final lengths in slice header
uint32_t maxStreamSize = m_nalList.serializeSubstreams(m_substreamSizes, numSubstreams, m_outStreams);//???
// complete the slice header by writing WPP row-starts
m_entropyCoder.setBitstream(&m_bs);
if (slice->m_pps->bEntropyCodingSyncEnabled)
m_entropyCoder.codeSliceHeaderWPPEntryPoints(*slice, m_substreamSizes, maxStreamSize);
m_bs.writeByteAlignment();
m_nalList.serialize(slice->m_nalUnitType, m_bs);//???
if (m_param->decodedPictureHashSEI)
{
if (m_param->decodedPictureHashSEI == 1)
{
m_seiReconPictureDigest.m_method = SEIDecodedPictureHash::MD5;
for (int i = 0; i < 3; i++)
MD5Final(&m_state[i], m_seiReconPictureDigest.m_digest[i]);
}
else if (m_param->decodedPictureHashSEI == 2)
{
m_seiReconPictureDigest.m_method = SEIDecodedPictureHash::CRC;
for (int i = 0; i < 3; i++)
crcFinish(m_crc[i], m_seiReconPictureDigest.m_digest[i]);
}
else if (m_param->decodedPictureHashSEI == 3)
{
m_seiReconPictureDigest.m_method = SEIDecodedPictureHash::CHECKSUM;
for (int i = 0; i < 3; i++)
checksumFinish(m_checksum[i], m_seiReconPictureDigest.m_digest[i]);
}
m_bs.resetBits();
m_seiReconPictureDigest.write(m_bs, *slice->m_sps);
m_bs.writeByteAlignment();
m_nalList.serialize(NAL_UNIT_SUFFIX_SEI, m_bs);
}
uint64_t bytes = 0;
for (uint32_t i = 0; i < m_nalList.m_numNal; i++)
{
int type = m_nalList.m_nal[i].type;
// exclude SEI
if (type != NAL_UNIT_PREFIX_SEI && type != NAL_UNIT_SUFFIX_SEI)
{
bytes += m_nalList.m_nal[i].sizeBytes;
// and exclude start code prefix
bytes -= (!i || type == NAL_UNIT_SPS || type == NAL_UNIT_PPS) ? 4 : 3;
}
}
m_accessUnitBits = bytes << 3;//????
m_endCompressTime = x265_mdate();//获取compressframe完成完毕时间点
/* rateControlEnd may also block for earlier frames to call rateControlUpdateStats */
if (m_top->m_rateControl->rateControlEnd(m_frame, m_accessUnitBits, &m_rce) < 0)//???
m_top->m_aborted = true;//一般不进入  错误返回
/* Decrement referenced frame reference counts, allow them to be recycled */
for (int l = 0; l < numPredDir; l++)//遍历list
{
for (int ref = 0; ref < slice->m_numRefIdx[l]; ref++)//遍历每个List的参考帧
{
Frame *refpic = slice->m_refPicList[l][ref];//获取参考帧地址
ATOMIC_DEC(&refpic->m_countRefEncoders);//被参考计数减一
}
}
int numTLD;//当前并行线程个数
if (m_pool)
numTLD = m_param->bEnableWavefront ? m_pool->m_numWorkers : m_pool->m_numWorkers + m_pool->m_numProviders;//应用WPP,为当前机器的核数,否则为当前核数加上当前的线程个数
else
numTLD = 1;//不应用多线程,并行个数为1
if (m_nr)//如果需要去噪:功能:更新去噪偏移值
{
/* Accumulate NR statistics from all worker threads */
for (int i = 0; i < numTLD; i++)//遍历当前帧编码应用所有线程:累加当前帧各个系数的统计数字
{
NoiseReduction* nr = &m_tld[i].analysis.m_quant.m_frameNr[m_jpId];//获取当前线程对应当前帧的去噪类;i 确定当前帧的所有线程,如每个WPP行  m_jpId 确定当前帧
for (int cat = 0; cat < MAX_NUM_TR_CATEGORIES; cat++)//遍历所有TU类别
{
for (int coeff = 0; coeff < MAX_NUM_TR_COEFFS; coeff++)
m_nr->residualSum[cat][coeff] += nr->residualSum[cat][coeff];//累加当前帧所有TU对应位置的系数绝对值和
m_nr->count[cat] += nr->count[cat];//累加当前帧的TU计数
}
}
noiseReductionUpdate();//更新去噪偏移值
/* Copy updated NR coefficients back to all worker threads */
for (int i = 0; i < numTLD; i++)//遍历当前帧编码应用所有线程
{
NoiseReduction* nr = &m_tld[i].analysis.m_quant.m_frameNr[m_jpId];//获取当前线程对应当前帧的去噪类;i 确定当前帧的所有线程,如每个WPP行  m_jpId 确定当前帧
memcpy(nr->offsetDenoise, m_nr->offsetDenoise, sizeof(uint16_t) * MAX_NUM_TR_CATEGORIES * MAX_NUM_TR_COEFFS);//将当前更新的去噪偏移值拷贝到各个线程中的去噪偏移中去
memset(nr->count, 0, sizeof(uint32_t) * MAX_NUM_TR_CATEGORIES);//初始为0
memset(nr->residualSum, 0, sizeof(uint32_t) * MAX_NUM_TR_CATEGORIES * MAX_NUM_TR_COEFFS);//初始为0
}
}
#if DETAILED_CU_STATS//统计数据
/* Accumulate CU statistics from each worker thread, we could report
* per-frame stats here, but currently we do not. */
for (int i = 0; i < numTLD; i++)
m_cuStats.accumulate(m_tld[i].analysis.m_stats[m_jpId]);
#endif
m_endFrameTime = x265_mdate();//获取整帧全部完成的时间点
}
void FrameEncoder::encodeSlice()
{
Slice* slice = m_frame->m_encData->m_slice;
const uint32_t widthInLCUs = slice->m_sps->numCuInWidth;
const uint32_t lastCUAddr = (slice->m_endCUAddr + NUM_4x4_PARTITIONS - 1) / NUM_4x4_PARTITIONS;
const uint32_t numSubstreams = m_param->bEnableWavefront ? slice->m_sps->numCuInHeight : 1;
SAOParam* saoParam = slice->m_sps->bUseSAO ? m_frame->m_encData->m_saoParam : NULL;
for (uint32_t cuAddr = 0; cuAddr < lastCUAddr; cuAddr++)
{
uint32_t col = cuAddr % widthInLCUs;
uint32_t lin = cuAddr / widthInLCUs;
uint32_t subStrm = lin % numSubstreams;
CUData* ctu = m_frame->m_encData->getPicCTU(cuAddr);
m_entropyCoder.setBitstream(&m_outStreams[subStrm]);
// Synchronize cabac probabilities with upper-right CTU if it's available and we're at the start of a line.
if (m_param->bEnableWavefront && !col && lin)
{
m_entropyCoder.copyState(m_initSliceContext);
m_entropyCoder.loadContexts(m_rows[lin - 1].bufferedEntropy);
}
if (saoParam)
{
if (saoParam->bSaoFlag[0] || saoParam->bSaoFlag[1])
{
int mergeLeft = col && saoParam->ctuParam[0][cuAddr].mergeMode == SAO_MERGE_LEFT;
int mergeUp = lin && saoParam->ctuParam[0][cuAddr].mergeMode == SAO_MERGE_UP;
if (col)
m_entropyCoder.codeSaoMerge(mergeLeft);
if (lin && !mergeLeft)
m_entropyCoder.codeSaoMerge(mergeUp);
if (!mergeLeft && !mergeUp)
{
if (saoParam->bSaoFlag[0])
m_entropyCoder.codeSaoOffset(saoParam->ctuParam[0][cuAddr], 0);
if (saoParam->bSaoFlag[1])
{
m_entropyCoder.codeSaoOffset(saoParam->ctuParam[1][cuAddr], 1);
m_entropyCoder.codeSaoOffset(saoParam->ctuParam[2][cuAddr], 2);
}
}
}
else
{
for (int i = 0; i < 3; i++)
saoParam->ctuParam[i][cuAddr].reset();
}
}
// final coding (bitstream generation) for this CU
m_entropyCoder.encodeCTU(*ctu, m_cuGeoms[m_ctuGeomMap[cuAddr]]);
if (m_param->bEnableWavefront)
{
if (col == 1)
// Store probabilities of second CTU in line into buffer
m_rows[lin].bufferedEntropy.loadContexts(m_entropyCoder);
if (col == widthInLCUs - 1)
m_entropyCoder.finishSlice();
}
}
if (!m_param->bEnableWavefront)
m_entropyCoder.finishSlice();
}
/** 函数功能             : ??触发WPP(在threadMain()主动发起) 只进行一个CTU行即退出(其它CTU需要重新触发)
/*  调用范围             : 只在WaveFront::findJob函数中被调用
* \参数 row              : 当前的row号(CTU行*2+x)x=0 为编码  x=1为滤波
* \参数 threadId         : 当前的内核号
* \返回                  : null * */
void FrameEncoder::processRow(int row, int threadId)
{
int64_t startTime = x265_mdate();//获取处理当前row之前的时间点
if (ATOMIC_INC(&m_activeWorkerCount) == 1 && m_stallStartTime)//运行rows自加一 如果当前运行rows个数为1 并且 m_stallStartTime不为0
m_totalNoWorkerTime += x265_mdate() - m_stallStartTime;//统计当前帧编码占用的时间
const uint32_t realRow = row >> 1;//获取当前真正的CTU行号
const uint32_t typeNum = row & 1; //0 表示需要编码  1表示需要滤波
if (!typeNum)//如果当前需要编码
processRowEncoder(realRow, m_tld[threadId]);//???
else
{
m_frameFilter.processRow(realRow);//???
// NOTE: Active next row
if (realRow != m_numRows - 1)//如果当前不是最后一行
enqueueRowFilter(realRow + 1);//将当前row对应位置的map置为1 标记可以执行 启动下一行
}
if (ATOMIC_DEC(&m_activeWorkerCount) == 0)//当前rows编码结束 运行rows自减一
m_stallStartTime = x265_mdate();//记录时间点
m_totalWorkerElapsedTime += x265_mdate() - startTime; //统计所有CTU编码滤波占用的时间  not thread safe, but good enough
}
// Called by worker threads
/** 函数功能             : ??触发WPP(在threadMain()主动发起) 只进行一个CTU行即退出(其它CTU需要重新触发)
/*  调用范围             : 只在FrameEncoder::processRow(开启WPP时)和FrameEncoder::compressFrame(关闭WPP时)函数中被调用
* \参数 intRow           : 当前的CTU行号
* \参数 tld              : 对应核号的编码分析类
* \返回                  : null * */
void FrameEncoder::processRowEncoder(int intRow, ThreadLocalData& tld)
{
uint32_t row = (uint32_t)intRow;//获取当前CTU行号
CTURow& curRow = m_rows[row];//获取当前row存放位置
tld.analysis.m_param = m_param;//获取配置参数
if (m_param->bEnableWavefront)//如果应用WPP
{
ScopedLock self(curRow.lock);//局部锁
if (!curRow.active)
/* VBV restart is in progress, exit out */
return;//一般不会进入 (active一般能够保证为true)
if (curRow.busy)//一般为false 不会进入
{
/* On multi-socket Windows servers, we have seen problems with
* ATOMIC_CAS which resulted in multiple worker threads processing
* the same CU row, which often resulted in bad pointer accesses. We
* believe the problem is fixed, but are leaving this check in place
* to prevent crashes in case it is not */
x265_log(m_param, X265_LOG_WARNING,
"internal error - simultaneous row access detected. Please report HW to x265-devel@videolan.org\n");
return;
}
curRow.busy = true;//置为true 当前行正在进行
}
/* When WPP is enabled, every row has its own row coder instance. Otherwise
* they share row 0 */
Entropy& rowCoder = m_param->bEnableWavefront ? m_rows[row].rowGoOnCoder : m_rows[0].rowGoOnCoder;//????
FrameData& curEncData = *m_frame->m_encData;//获取当前编码帧相关数据
Slice *slice = curEncData.m_slice;//获取当前帧的slice
const uint32_t numCols = m_numCols;//获取当前行有多少CTU
const uint32_t lineStartCUAddr = row * numCols;//获取当前CTU行的第一个CTU在帧中的号
bool bIsVbv = m_param->rc.vbvBufferSize > 0 && m_param->rc.vbvMaxBitrate > 0;//是否应用VBV
while (curRow.completed < numCols)//遍历当前CTU行所有的CTU
{
ProfileScopeEvent(encodeCTU);
uint32_t col = curRow.completed;//获取当前CTU在CTU行的索引
const uint32_t cuAddr = lineStartCUAddr + col;//获取CTU在帧中的编号
CUData* ctu = curEncData.getPicCTU(cuAddr);//获取在帧中对应位置的CTU
ctu->initCTU(*m_frame, cuAddr, slice->m_sliceQp);//初始化CTU
if (bIsVbv)//如果应用VBV
{
if (!row)//第一行
{
curEncData.m_rowStat[row].diagQp = curEncData.m_avgQpRc;//获取在rateControlStart中预估qp参数值(未四舍五入)
curEncData.m_rowStat[row].diagQpScale = x265_qp2qScale(curEncData.m_avgQpRc);//获取在rateControlStart中预估qp参数值(未四舍五入) 对应的qscale
}
FrameData::RCStatCU& cuStat = curEncData.m_cuStat[cuAddr];//获取当前CTU统计信息的位置
if (row >= col && row && m_vbvResetTriggerRow != intRow)//如果当前行号大于等于列号(下三角) 并且 当前重置行号不是当前行号
cuStat.baseQp = curEncData.m_cuStat[cuAddr - numCols + 1].baseQp;//获取右上角CTU的baseQp
else
cuStat.baseQp = curEncData.m_rowStat[row].diagQp;//获取当前row的对角QP
/* TODO: use defines from slicetype.h for lowres block size */
uint32_t maxBlockCols = (m_frame->m_fencPic->m_picWidth + (16 - 1)) / 16;//一行有多少16x16块 对应下采样图像就是8x8块的个数
uint32_t maxBlockRows = (m_frame->m_fencPic->m_picHeight + (16 - 1)) / 16;//有多少16x16块行  对应下采样图像就是8x8块的行数
uint32_t noOfBlocks = g_maxCUSize / 16;//一个CTU块对应下采样8x8块的个数 的log2
uint32_t block_y = (cuAddr / curEncData.m_slice->m_sps->numCuInWidth) * noOfBlocks;//对应下采样图像的block坐标 y
uint32_t block_x = (cuAddr * noOfBlocks) - block_y * curEncData.m_slice->m_sps->numCuInWidth;//对应下采样图像的block坐标 x
cuStat.vbvCost = 0;//初始化为0
cuStat.intraVbvCost = 0;//初始化为0
for (uint32_t h = 0; h < noOfBlocks && block_y < maxBlockRows; h++, block_y++)//遍历行(被CTU覆盖住的部分)
{
uint32_t idx = block_x + (block_y * maxBlockCols);//当前行的第一个8x8块(被CTU覆盖住的部分)
for (uint32_t w = 0; w < noOfBlocks && (block_x + w) < maxBlockCols; w++, idx++)//遍历当前行的8x8块(被CTU覆盖住的部分)
{
cuStat.vbvCost += m_frame->m_lowres.lowresCostForRc[idx] & LOWRES_COST_MASK;//累加对应8x8块的对应最优CUcost
cuStat.intraVbvCost += m_frame->m_lowres.intraCost[idx];//累加对应8x8块的对应intraCUcost
}
}
}
else //如果不应用VBV
curEncData.m_cuStat[cuAddr].baseQp = curEncData.m_avgQpRc;//获取在rateControlStart中 获取预估qp参数值(未四舍五入)
if (m_param->bEnableWavefront && !col && row)//如果应用WPP 并且 是第一列 并且不是第一行
{
// Load SBAC coder context from previous row and initialize row state.
rowCoder.copyState(m_initSliceContext);//????
rowCoder.loadContexts(m_rows[row - 1].bufferedEntropy);
}
// Does all the CU analysis, returns best top level mode decision
Mode& best = tld.analysis.compressCTU(*ctu, *m_frame, m_cuGeoms[m_ctuGeomMap[cuAddr]], rowCoder);//????
// take a sample of the current active worker count
ATOMIC_ADD(&m_totalActiveWorkerCount, m_activeWorkerCount);//统计m_activeWorkerCount和
ATOMIC_INC(&m_activeWorkerCountSamples);//当前帧已经编码分析完毕的CTU个数
/* advance top-level row coder to include the context of this CTU.
* if SAO is disabled, rowCoder writes the final CTU bitstream */
rowCoder.encodeCTU(*ctu, m_cuGeoms[m_ctuGeomMap[cuAddr]]);//?????
if (m_param->bEnableWavefront && col == 1)//????
// Save CABAC state for next row
curRow.bufferedEntropy.loadContexts(rowCoder);
// Completed CU processing
curRow.completed++;//计数当前CTU行编码的CTU个数
FrameStats frameLog;//???
curEncData.m_rowStat[row].sumQpAq += collectCTUStatistics(*ctu, &frameLog);//统计当前CTU划分角度qp等信息   累加当前CTU的所有mqp
// copy no. of intra, inter Cu cnt per row into frame stats for 2 pass
if (m_param->rc.bStatWrite)//????
{
curRow.rowStats.mvBits    += best.mvBits;//累加MVbits
curRow.rowStats.coeffBits += best.coeffBits;
curRow.rowStats.miscBits  += best.totalBits - (best.mvBits + best.coeffBits);
for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)
{
/* 1 << shift == number of 8x8 blocks at current depth */
int shift = 2 * (g_maxCUDepth - depth);
int cuSize = g_maxCUSize >> depth;
if (cuSize == 8)
curRow.rowStats.intra8x8Cnt += (int)(frameLog.cntIntra[depth] + frameLog.cntIntraNxN);
else
curRow.rowStats.intra8x8Cnt += (int)(frameLog.cntIntra[depth] << shift);
curRow.rowStats.inter8x8Cnt += (int)(frameLog.cntInter[depth] << shift);
curRow.rowStats.skip8x8Cnt += (int)((frameLog.cntSkipCu[depth] + frameLog.cntMergeCu[depth]) << shift);
}
}
curRow.rowStats.totalCtu++;//???
curRow.rowStats.lumaDistortion   += best.lumaDistortion;//???
curRow.rowStats.chromaDistortion += best.chromaDistortion;//???
curRow.rowStats.psyEnergy        += best.psyEnergy;//???
curRow.rowStats.cntIntraNxN      += frameLog.cntIntraNxN;//???
curRow.rowStats.totalCu          += frameLog.totalCu;//???
for (uint32_t depth = 0; depth <= g_maxCUDepth; depth++)//???
{
curRow.rowStats.cntSkipCu[depth] += frameLog.cntSkipCu[depth];
curRow.rowStats.cntMergeCu[depth] += frameLog.cntMergeCu[depth];
for (int m = 0; m < INTER_MODES; m++)
curRow.rowStats.cuInterDistribution[depth][m] += frameLog.cuInterDistribution[depth][m];
for (int n = 0; n < INTRA_MODES; n++)
curRow.rowStats.cuIntraDistribution[depth][n] += frameLog.cuIntraDistribution[depth][n];
}
/* calculate maximum and average luma levels */
uint32_t ctuLumaLevel = 0;//用于统计当前CTU的像素和
uint32_t ctuNoOfPixels = best.fencYuv->m_size * best.fencYuv->m_size;//CTU中像素的总个数
for (uint32_t i = 0; i < ctuNoOfPixels; i++)//遍历所有像素值
{
pixel p = best.fencYuv->m_buf[0][i];//取相应位置的亮度像素值
ctuLumaLevel += p;//累加像素值
curRow.rowStats.maxLumaLevel = X265_MAX(p, curRow.rowStats.maxLumaLevel);//获取亮度最大值
}
curRow.rowStats.lumaLevel += (double)(ctuLumaLevel) / ctuNoOfPixels;//获取当前CTU的平均值 并累加到 CTU行中
curEncData.m_cuStat[cuAddr].totalBits = best.totalBits;//???
x265_emms();//清除MMX寄存器中的内容,即初始化(以避免和浮点数操作发生冲突)。
if (bIsVbv)//应用VBV
{
// Update encoded bits, satdCost, baseQP for each CU
curEncData.m_rowStat[row].diagSatd      += curEncData.m_cuStat[cuAddr].vbvCost;//累加当前CTU的 对应下采样计算的SATD值
curEncData.m_rowStat[row].diagIntraSatd += curEncData.m_cuStat[cuAddr].intraVbvCost;//累加当前CTU的 对应下采样计算的intraSATD值
curEncData.m_rowStat[row].encodedBits   += curEncData.m_cuStat[cuAddr].totalBits;//???
curEncData.m_rowStat[row].sumQpRc       += curEncData.m_cuStat[cuAddr].baseQp;//累加当前CTU的 baseQp
curEncData.m_rowStat[row].numEncodedCUs = cuAddr;//更新当前CTU行最后一个编码完毕的CTU位置
// If current block is at row diagonal checkpoint, call vbv ratecontrol.
if (row == col && row)//对角线上 并且出去帧中的第一个CTU
{
double qpBase = curEncData.m_cuStat[cuAddr].baseQp;//??????
int reEncode = m_top->m_rateControl->rowDiagonalVbvRateControl(m_frame, row, &m_rce, qpBase);//更新predictor、根据当前编码情况估计当前帧占用的bits、计算最优qp参数值、判断是否需要重新编码
qpBase = x265_clip3((double)QP_MIN, (double)QP_MAX_MAX, qpBase);//clip最新QP 防止越界
curEncData.m_rowStat[row].diagQp = qpBase;//更新最新的对角QP
curEncData.m_rowStat[row].diagQpScale =  x265_qp2qScale(qpBase);//更新最新对角Qscale
if (reEncode < 0)//如果需要重新编码
{
x265_log(m_param, X265_LOG_DEBUG, "POC %d row %d - encode restart required for VBV, to %.2f from %.2f\n",
m_frame->m_poc, row, qpBase, curEncData.m_cuStat[cuAddr].baseQp);
// prevent the WaveFront::findJob() method from providing new jobs
m_vbvResetTriggerRow = row;//记录当前需要重新编码的CTU行号
m_bAllRowsStop = true;//标记为true 停止当前所有CTU的编码
for (uint32_t r = m_numRows - 1; r >= row; r--)//从最后一行到现在的行号  因为有参考关系 当前以后的行号多需要重新编码
{
CTURow& stopRow = m_rows[r];//获取当前CTU行
if (r != row)//如果不是在当前CTU行
{
/* if row was active (ready to be run) clear active bit and bitmap bit for this row */
stopRow.lock.acquire();//对当前CTU行加锁 防止多线程破坏数据
while (stopRow.active)
{
if (dequeueRow(r * 2))//将当前row标记为不可执行
stopRow.active = false;//标记当前行不可执行  一般不进入此
else
{
/* we must release the row lock to allow the thread to exit */
stopRow.lock.release();
GIVE_UP_TIME();//放弃此线程
stopRow.lock.acquire();
}
}
stopRow.lock.release();//释放锁
bool bRowBusy = true;//标记当前状态
do
{
stopRow.lock.acquire();//对当前CTU行加锁 防止多线程破坏数据
bRowBusy = stopRow.busy;//获取当前CTU行的编码状态
stopRow.lock.release();//释放锁
if (bRowBusy)//如果忙
{
GIVE_UP_TIME();//放弃此线程
}
}
while (bRowBusy);
}
m_outStreams[r].resetBits();//初始化bits
stopRow.completed = 0;//重新计数 当前CTU行重新编码
memset(&stopRow.rowStats, 0, sizeof(stopRow.rowStats));//将当前CTU行的统计信息重置为0
curEncData.m_rowStat[r].numEncodedCUs = 0;//重新编码 初始化为0
curEncData.m_rowStat[r].encodedBits = 0;//重新编码 初始化为0
curEncData.m_rowStat[r].diagSatd = 0;//重新编码 初始化为0
curEncData.m_rowStat[r].diagIntraSatd = 0;//重新编码 初始化为0
curEncData.m_rowStat[r].sumQpRc = 0;//重新编码 初始化为0
curEncData.m_rowStat[r].sumQpAq = 0;//重新编码 初始化为0
}
m_bAllRowsStop = false;
}
}
}
/* SAO parameter estimation using non-deblocked pixels for CTU bottom and right boundary areas */
if (m_param->bEnableSAO && m_param->bSaoNonDeblocked)  //???
m_frameFilter.m_sao.calcSaoStatsCu_BeforeDblk(m_frame, col, row);
if (m_param->bEnableWavefront && curRow.completed >= 2 && row < m_numRows - 1 &&   //应用WPP  并且 当前行已经编码完毕两个CTU以上  并且当前行不是最后一行
(!m_bAllRowsStop || intRow + 1 < m_vbvResetTriggerRow))                        //并且 (当前行的下一行没有标记重新编码)
{
/* activate next row */
ScopedLock below(m_rows[row + 1].lock); //局部加锁 当前行的下一行
if (m_rows[row + 1].active == false &&
m_rows[row + 1].completed + 2 <= curRow.completed) //如果当前行的下一CTU行没有触发  并且当前CTU行完成的CTU个数比下一CTU行完成的CTU个数至少多余两个
{
m_rows[row + 1].active = true;//将下一行标记为true
enqueueRowEncoder(row + 1);//标记下一行可以触发
tryWakeOne(); //当前行完成CTU个数两个以上 触发下一个CTU行, 在findjob中运行 /* wake up a sleeping thread or set the help wanted flag */
}
}
ScopedLock self(curRow.lock);//局部锁 当前CTU行
if ((m_bAllRowsStop && intRow > m_vbvResetTriggerRow) ||   //如果有CTU行重新编码标记 并且 当前CTU行号大于需要重新编码的CTU行号 或者
(row > 0 && curRow.completed < numCols - 1 && m_rows[row - 1].completed < m_rows[row].completed + 2))// 当前行号不是第一行 并且 当前行完成编码CTU数目小于总数减一  并且 上一行完成CTU个数少于当前行完成个数加2
{
curRow.active = false;//当前将置为不可用
curRow.busy = false;//将当前行置为不忙
ATOMIC_INC(&m_countRowBlocks);//计数正在运行的CTU行因为上一行没有完成完毕而强制退出的个数 在帧编码前 初始化为0
return;//退出
}
}//结束while (curRow.completed < numCols)
/** this row of CTUs has been compressed **/
//编码完毕CTU行  后序部分一帧只进入CTU行次
/* If encoding with ABR, update update bits and complexity in rate control
* after a number of rows so the next frame's rateControlStart has more
* accurate data for estimation. At the start of the encode we update stats
* after half the frame is encoded, but after this initial period we update
* after refLagRows (the number of rows reference frames must have completed
* before referencees may begin encoding) */
uint32_t rowCount = 0;//用于存储需要RC更新的CTU行数位置  (我们需要在编码一定CTU行数后 更新RC 这样我们下一次rateControlStart估计QP时会更准确,开始时选用一半CTU行数 后面选择延迟的CTU行数)
if (m_param->rc.rateControlMode == X265_RC_ABR || bIsVbv)//如果当前应用ABR或者应用VBV
{
if ((uint32_t)m_rce.encodeOrder <= 2 * (m_param->fpsNum / m_param->fpsDenom))//如果当前编码帧数 小于等于两秒钟播放帧数
rowCount = X265_MIN((m_numRows + 1) / 2, m_numRows - 1);//取一半的CTU行数(向上取整)
else
rowCount = X265_MIN(m_refLagRows, m_numRows - 1);//取延迟的CTU行数
if (row == rowCount)//达到指定CTU行数
{
m_rce.rowTotalBits = 0;//初始化为0 (用于累加当前帧rowcount以前CTU行占用的总bits)
if (bIsVbv)//如果应用VBV
for (uint32_t i = 0; i < rowCount; i++)//累加当前帧指定行的bits总是
m_rce.rowTotalBits += curEncData.m_rowStat[i].encodedBits;//(当前rowCount以前各个CTU行中的CTU应编码完毕)
else
for (uint32_t cuAddr = 0; cuAddr < rowCount * numCols; cuAddr++)
m_rce.rowTotalBits += curEncData.m_cuStat[cuAddr].totalBits;//不应用VBV 没有存储curEncData.m_rowStat[i].encodedBits信息,所以将各个CTU中进行累加
m_top->m_rateControl->rateControlUpdateStats(&m_rce);//当前帧编码一半时即时更新数据,便于后续帧快速估计  更新m_startEndOrder计数
}
}
/* flush row bitstream (if WPP and no SAO) or flush frame if no WPP and no SAO */
if (!m_param->bEnableSAO && (m_param->bEnableWavefront || row == m_numRows - 1))//????
rowCoder.finishSlice();
if (m_param->bEnableWavefront)//如果开启WPP
{
/* trigger row-wise loop filters */
if (row >= m_filterRowDelay)//如果当前行号大于延迟数
{
enableRowFilter(row - m_filterRowDelay);//标记最近可以滤波的CTU行
/* NOTE: Activate filter if first row (row 0) */
if (row == m_filterRowDelay)
enqueueRowFilter(0);//标记第一CTU可滤波
tryWakeOne();//触发
}
if (row == m_numRows - 1)//如果当前为最后一个CTU行
{
for (uint32_t i = m_numRows - m_filterRowDelay; i < m_numRows; i++)//将剩余的CTU行全部标记可滤波
enableRowFilter(i);//标记可滤波滤波
tryWakeOne();//触发
}
}
tld.analysis.m_param = NULL;//编码参数置空
curRow.busy = false;//标记当前CTU行不忙
if (ATOMIC_INC(&m_completionCount) == 2 * (int)m_numRows)//???
m_completionEvent.trigger();
}
/* collect statistics about CU coding decisions, return total QP */
/** 函数功能             : 统计当前CTU划分角度qp等信息
/*  调用范围             : 只在processRowEncoder函数中被调用
* \参数 ctu              : 当前刚编码完毕的CTU
* \参数 log              : processRowEncoder 临时变量用于统计CTU信息
* \返回                  : 返回当前CTU下所有的CU的mqp*CU占用的4x4块个数 * */
int FrameEncoder::collectCTUStatistics(const CUData& ctu, FrameStats* log)
{
int totQP = 0;//累加所有ctu.mqp和
if (ctu.m_slice->m_sliceType == I_SLICE)//如果当前为I帧
{
uint32_t depth = 0;//初始深度0
/*以CTU64x64为例
ctu.m_numPartitions >> (0 * 2) = 256   depth = 0 64x64  有256个4x4块
ctu.m_numPartitions >> (1 * 2) = 64    depth = 1 32x32  有64 个4x4块
ctu.m_numPartitions >> (2 * 2) = 16    depth = 2 16x16  有16 个4x4块
ctu.m_numPartitions >> (3 * 2) = 4     depth = 3  8x 8  有 4 个4x4块
**/
for (uint32_t absPartIdx = 0; absPartIdx < ctu.m_numPartitions; absPartIdx += ctu.m_numPartitions >> (depth * 2))//遍历下一个CU
{
depth = ctu.m_cuDepth[absPartIdx];//获取当前CU深度
log->totalCu++;//计数CU
log->cntIntra[depth]++;//计数intraCU处在当前深度的个数
totQP += ctu.m_qp[absPartIdx] * (ctu.m_numPartitions >> (depth * 2));//计数当前CU的mqp*4x4块个数
if (ctu.m_predMode[absPartIdx] == MODE_NONE)//当前处于边界 不是具体CU
{
log->totalCu--;//撤销计数
log->cntIntra[depth]--;//撤销计数
}
else if (ctu.m_partSize[absPartIdx] != SIZE_2Nx2N)//当前是NxN模式
{
/* TODO: log intra modes at absPartIdx +0 to +3 */
X265_CHECK(ctu.m_log2CUSize[absPartIdx] == 3 && ctu.m_slice->m_sps->quadtreeTULog2MinSize < 3, "Intra NxN found at improbable depth\n");
log->cntIntraNxN++;//统计intra NxN个数
log->cntIntra[depth]--;//撤销计数
}
else if (ctu.m_lumaIntraDir[absPartIdx] > 1)//如果是角度模式
log->cuIntraDistribution[depth][ANGULAR_MODE_ID]++;//计数角度模式
else
log->cuIntraDistribution[depth][ctu.m_lumaIntraDir[absPartIdx]]++;//计数Planar DC模式
}
}
else
{
uint32_t depth = 0;//初始深度0
for (uint32_t absPartIdx = 0; absPartIdx < ctu.m_numPartitions; absPartIdx += ctu.m_numPartitions >> (depth * 2))//遍历下一个CU
{
depth = ctu.m_cuDepth[absPartIdx];//获取当前CU深度
log->totalCu++;//计数CU
totQP += ctu.m_qp[absPartIdx] * (ctu.m_numPartitions >> (depth * 2));//计数当前CU的mqp*4x4块个数
if (ctu.m_predMode[absPartIdx] == MODE_NONE)//当前处于边界 不是具体CU
log->totalCu--;//撤销计数
else if (ctu.isSkipped(absPartIdx))//如果是skip模式
{
if (ctu.m_mergeFlag[0])//当前是否应用merge模式
log->cntMergeCu[depth]++;//计数merge
else
log->cntSkipCu[depth]++;//计数skip
}
else if (ctu.isInter(absPartIdx))//当前是inter
{
log->cntInter[depth]++;//计数inter块个数
if (ctu.m_partSize[absPartIdx] < AMP_ID)//矩阵模式 SIZE_2Nx2N SIZE_2NxN SIZE_Nx2N
log->cuInterDistribution[depth][ctu.m_partSize[absPartIdx]]++;
else //非对称模式以及NxN模式 SIZE_NxN,SIZE_2NxnU,SIZE_2NxnD,SIZE_nLx2N,SIZE_nRx2N
log->cuInterDistribution[depth][AMP_ID]++;
}
else if (ctu.isIntra(absPartIdx))//intra CU
{
log->cntIntra[depth]++;//计数intra CU
if (ctu.m_partSize[absPartIdx] != SIZE_2Nx2N)//NxN模式
{
X265_CHECK(ctu.m_log2CUSize[absPartIdx] == 3 && ctu.m_slice->m_sps->quadtreeTULog2MinSize < 3, "Intra NxN found at improbable depth\n");
log->cntIntraNxN++;//计数当前intra CU是NxN划分的个数
log->cntIntra[depth]--;//撤销计数
/* TODO: log intra modes at absPartIdx +0 to +3 */
}
else if (ctu.m_lumaIntraDir[absPartIdx] > 1)//如果是角度模式
log->cuIntraDistribution[depth][ANGULAR_MODE_ID]++;//计数角度模式
else
log->cuIntraDistribution[depth][ctu.m_lumaIntraDir[absPartIdx]]++;//计数Planar DC模式
}
}
}
return totQP;//返回当前CTU下所有的CU的mqp*CU占用的4x4块个数
}
/* DCT-domain noise reduction / adaptive deadzone from libavcodec */
/** 函数功能             : 更新去噪偏移值
/*  调用范围             : 只在compressFrame()函数中被调用
* \返回                  : null * */
void FrameEncoder::noiseReductionUpdate()
{
static const uint32_t maxBlocksPerTrSize[4] = {1 << 18, 1 << 16, 1 << 14, 1 << 12};//统计的最大个数
for (int cat = 0; cat < MAX_NUM_TR_CATEGORIES; cat++)//遍历所有TU类别个数
{
int trSize = cat & 3;//TU大小 - 2
int coefCount = 1 << ((trSize + 2) * 2);//当前TU的系数个数
if (m_nr->count[cat] > maxBlocksPerTrSize[trSize])//如果统计个数大于最大统计个数
{
for (int i = 0; i < coefCount; i++)//遍历所有系数位置的绝对值和
m_nr->residualSum[cat][i] >>= 1;//将统计的DCT系数绝对值和除以2
m_nr->count[cat] >>= 1;//将统计个数除以2
}
int nrStrength = cat < 8 ? m_param->noiseReductionIntra : m_param->noiseReductionInter;//选取去噪因子(intra/inter)
uint64_t scaledCount = (uint64_t)nrStrength * m_nr->count[cat];//去噪强度
for (int i = 0; i < coefCount; i++)//遍历所有系数
{
uint64_t value = scaledCount + m_nr->residualSum[cat][i] / 2;//去噪强度,m_nr->residualSum[cat][i] / 2用于四舍五入
uint64_t denom = m_nr->residualSum[cat][i] + 1;//分母: 对应位置的系数绝对值和
m_nr->offsetDenoise[cat][i] = (uint16_t)(value / denom);//去噪偏移值  去噪结果:AC系数的绝对值 - offset)*符号
}
// Don't denoise DC coefficients
m_nr->offsetDenoise[cat][0] = 0;//DC系数不去噪
}
}
Frame *FrameEncoder::getEncodedPicture(NALList& output)
{
if (m_frame)//m_done一开始为wait ,待初始化完毕触发一次 但是此时m_frame为null 因为m_frame 在m_enable.trigger()前为null
{
/* block here until worker thread completes */
m_done.wait();//执行wait操作,等待编码完毕
Frame *ret = m_frame;
m_frame = NULL;
output.takeContents(m_nalList);
m_prevOutputTime = x265_mdate();
return ret;
}
return NULL;
}
}

x265-1.8版本-encoder/frameencoder.cpp注释相关推荐

  1. x265-1.7版本-encoder/frameencoder.cpp注释

    注:问号以及未注释部分 会在x265-1.8版本内更新 /*********************************************************************** ...

  2. x265-1.8版本-encoder/frameencoder.h注释

    注:问号以及未注释部分 会在x265-1.9版本内更新 /*********************************************************************** ...

  3. x265-1.7版本-encoder/frameencoder.h注释

    注:问号以及未注释部分 会在x265-1.8版本内更新 /*********************************************************************** ...

  4. x265-1.8版本-encoder/ratecontrol.cpp注释

    注:问号以及未注释部分 会在x265-1.9版本内更新 /*********************************************************************** ...

  5. x265-1.8版本-encoder/dpb.cpp注释

    注:问号以及未注释部分 会在x265-1.9版本内更新 /*********************************************************************** ...

  6. x265-1.7版本-encoder/dpb.cpp注释

    注:问号以及未注释部分 会在x265-1.8版本内更新 /*********************************************************************** ...

  7. x265-1.7版本-encoder/slicetype.cpp注释

    注:问号以及未注释部分 会在x265-1.8版本内更新 /*********************************************************************** ...

  8. x265-1.8版本-encoder/slicetype.cpp注释

    注:问号以及未注释部分 会在x265-1.9版本内更新 /*********************************************************************** ...

  9. x265-1.8版本-encoder/motion.cpp注释

    注:问号以及未注释部分 会在x265-1.9版本内更新 /*********************************************************************** ...

最新文章

  1. [C编码笔记] 空串与NULL是不一样的
  2. Django的model模型
  3. matlab径向分布函数作图_常见的概率分布(matlab作图)
  4. [转]基于图的机器学习技术:谷歌众多产品和服务背后的智能
  5. 虽然有失落的即时通讯
  6. java调用FFmpeg及mencoder转换视频为FLV并截图
  7. mysql简单指令_Mysql基本指令
  8. python编写一个程序、计算字符串中子串出现的次数_急求。。。C语言实现,计算字符串中子串出现的次数,就是先输入一个字符串,再输入一个上面字符串中存在...
  9. controller层没反应_打过蜡的石材再做结晶没效果怎么办?
  10. trackpoint_为什么TrackPoint键盘是ThinkPad爱好者的好礼物
  11. 区块链-WeBase企业部署
  12. 容器服务ACK+容器网络文件系统CNFS快速搭建NGINX网站(体验有礼)
  13. adc芯片资料——电子迷你秤芯片CS1180
  14. R3 3200G和R3 2200G性能差距 锐龙R33200G和R32200G对比
  15. vue + gifshot 实现GIF动图
  16. 数据挖掘中最容易犯的几个错误,你知道吗?
  17. linux定时启动服务
  18. 每天一个---- 吉尔德定律和迈特卡尔定律
  19. java二维数组遍历与元素查找
  20. VC Socket编程源码

热门文章

  1. 社区发现算法——SCAN算法
  2. vue:日历表格(element-ui)
  3. 使用carbide生成sisx文件
  4. java实现约瑟夫环完整算法_Java简单实现约瑟夫环算法示例
  5. 【网络】trunk和vlan配置
  6. ping添加时间输出
  7. COOX培训材料 — SCADA(7.Buffer)
  8. DirectX11 With Windows SDK--06 键盘和鼠标输入
  9. 恭贺象过河软件在郑州市场3年内突破2000家用户
  10. VR视频播放器Video Player