Program:
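To make the listing below self-contained, here is a minimal sketch of the declarations it relies on; the typedefs, the stdlib.h include and the forward declarations are my assumptions, since the original presumably gets them from its own clahe.h:

#include <stdlib.h>               /* malloc, free */

typedef unsigned char  uchar;     /* assumed shorthand types used throughout the listing */
typedef unsigned int   uint;
typedef unsigned long  ulong;

void MakeLut(uchar* pLUT, uchar Min, uchar Max, uint uiNrBins);
void MakeHistogram(uchar* pImage, uint uiXRes, uint uiSizeX, uint uiSizeY,
                   ulong* pulHistogram, uint uiNrGreylevels, uchar* pLookupTable);
void MapHistogram(ulong* pulHistogram, uchar Min, uchar Max,
                  uint uiNrGreylevels, ulong ulNrOfPixels);
void ClipHistogram(ulong* pulHistogram, uint uiNrGreylevels, ulong ulClipLimit);
void Interpolate(uchar* pImage, int uiXRes, ulong* pulMapLU, ulong* pulMapRU,
                 ulong* pulMapLB, ulong* pulMapRB, uint uiXSize, uint uiYSize, uchar* pLUT);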

/*
pImage:     input/output image buffer (processed in place)
uiXRes:     image width; for an interleaved color image pass width*3
uiYRes:     image height
Min:        minimum pixel value of the image
Max:        maximum pixel value of the image
uiNrX:      number of tiles in the x direction (intended range roughly 2..16)
uiNrY:      number of tiles in the y direction (intended range roughly 2..16)
uiNrBins:   number of grey-level bins
fCliplimit: normalized clip limit (actual limit = fCliplimit * tile pixels / uiNrBins);
            values <= 0 disable clipping, larger values allow more contrast
*/
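As a quick orientation for the parameters above, a minimal call might look like the following (a sketch only; buf, w and h are hypothetical names for an 8-bit grayscale buffer and its dimensions, and the tile/clip values mirror the tests at the end of the post):

/* buf holds w*h grey values in [0,255]; the result is written back into buf */
CLAHE(buf, w, h, 0, 255, 2, 2, 256, 2.0f);   /* 2x2 tiles, 256 bins, clip factor 2.0 */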

void CLAHE(uchar* pImage, uint uiXRes, uint uiYRes, uchar Min,
           uchar Max, uint uiNrX, uint uiNrY, uint uiNrBins, float fCliplimit)
{
    uint uiX, uiY;
    uint uiXSize, uiYSize;                  // width and height of one tile, in pixels
    uint uiSubX, uiSubY;
    uint uiXL, uiXR, uiYU, uiYB;            // tile indices used for bilinear interpolation
    ulong ulClipLimit, ulNrPixels;          // clip limit and number of pixels per tile
    uchar* pImPointer;
    uchar aLUT[256];
    ulong* pulHist;                         // histogram of the current tile
    ulong* pulMapArray;                     // mapping functions, one per tile
    ulong *pulLU, *pulLB, *pulRU, *pulRB;   // mappings of the neighbouring tiles: left-up, left-bottom, right-up, right-bottom

    pulMapArray = (ulong*)malloc(sizeof(ulong) * uiNrX * uiNrY * uiNrBins);

    uiXSize = uiXRes / uiNrX;
    uiYSize = uiYRes / uiNrY;
    ulNrPixels = (ulong)uiXSize * (ulong)uiYSize;

    if (fCliplimit > 0.0)
    {
        // compute the actual clip limit
        ulClipLimit = (ulong)(fCliplimit * (uiXSize * uiYSize) / uiNrBins);
        ulClipLimit = (ulClipLimit < 1UL) ? 1UL : ulClipLimit;
    }
    else
    {
        ulClipLimit = 1UL << 14;            // very large clip limit: effectively no clipping
    }

    MakeLut(aLUT, Min, Max, uiNrBins);      // build the grey-level lookup table

    // compute the clipped, equalized mapping for every tile
    for (uiY = 0, pImPointer = pImage; uiY < uiNrY; uiY++)
    {
        for (uiX = 0; uiX < uiNrX; uiX++, pImPointer += uiXSize)
        {
            pulHist = &pulMapArray[uiNrBins * (uiY * uiNrX + uiX)];
            MakeHistogram(pImPointer, uiXRes, uiXSize, uiYSize, pulHist, uiNrBins, aLUT);
            ClipHistogram(pulHist, uiNrBins, ulClipLimit);
            MapHistogram(pulHist, Min, Max, uiNrBins, ulNrPixels);
        }
        pImPointer += (uiYSize - 1) * uiXRes;   // skip lines, set pointer to the next row of tiles
    }

    // bilinear interpolation between the tile mappings
    for (pImPointer = pImage, uiY = 0; uiY < uiNrY; uiY++)
    {
        uiSubY = uiYSize;
        if (uiY == uiNrY - 1)
        {
            uiYU = uiNrY - 1;
            uiYB = uiNrY - 1;
        }
        else
        {
            uiYU = uiY;
            uiYB = uiY + 1;
        }
        for (uiX = 0; uiX < uiNrX; uiX++)
        {
            uiSubX = uiXSize;
            if (uiX == uiNrX - 1)
            {
                uiXL = uiNrX - 1;
                uiXR = uiNrX - 1;
            }
            else
            {
                uiXL = uiX;
                uiXR = uiX + 1;
            }
            pulLU = &pulMapArray[uiNrBins * (uiYU * uiNrX + uiXL)];
            pulRU = &pulMapArray[uiNrBins * (uiYU * uiNrX + uiXR)];
            pulLB = &pulMapArray[uiNrBins * (uiYB * uiNrX + uiXL)];
            pulRB = &pulMapArray[uiNrBins * (uiYB * uiNrX + uiXR)];
            Interpolate(pImPointer, uiXRes, pulLU, pulRU, pulLB, pulRB, uiSubX, uiSubY, aLUT);
            pImPointer += uiSubX;               // set pointer on next tile
        }
        pImPointer += (uiSubY - 1) * uiXRes;
    }

    free(pulMapArray);                          // free space for the tile histograms
}

/* build a LUT that maps grey values in [Min, Max] onto bin indices 0..uiNrBins-1 */
void MakeLut(uchar* pLUT, uchar Min, uchar Max, uint uiNrBins)
{
    uchar BinSize = (uchar)(1 + (Max - Min) / uiNrBins);
    for (int i = Min; i <= Max; i++)
        pLUT[i] = (i - Min) / BinSize;
}

/* build the histogram of one tile of size uiSizeX x uiSizeY inside an image of row length uiXRes */
void MakeHistogram(uchar* pImage, uint uiXRes, uint uiSizeX, uint uiSizeY,
                   ulong* pulHistogram, uint uiNrGreylevels, uchar* pLookupTable)
{
    uchar* pImagePointer;
    uint i;

    for (i = 0; i < uiNrGreylevels; i++)
        pulHistogram[i] = 0L;                   /* clear histogram */

    for (i = 0; i < uiSizeY; i++)
    {
        pImagePointer = &pImage[uiSizeX];       /* points just past the current tile row */
        while (pImage < pImagePointer)
            pulHistogram[pLookupTable[*pImage++]]++;
        pImagePointer += uiXRes;
        pImage = pImagePointer - uiSizeX;       /* jump to the start of the next tile row */
    }
}

/* turn the (clipped) histogram into a cumulative mapping scaled to [Min, Max] */
void MapHistogram(ulong* pulHistogram, uchar Min, uchar Max,
                  uint uiNrGreylevels, ulong ulNrOfPixels)
{
    uint i;
    ulong ulSum = 0;
    float fScale = ((float)(Max - Min)) / ulNrOfPixels;
    ulong ulMin = (ulong)Min;

    for (i = 0; i < uiNrGreylevels; i++)
    {
        ulSum += pulHistogram[i];
        pulHistogram[i] = (ulong)(ulMin + ulSum * fScale);
        if (pulHistogram[i] > Max)
            pulHistogram[i] = Max;
    }

    // equivalent fixed 0..255 version kept from the original post:
    // int I, Sum = 0, Amount = 0;
    // for (I = 0; I < 256; I++) Amount += pulHistogram[I];
    // for (I = 0; I < 256; I++)
    // {
    //     Sum += pulHistogram[I];
    //     pulHistogram[I] = Sum * 255 / Amount;   // cumulative distribution
    // }
}

void ClipHistogram(ulong* pulHistogram, uint uiNrGreylevels, ulong ulClipLimit)
{
    ulong *pulBinPointer, *pulEndPointer, *pulHisto;
    ulong ulNrExcess, ulOldNrExcess, ulUpper, ulBinIncr, ulStepSize, i;
    long lBinExcess;

    ulNrExcess = 0;
    pulBinPointer = pulHistogram;
    for (i = 0; i < uiNrGreylevels; i++)
    {   /* calculate total number of excess pixels */
        lBinExcess = (long)pulBinPointer[i] - (long)ulClipLimit;
        if (lBinExcess > 0) ulNrExcess += lBinExcess;       /* excess in current bin */
    }

    /* Second part: clip histogram and redistribute excess pixels in each bin */
    ulBinIncr = ulNrExcess / uiNrGreylevels;                /* average bin increment */
    ulUpper = ulClipLimit - ulBinIncr;      /* Bins larger than ulUpper set to cliplimit */
    for (i = 0; i < uiNrGreylevels; i++)
    {
        if (pulHistogram[i] > ulClipLimit)
            pulHistogram[i] = ulClipLimit;                  /* clip bin */
        else
        {
            if (pulHistogram[i] > ulUpper)
            {   /* high bin count */
                //ulNrExcess -= (pulHistogram[i] - ulUpper); pulHistogram[i]=ulClipLimit;
                ulNrExcess -= (ulClipLimit - pulHistogram[i]);
                pulHistogram[i] = ulClipLimit;
            }
            else
            {   /* low bin count */
                ulNrExcess -= ulBinIncr;
                pulHistogram[i] += ulBinIncr;
            }
        }
    }

    do {   /* Redistribute remaining excess */
        pulEndPointer = &pulHistogram[uiNrGreylevels];
        pulHisto = pulHistogram;
        ulOldNrExcess = ulNrExcess;     /* Store number of excess pixels for test later. */
        while (ulNrExcess && pulHisto < pulEndPointer)
        {
            ulStepSize = uiNrGreylevels / ulNrExcess;
            if (ulStepSize < 1)
                ulStepSize = 1;         /* step size at least 1 */
            for (pulBinPointer = pulHisto; pulBinPointer < pulEndPointer && ulNrExcess;
                 pulBinPointer += ulStepSize)
            {
                if (*pulBinPointer < ulClipLimit)
                {
                    (*pulBinPointer)++;
                    ulNrExcess--;       /* reduce excess */
                }
            }
            pulHisto++;                 /* restart redistributing on other bin location */
        }
    } while ((ulNrExcess) && (ulNrExcess < ulOldNrExcess));
}

void Interpolate(uchar* pImage, int uiXRes, ulong* pulMapLU, ulong* pulMapRU,
                 ulong* pulMapLB, ulong* pulMapRB, uint uiXSize, uint uiYSize, uchar* pLUT)
{
    uint uiIncr = uiXRes - uiXSize;     /* Pointer increment after processing row */
    uchar GreyValue;
    uint uiNum = uiXSize * uiYSize;     /* Normalization factor */
    uint uiXCoef, uiYCoef, uiXInvCoef, uiYInvCoef, uiShift = 0;

    if (uiNum & (uiNum - 1))            /* If uiNum is not a power of two, use division */
    {
        for (uiYCoef = 0, uiYInvCoef = uiYSize; uiYCoef < uiYSize;
             uiYCoef++, uiYInvCoef--, pImage += uiIncr)
        {
            for (uiXCoef = 0, uiXInvCoef = uiXSize; uiXCoef < uiXSize;
                 uiXCoef++, uiXInvCoef--)
            {
                GreyValue = pLUT[*pImage];          /* get histogram bin value */
                *pImage++ = (uchar)((uiYInvCoef * (uiXInvCoef * pulMapLU[GreyValue]
                             + uiXCoef * pulMapRU[GreyValue])
                             + uiYCoef * (uiXInvCoef * pulMapLB[GreyValue]
                             + uiXCoef * pulMapRB[GreyValue])) / uiNum);
            }
        }
    }
    else
    {   /* avoid the division and use a right shift instead */
        while (uiNum >>= 1) uiShift++;              /* Calculate 2log of uiNum */
        for (uiYCoef = 0, uiYInvCoef = uiYSize; uiYCoef < uiYSize;
             uiYCoef++, uiYInvCoef--, pImage += uiIncr)
        {
            for (uiXCoef = 0, uiXInvCoef = uiXSize; uiXCoef < uiXSize;
                 uiXCoef++, uiXInvCoef--)
            {
                GreyValue = pLUT[*pImage];          /* get histogram bin value */
                *pImage++ = (uchar)((uiYInvCoef * (uiXInvCoef * pulMapLU[GreyValue]
                             + uiXCoef * pulMapRU[GreyValue])
                             + uiYCoef * (uiXInvCoef * pulMapLB[GreyValue]
                             + uiXCoef * pulMapRB[GreyValue])) >> uiShift);
            }
        }
    }
}

//===================================== Tests =========================================

#include "clahe.h"
#include "opencv/cv.h"
#include "opencv/cxcore.h"
#include "opencv/highgui.h"
#include <stdio.h>
#include <stdlib.h>     /* calloc, free */
#include <time.h>       /* clock */
#include <omp.h>

IplImage* Process1(IplImage* pImage)
{
    IplImage* HIS = cvCreateImage(cvGetSize(pImage), 8, 3);
    IplImage* H = cvCreateImage(cvGetSize(pImage), 8, 1);
    IplImage* S = cvCreateImage(cvGetSize(pImage), 8, 1);
    IplImage* I = cvCreateImage(cvGetSize(pImage), 8, 1);

    /* convert to HLS; cvLoadImage delivers BGR, but the R/B swap introduced by
       CV_RGB2HLS is undone by the matching constant on the way back, and only the
       L (lightness) plane is modified in between */
    cvCvtColor(pImage, HIS, CV_RGB2HLS);
    cvCvtPixToPlane(HIS, H, I, S, 0);       /* HLS plane order: H, L (called I here), S */

    IplImage* resimage = cvCreateImage(cvGetSize(HIS), 8, 3);

    long Y_Height, X_Width, Step;
    Y_Height = I->height;
    X_Width = I->width;
    Step = I->widthStep;
    unsigned char* data;
    data = (unsigned char*)I->imageData;
    unsigned int NrX, NrY, uiNrBins;
    NrX = 2;
    NrY = 2;
    uiNrBins = 256;
    double Min, Max;
    cvMinMaxLoc(I, &Min, &Max);

    IplImage* Image = cvCreateImage(cvGetSize(HIS), HIS->depth, HIS->nChannels);
    IplImage* pImageChannel[4] = { 0, 0, 0, 0 };
    IplImage* pImage_1;
    if (I)
    {
        pImage_1 = cvCreateImage(cvGetSize(HIS), HIS->depth, 1);
        cvCopy(I, pImage_1, NULL);
        /*S = ContrastExtend(S);*/
        /* note: passes the plain width, so rows are assumed unpadded (widthStep == width) */
        CLAHE((unsigned char*)(pImage_1->imageData), X_Width, Y_Height, 0, 255, NrX, NrY, uiNrBins, 1.2);
        CvScalar pixel, pixel_1;
        for (int m = 0; m < I->height; m++)
        {
            for (int n = 0; n < I->width; n++)
            {
                pixel_1 = cvGet2D(pImage_1, m, n);
                pixel.val[0] = pixel_1.val[0];
                cvSet2D(I, m, n, pixel);
            }
        }
        /* merge the planes back together */
        cvMerge(H, I, S, 0, Image);
        cvCvtColor(Image, resimage, CV_HLS2RGB);    /* back from HLS */
        cvReleaseImage(&pImage_1);
    }
    /* release resources */
    cvReleaseImage(&HIS);
    cvReleaseImage(&H);
    cvReleaseImage(&S);
    cvReleaseImage(&I);
    cvReleaseImage(&Image);
    Image = 0;
    return resimage;
}

void test1()
{
    IplImage* test = cvLoadImage("4.jpg");
    cvNamedWindow("test1", CV_WINDOW_AUTOSIZE);
    cvShowImage("test1", test);
    IplImage* res = Process1(test);
    cvNamedWindow("res1", CV_WINDOW_AUTOSIZE);
    cvShowImage("res1", res);
}

void test2()
{
    IplImage* test = cvLoadImage("1.bmp");
    int width = test->width;
    int height = test->height;
    int len = width * height;
    unsigned char* dataR = (unsigned char*)calloc(width * height, sizeof(unsigned char));
    unsigned char* dataG = (unsigned char*)calloc(width * height, sizeof(unsigned char));
    unsigned char* dataB = (unsigned char*)calloc(width * height, sizeof(unsigned char));

    /* split the interleaved image into three planar buffers; cvLoadImage stores BGR,
       so "dataR" actually holds the blue plane, which is harmless because each plane
       is equalized independently */
    for (int i = 0; i < height; i++)
    {
        unsigned char* dataR_ptr = dataR + i * width;
        unsigned char* dataG_ptr = dataG + i * width;
        unsigned char* dataB_ptr = dataB + i * width;
        char* test_ptr = test->imageData + i * test->widthStep;
        for (int j = 0; j < width; j++)
        {
            dataR_ptr[j] = test_ptr[j * 3];
            dataG_ptr[j] = test_ptr[j * 3 + 1];
            dataB_ptr[j] = test_ptr[j * 3 + 2];
        }
    }

    clock_t start, finish;
    double duration;
    start = clock();
    /* run CLAHE on the three planes in parallel; on many platforms clock() sums CPU
       time over threads, so the printed time may not reflect any wall-clock speedup */
#pragma omp parallel sections
    {
#pragma omp section
        CLAHE(dataR, width, height, 0, 255, 2, 8, 256, 2.0);
#pragma omp section
        CLAHE(dataG, width, height, 0, 255, 2, 8, 256, 2.0);
#pragma omp section
        CLAHE(dataB, width, height, 0, 255, 2, 8, 256, 2.0);
    }
    finish = clock();
    duration = (double)(finish - start) / CLOCKS_PER_SEC;
    printf("%lf\n", duration);

    /* re-interleave the processed planes into the result image */
    CvSize size = cvSize(width, height);
    IplImage* res = cvCreateImage(size, IPL_DEPTH_8U, 3);
    for (int i = 0; i < height; i++)
    {
        unsigned char* dataR_ptr = dataR + i * width;
        unsigned char* dataG_ptr = dataG + i * width;
        unsigned char* dataB_ptr = dataB + i * width;
        char* res_ptr = res->imageData + i * res->widthStep;
        for (int j = 0; j < width; j++)
        {
            res_ptr[j * 3] = dataR_ptr[j];
            res_ptr[j * 3 + 1] = dataG_ptr[j];
            res_ptr[j * 3 + 2] = dataB_ptr[j];
        }
    }
    cvNamedWindow("res2", CV_WINDOW_AUTOSIZE);
    cvShowImage("res2", res);
}

void test3()
{
    IplImage* test = cvLoadImage("1.jpg");
    int width = test->width;
    int height = test->height;
    cvNamedWindow("test3", CV_WINDOW_AUTOSIZE);
    cvShowImage("test3", test);

    /* copy the interleaved image into a contiguous buffer of width*3 bytes per row */
    unsigned char* data = (unsigned char*)calloc(width * height * 3, sizeof(unsigned char));
    for (int i = 0; i < height; i++)
    {
        unsigned char* inputData_ptr = data + i * width * 3;
        char* test_ptr = test->imageData + i * test->widthStep;
        for (int j = 0; j < width; j++)
        {
            inputData_ptr[j * 3] = test_ptr[j * 3];
            inputData_ptr[j * 3 + 1] = test_ptr[j * 3 + 1];
            inputData_ptr[j * 3 + 2] = test_ptr[j * 3 + 2];
        }
    }

    /* treat the interleaved color image as a grayscale image of width*3 columns */
    CLAHE(data, width * 3, height, 0, 255, 2, 2, 256, 2.0);

    CvSize size = cvSize(width, height);
    IplImage* res = cvCreateImage(size, IPL_DEPTH_8U, 3);
    for (int i = 0; i < height; i++)
    {
        unsigned char* outputData_ptr = data + i * width * 3;
        char* res_ptr = res->imageData + i * res->widthStep;
        for (int j = 0; j < width; j++)
        {
            res_ptr[j * 3] = outputData_ptr[j * 3];
            res_ptr[j * 3 + 1] = outputData_ptr[j * 3 + 1];
            res_ptr[j * 3 + 2] = outputData_ptr[j * 3 + 2];
        }
    }
    cvNamedWindow("res3", CV_WINDOW_AUTOSIZE);
    cvShowImage("res3", res);
}

int main()
{
    test2();
    cvWaitKey(0);
    return 0;
}
