// Tip (translated): to change the image emphasis, adjust the factor r, e.g. s.val[i]*scale*r
/************ center circular vector field * saddle vector field ***** convolved white-noise texture ***********/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <cv.h>
#include <highgui.h>
#include <iostream>
#include <fstream>
#include "netcdfcpp.h"

using namespace std;

#define SQUARE_FLOW_FIELD_SZ 400
#define DISCRETE_FILTER_SIZE 2048       // number of entries in the discrete filter LUT
#define LOWPASS_FILTR_LENGTH 10.00000f  // low-pass filter (LIC kernel) length
#define LINE_SQUARE_CLIP_MAX 100000.0f  // sentinel length used when clipping segments
#define VECTOR_COMPONENT_MIN 0.050000f  // minimum vector component magnitude

// NOTE(review): in the pasted source this declaration was swallowed by the
// trailing comment on the VECTOR_COMPONENT_MIN line, leaving SyntheszSaddle
// undeclared at its call site in main(); restored here.
void SyntheszSaddle(int n_xres, int n_yres, float* pVectr);
void NormalizVectrs(int n_xres, int n_yres, float* pVectr, float* vecSize);
void GenBoxFiltrLUT(int LUTsiz, float* p_LUT0, float* p_LUT1);
void MakeWhiteNoise(int n_xres, int n_yres, float* pNoise);
void FlowImagingLIC(int n_xres, int n_yres, float* pVectr, float* pNoise, float* pImage, float* p_LUT0, float* p_LUT1, float krnlen);
void WriteImage2PPM(int n_xres, int n_yres, float* pImage, char* f_name);
void color(int n_xres, int n_yres,float *pImage,float* vecSize);double maxvecmag;void main()
{int n_xres = 721;int n_yres = 281;// int n_xres = 1441;
// int n_yres = 561;// int n_xres = 2881;
// int n_yres = 1121;
//
// int n_xres = 5761;
// int n_yres = 2241;// int n_xres = 11521;
// int n_yres = 2241;float* pVectr = (float* ) malloc( sizeof(float ) * n_xres * n_yres * 2 );float* p_LUT0 = (float* ) malloc( sizeof(float ) * DISCRETE_FILTER_SIZE);float* p_LUT1 = (float* ) malloc( sizeof(float ) * DISCRETE_FILTER_SIZE);float* pNoise = (float* ) malloc( sizeof(float) * n_xres * n_yres );float* pImage = (float* ) malloc( sizeof(float) * n_xres * n_yres );float* vecSize = (float* ) malloc( sizeof(float) * n_xres*n_yres );SyntheszSaddle( n_xres, n_yres, pVectr);//CenterFiled(n_xres, n_yres, pVectr);//包含矢量归一化NormalizVectrs(n_xres, n_yres, pVectr,vecSize);//所以这就不用矢量归一化了,因为之前的马鞍矢量场生成函数里没有归一化,才有此步的MakeWhiteNoise(n_xres, n_yres, pNoise);GenBoxFiltrLUT(DISCRETE_FILTER_SIZE, p_LUT0, p_LUT1);FlowImagingLIC(n_xres, n_yres, pVectr, pNoise, pImage, p_LUT0, p_LUT1, LOWPASS_FILTR_LENGTH);color(n_xres, n_yres,pImage,vecSize);//WriteImage2PPM(n_xres, n_yres, pImage, "LIC.ppm");
// WriteImage2PPM(n_xres, n_yres, pImage, "LIC_721_281.ppm");
// WriteImage2PPM(n_xres, n_yres, pImage, "LIC1441_561.ppm");
// WriteImage2PPM(n_xres, n_yres, pImage, "LIC2281_1121.ppm");//WriteImage2PPM(n_xres, n_yres, pImage, "LIC5761_1121.ppm");//WriteImage2PPM(n_xres, n_yres, pImage, "LIC_11521_2241.ppm");//system("pause");free(pVectr); pVectr = NULL;free(p_LUT0); p_LUT0 = NULL;free(p_LUT1); p_LUT1 = NULL;free(pNoise); pNoise = NULL;free(pImage); pImage = NULL;
}/// 中心环形矢量场图形 synthesize a saddle-shaped vector field ///
void SyntheszSaddle(int n_xres, int n_yres, float* pVectr)
{ static const int LatNum = n_yres;static const int LonNum = n_xres;static const int Time = 1;static const int TMP = Time*LonNum*LatNum;NcFile dataReadFile("global_721_281.nc",NcFile::ReadOnly);
// NcFile dataReadFile("global_1441_561.nc",NcFile::ReadOnly);
// NcFile dataReadFile("global_2881_1121.nc",NcFile::ReadOnly);// NcFile dataReadFile("global_5761_2241.nc",NcFile::ReadOnly);//NcFile dataReadFile("global_11521_2241.nc",NcFile::ReadOnly);if (!dataReadFile.is_valid()){std::cout<<"couldn't open file!"<<std::endl;}double *Tmp_UX = new double[TMP];double *Tmp_VY = new double[TMP];double *Tmp_LAT = new double[TMP];double *Tmp_LON = new double[TMP];NcVar *dataTmp_LAT = dataReadFile.get_var("LAT"); NcVar *dataTmp_LON = dataReadFile.get_var("LONN359_361"); NcVar *dataTmp_UX = dataReadFile.get_var("UX"); NcVar *dataTmp_VY = dataReadFile.get_var("VY"); dataTmp_LAT->get(Tmp_LAT,LatNum,LatNum);dataTmp_LON->get(Tmp_LON,LonNum,LonNum);dataTmp_UX->get(Tmp_UX,Time,LatNum,LonNum);dataTmp_VY->get(Tmp_VY,Time,LatNum,LonNum);for(int j = 0; j < n_yres; j ++)for(int i = 0; i < n_xres; i ++){ int index = ( (n_yres - 1 - j) * n_xres + i ) << 1;//int index = j*n_yres+i;pVectr[index ] = Tmp_UX[j*LonNum+i];pVectr[index +1 ]= Tmp_VY[j*LonNum+i];} delete []Tmp_UX;delete []Tmp_VY;delete []Tmp_LAT;}/// normalize the vector field ///
// void NormalizVectrs(int n_xres, int n_yres, float* pVectr)
// {
//
//
// for(int j = 0; j < n_yres; j ++)
// for(int i = 0; i < n_xres; i ++)
// {
// int index = (j * n_xres + i) << 1;
// float vcMag = float( sqrt( double(pVectr[index] * pVectr[index] + pVectr[index + 1] * pVectr[index + 1]) ) );
//
// float scale = (vcMag == 0.0f) ? 0.0f : 1.0f / vcMag;//矢量大小归一化后的矢量值
// //pVectr[index ] *= scale;
// pVectr[index ]=pVectr[index ]*scale;
// // cout<<"pVectr[index ]="<<pVectr[index ];
// pVectr[index + 1] *= scale;
// //cout<<"pVectr[index ]="<<pVectr[index +1 ];
//
// }
// }
/// Normalize the vector field and record per-pixel magnitudes.
/// vecSize[j*n_xres+i] receives |v| for each pixel, and the global maxvecmag
/// is updated with the largest magnitude below the missing-value threshold.
void NormalizVectrs(int n_xres, int n_yres, float* pVectr, float* vecSize)
{
    for (int j = 0; j < n_yres; j++)
        for (int i = 0; i < n_xres; i++)
        {
            int index = (j * n_xres + i) << 1;
            float vcMag = float(sqrt(double(pVectr[index] * pVectr[index] +
                                            pVectr[index + 1] * pVectr[index + 1])));
            vecSize[j * n_xres + i] = vcMag;

            // Track the maximum magnitude, ignoring NetCDF missing values
            // (which show up as huge numbers — presumably fill values; verify).
            if (vcMag < 10000 && vcMag > maxvecmag)
                maxvecmag = vcMag;

            // Normalize, then stretch by 5.5 (empirical advection-step scale).
            float scale = (vcMag == 0.0f) ? 0.0f : 1.0f / vcMag;
            pVectr[index]     *= scale * 5.5;
            pVectr[index + 1] *= scale * 5.5;   // restored: this statement was swallowed by a trailing comment in the paste
        }
}
/// make white noise as the LIC input texture ///
// void MakeWhiteNoise(int n_xres, int n_yres, float* pNoise)
// {
// IplImage * NoiseImg=cvCreateImage(cvSize(n_xres,n_yres),IPL_DEPTH_8U,1);
// CvScalar s;
//
// for(int j = 0; j < n_yres; j ++)
// {
// for(int i = 0; i < n_xres; i ++)
//
// // for(int j = 0; j < n_yres; j=j +10)//产生稀疏白噪声
// // {
// // for(int i = 0; i < n_xres; i=i+10)
//
// {
// int r = rand();
// r = ( (r & 0xff) + ( (r & 0xff00) >> 8 ) ) & 0xff;
// pNoise[j * n_xres + i] = (float) r;
// s = cvGet2D(NoiseImg,i,j);
// s.val[0]=r;
// s.val[1]=r;
// s.val[2]=r;
// cvSet2D(NoiseImg,i,j,s);
// }
// }
//
// }void MakeWhiteNoise(int n_xres, int n_yres, float* pNoise)
{ for(int j = 0; j < n_yres; j ++)for(int i = 0; i < n_xres; i ++){ int r = rand();r = ( (r & 0xff) + ( (r & 0xff00) >> 8 ) ) & 0xff;pNoise[j * n_xres + i] = (float) r;}
}
/// generate box filter LUTs ///
void GenBoxFiltrLUT(int LUTsiz, float* p_LUT0, float* p_LUT1)
{ for(int i = 0; i < LUTsiz; i ++) p_LUT0[i] = p_LUT1[i] = i;
}void color(int n_xres,int n_yres, float* pImage,float* vecSize)
{IplImage * licImage = cvCreateImage(cvSize(n_xres,n_yres),IPL_DEPTH_8U,3);IplImage* img = cvLoadImage("11.jpg",1);int k = 0;double magind;double mag;double newMag;double x = 0.1;//x为非线性映射因子,且x!=1CvScalar colorTable[500];CvScalar s,s1;for (int i = 0;i < img->width;i++){s = cvGet2D(img,1,i);colorTable[i] =s;}for (int j=0;j<n_yres;j++){for (int i= 0;i<n_xres;i++){if (k>=img->width){k=0;}double scale= pImage[j * n_xres + i]/255.0f;成功生成彩色图像的关键mag = vecSize[j * n_xres + i];//********矢量大小归一化******if (mag<1000){magind = (mag/maxvecmag);}//非线性颜色增强LICnewMag =(pow(x,magind)-1)/(x-1);s = cvGet2D(licImage,j,i);//渐变颜色映射表int k = int(newMag*446); s1.val[0]=colorTable[k].val[0]*(k+1-newMag*446)+colorTable[k+1].val[0]*(newMag*446-k);s1.val[1]=colorTable[k].val[1]*(k+1-newMag*446)+colorTable[k+1].val[1]*(newMag*446-k);s1.val[2]=colorTable[k].val[2]*(k+1-newMag*446)+colorTable[k+1].val[2]*(newMag*446-k);s1.val[0]*=scale*2.1;?????????????????????????????????????可改s1.val[1]*=scale*2.1;s1.val[2]*=scale*2.1;// cout<<"s1.val[3]="<<s1.val[1]<<endl;cvSet2D(licImage,j,i,s1);}}//Mat AlphaImage= imread("s.jpg");//cv::Mat AlphaImage = imread("licImage",1);cvNamedWindow("lic_three channles",0);cvShowImage("lic_three channles",licImage);cvWaitKey(0);system("pause");cvDestroyWindow("lic_three channles");cvReleaseImage(&licImage);
}
/// write the LIC image to a PPM file ///
/// Writes pImage (n_xres * n_yres gray values in [0,255]) as a binary P6 PPM,
/// replicating the gray value into all three channels.
void WriteImage2PPM(int n_xres, int n_yres, float* pImage, char* f_name)
{
    FILE* o_file;
    // "wb": P6 PPM is a binary format; text mode would corrupt the pixel
    // stream on Windows (LF -> CRLF translation).
    if ((o_file = fopen(f_name, "wb")) == NULL)
    {
        printf("Can't open output file\n");
        return;
    }

    fprintf(o_file, "P6\n%d %d\n255\n", n_xres, n_yres);

    for (int j = 0; j < n_yres; j++)
        for (int i = 0; i < n_xres; i++)
        {
            unsigned char unchar = pImage[j * n_xres + i];   // gray texture value of this pixel
            fprintf(o_file, "%c%c%c", unchar, unchar, unchar);
        }

    fclose(o_file);
    o_file = NULL;
}
/// flow imaging (visualization) through Line Integral Convolution ///
void FlowImagingLIC(int n_xres, int n_yres, float* pVectr, float* pNoise, float* pImage, float* p_LUT0, float* p_LUT1, float krnlen)
{ int vec_id; ///ID in the VECtor buffer (for the input flow field)int advDir; ///ADVection DIRection (0: positive; 1: negative)int advcts; ///number of ADVeCTion stepS per direction (a step counter)int ADVCTS = int(krnlen * 3); ///MAXIMUM number of advection steps per direction to break dead loops //跳出死循环的条件float vctr_x; ///x-component of the VeCToR at the forefront pointfloat vctr_y; ///y-component of the VeCToR at the forefront pointfloat clp0_x; ///x-coordinate of CLiP point 0 (current)float clp0_y; ///y-coordinate of CLiP point 0 (current)float clp1_x; ///x-coordinate of CLiP point 1 (next )float clp1_y; ///y-coordinate of CLiP point 1 (next )float samp_x; ///x-coordinate of the SAMPle in the current pixelfloat samp_y; ///y-coordinate of the SAMPle in the current pixelfloat tmpLen; ///TeMPorary LENgth of a trial clipped-segmentfloat segLen; ///SEGment LENgthfloat curLen; ///CURrent LENgth of the streamlinefloat prvLen; ///PReVious LENgth of the streamline float W_ACUM; ///ACcuMulated Weight from the seed to the current streamline forefrontfloat texVal; ///TEXture VALuefloat smpWgt; ///WeiGhT of the current SaMPlefloat t_acum[2]; ///two ACcUMulated composite Textures for the two directions, perspectively 两个方向的卷积和float w_acum[2]; ///two ACcUMulated Weighting values for the two directions, perspectively 两个方向的权重和float* wgtLUT = NULL; ///WeiGhT Look Up Table pointing to the target filter LUT权重查找表float len2ID = (DISCRETE_FILTER_SIZE - 1) / krnlen; ///map a curve LENgth TO an ID in the LUT///for each pixel in the 2D output LIC image///for(int j = 0; j < n_yres; j ++)for(int i = 0; i < n_xres; i ++){ ///init the composite texture accumulators and the weight accumulators///每一个像素点为起始点,初始化一次权重和卷积和t_acum[0] = t_acum[1] = w_acum[0] = w_acum[1] = 0.0f;//初始化正反方向卷积和及权重和///for either advection direction///分别计算正反方向的卷积和及权重和for(advDir = 0; advDir < 2; advDir ++){ ///init the step counter, curve-length measurer, and streamline seed/////初始化当前方向上前进的步数和当前流线的总长advcts = 0;//前进的步数curLen 
= 0.0f;clp0_x = i + 0.5f;clp0_y = j + 0.5f;///access the target filter LUT///LUT显示查找表wgtLUT = (advDir == 0) ? p_LUT0 : p_LUT1;///until the streamline is advected long enough or a tightly spiralling center / focus is encountered///while( curLen < krnlen && advcts < ADVCTS ) //??????{///access the vector at the sample///vec_id = ( int(clp0_y) * n_xres + int(clp0_x) ) << 1;vctr_x = pVectr[vec_id ];vctr_y = pVectr[vec_id + 1];///in case of a critical point///遇到零向量,结束循环if( vctr_x == 0.0f && vctr_y == 0.0f ){ t_acum[advDir] = (advcts == 0) ? 0.0f : t_acum[advDir]; ///this line is indeed unnecessaryw_acum[advDir] = (advcts == 0) ? 1.0f : w_acum[advDir];break;}///negate the vector for the backward-advection case///相反的方向取相反的方向vctr_x = (advDir == 0) ? vctr_x : -vctr_x;vctr_y = (advDir == 0) ? vctr_y : -vctr_y;///clip the segment against the pixel boundaries --- find the shorter from the two clipped segments//////replace all if-statements whenever possible as they might affect the computational speed///segLen = LINE_SQUARE_CLIP_MAX;//cout<<"segLen="<<segLen<<endl;//cout<<"VECTOR_COMPONENT_MIN="<<LINE_SQUARE_CLIP_MAX<<endl;segLen = (vctr_x < -VECTOR_COMPONENT_MIN) ? ( int( clp0_x ) - clp0_x ) / vctr_x : segLen;//int(0.5)=0segLen = (vctr_x > VECTOR_COMPONENT_MIN) ? ( int( int(clp0_x) + 1.5f ) - clp0_x ) / vctr_x : segLen;segLen = (vctr_y < -VECTOR_COMPONENT_MIN) ? ( ( ( tmpLen = ( int( clp0_y) - clp0_y ) / vctr_y ) < segLen ) ? tmpLen : segLen ) : segLen;segLen = (vctr_y > VECTOR_COMPONENT_MIN) ?( ( ( tmpLen = ( int( int(clp0_y) + 1.5f ) - clp0_y ) / vctr_y ) < segLen ) ? tmpLen : segLen ) : segLen;///update the curve-length measurers///prvLen = curLen;curLen+= segLen;segLen+= 0.0004f;///check if the filter has reached either end///segLen = (curLen > krnlen) ? 
( (curLen = krnlen) - prvLen ) : segLen;///obtain the next clip point///clp1_x = clp0_x + vctr_x * segLen;clp1_y = clp0_y + vctr_y * segLen;///obtain the middle point of the segment as the texture-contributing sample///samp_x = (clp0_x + clp1_x) * 0.5f;samp_y = (clp0_y + clp1_y) * 0.5f;///obtain the texture value of the sample///texVal = pNoise[ int(samp_y) * n_xres + int(samp_x) ];///update the accumulated weight and the accumulated composite texture (texture x weight)///W_ACUM = wgtLUT[ int(curLen * len2ID) ];smpWgt = W_ACUM - w_acum[advDir]; w_acum[advDir] = W_ACUM; t_acum[advDir] += texVal * smpWgt;///update the step counter and the "current" clip point///advcts ++;clp0_x = clp1_x;clp0_y = clp1_y;///check if the streamline has gone beyond the flow field///if( clp0_x < 0.0f || clp0_x >= n_xres || clp0_y < 0.0f || clp0_y >= n_yres) break;} }///normalize the accumulated composite texture///texVal = (t_acum[0] + t_acum[1]) / (w_acum[0] + w_acum[1]);///clamp the texture value against the displayable intensity range [0, 255]texVal = (texVal < 0.0f) ? 0.0f : texVal;texVal = (texVal > 255.0f) ? 255.0f : texVal; pImage[j * n_xres + i] = (float) texVal;
// if (j * n_xres + i>400000)
// {
// cout<<"pImage["<<j * n_xres + i<<"]="<<pImage[j * n_xres + i]<<endl;
// } }
}
/*
 * NOTE(review): the text below is scraped blog-page boilerplate ("related
 * recommendations" and "latest articles" link lists), not part of the
 * program. It is preserved verbatim but fenced in a comment so it cannot be
 * mistaken for source code.
 *
改变图片强调可修改r,如s.val[i]*scale*r相关推荐
- jQuery动态改变图片显示大小(修改)
当我们要显示后台传过来若干个尺寸不一的图片时,为了保证图片大小的一致性及比例的协调,需要动态改变图片显示尺寸.通过搜索,我们可以从网上找到实现此功能的jQuery代码如下.这段代码可以使图片的大小保持 ...
- 怎么改变图片的尺寸大小?图片大小如何修改?
手机像素越来越高,拍摄出来的照片尺寸也越来越大,经常因为图片尺寸过大而导致照片无法上传,所以需要做一些图片大小修改.可以使用压缩图的图片改大小(在线修改图片尺寸大小工具-压缩图)功能来处理,而且这是一 ...
- 如何改变图片大小kb?图片尺寸怎么在线修改?
随着现在拍摄设备的像素越来越高,图片越来越清楚也就让图片的体积越来越大,在使用这些图片素材的时候就经常会遇到图片太大无法发送或者上传的情况,那么这时候应该怎么缩小图片的大小kb呢?今天来教给大家一个图 ...
- 如何改变图片的尺寸大小得到一张缩小后的图片
声明:本篇博客和代码无关,请不要误解. 在平时开发过程中,总会需要一些特定大小的图片,一般设计师会给我们提供,但是有时候我们有一张大图,只是想得到一张它的缩小版,我们还让设计师帮我们,虽然也可以,但是 ...
- ImageButton点击按钮改变图片
一.ImageButton点击按钮改变图片 **************************ImageButton点击按钮改变图片 有两种写法***************************
- php 更改图片后缀名,PHP在图片上传时如何改变图片后缀为jpg
PHP在图片上传时改变图片后缀为jpg的方法:首先获取上传的图片后缀,并获取图片在电脑上的临时存储位置:然后获取临时存储文件的后缀:最后实现后缀转换,代码为[case 'im{ / S # p |ag ...
- python --压缩图片不改变图片尺寸
方法1 from PIL import Image import osdef compress_image(infile, outfile, quality=50):""" ...
- Android 使用ColorMatrix改变图片颜色
ColorMatrix的颜色矩阵介绍 颜色矩阵M是一个5*4的矩阵,在Android中,颜色矩阵M是以一维数组m=[a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t]的方 ...
- Android程序如何在代码中改变图片原有的颜色
最近一边找工作一边完善之前的项目.之前安卓初中级的项目是模仿酷狗音乐播放器的.下载一个apk文件,改后缀,解压,然后根据官方应用的布局,用得到的图片照着做出来.记得酷狗首页有好几种主要图标,解压后得到 ...
- python 改变图片尺寸
python 改变图片尺寸 #!/usr/bin/env python # -*- encoding: utf-8 -*- """ #!/usr/bin/env pyth ...
最新文章
- CSDN 插件限时内测,新用户抢永久免费去广告特权!
- ReactiveCocoa
- android培训内容明细,记录Android开发学习
- 20130320java基础学习笔记-dos命令及java临时环境变量配置
- 定位--position属性
- 如何使用一台PC搭建可以在线迁移的KVM学习环境
- android音量图标不见了,电脑声音图标不见了如何解决?
- 前端学习(3142):react-hello-react之父组件render
- python3绘图_python3绘图示例2(基于matplotlib:柱状图、分布图、三角图等)
- 大数据笔记11:MapReduce的运行流程
- AJAX(XMLHttpRequest)进行跨域请求方法详解(三)
- C++ 类的成员函数指针 ( function/bind )
- 题解 P1378 【油滴扩展】
- Layui数据表格添加时间控件
- 【寻找最佳小程序】02期:腾讯旅游首款小工具“旅行小账本”——创意及研发过程大起底
- java 实现站内信_群发站内信实现
- 基于微信小程序的国产动漫论坛小程序
- 暨南大学计算机专业考研录取目录,21考研必备 暨南大学2020计算机类研究生招生情况汇总...
- 微信小程序:个人页面/我的页面/资料页面
- 集成机器学习服务上架华为应用市场指南
*/