利用随机函数确定(x,y),以其为中心,选择矩形区域矢量场,进行LIC卷积,得到风场纹理图像,再贴图到地球上。

// LiuLppm.cpp : 定义控制台应用程序的入口点。
////#include "stdafx.h"
#include <String>
#include <string.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include "netcdfcpp.h"
#include <cv.h>
#include <ml.h>
#include <highgui.h>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace std;
using namespace cv;
#define  SQUARE_FLOW_FIELD_SZ   400
#define  DISCRETE_FILTER_SIZE   2048
#define  LOWPASS_FILTR_LENGTH   10.00000f
#define  LINE_SQUARE_CLIP_MAX   100000.0f
#define  VECTOR_COMPONENT_MIN   0.050000f
#define x_width 70
#define y_height 180
// Dimensions (in grid cells) of the rectangular sub-region that is cut out
// of the global wind field and rendered with LIC.
int x_size = x_width;
int y_size = y_height;

// Forward declarations ----------------------------------------------------
void SyntheszSaddle(int x0, int y0, double* pVectr);
void NormalizVectrs(int x_size, int y_size, double* pVectr, float* vecSize);
void GenBoxFiltrLUT(int LUTsiz, float* p_LUT0, float* p_LUT1);
void MakeWhiteNoise(int n_xres, int n_yres, float* pNoise);
void FlowImagingLIC(int n_xres, int n_yres, double* pVectr, float* pNoise,
                    float* pImage, float* p_LUT0, float* p_LUT1, float krnlen);
void WriteImage2PPM(int n_xres, int n_yres, float* pImage, char* f_name);
void color(int n_xres, int n_yres, float* pImage, float* vecSize);
double maxvecmag;void   main()
{float m = 0;float n = 0;int              n_xres = 721;int               n_yres = 281;double*           pVectr = (double*         ) malloc( sizeof(double        ) * n_xres * n_yres * 2 );float*          p_LUT0 = (float*        ) malloc( sizeof(float        ) * DISCRETE_FILTER_SIZE);float*         p_LUT1 = (float*        ) malloc( sizeof(float        ) * DISCRETE_FILTER_SIZE);float* pNoise = (float* ) malloc( sizeof(float) * n_xres * n_yres     );float*    pImage = (float* ) malloc( sizeof(float) * n_xres * n_yres     );float*    vecSize = (float* ) malloc( sizeof(float) * n_xres*n_yres );int x0 = 0;int y0 = 0;
//首先随进产生视点坐标位置
//  x0 = rand()722-x_size/2;
//  y0 = rand()%282-y_size/2;x0 =( 721%rand())*(rand()%30)-x_size/2;y0 = 281%rand()-y_size/2;cout<<x0<<endl;cout<<y0<<endl;//截取矢量场的某一部分SyntheszSaddle(x0,y0, pVectr);NormalizVectrs(x_size,y_size, pVectr,vecSize);MakeWhiteNoise(x_size, y_size, pNoise);GenBoxFiltrLUT(DISCRETE_FILTER_SIZE, p_LUT0, p_LUT1);FlowImagingLIC(x_size,y_size, pVectr, pNoise, pImage, p_LUT0, p_LUT1, LOWPASS_FILTR_LENGTH);color(x_size,y_size,pImage,vecSize);//WriteImage2PPM(x_size, y_size, pImage, "LIC.ppm");free(pVectr);   pVectr = NULL;free(p_LUT0);    p_LUT0 = NULL;free(p_LUT1);    p_LUT1 = NULL;free(pNoise);    pNoise = NULL;free(pImage);    pImage = NULL;
}
///     synthesize a saddle-shaped vector field     ///
void    SyntheszSaddle( int x0,int y0, double*  pVectr)
{
//  static const int LatNum = 560;
//  static const int LonNum =1440;NcFile dataReadFile("data/global.nc",NcFile::ReadOnly);if (!dataReadFile.is_valid()){std::cout<<"couldn't open file!"<<std::endl;}int LonNum=0;int LatNum=0;for (int i = 0;i<=dataReadFile.num_dims()-1;i++){//        String.Format(String+"dimid=%d, name = %s length=%d/n",dataReadFile.get_dim(i).id(),//            dataReadFile.get_dim(i)->name(),dataReadFile.get_dim(i)->size());//   cout<<"lat="<<dataReadFile.get_dim(i)->name()<<"    "<<dataReadFile.get_dim(i)->size()<<endl;if (i==0){LonNum  =dataReadFile.get_dim(i)->size();}else if (i==1){LatNum=dataReadFile.get_dim(i)->size();}}//    double *lat = new double[LatNum];
//  double *lon = new double[LonNum];//读取Variables变量数据/*for (int i = 0; i<dataReadFile.num_vars();i++){cout<<dataReadFile.get_var(i)->id()<<" "<<dataReadFile.get_var(i)->name()<<"  "<<dataReadFile.get_var(i)->num_dims()<<"   "<<endl;}*///定义二维数组/*float ** temp=new float*[LatNum];for (int i = 0;i<LatNum;i++){temp[i]=new float[LonNum];} *///如何读取二维经纬度交点上的数值*****************************************
//  float *rhs=new float[LonNum*LatNum];
//  long array[2];
//  array[0] = LatNum;
//  array[1] = LonNum;
//
// dataReadFile.get_var("june")->get(rhs,array);
//
//  for (int i = 0;i<LatNum;i++)
//  {
//      for (int j=0;j<LonNum;j++)
//      {
//          dataReadFile.get_var("latitude")->get(lat,LatNum);
//          dataReadFile.get_var("longitude")->get(lon,LonNum);
//          //cout<<"<"<<lat[i]<<","<<lon[j]<<")"<<rhs[i*LonNum+j]<<endl;
//          if (rhs[i*LonNum+j] != -9999)
//          {
//              cout<<i<<" "<<j<<endl;
//              cout<<"<"<<lat[i]<<","<<lon[j]<<")"<<rhs[i*LonNum+j]<<endl;
//          }
//      }
//  }static const int Time = 1;static const int TMP = Time*LonNum*LonNum;double *Tmp_UX = new double[TMP];double *Tmp_VY = new double[TMP];double *Tmp_LAT = new double[TMP];double *Tmp_LON = new double[TMP];NcVar *dataTmp_LAT = dataReadFile.get_var("LAT");   NcVar *dataTmp_LON = dataReadFile.get_var("LONN359_361");    NcVar *dataTmp_UX = dataReadFile.get_var("UX");  NcVar *dataTmp_VY = dataReadFile.get_var("VY");  dataTmp_LAT->get(Tmp_LAT,LatNum,LatNum);dataTmp_LON->get(Tmp_LON,LonNum,LonNum);dataTmp_UX->get(Tmp_UX,Time,LatNum,LonNum);dataTmp_VY->get(Tmp_VY,Time,LatNum,LonNum);
//  for(int  j = y0;  j < (y0+y_size);  j ++)
//      for(int  i =x0;  i < (x0+x_size);  i ++)for (int j = 0;j<y_size;j++)for(int i = 0;i <x_size;i++){    //int    index = (  (y0+y_size - 1 - j) * x_size + i  )  <<  1;//int index = j*n_yres+i;int index = (j*x_size+i)<<1;pVectr[index    ] = Tmp_UX[(y0+j)*LonNum+(x0+i)];pVectr[index +1   ]= Tmp_VY[(y0+j)*LonNum+(x0+i)];}
}
///     normalize the vector field     ///
///     normalize the vector field     ///
// Records each vector's magnitude in vecSize, tracks the largest plausible
// magnitude (values >= 100 are treated as fill/outliers) in the global
// maxvecmag, and rescales every vector to a fixed length of 5.5 so the LIC
// streamlines advance evenly.
void NormalizVectrs(int x_size, int y_size, double* pVectr, float* vecSize)
{
    for (int row = 0; row < y_size; row++)
    {
        for (int col = 0; col < x_size; col++)
        {
            const int cell = row * x_size + col;
            const int idx  = cell << 1;
            const double u = pVectr[idx];
            const double v = pVectr[idx + 1];
            const float magnitude = float(sqrt(u * u + v * v));

            vecSize[cell] = magnitude;

            // Ignore fill values when tracking the maximum magnitude.
            if (magnitude < 100 && magnitude > maxvecmag)
                maxvecmag = magnitude;

            // Guard against division by zero at critical points.
            const float scale = (magnitude == 0.0f) ? 0.0f : 1.0f / magnitude;
            pVectr[idx]     *= scale * 5.5;
            pVectr[idx + 1] *= scale * 5.5;
        }
    }
}
void color(int n_xres,int n_yres,float *pImage,float* vecSize)
{IplImage * licImage = cvCreateImage(cvSize(n_xres,n_yres),IPL_DEPTH_8U,3);IplImage* img = cvLoadImage("11.jpg",1);int k = 0;double magind;double mag;double newMag;double x = 0.1;//x为非线性映射因子,且x!=1CvScalar colorTable[500];CvScalar s,s1;for (int i = 0;i < img->width;i++){s = cvGet2D(img,1,i);colorTable[i] =s;}for (int j=0;j<n_yres;j++){for (int i= 0;i<n_xres;i++){if (k>=img->width){k=0;}double  scale=  pImage[j * n_xres + i];mag = vecSize[j * n_xres + i];//********矢量大小归一化******if (mag<100){magind = (mag/maxvecmag);}//非线性颜色增强LICnewMag =(pow(x,magind)-1)/(x-1);s = cvGet2D(licImage,j,i);//渐变颜色映射表int k = int(newMag*446);s1.val[0]=colorTable[k].val[0]*(k+1-newMag*446)+colorTable[k+1].val[0]*(newMag*446-k);s1.val[1]=colorTable[k].val[1]*(k+1-newMag*446)+colorTable[k+1].val[1]*(newMag*446-k);s1.val[2]=colorTable[k].val[2]*(k+1-newMag*446)+colorTable[k+1].val[2]*(newMag*446-k);s1.val[0]*=scale;s1.val[1]*=scale;s1.val[2]*=scale;/*cout<<"s1.val[3]="<<s1.val[3]<<endl;*/cvSet2D(licImage,j,i,s1);}}//Mat AlphaImage= imread("s.jpg");//cv::Mat AlphaImage = imread("licImage",1);cvNamedWindow("lic_three channles",0);cvShowImage("lic_three channles",licImage);cvWaitKey(0);system("pause");cvDestroyWindow("lic_three channles");cvReleaseImage(&licImage);
}///        make white noise as the LIC input texture     ///
// Fills pNoise (n_xres * n_yres floats) with pseudo-random grey levels in
// [0, 255]; the two low bytes of each rand() value are folded together
// before masking, exactly as in the classic LIC reference implementation.
void MakeWhiteNoise(int n_xres, int n_yres, float* pNoise)
{
    for (int row = 0; row < n_yres; row++)
    {
        for (int col = 0; col < n_xres; col++)
        {
            int r = rand();
            int folded = ((r & 0xff) + ((r & 0xff00) >> 8)) & 0xff;
            pNoise[row * n_xres + col] = (unsigned char) folded;
        }
    }
}///        generate box filter LUTs     ///
// Builds two identical ramp lookup tables (p_LUT0[i] = p_LUT1[i] = i), used
// by FlowImagingLIC as cumulative box-filter weights for the forward and
// backward advection directions.
void GenBoxFiltrLUT(int LUTsiz, float* p_LUT0, float* p_LUT1)
{
    for (int idx = 0; idx < LUTsiz; idx++)
    {
        const float w = (float) idx;
        p_LUT0[idx] = w;
        p_LUT1[idx] = w;
    }
}///        write the LIC image to a PPM file     ///
// Writes pImage (one grey value per pixel) as a binary PPM (P6) file:
// each float sample is truncated to a byte and duplicated into R, G and B.
void WriteImage2PPM(int n_xres, int n_yres, float* pImage, char* f_name)
{
    FILE* o_file;
    // BUG FIX: P6 is a binary format; the file must be opened in binary mode
    // ("wb"), otherwise newline translation corrupts the image on Windows.
    if ((o_file = fopen(f_name, "wb")) == NULL)
    {
        printf("Can't open output file\n");
        return;
    }

    fprintf(o_file, "P6\n%d %d\n255\n", n_xres, n_yres);

    for (int j = 0; j < n_yres; j++)
        for (int i = 0; i < n_xres; i++)
        {
            unsigned char unchar = pImage[j * n_xres + i];
            fprintf(o_file, "%c%c%c", unchar, unchar, unchar);
        }

    fclose(o_file);
    o_file = NULL;
}///        flow imaging (visualization) through Line Integral Convolution     ///
void    FlowImagingLIC(int     n_xres,  int     n_yres,  double*  pVectr,  float*  pNoise,  float*  pImage,  float*  p_LUT0,  float*  p_LUT1,  float   krnlen)
{   int     vec_id;                     ///ID in the VECtor buffer (for the input flow field)int        advDir;                     ///ADVection DIRection (0: positive;  1: negative)int       advcts;                     ///number of ADVeCTion stepS per direction (a step counter)int      ADVCTS = int(krnlen * 3);  ///MAXIMUM number of advection steps per direction to break dead loops  //跳出死循环的条件float vctr_x;                     ///x-component  of the Vector at the forefront pointfloat   vctr_y;                     ///y-component  of the Vector at the forefront pointfloat   clp0_x;                     ///x-coordinate of CLiP point 0 (current)float  clp0_y;                     ///y-coordinate of CLiP point 0 (current)float  clp1_x;                     ///x-coordinate of CLiP point 1 (next   )float  clp1_y;                     ///y-coordinate of CLiP point 1 (next   )float  samp_x;                     ///x-coordinate of the Sample in the current pixelfloat samp_y;                     ///y-coordinate of the Sample in the current pixelfloat tmpLen;                     ///Temporary Length of a trial clipped-segment实验剪辑片段的临时长度float  segLen;                     ///Segment   Lengthfloat    curLen;                     ///Current   Length of the streamlinefloat  prvLen;                     ///Previous  Length of the streamline       float   W_ACUM;                     ///Accumulated Weight from the seed to the current streamline forefrontfloat    texVal;                     ///Texture Valuefloat   smpWgt;                     ///Weight of the current Samplefloat    t_acum[2];                  ///two Accumulated composite Textures for the two directions, perspectively 两个方向的纹理卷积累加和float   w_acum[2];                  ///two Accumulated Weighting values   for the two directions, perspectively 两个方向的权重和float*  wgtLUT = NULL;             ///WeiGhT Look Up Table pointing to the target filter LUT权重查找表float len2ID = (DISCRETE_FILTER_SIZE - 1) / krnlen;  ///map a curve Length To an ID in 
the LUTdouble scale;//颜色映射表比例double maxmag;double magind;double mag;double x = 0.1;//x为非线性映射因子,且x!=1IplImage * licImage = cvCreateImage(cvSize(y_size,x_size),IPL_DEPTH_8U,3);CvScalar s;///for each pixel in the 2D output LIC image///对输出图像的每一个像素for(int  j = 0;   j < y_size;  j ++){for(int  i = 0;    i < x_size;  i ++){   ///init the composite texture accumulators and the weight accumulators///每一个像素点为起始点,初始化一次权重和卷积和t_acum[0] = t_acum[1] = w_acum[0] = w_acum[1] = 0.0f;//初始化正反方向卷积和及权重和///for either advection direction///分别计算正反方向的卷积和及权重和for(advDir = 0;  advDir < 2;  advDir ++){  ///init the step counter, curve-length measurer, and streamline seed/////初始化当前方向上前进的步数和当前流线的总长advcts = 0;//前进的步数curLen = 0.0f;clp0_x = i + 0.5f;//当前点的坐标clp0_y = j + 0.5f;///access the target filter LUT///LUT显示查找表wgtLUT = (advDir == 0) ? p_LUT0 : p_LUT1;//当前噪声点所对应的权重系数///until the streamline is advected long enough or a tightly  spiralling center / focus is encountered///while( curLen < krnlen && advcts < ADVCTS ) //??????{///access the vector at the sample///vec_id = ( int(clp0_y) * x_size + int(clp0_x) ) << 1;//vec_id相当于indexvctr_x = pVectr[vec_id    ];//clp0_y相当于当前像素列坐标,clp0_x相当于当前像素的横坐标vctr_y = pVectr[vec_id + 1];///in case of a critical point///遇到零向量,结束循环if( vctr_x == 0.0f && vctr_y == 0.0f ){  t_acum[advDir] = (advcts == 0) ? 0.0f : t_acum[advDir];         ///this line is indeed unnecessaryw_acum[advDir] = (advcts == 0) ? 1.0f : w_acum[advDir];break;}///negate the vector for the backward-advection case///相反的方向取相反的方向vctr_x = (advDir == 0) ? vctr_x : -vctr_x;//因为矢量是用x,y方向的值合成的,所以反向的值就是负的vctr_y = (advDir == 0) ? 
vctr_y : -vctr_y;//这儿可以就计算矢量大小、归一化运算????????????前面已经归一化了//。。。。。。////                   mag= sqrt(vctr_x*vctr_x+vctr_y*vctr_y);//                     if (mag>maxmag)//                    {//                         maxmag=mag;//                  }///clip the segment against the pixel boundaries --- find the shorter from the two clipped segments//////replace  all  if-statements  whenever  possible  as  they  might  affect the computational speed///影响算法计算速度segLen = LINE_SQUARE_CLIP_MAX;segLen = (vctr_x < -VECTOR_COMPONENT_MIN) ? ( int(     clp0_x         ) - clp0_x ) / vctr_x : segLen;//int(0.5)=0segLen = (vctr_x >  VECTOR_COMPONENT_MIN) ? ( int( int(clp0_x) + 1.5f ) - clp0_x ) / vctr_x : segLen;segLen = (vctr_y < -VECTOR_COMPONENT_MIN) ?   (      (    (  tmpLen = ( int(     clp0_y)          - clp0_y ) / vctr_y  )  <  segLen    ) ? tmpLen : segLen      ) : segLen;segLen = (vctr_y >  VECTOR_COMPONENT_MIN) ?(      (    (  tmpLen = ( int( int(clp0_y) + 1.5f ) - clp0_y ) / vctr_y  )  <  segLen    ) ? tmpLen : segLen      ) : segLen;///update the curve-length measurers///prvLen = curLen;curLen+= segLen;//segLen是单步步长segLen+= 0.00001f;//步长???// segLen+= 0.0001f;//步长???///check if the filter has reached either end///segLen = (curLen > krnlen) ? 
( (curLen = krnlen) - prvLen ) : segLen;///obtain the next clip point///clp1_x = clp0_x + vctr_x * segLen;clp1_y = clp0_y + vctr_y * segLen;///obtain the middle point of the segment as the texture-contributing sample///samp_x = (clp0_x + clp1_x) * 0.5f;samp_y = (clp0_y + clp1_y) * 0.5f;///obtain the texture value of the sample///texVal = pNoise[ int(samp_y) * x_size + int(samp_x) ];///update the accumulated weight and the accumulated composite texture (texture x weight)///W_ACUM = wgtLUT[ int(curLen * len2ID) ];smpWgt = W_ACUM - w_acum[advDir];         w_acum[advDir]  = W_ACUM;                              t_acum[advDir] += texVal * smpWgt;//当前噪声点的权重系数///update the step counter and the "current" clip point///advcts ++;clp0_x = clp1_x;clp0_y = clp1_y;///check if the streamline has gone beyond the flow field///if( clp0_x < 0.0f || clp0_x >= x_size || clp0_y < 0.0f || clp0_y >= y_size)  break;}    }///normalize the accumulated composite texture///texVal = (t_acum[0] + t_acum[1]) / (w_acum[0] + w_acum[1]);//cout<<t_acum[0] + t_acum[1]<<endl;///clamp the texture value against the displayable intensity range [0, 255]texVal = (texVal <   0.0f) ?   0.0f : texVal/255;texVal = (texVal > 255.0f) ? 255.0f : texVal; pImage[j * x_size + i] = (float) texVal;//cout<<pImage[j * x_size + i]<<endl;}    }}

显示风场的某一局部区域,实现多分辨率 —— 相关推荐

  1. html局部可复制,截取网页局部区域css样式的方法和系统的制作方法

    截取网页局部区域css样式的方法和系统的制作方法 [技术领域] [0001]本发明涉及计算机网络技术领域,特别是涉及一种截取网页局部区域CSS样式的方法和系统. [背景技术] [0002]CSS(Ca ...

  2. 【Opencv系列】之显示图像以及使用鼠标截取图像局部区域进行放大

    序  本文主要介绍通过Opencv显示一副图像,同时又可以使用鼠标左键框选局部区域且放大一倍: 1. 使用IplImage的示例代码 #include <stdio.h> #include ...

  3. 视频局部区域的马赛克处理

    在电视采访中,有时候一些采访对象不愿意抛头露面.这种情况下,被采访者可能会背对摄像镜头:但更通常的做法是,被采访者仍然面对镜头,而在电视节目播出时对采访对象的面部进行马赛克处理.这种马赛克处理,使观众 ...

  4. mars3d-canvans风向图支持自定义绘制局部区域

    场景 在河道范围内根据 udata 和 vdata ,生成流向粒子图.不是矩形范围的局部区域. 问题 1.mars3d-canvans风向图-局部区域 支持自定义绘制局部区域吗? 2.或者有其他方式可 ...

  5. (OpenCV+Python)--图片局部区域像素值处理(改进版)

    上一个版本看这里:<Python+OpenCV实现[图片]局部区域像素值处理> 上个版本的代码虽然实现了我需要的功能,但还是走了很多弯路,我意识到图片本就是数组形式,对于8位灰度图,通道数 ...

  6. Bootstrap中DropDown插件显示下拉列表,点击下拉列表区域,不会再自动关闭。

    目标: Bootstrap中DropDown插件显示下拉列表,点击下拉列表区域,不会再自动关闭. 参考:http://v3.bootcss.com/javascript/#dropdowns    / ...

  7. 在ANSYS workbench中如何对物体局部区域进行网格细密化

    版权声明:本文为博主原创文章,转载请附源链接. 一.我们需要知道的 在本文内容之前,需要说明的是,我们利用ANSYS workben进行仿真分析时,无论是什么分析,流体分析,模态分析,静力分析等.注意 ...

  8. NVIDIA控制面板打开报错,提示nvcplui.exe应用程序错误并显示传递给系统调用的数据区域太小

    打开NVIDIA控制面板时提示错误,提示nvcplui.exe应用程序错误并显示传递给系统调用的数据区域太小 解决方法:打开报错提示中程序所在路径 我电脑对应路径如下图所示 在电脑中进入对应路径,可能 ...

  9. EDIUS中的局部区域该怎么进行模糊

    我们常常会在新闻视频中看到视频局部被做成模糊的效果,比如模糊人眼,使其不被认出.那么在EDIUS中,有什么办法可以实现局部区域模糊呢?在这篇文章里,小编要向大家展示EDIUS手绘遮罩特效的强大之处,相 ...

  10. wxpython显示图片_wxpython下图片局部显示的方法

    我想要显示图片其中的一部分,其他不想显示的画面不显示出来!摸索了好久,发现了以下两种方法: 1.画个矩形(或其他图形也行)把你不想显示出来的画面遮住!如何遮呢?就是先加载图片,接着再在想要遮住的地方画 ...

最新文章

  1. 仿抖音底部导航效果(二)
  2. mysql 开启不严谨模式,mysql – 为什么innodb严格模式无法启用?
  3. 离散数学--二元关系总结
  4. 滑动窗口算法应用及详解
  5. activemq的使用(四)
  6. 线程故事:Web应用程序中的ThreadLocal
  7. linux修改组的选项名字为,Linux用户、组及权限管理浅析
  8. 使用HTML5的Canvas画布来剪裁用户头像
  9. CentOS镜像中替换安装镜像的小系统的内核方法
  10. getTickCount()函数 区别GetTickCount()函数
  11. 有监督对比学习在分类任务中的应用 Supervised Contrastive Learning
  12. css中关于单行文本溢出部分用省略号显示
  13. Spring boot微服务项目中上传图片报错,The field file exceeds its maximum permitted size of 1048576 bytes.
  14. CVE-2020-7961 Liferay Portal 命令执行漏洞
  15. RPS基准点系统 2020
  16. SSL 1653 数字游戏
  17. DBeaver Read-only:No corresponding table column
  18. 资金核对平台的发展历程
  19. 莫纳什大学计算机专业排名,澳大利亚大学计算机专业排名
  20. java加载顺序_类加载过程中几个重点执行顺序整理

热门文章

  1. Knowledge Distillation论文阅读之:综述文章:Knowledge Distillation: A Survey(未完待续····)
  2. JavaScript基础学习总结(一) 适合小白
  3. python selenium 异常:selenium.common.exceptions.ElementClickInterceptedException
  4. iPhoneSE3定价或跌穿3K,苹果不给安卓手机活路了?
  5. 广州宽带市场割喉战:电信地狱价小企业陷两难
  6. 两台计算机共享鼠标,总算发现什么是双模键盘(两台电脑共用一套鼠标键盘)
  7. 关于华为AR/HUAWEI AR Engine
  8. 苹果屏幕录制怎么没有声音_苹果6plus没有声音怎么回事
  9. MATLAB的变换器毕业设计,基于matlab的反激变换器分析与设计毕业设计doc.docx
  10. word去掉标题前面的黑点