Displaying an image

#include "StdAfx.h"
#include <string>
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;
using namespace std;

int main()
{
    string imageName = "lena.jpg";
    // read the image
    Mat img = imread(imageName, CV_LOAD_IMAGE_COLOR);
    // bail out if the image could not be read
    if (img.empty())
    {
        cout << "Could not open or find the image!" << endl;
        return -1;
    }
    // create a window
    namedWindow("lena", CV_WINDOW_AUTOSIZE);
    // show the image
    imshow("lena", img);
    // wait until any key is pressed
    waitKey();
    return 0;
}
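The CV_LOAD_IMAGE_* and CV_WINDOW_* constants come from the old C API and were removed in later OpenCV releases. A sketch of the same program with the OpenCV 3/4 names (assuming a newer OpenCV than the one used in this post):

#include <string>
#include <iostream>
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

int main()
{
    string imageName = "lena.jpg";
    Mat img = imread(imageName, IMREAD_COLOR);   // replaces CV_LOAD_IMAGE_COLOR
    if (img.empty())
    {
        cout << "Could not open or find the image!" << endl;
        return -1;
    }
    namedWindow("lena", WINDOW_AUTOSIZE);        // replaces CV_WINDOW_AUTOSIZE
    imshow("lena", img);
    waitKey();
    return 0;
}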

Loading an image, converting it to grayscale, and saving

#include "StdAfx.h"
#include <cv.h>
#include <highgui.h>
#include <string>

using namespace cv;
using namespace std;

int main()
{
    const char* imageName = "lena.jpg";
    Mat image = imread(imageName, 1);
    if (!image.data)
    {
        cout << "Could not open or find the image!" << endl;
        return -1;
    }
    Mat gray_image;
    string grayImageName = "lena_gray";
    // imread loads the channels in BGR order, so convert BGR to grayscale
    cvtColor(image, gray_image, CV_BGR2GRAY);
    // save the converted image
    imwrite("../../lena_gray.jpg", gray_image);
    // one window for the original image, one for the converted image
    namedWindow(imageName, CV_WINDOW_AUTOSIZE);
    namedWindow(grayImageName, CV_WINDOW_AUTOSIZE);
    imshow(imageName, image);
    imshow(grayImageName, gray_image);
    waitKey(0);
    return 0;
}
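If only the grayscale result is needed, the conversion can also be done at load time, which skips the explicit cvtColor step. A minimal sketch, assuming the same lena.jpg test file:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main()
{
    // the grayscale flag tells imread to convert while decoding
    Mat gray = imread("lena.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    if (gray.empty()) return -1;
    imwrite("lena_gray.jpg", gray);
    return 0;
}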

Dilation example

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
    // load the source image
    Mat image = imread("1.jpg");
    // create the windows
    namedWindow("Original - dilation");
    namedWindow("Result - dilation");
    // show the source image
    imshow("Original - dilation", image);
    // build the structuring element (kernel)
    Mat element = getStructuringElement(MORPH_RECT, Size(15, 15));
    Mat out;
    // dilate
    dilate(image, out, element);
    // show the result
    imshow("Result - dilation", out);
    waitKey(0);
    return 0;
}

Erosion example

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
    // load the source image
    Mat image = imread("1.jpg");
    // create the windows
    namedWindow("Original - erosion");
    namedWindow("Result - erosion");
    // show the source image
    imshow("Original - erosion", image);
    // build the structuring element (kernel)
    Mat element = getStructuringElement(MORPH_RECT, Size(15, 15));
    Mat out;
    // erode
    erode(image, out, element);
    // show the result
    imshow("Result - erosion", out);
    waitKey(0);
    return 0;
}

Combined dilation and erosion example

#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>

using namespace std;
using namespace cv;

Mat g_srcImage, g_dstImage;        // source image and result image
int g_nTrackbarNumer = 0;          // 0: erode, 1: dilate
int g_nStructElementSize = 3;      // size of the structuring element (kernel)

void Process();                         // performs the erosion or dilation
void on_TrackbarNumChange(int, void*);  // trackbar callback
void on_ElementSizeChange(int, void*);  // trackbar callback

int main()
{
    // change the console text color
    system("color 5E");
    // load the source image
    g_srcImage = imread("1.jpg");
    if (!g_srcImage.data)
    {
        printf("Error: could not read srcImage!\n");
        return -1;
    }
    // show the source image
    namedWindow("Original");
    imshow("Original", g_srcImage);
    // run an initial erosion and show the result
    namedWindow("Result");
    // build the structuring element
    Mat element = getStructuringElement(MORPH_RECT,
        Size(2 * g_nStructElementSize + 1, 2 * g_nStructElementSize + 1),
        Point(g_nStructElementSize, g_nStructElementSize));
    erode(g_srcImage, g_dstImage, element);
    imshow("Result", g_dstImage);
    // create the trackbars
    createTrackbar("Erode/Dilate", "Result", &g_nTrackbarNumer, 1, on_TrackbarNumChange);
    createTrackbar("Kernel size", "Result", &g_nStructElementSize, 21, on_ElementSizeChange);
    // print some help text
    cout << endl << "\tRunning. Adjust the trackbars to watch the effect.\n\n"
         << "\tPress 'q' to quit.\n"
         << "\n\n\t\t\t\tby 毛毛";
    // poll for key presses; quit on 'q'
    while (char(waitKey(1)) != 'q') {}
    return 0;
}

// perform the selected erosion or dilation
void Process()
{
    // build the structuring element
    Mat element = getStructuringElement(MORPH_RECT,
        Size(2 * g_nStructElementSize + 1, 2 * g_nStructElementSize + 1),
        Point(g_nStructElementSize, g_nStructElementSize));
    // erode or dilate
    if (g_nTrackbarNumer == 0)
        erode(g_srcImage, g_dstImage, element);
    else
        dilate(g_srcImage, g_dstImage, element);
    // show the result
    imshow("Result", g_dstImage);
}

// callback for the erode/dilate switch
void on_TrackbarNumChange(int, void*)
{
    // the mode changed; rerun Process so the new result shows immediately
    Process();
}

// callback for changes of the kernel size
void on_ElementSizeChange(int, void*)
{
    // the kernel size changed; rerun Process so the new result shows immediately
    Process();
}
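Opening and closing are just erosion and dilation chained together, and OpenCV exposes them directly through morphologyEx. A minimal sketch (the file name and kernel size are assumptions, not part of the original demo):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;

int main()
{
    Mat src = imread("1.jpg");
    if (src.empty()) return -1;
    Mat element = getStructuringElement(MORPH_RECT, Size(7, 7));
    Mat opened, closed;
    morphologyEx(src, opened, MORPH_OPEN, element);   // erode then dilate: removes small bright specks
    morphologyEx(src, closed, MORPH_CLOSE, element);  // dilate then erode: fills small dark holes
    imshow("opened", opened);
    imshow("closed", closed);
    waitKey(0);
    return 0;
}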

Combined dilation and erosion example 2

#include "cv.h"
#include "highgui.h"
#include "opencv2/imgproc/imgproc.hpp"

using namespace std;
using namespace cv;

#define TYPE_MORPH_RECT      (0)
#define TYPE_MORPH_CROSS     (1)
#define TYPE_MORPH_ELLIPSE   (2)

#define MAX_ELE_TYPE         (2)
#define MAX_ELE_SIZE         (20)

Mat src, erode_dst, dilate_dst;

const char *erode_wn  = "eroding demo";
const char *dilate_wn = "dilating demo";

int erode_ele_type;
int dilate_ele_type;
int erode_ele_size;
int dilate_ele_size;

static void Erosion(int, void *);
static void Dilation(int, void *);

/*
 * @brief   interactive erosion/dilation demo
 * @inputs  image file given on the command line
 * @outputs two windows, each with its own trackbars
 * @retval  0 on success
 */
int main(int argc, char *argv[])
{
    if (argc < 2) {
        cout << "Usage: ./eroding_and_dilating [file name]" << endl;
        return -1;
    }
    src = imread(argv[1]);
    if (!src.data) {
        cout << "Read image failure." << endl;
        return -1;
    }

    // Windows
    namedWindow(erode_wn, WINDOW_AUTOSIZE);
    namedWindow(dilate_wn, WINDOW_AUTOSIZE);

    // Trackbars for erosion
    createTrackbar("Element Type\n0:Rect\n1:Cross\n2:Ellipse", erode_wn,
                   &erode_ele_type, MAX_ELE_TYPE, Erosion);   // callback @Erosion
    createTrackbar("Element Size: 2n+1", erode_wn,
                   &erode_ele_size, MAX_ELE_SIZE, Erosion);

    // Trackbars for dilation
    createTrackbar("Element Type\n0:Rect\n1:Cross\n2:Ellipse", dilate_wn,
                   &dilate_ele_type, MAX_ELE_TYPE, Dilation); // callback @Dilation
    createTrackbar("Element Size: 2n+1", dilate_wn,
                   &dilate_ele_size, MAX_ELE_SIZE, Dilation);

    // Default start
    Erosion(0, 0);
    Dilation(0, 0);

    waitKey(0);
    return 0;
}

/*
 * @brief   trackbar callback for erosion
 */
static void Erosion(int, void *)
{
    int erode_type;
    switch (erode_ele_type) {
    case TYPE_MORPH_RECT:
        erode_type = MORPH_RECT;
        break;
    case TYPE_MORPH_CROSS:
        erode_type = MORPH_CROSS;
        break;
    case TYPE_MORPH_ELLIPSE:
        erode_type = MORPH_ELLIPSE;
        break;
    default:
        erode_type = MORPH_RECT;
        break;
    }

    Mat ele = getStructuringElement(erode_type,
                                    Size(2 * erode_ele_size + 1, 2 * erode_ele_size + 1),
                                    Point(erode_ele_size, erode_ele_size));
    erode(src, erode_dst, ele);
    imshow(erode_wn, erode_dst);
}

/*
 * @brief   trackbar callback for dilation
 */
static void Dilation(int, void *)
{
    int dilate_type;
    switch (dilate_ele_type) {
    case TYPE_MORPH_RECT:
        dilate_type = MORPH_RECT;
        break;
    case TYPE_MORPH_CROSS:
        dilate_type = MORPH_CROSS;
        break;
    case TYPE_MORPH_ELLIPSE:
        dilate_type = MORPH_ELLIPSE;
        break;
    default:
        dilate_type = MORPH_RECT;
        break;
    }

    Mat ele = getStructuringElement(dilate_type,
                                    Size(2 * dilate_ele_size + 1, 2 * dilate_ele_size + 1),
                                    Point(dilate_ele_size, dilate_ele_size));
    dilate(src, dilate_dst, ele);
    imshow(dilate_wn, dilate_dst);
}

Scaled image display in Qt

#include "widget.h"
#include "ui_widget.h"
#include <QDebug>
Widget::Widget(QWidget *parent) :QWidget(parent),ui(new Ui::Widget)
{ui->setupUi(this);
}Widget::~Widget()
{delete ui;
}void Widget::on_openButton_clicked()
{QString fileName = QFileDialog::getOpenFileName(this,tr("Open Image"),".",tr("Image Files (*.png *.jpg *.bmp)"));qDebug()<<"filenames:"<<fileName;image = cv::imread(fileName.toAscii().data());ui->imgfilelabel->setText(fileName);//here use 2 ways to make a copy
//    image.copyTo(originalimg);          //make a copyoriginalimg = image.clone();        //clone the imgqimg = Widget::Mat2QImage(image);display(qimg);                      //display by the labelif(image.data){ui->saltButton->setEnabled(true);ui->originalButton->setEnabled(true);ui->reduceButton->setEnabled(true);}
}QImage Widget::Mat2QImage(const cv::Mat &mat)
{QImage img;if(mat.channels()==3){//cvt Mat BGR 2 QImage RGBcvtColor(mat,rgb,CV_BGR2RGB);img =QImage((const unsigned char*)(rgb.data),rgb.cols,rgb.rows,rgb.cols*rgb.channels(),QImage::Format_RGB888);}else{img =QImage((const unsigned char*)(mat.data),mat.cols,mat.rows,mat.cols*mat.channels(),QImage::Format_RGB888);}return img;
}void Widget::display(QImage img)
{QImage imgScaled;imgScaled = img.scaled(ui->imagelabel->size(),Qt::KeepAspectRatio);
//  imgScaled = img.QImage::scaled(ui->imagelabel->width(),ui->imagelabel->height(),Qt::KeepAspectRatio);ui->imagelabel->setPixmap(QPixmap::fromImage(imgScaled));
}void Widget::on_originalButton_clicked()
{qimg = Widget::Mat2QImage(originalimg);display(qimg);
}void Widget::on_saltButton_clicked()
{salt(image,3000);qimg = Widget::Mat2QImage(image);display(qimg);
}
void Widget::on_reduceButton_clicked()
{colorReduce0(image,64);qimg = Widget::Mat2QImage(image);display(qimg);
}
void Widget::salt(cv::Mat &image, int n)
{int i,j;for (int k=0; k<n; k++){i= qrand()%image.cols;j= qrand()%image.rows;if (image.channels() == 1){ // gray-level imageimage.at<uchar>(j,i)= 255;}else if (image.channels() == 3){ // color imageimage.at<cv::Vec3b>(j,i)[0]= 255;image.at<cv::Vec3b>(j,i)[1]= 255;image.at<cv::Vec3b>(j,i)[2]= 255;}}
}// using .ptr and []
void Widget::colorReduce0(cv::Mat &image, int div)
{int nl= image.rows; // number of linesint nc= image.cols * image.channels(); // total number of elements per linefor (int j=0; j<nl; j++){uchar* data= image.ptr<uchar>(j);for (int i=0; i<nc; i++){// process each pixel ---------------------data[i]= data[i]/div*div+div/2;// end of pixel processing ----------------} // end of line}
}
#ifndef WIDGET_H
#define WIDGET_H

#include <QWidget>
#include <QImage>
#include <QFileDialog>
#include <QTimer>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;

namespace Ui {
class Widget;
}

class Widget : public QWidget
{
    Q_OBJECT

public:
    explicit Widget(QWidget *parent = 0);
    ~Widget();

private slots:
    void on_openButton_clicked();
    QImage Mat2QImage(const cv::Mat &mat);
    void display(QImage image);
    void salt(cv::Mat &image, int n);
    void on_saltButton_clicked();
    void on_reduceButton_clicked();
    void colorReduce0(cv::Mat &image, int div);
    void on_originalButton_clicked();

private:
    Ui::Widget *ui;
    cv::Mat image;
    cv::Mat originalimg;    // store the original img
    QImage qimg;
    QImage imgScaled;
    cv::Mat rgb;
};

#endif // WIDGET_H
#include <iostream>
#include <cmath>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

// using .ptr and []
void colorReduce0(cv::Mat &image, int div = 64) {
    int nl = image.rows;                    // number of lines
    int nc = image.cols * image.channels(); // total number of elements per line
    for (int j = 0; j < nl; j++) {
        uchar* data = image.ptr<uchar>(j);
        for (int i = 0; i < nc; i++) {
            // process each pixel
            data[i] = data[i] / div * div + div / 2;
        } // end of line
    }
}

// using .ptr and * ++
void colorReduce1(cv::Mat &image, int div = 64) {
    int nl = image.rows;
    int nc = image.cols * image.channels();
    for (int j = 0; j < nl; j++) {
        uchar* data = image.ptr<uchar>(j);
        for (int i = 0; i < nc; i++) {
            // increment separated from the write to keep the read of *data well defined
            *data = *data / div * div + div / 2;
            data++;
        } // end of line
    }
}

// using .ptr and * ++ and modulo
void colorReduce2(cv::Mat &image, int div = 64) {
    int nl = image.rows;
    int nc = image.cols * image.channels();
    for (int j = 0; j < nl; j++) {
        uchar* data = image.ptr<uchar>(j);
        for (int i = 0; i < nc; i++) {
            int v = *data;
            *data++ = v - v % div + div / 2;
        } // end of line
    }
}

// using .ptr and * ++ and bitwise
void colorReduce3(cv::Mat &image, int div = 64) {
    int nl = image.rows;
    int nc = image.cols * image.channels();
    int n = static_cast<int>(log(static_cast<double>(div)) / log(2.0));
    // mask used to round the pixel value
    uchar mask = 0xFF << n; // e.g. for div=16, mask=0xF0
    for (int j = 0; j < nl; j++) {
        uchar* data = image.ptr<uchar>(j);
        for (int i = 0; i < nc; i++) {
            *data = (*data & mask) + div / 2;
            data++;
        } // end of line
    }
}

// direct pointer arithmetic
void colorReduce4(cv::Mat &image, int div = 64) {
    int nl = image.rows;
    int nc = image.cols * image.channels();
    int n = static_cast<int>(log(static_cast<double>(div)) / log(2.0));
    int step = image.step;  // effective width
    // mask used to round the pixel value
    uchar mask = 0xFF << n;
    // get the pointer to the image buffer
    uchar *data = image.data;
    for (int j = 0; j < nl; j++) {
        for (int i = 0; i < nc; i++) {
            *(data + i) = (*(data + i) & mask) + div / 2;
        } // end of line
        data += step;   // next line
    }
}

// using .ptr and * ++ and bitwise, recomputing image.cols * image.channels() each iteration
void colorReduce5(cv::Mat &image, int div = 64) {
    int nl = image.rows;
    int n = static_cast<int>(log(static_cast<double>(div)) / log(2.0));
    uchar mask = 0xFF << n;
    for (int j = 0; j < nl; j++) {
        uchar* data = image.ptr<uchar>(j);
        for (int i = 0; i < image.cols * image.channels(); i++) {
            *data = (*data & mask) + div / 2;
            data++;
        } // end of line
    }
}

// using .ptr and * ++ and bitwise (continuous)
void colorReduce6(cv::Mat &image, int div = 64) {
    int nl = image.rows;
    int nc = image.cols * image.channels();
    if (image.isContinuous()) {
        // then no padded pixels
        nc = nc * nl;
        nl = 1;     // it is now a 1D array
    }
    int n = static_cast<int>(log(static_cast<double>(div)) / log(2.0));
    uchar mask = 0xFF << n;
    for (int j = 0; j < nl; j++) {
        uchar* data = image.ptr<uchar>(j);
        for (int i = 0; i < nc; i++) {
            *data = (*data & mask) + div / 2;
            data++;
        } // end of line
    }
}

// using .ptr and * ++ and bitwise (continuous + channels)
void colorReduce7(cv::Mat &image, int div = 64) {
    int nl = image.rows;
    int nc = image.cols;    // number of columns
    if (image.isContinuous()) {
        nc = nc * nl;
        nl = 1;
    }
    int n = static_cast<int>(log(static_cast<double>(div)) / log(2.0));
    uchar mask = 0xFF << n;
    for (int j = 0; j < nl; j++) {
        uchar* data = image.ptr<uchar>(j);
        for (int i = 0; i < nc; i++) {
            // process the three channels of each pixel
            *data = (*data & mask) + div / 2; data++;
            *data = (*data & mask) + div / 2; data++;
            *data = (*data & mask) + div / 2; data++;
        } // end of line
    }
}

// using Mat_ iterator
void colorReduce8(cv::Mat &image, int div = 64) {
    // get iterators
    cv::Mat_<cv::Vec3b>::iterator it = image.begin<cv::Vec3b>();
    cv::Mat_<cv::Vec3b>::iterator itend = image.end<cv::Vec3b>();
    for (; it != itend; ++it) {
        (*it)[0] = (*it)[0] / div * div + div / 2;
        (*it)[1] = (*it)[1] / div * div + div / 2;
        (*it)[2] = (*it)[2] / div * div + div / 2;
    }
}

// using Mat_ iterator and bitwise
void colorReduce9(cv::Mat &image, int div = 64) {
    // div must be a power of 2
    int n = static_cast<int>(log(static_cast<double>(div)) / log(2.0));
    uchar mask = 0xFF << n;
    // get iterators
    cv::Mat_<cv::Vec3b>::iterator it = image.begin<cv::Vec3b>();
    cv::Mat_<cv::Vec3b>::iterator itend = image.end<cv::Vec3b>();
    // scan all pixels
    for (; it != itend; ++it) {
        (*it)[0] = ((*it)[0] & mask) + div / 2;
        (*it)[1] = ((*it)[1] & mask) + div / 2;
        (*it)[2] = ((*it)[2] & mask) + div / 2;
    }
}

// using MatIterator_
void colorReduce10(cv::Mat &image, int div = 64) {
    cv::Mat_<cv::Vec3b> cimage = image;
    cv::Mat_<cv::Vec3b>::iterator it = cimage.begin();
    cv::Mat_<cv::Vec3b>::iterator itend = cimage.end();
    for (; it != itend; ++it) {
        (*it)[0] = (*it)[0] / div * div + div / 2;
        (*it)[1] = (*it)[1] / div * div + div / 2;
        (*it)[2] = (*it)[2] / div * div + div / 2;
    }
}

// using at<>
void colorReduce11(cv::Mat &image, int div = 64) {
    int nl = image.rows;
    int nc = image.cols;
    for (int j = 0; j < nl; j++) {
        for (int i = 0; i < nc; i++) {
            image.at<cv::Vec3b>(j, i)[0] = image.at<cv::Vec3b>(j, i)[0] / div * div + div / 2;
            image.at<cv::Vec3b>(j, i)[1] = image.at<cv::Vec3b>(j, i)[1] / div * div + div / 2;
            image.at<cv::Vec3b>(j, i)[2] = image.at<cv::Vec3b>(j, i)[2] / div * div + div / 2;
        } // end of line
    }
}

// with separate input/output images
void colorReduce12(const cv::Mat &image,    // input image
                   cv::Mat &result,         // output image
                   int div = 64) {
    int nl = image.rows;
    int nc = image.cols;
    // allocate the output image if necessary
    result.create(image.rows, image.cols, image.type());
    // created images have no padded pixels
    nc = nc * nl;
    nl = 1;     // it is now a 1D array
    int n = static_cast<int>(log(static_cast<double>(div)) / log(2.0));
    uchar mask = 0xFF << n;
    for (int j = 0; j < nl; j++) {
        uchar* data = result.ptr<uchar>(j);
        const uchar* idata = image.ptr<uchar>(j);
        for (int i = 0; i < nc; i++) {
            *data++ = (*idata++ & mask) + div / 2;
            *data++ = (*idata++ & mask) + div / 2;
            *data++ = (*idata++ & mask) + div / 2;
        } // end of line
    }
}

// using overloaded operators
void colorReduce13(cv::Mat &image, int div = 64) {
    int n = static_cast<int>(log(static_cast<double>(div)) / log(2.0));
    uchar mask = 0xFF << n;
    // perform color reduction
    image = (image & cv::Scalar(mask, mask, mask)) + cv::Scalar(div / 2, div / 2, div / 2);
}
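The variants above differ only in how they traverse the pixels, so the interesting question is how fast each one runs. A hypothetical timing harness (the lena.jpg path and the chosen variants are assumptions; append it below the listing so the functions are in scope) could look like this:

#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

// time one color-reduction variant on a fresh copy of the image, in milliseconds
double timeIt(const cv::Mat &src, void (*reduce)(cv::Mat &, int))
{
    cv::Mat work = src.clone();                 // each run gets its own copy
    int64 t0 = cv::getTickCount();
    reduce(work, 64);
    return (cv::getTickCount() - t0) * 1000.0 / cv::getTickFrequency();
}

int main()
{
    cv::Mat img = cv::imread("lena.jpg");       // assumed test image
    if (img.empty()) return -1;
    std::cout << "colorReduce0:  " << timeIt(img, colorReduce0)  << " ms" << std::endl;
    std::cout << "colorReduce6:  " << timeIt(img, colorReduce6)  << " ms" << std::endl;
    std::cout << "colorReduce13: " << timeIt(img, colorReduce13) << " ms" << std::endl;
    return 0;
}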

Image sharpening 1

sharp.h

#pragma once
#include <opencv/cv.h>
using namespace cv;

namespace ggicci
{
    void sharpen(const Mat& img, Mat& result);
}

sharp.cpp

#include "sharp.h"

void ggicci::sharpen(const Mat& img, Mat& result)
{
    result.create(img.size(), img.type());
    // process the interior pixels; the outermost border is handled separately below
    for (int row = 1; row < img.rows - 1; row++)
    {
        const uchar* previous = img.ptr<const uchar>(row - 1);  // previous row
        const uchar* current  = img.ptr<const uchar>(row);      // current row
        const uchar* next     = img.ptr<const uchar>(row + 1);  // next row
        uchar* output = result.ptr<uchar>(row);
        int ch = img.channels();
        int starts = ch;
        int ends = (img.cols - 1) * ch;
        for (int col = starts; col < ends; col++)
        {
            // one channel value per step, so multi-channel images are handled too;
            // saturate_cast clamps the result to [0, 255]
            output[col] = saturate_cast<uchar>(5 * current[col] - current[col - ch]
                                               - current[col + ch] - previous[col] - next[col]);
        }
    } // end loop
    // set the border pixels to 0
    result.row(0).setTo(Scalar::all(0));
    result.row(result.rows - 1).setTo(Scalar::all(0));
    result.col(0).setTo(Scalar::all(0));
    result.col(result.cols - 1).setTo(Scalar::all(0));
}

main.cpp

#include <opencv/highgui.h>
#pragma comment(lib, "opencv_core231d.lib")
#pragma comment(lib, "opencv_highgui231d.lib")
#pragma comment(lib, "opencv_imgproc231d.lib")

using namespace cv;

#include "sharp.h"

int main()
{
    Mat lena = imread("lena.jpg");
    Mat sharpenedLena;
    ggicci::sharpen(lena, sharpenedLena);

    imshow("lena", lena);
    imshow("sharpened lena", sharpenedLena);
    cvWaitKey();
    return 0;
}

Image sharpening 2

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;

int main()
{
    Mat lena = imread("lena.jpg");
    Mat sharpenedLena;
    // same Laplacian-based kernel as the hand-written loop above
    Mat kernel = (Mat_<float>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
    cv::filter2D(lena, sharpenedLena, lena.depth(), kernel);

    imshow("lena", lena);
    imshow("sharpened lena", sharpenedLena);
    waitKey();
    return 0;
}
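Another common way to sharpen, not shown in the original post, is unsharp masking: blur the image and add back a weighted difference. A minimal sketch, assuming the same lena.jpg test image:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;

int main()
{
    Mat lena = imread("lena.jpg");
    if (lena.empty()) return -1;
    Mat blurred, sharpened;
    GaussianBlur(lena, blurred, Size(0, 0), 3);           // low-pass version
    addWeighted(lena, 1.5, blurred, -0.5, 0, sharpened);  // lena + 0.5 * (lena - blurred)
    imshow("unsharp mask", sharpened);
    waitKey();
    return 0;
}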

Computing the histogram of a grayscale image

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;

namespace ggicci
{
    Mat getHistogram1DImage(const Mat& hist, Size imgSize);
}

int main()
{
    Mat img = imread("lena.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    Mat* arrays = &img;
    int narrays = 1;
    int channels[] = { 0 };
    InputArray mask = noArray();
    Mat hist;
    int dims = 1;
    int histSize[] = { 256 };
    float hranges[] = { 0.0, 256.0 };   // the upper bound is exclusive
    const float *ranges[] = { hranges };
    // call calcHist; the result is stored in hist
    calcHist(arrays, narrays, channels, mask, hist, dims, histSize, ranges);
    // a small helper function turns the histogram data into an image;
    // its arguments are the histogram and the desired image size
    Mat histImg = ggicci::getHistogram1DImage(hist, Size(600, 420));
    imshow("lena gray image histogram", histImg);
    waitKey();
}

Mat ggicci::getHistogram1DImage(const Mat& hist, Size imgSize)
{
    Mat histImg(imgSize, CV_8UC3);
    histImg.setTo(Scalar::all(255));    // white background
    int Padding = 10;
    int W = imgSize.width - 2 * Padding;
    int H = imgSize.height - 2 * Padding;
    double _max;
    minMaxLoc(hist, NULL, &_max);
    double Per = (double)H / _max;
    const Point Orig(Padding, imgSize.height - Padding);
    int bin = W / (hist.rows + 2);
    // draw the bars
    for (int i = 1; i <= hist.rows; i++)
    {
        Point pBottom(Orig.x + i * bin, Orig.y);
        Point pTop(pBottom.x, pBottom.y - Per * hist.at<float>(i - 1));
        line(histImg, pBottom, pTop, Scalar(255, 0, 0), bin);
    }
    // draw 3 red lines marking the plot area
    line(histImg, Point(Orig.x + bin, Orig.y - H), Point(Orig.x + hist.rows * bin, Orig.y - H), Scalar(0, 0, 255), 1);
    line(histImg, Point(Orig.x + bin, Orig.y), Point(Orig.x + bin, Orig.y - H), Scalar(0, 0, 255), 1);
    line(histImg, Point(Orig.x + hist.rows * bin, Orig.y), Point(Orig.x + hist.rows * bin, Orig.y - H), Scalar(0, 0, 255), 1);
    drawArrow(histImg, Orig, Orig + Point(W, 0), 10, 30, Scalar::all(0), 2);
    drawArrow(histImg, Orig, Orig - Point(0, H), 10, 30, Scalar::all(0), 2);
    return histImg;
}
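getHistogram1DImage calls a drawArrow helper that is not shown in the original post. A hypothetical minimal implementation (the name and parameter order are assumptions chosen to match the call sites above) could be:

#include <cmath>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

// hypothetical helper: draw the segment start->end plus two short strokes at the tip,
// rotated by +/- angle degrees, to form an arrow head
void drawArrow(cv::Mat& img, cv::Point start, cv::Point end,
               int arrowLen, int angle, cv::Scalar color, int thickness)
{
    const double PI = 3.14159265358979323846;
    cv::line(img, start, end, color, thickness);
    double theta = atan2((double)(start.y - end.y), (double)(start.x - end.x));
    cv::Point tip1(cvRound(end.x + arrowLen * cos(theta + PI * angle / 180.0)),
                   cvRound(end.y + arrowLen * sin(theta + PI * angle / 180.0)));
    cv::Point tip2(cvRound(end.x + arrowLen * cos(theta - PI * angle / 180.0)),
                   cvRound(end.y + arrowLen * sin(theta - PI * angle / 180.0)));
    cv::line(img, tip1, end, color, thickness);
    cv::line(img, tip2, end, color, thickness);
}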

Image scaling: nearest-neighbor and bilinear interpolation

#include "stdafx.h"
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
#include <cmath>

using namespace std;
using namespace cv;

int main(int argc, char** argv)
{
    IplImage *scr = 0;
    IplImage *dst = 0;
    double scale = 4;
    CvSize dst_cvsize;
    if (argc == 2 && (scr = cvLoadImage(argv[1], -1)) != 0)
    {
        dst_cvsize.width  = (int)(scr->width * scale);
        dst_cvsize.height = (int)(scr->height * scale);
        dst = cvCreateImage(dst_cvsize, scr->depth, scr->nChannels);
        cvResize(scr, dst, CV_INTER_NN);
        // Interpolation modes:
        //   CV_INTER_NN     - nearest-neighbor interpolation
        //   CV_INTER_LINEAR - bilinear interpolation (the default)
        //   CV_INTER_AREA   - resampling using pixel-area relation; avoids moiré when
        //                     shrinking an image (behaves like CV_INTER_NN when enlarging)
        //   CV_INTER_CUBIC  - bicubic interpolation
        cvNamedWindow("scr", CV_WINDOW_AUTOSIZE);
        cvNamedWindow("dst", CV_WINDOW_AUTOSIZE);
        cvShowImage("scr", scr);
        cvShowImage("dst", dst);
        cvWaitKey();
        cvReleaseImage(&scr);
        cvReleaseImage(&dst);
        cvDestroyWindow("scr");
        cvDestroyWindow("dst");
    }
    return 0;
}
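The same comparison with the C++ API goes through cv::resize and the INTER_* flags. A minimal sketch (the file name and the 4x scale factor are assumptions):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;

int main()
{
    Mat src = imread("lena.jpg");
    if (src.empty()) return -1;
    Mat nn, linear;
    resize(src, nn,     Size(), 4, 4, INTER_NEAREST);  // nearest-neighbor, 4x
    resize(src, linear, Size(), 4, 4, INTER_LINEAR);   // bilinear, 4x
    imshow("nearest", nn);
    imshow("bilinear", linear);
    waitKey();
    return 0;
}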

Applying a sepia ("nostalgia") filter and saving the output

#include <opencv/cv.h>
#include <opencv/highgui.h>

using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    // check the input arguments
    if (argc < 3)
    {
        printf("please input args.\n");
        printf("e.g. : ./test infilepath outfilepath\n");
        return 0;
    }
    char* input  = argv[1];
    char* output = argv[2];
    printf("input: %s, output: %s\n", input, output);

    Mat src = imread(input, 1);
    int width = src.cols;
    int heigh = src.rows;
    Mat img(src.size(), CV_8UC3);
    for (int y = 0; y < heigh; y++)
    {
        uchar* P0 = src.ptr<uchar>(y);
        uchar* P1 = img.ptr<uchar>(y);
        for (int x = 0; x < width; x++)
        {
            float B = P0[3 * x];
            float G = P0[3 * x + 1];
            float R = P0[3 * x + 2];
            // classic sepia transform
            float newB = 0.272f * R + 0.534f * G + 0.131f * B;
            float newG = 0.349f * R + 0.686f * G + 0.168f * B;
            float newR = 0.393f * R + 0.769f * G + 0.189f * B;
            // clamp to [0, 255]
            if (newB < 0) newB = 0; if (newB > 255) newB = 255;
            if (newG < 0) newG = 0; if (newG > 255) newG = 255;
            if (newR < 0) newR = 0; if (newR > 255) newR = 255;
            P1[3 * x]     = (uchar)newB;
            P1[3 * x + 1] = (uchar)newG;
            P1[3 * x + 2] = (uchar)newR;
        }
    }
    //imshow("out", img);
    //waitKey();
    imwrite(output, img);
    return 0;
}
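The same per-pixel linear map can be written as a single cv::transform call with a 3x3 matrix, which also handles the clamping automatically. A minimal sketch (the in/out file names are assumptions; the coefficients are the ones used above):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main()
{
    Mat src = imread("in.jpg");
    if (src.empty()) return -1;
    // each row maps one output channel (B, G, R) from the input channels (B, G, R)
    Mat sepia = (Mat_<float>(3, 3) <<
        0.131f, 0.534f, 0.272f,    // newB
        0.168f, 0.686f, 0.349f,    // newG
        0.189f, 0.769f, 0.393f);   // newR
    Mat dst;
    transform(src, dst, sepia);    // applies the matrix to every pixel with saturation
    imwrite("out.jpg", dst);
    return 0;
}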

Emboss and engrave effects

#include <cv.h>
#include <highgui.h>

#pragma comment( lib, "cv.lib" )
#pragma comment( lib, "cxcore.lib" )
#pragma comment( lib, "highgui.lib" )

int main()
{
    IplImage *org = cvLoadImage("1.jpg", 1);
    IplImage *image = cvCloneImage(org);
    int width   = image->width;
    int height  = image->height;
    int step    = image->widthStep;
    int channel = image->nChannels;
    uchar* data = (uchar *)image->imageData;
    for (int i = 0; i < width - 1; i++)
    {
        for (int j = 0; j < height - 1; j++)
        {
            for (int k = 0; k < channel; k++)
            {
                // emboss: difference to the lower-right neighbor, shifted by 128
                int temp = data[(j + 1) * step + (i + 1) * channel + k] - data[j * step + i * channel + k] + 128;
                // engrave: the opposite difference
                //int temp = data[j * step + i * channel + k] - data[(j + 1) * step + (i + 1) * channel + k] + 128;
                if (temp > 255)
                    data[j * step + i * channel + k] = 255;
                else if (temp < 0)
                    data[j * step + i * channel + k] = 0;
                else
                    data[j * step + i * channel + k] = temp;
            }
        }
    }
    cvNamedWindow("original", 1);
    cvShowImage("original", org);
    cvNamedWindow("image", 1);
    cvShowImage("image", image);
    cvWaitKey(0);
    cvDestroyAllWindows();
    cvReleaseImage(&image);
    cvReleaseImage(&org);
    return 0;
}
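Since the emboss effect is just a directional difference plus a 128 offset, it can also be expressed as a convolution. A minimal sketch with the C++ API (the kernel, anchor, and delta are chosen to mimic the loop above; the file name is an assumption):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;

int main()
{
    Mat src = imread("1.jpg");
    if (src.empty()) return -1;
    // lower-right neighbor minus the current pixel
    Mat kernel = (Mat_<float>(2, 2) << -1, 0,
                                        0, 1);
    Mat emboss;
    filter2D(src, emboss, src.depth(), kernel, Point(0, 0), 128);  // +128 offset, saturated
    imshow("emboss", emboss);
    waitKey(0);
    return 0;
}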

Image wrinkle effect

#include <cv.h>
#include <highgui.h>

#pragma comment( lib, "cv.lib" )
#pragma comment( lib, "cxcore.lib" )
#pragma comment( lib, "highgui.lib" )

int main()
{
    IplImage *org = cvLoadImage("lena.jpg", 1);
    IplImage *image = cvCloneImage(org);
    int width   = image->width;
    int height  = image->height;
    int step    = image->widthStep;
    int channel = image->nChannels;
    uchar* data = (uchar *)image->imageData;
    int sign = -1;
    for (int i = 0; i < height; i++)
    {
        // shift each row left by an offset that ramps up and down every 'cycle' rows
        int cycle = 10;
        int margin = (i % cycle);
        if ((i / cycle) % 2 == 0)
            sign = -1;
        else
            sign = 1;
        if (sign == -1)
        {
            margin = cycle - margin;
            for (int j = 0; j < width - margin; j++)
                for (int k = 0; k < channel; k++)
                    data[i * step + j * channel + k] = data[i * step + (j + margin) * channel + k];
        }
        else if (sign == 1)
        {
            for (int j = 0; j < width - margin; j++)
                for (int k = 0; k < channel; k++)
                    data[i * step + j * channel + k] = data[i * step + (j + margin) * channel + k];
        }
    }
    cvNamedWindow("original", 1);
    cvShowImage("original", org);
    cvNamedWindow("image", 1);
    cvShowImage("image", image);
    cvSaveImage("image.jpg", image);
    cvWaitKey(0);
    cvDestroyAllWindows();
    cvReleaseImage(&image);
    cvReleaseImage(&org);
    return 0;
}

The GrabCut algorithm

#include "stdafx.h"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include "ComputeTime.h"
#include "windows.h"

using namespace std;
using namespace cv;

static void help()
{
    cout << "\nThis program demonstrates GrabCut segmentation -- select an object in a region\n"
            "and then grabcut will attempt to segment it out.\n"
            "Call:\n"
            "./grabcut <image_name>\n"
            "\nSelect a rectangular area around the object you want to segment\n" <<
            "\nHot keys: \n"
            "\tESC - quit the program\n"
            "\tr - restore the original image\n"
            "\tn - next iteration\n"
            "\n"
            "\tleft mouse button - set rectangle\n"
            "\n"
            "\tCTRL+left mouse button - set GC_BGD pixels\n"
            "\tSHIFT+left mouse button - set GC_FGD pixels\n"
            "\n"
            "\tCTRL+right mouse button - set GC_PR_BGD pixels\n"
            "\tSHIFT+right mouse button - set GC_PR_FGD pixels\n" << endl;
}

const Scalar RED = Scalar(0, 0, 255);
const Scalar PINK = Scalar(230, 130, 255);
const Scalar BLUE = Scalar(255, 0, 0);
const Scalar LIGHTBLUE = Scalar(255, 255, 160);
const Scalar GREEN = Scalar(0, 255, 0);

const int BGD_KEY = CV_EVENT_FLAG_CTRLKEY;  // Ctrl key
const int FGD_KEY = CV_EVENT_FLAG_SHIFTKEY; // Shift key

static void getBinMask(const Mat& comMask, Mat& binMask)
{
    if (comMask.empty() || comMask.type() != CV_8UC1)
        CV_Error(CV_StsBadArg, "comMask is empty or has incorrect type (not CV_8UC1)");
    if (binMask.empty() || binMask.rows != comMask.rows || binMask.cols != comMask.cols)
        binMask.create(comMask.size(), CV_8UC1);
    // keep only the lowest bit of the mask, i.e. treat definite and probable
    // foreground labels as foreground
    binMask = comMask & 1;
}

class GCApplication
{
public:
    enum { NOT_SET = 0, IN_PROCESS = 1, SET = 2 };
    static const int radius = 2;
    static const int thickness = -1;

    void reset();
    void setImageAndWinName(const Mat& _image, const string& _winName);
    void showImage() const;
    void mouseClick(int event, int x, int y, int flags, void* param);
    int nextIter();
    int getIterCount() const { return iterCount; }

private:
    void setRectInMask();
    void setLblsInMask(int flags, Point p, bool isPr);

    const string* winName;
    const Mat* image;
    Mat mask;
    Mat bgdModel, fgdModel;
    uchar rectState, lblsState, prLblsState;
    bool isInitialized;
    Rect rect;
    vector<Point> fgdPxls, bgdPxls, prFgdPxls, prBgdPxls;
    int iterCount;
};

/* reset the member variables */
void GCApplication::reset()
{
    if (!mask.empty())
        mask.setTo(Scalar::all(GC_BGD));
    bgdPxls.clear(); fgdPxls.clear();
    prBgdPxls.clear(); prFgdPxls.clear();
    isInitialized = false;
    rectState = NOT_SET;    // NOT_SET == 0
    lblsState = NOT_SET;
    prLblsState = NOT_SET;
    iterCount = 0;
}

/* store the image and the window name */
void GCApplication::setImageAndWinName(const Mat& _image, const string& _winName)
{
    if (_image.empty() || _winName.empty())
        return;
    image = &_image;
    winName = &_winName;
    mask.create(image->size(), CV_8UC1);
    reset();
}

/* draw the marked points, the rectangle and the image content;
   factored out because many of the steps below need it */
void GCApplication::showImage() const
{
    if (image->empty() || winName->empty())
        return;
    Mat res;
    Mat binMask;
    if (!isInitialized)
        image->copyTo(res);
    else
    {
        getBinMask(mask, binMask);
        // copy only the pixels whose lowest mask bit is 1,
        // i.e. the definite and probable foreground
        image->copyTo(res, binMask);
    }
    vector<Point>::const_iterator it;
    // draw the four kinds of marked points in different colors
    for (it = bgdPxls.begin(); it != bgdPxls.end(); ++it)       // definite background: blue
        circle(res, *it, radius, BLUE, thickness);
    for (it = fgdPxls.begin(); it != fgdPxls.end(); ++it)       // definite foreground: red
        circle(res, *it, radius, RED, thickness);
    for (it = prBgdPxls.begin(); it != prBgdPxls.end(); ++it)   // probable background: light blue
        circle(res, *it, radius, LIGHTBLUE, thickness);
    for (it = prFgdPxls.begin(); it != prFgdPxls.end(); ++it)   // probable foreground: pink
        circle(res, *it, radius, PINK, thickness);
    // draw the rectangle
    if (rectState == IN_PROCESS || rectState == SET)
        rectangle(res, Point(rect.x, rect.y), Point(rect.x + rect.width, rect.y + rect.height), GREEN, 2);
    imshow(*winName, res);
}

/* after this step the mask is GC_PR_FGD inside the rectangle and GC_BGD outside */
void GCApplication::setRectInMask()
{
    assert(!mask.empty());
    mask.setTo(GC_BGD);     // GC_BGD == 0
    rect.x = max(0, rect.x);
    rect.y = max(0, rect.y);
    rect.width = min(rect.width, image->cols - rect.x);
    rect.height = min(rect.height, image->rows - rect.y);
    (mask(rect)).setTo(Scalar(GC_PR_FGD));  // GC_PR_FGD == 3: probable foreground inside the rectangle
}

void GCApplication::setLblsInMask(int flags, Point p, bool isPr)
{
    vector<Point> *bpxls, *fpxls;
    uchar bvalue, fvalue;
    if (!isPr)  // definite points
    {
        bpxls = &bgdPxls;
        fpxls = &fgdPxls;
        bvalue = GC_BGD;    // 0
        fvalue = GC_FGD;    // 1
    }
    else        // probable points
    {
        bpxls = &prBgdPxls;
        fpxls = &prFgdPxls;
        bvalue = GC_PR_BGD; // 2
        fvalue = GC_PR_FGD; // 3
    }
    if (flags & BGD_KEY)
    {
        bpxls->push_back(p);
        circle(mask, p, radius, bvalue, thickness);
    }
    if (flags & FGD_KEY)
    {
        fpxls->push_back(p);
        circle(mask, p, radius, fvalue, thickness);
    }
}

/* mouse callback; flags is a combination of CV_EVENT_FLAG_* values */
void GCApplication::mouseClick(int event, int x, int y, int flags, void*)
{
    // TODO: add bad args check
    switch (event)
    {
    case CV_EVENT_LBUTTONDOWN: // set rect or GC_BGD(GC_FGD) labels
        {
            bool isb = (flags & BGD_KEY) != 0,
                 isf = (flags & FGD_KEY) != 0;
            if (rectState == NOT_SET && !isb && !isf)   // plain left button: start the rectangle
            {
                rectState = IN_PROCESS;
                rect = Rect(x, y, 1, 1);
            }
            if ((isb || isf) && rectState == SET)       // Ctrl/Shift held and rectangle done:
                lblsState = IN_PROCESS;                 // start marking definite fore/background points
        }
        break;
    case CV_EVENT_RBUTTONDOWN: // set GC_PR_BGD(GC_PR_FGD) labels
        {
            bool isb = (flags & BGD_KEY) != 0,
                 isf = (flags & FGD_KEY) != 0;
            if ((isb || isf) && rectState == SET)       // marking probable fore/background points
                prLblsState = IN_PROCESS;
        }
        break;
    case CV_EVENT_LBUTTONUP:
        if (rectState == IN_PROCESS)
        {
            rect = Rect(Point(rect.x, rect.y), Point(x, y));    // the rectangle is finished
            rectState = SET;
            setRectInMask();
            assert(bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty());
            showImage();
        }
        if (lblsState == IN_PROCESS)    // finished marking definite points
        {
            setLblsInMask(flags, Point(x, y), false);
            lblsState = SET;
            showImage();
        }
        break;
    case CV_EVENT_RBUTTONUP:
        if (prLblsState == IN_PROCESS)  // finished marking probable points
        {
            setLblsInMask(flags, Point(x, y), true);
            prLblsState = SET;
            showImage();
        }
        break;
    case CV_EVENT_MOUSEMOVE:
        if (rectState == IN_PROCESS)
        {
            rect = Rect(Point(rect.x, rect.y), Point(x, y));
            assert(bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty());
            showImage();    // keep redrawing while the rectangle is dragged
        }
        else if (lblsState == IN_PROCESS)
        {
            setLblsInMask(flags, Point(x, y), false);
            showImage();
        }
        else if (prLblsState == IN_PROCESS)
        {
            setLblsInMask(flags, Point(x, y), true);
            showImage();
        }
        break;
    }
}

/* run one GrabCut iteration and return the total iteration count */
int GCApplication::nextIter()
{
    if (isInitialized)
        // run one iteration; the mask is both input and output: it holds the current
        // labels and receives the segmentation result
        grabCut(*image, mask, rect, bgdModel, fgdModel, 1);
    else
    {
        if (rectState != SET)
            return iterCount;
        if (lblsState == SET || prLblsState == SET)
            grabCut(*image, mask, rect, bgdModel, fgdModel, 1, GC_INIT_WITH_MASK);
        else
            grabCut(*image, mask, rect, bgdModel, fgdModel, 1, GC_INIT_WITH_RECT);
        isInitialized = true;
    }
    iterCount++;
    bgdPxls.clear(); fgdPxls.clear();
    prBgdPxls.clear(); prFgdPxls.clear();
    return iterCount;
}

GCApplication gcapp;

static void on_mouse(int event, int x, int y, int flags, void* param)
{
    gcapp.mouseClick(event, x, y, flags, param);
}

int main(int argc, char** argv)
{
    string filename;
    cout << " Grabcuts ! \n";
    cout << "input image name:  " << endl;
    cin >> filename;

    Mat image = imread(filename, 1);
    if (image.empty())
    {
        cout << "\n Durn, couldn't read image filename " << filename << endl;
        return 1;
    }

    help();

    const string winName = "image";
    cvNamedWindow(winName.c_str(), CV_WINDOW_AUTOSIZE);
    cvSetMouseCallback(winName.c_str(), on_mouse, 0);

    gcapp.setImageAndWinName(image, winName);
    gcapp.showImage();

    for (;;)
    {
        int c = cvWaitKey(0);
        switch ((char)c)
        {
        case '\x1b':
            cout << "Exiting ..." << endl;
            goto exit_main;
        case 'r':
            cout << endl;
            gcapp.reset();
            gcapp.showImage();
            break;
        case 'n':
            {
                ComputeTime ct;
                ct.Begin();
                int iterCount = gcapp.getIterCount();
                cout << "<" << iterCount << "... ";
                int newIterCount = gcapp.nextIter();
                if (newIterCount > iterCount)
                {
                    gcapp.showImage();
                    cout << iterCount << ">" << endl;
                    cout << "elapsed time:  " << ct.End() << endl;
                }
                else
                    cout << "rect must be determined>" << endl;
            }
            break;
        }
    }

exit_main:
    cvDestroyWindow(winName.c_str());
    return 0;
}
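Stripped of the interactive UI, the core of the demo is a single call to cv::grabCut initialized with a rectangle. A minimal non-interactive sketch (the image path, rectangle, and iteration count are assumptions):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;

int main()
{
    Mat image = imread("lena.jpg");
    if (image.empty()) return -1;
    Rect rect(50, 50, image.cols - 100, image.rows - 100);  // assumed region containing the object
    Mat mask, bgdModel, fgdModel;
    grabCut(image, mask, rect, bgdModel, fgdModel, 5, GC_INIT_WITH_RECT);  // 5 iterations
    // keep definite and probable foreground (lowest bit of the label is 1)
    Mat binMask = (mask & 1);
    Mat foreground;
    image.copyTo(foreground, binMask);
    imshow("foreground", foreground);
    waitKey(0);
    return 0;
}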

Lazy snapping

LazySnapping.cpp

#include "stdafx.h"
#include <cv.h>
#include <highgui.h>
#include "graph.h"
#include <vector>
#include <iostream>
#include <cmath>
#include <string>

using namespace std;

typedef Graph<float, float, float> GraphType;

class LasySnapping
{
public:
    LasySnapping();
    ~LasySnapping()
    {
        if (graph)
        {
            delete graph;
        }
    }

private:
    vector<CvPoint> forePts;
    vector<CvPoint> backPts;
    IplImage* image;
    // average color of the foreground points
    unsigned char avgForeColor[3];
    // average color of the background points
    unsigned char avgBackColor[3];

public:
    void setImage(IplImage* image)
    {
        this->image = image;
        graph = new GraphType(image->width * image->height,
                              image->width * image->height * 2);
    }

    // include-pen locus
    void setForegroundPoints(vector<CvPoint> pts)
    {
        forePts.clear();
        for (int i = 0; i < pts.size(); i++)
        {
            if (!isPtInVector(pts[i], forePts))
            {
                forePts.push_back(pts[i]);
            }
        }
        if (forePts.size() == 0)
        {
            return;
        }
        int sum[3] = {0};
        for (int i = 0; i < forePts.size(); i++)
        {
            unsigned char* p = (unsigned char*)image->imageData + forePts[i].x * 3
                               + forePts[i].y * image->widthStep;
            sum[0] += p[0];
            sum[1] += p[1];
            sum[2] += p[2];
        }
        cout << sum[0] << " " << forePts.size() << endl;
        avgForeColor[0] = sum[0] / forePts.size();
        avgForeColor[1] = sum[1] / forePts.size();
        avgForeColor[2] = sum[2] / forePts.size();
    }

    // exclude-pen locus
    void setBackgroundPoints(vector<CvPoint> pts)
    {
        backPts.clear();
        for (int i = 0; i < pts.size(); i++)
        {
            if (!isPtInVector(pts[i], backPts))
            {
                backPts.push_back(pts[i]);
            }
        }
        if (backPts.size() == 0)
        {
            return;
        }
        int sum[3] = {0};
        for (int i = 0; i < backPts.size(); i++)
        {
            unsigned char* p = (unsigned char*)image->imageData + backPts[i].x * 3
                               + backPts[i].y * image->widthStep;
            sum[0] += p[0];
            sum[1] += p[1];
            sum[2] += p[2];
        }
        avgBackColor[0] = sum[0] / backPts.size();
        avgBackColor[1] = sum[1] / backPts.size();
        avgBackColor[2] = sum[2] / backPts.size();
    }

    // return the maxflow of the graph
    int runMaxflow();
    // get the result: a grayscale mask marking foreground with 255 and background with 0
    IplImage* getImageMask();

private:
    float colorDistance(unsigned char* color1, unsigned char* color2);
    float minDistance(unsigned char* color, vector<CvPoint> points);
    bool isPtInVector(CvPoint pt, vector<CvPoint> points);
    void getE1(unsigned char* color, float* energy);
    float getE2(unsigned char* color1, unsigned char* color2);

    GraphType *graph;
};

LasySnapping::LasySnapping()
{
    graph = NULL;
    avgForeColor[0] = 0;
    avgForeColor[1] = 0;
    avgForeColor[2] = 0;
    avgBackColor[0] = 0;
    avgBackColor[1] = 0;
    avgBackColor[2] = 0;
}

float LasySnapping::colorDistance(unsigned char* color1, unsigned char* color2)
{
    return sqrt(((float)color1[0] - (float)color2[0]) * ((float)color1[0] - (float)color2[0]) +
                ((float)color1[1] - (float)color2[1]) * ((float)color1[1] - (float)color2[1]) +
                ((float)color1[2] - (float)color2[2]) * ((float)color1[2] - (float)color2[2]));
}

float LasySnapping::minDistance(unsigned char* color, vector<CvPoint> points)
{
    float distance = -1;
    for (int i = 0; i < points.size(); i++)
    {
        unsigned char* p = (unsigned char*)image->imageData + points[i].y * image->widthStep
                           + points[i].x * image->nChannels;
        float d = colorDistance(p, color);
        if (distance < 0)
        {
            distance = d;
        }
        else if (distance > d)
        {
            distance = d;
        }
    }
    return distance;
}

bool LasySnapping::isPtInVector(CvPoint pt, vector<CvPoint> points)
{
    for (int i = 0; i < points.size(); i++)
    {
        if (pt.x == points[i].x && pt.y == points[i].y)
        {
            return true;
        }
    }
    return false;
}

void LasySnapping::getE1(unsigned char* color, float* energy)
{
    // distance to the average foreground/background colors
    float df = colorDistance(color, avgForeColor);
    float db = colorDistance(color, avgBackColor);
    // alternative: min distance from the foreground and background points
    // float df = minDistance(color, forePts);
    // float db = minDistance(color, backPts);
    energy[0] = df / (db + df);
    energy[1] = db / (db + df);
}

float LasySnapping::getE2(unsigned char* color1, unsigned char* color2)
{
    const float EPSILON = 0.01;
    float lambda = 100;
    return lambda / (EPSILON +
                     (color1[0] - color2[0]) * (color1[0] - color2[0]) +
                     (color1[1] - color2[1]) * (color1[1] - color2[1]) +
                     (color1[2] - color2[2]) * (color1[2] - color2[2]));
}

int LasySnapping::runMaxflow()
{
    const float INFINNITE_MAX = 1e10;
    int indexPt = 0;
    for (int h = 0; h < image->height; h++)
    {
        unsigned char* p = (unsigned char*)image->imageData + h * image->widthStep;
        for (int w = 0; w < image->width; w++)
        {
            // calculate the data energy E1
            float e1[2] = {0};
            if (isPtInVector(cvPoint(w, h), forePts))
            {
                e1[0] = 0;
                e1[1] = INFINNITE_MAX;
            }
            else if (isPtInVector(cvPoint(w, h), backPts))
            {
                e1[0] = INFINNITE_MAX;
                e1[1] = 0;
            }
            else
            {
                getE1(p, e1);
            }
            // add the node
            graph->add_node();
            graph->add_tweights(indexPt, e1[0], e1[1]);
            // add edges, 4-connected
            if (h > 0 && w > 0)
            {
                float e2 = getE2(p, p - 3);
                graph->add_edge(indexPt, indexPt - 1, e2, e2);
                e2 = getE2(p, p - image->widthStep);
                graph->add_edge(indexPt, indexPt - image->width, e2, e2);
            }
            p += 3;
            indexPt++;
        }
    }
    return graph->maxflow();
}

IplImage* LasySnapping::getImageMask()
{
    IplImage* gray = cvCreateImage(cvGetSize(image), 8, 1);
    int indexPt = 0;
    for (int h = 0; h < image->height; h++)
    {
        unsigned char* p = (unsigned char*)gray->imageData + h * gray->widthStep;
        for (int w = 0; w < image->width; w++)
        {
            if (graph->what_segment(indexPt) == GraphType::SOURCE)
            {
                *p = 0;
            }
            else
            {
                *p = 255;
            }
            p++;
            indexPt++;
        }
    }
    return gray;
}

// globals
vector<CvPoint> forePts;
vector<CvPoint> backPts;
int currentMode = 0;    // 0: foreground (default), 1: background
CvScalar paintColor[2] = { CV_RGB(0, 0, 255), CV_RGB(255, 0, 0) };

IplImage* image = NULL;
const char* winName = "lazySnapping";
IplImage* imageDraw = NULL;
const int SCALE = 4;

void on_mouse(int event, int x, int y, int flags, void*)
{
    if (event == CV_EVENT_LBUTTONUP)
    {
        if (backPts.size() == 0 && forePts.size() == 0)
        {
            return;
        }
        LasySnapping ls;
        IplImage* imageLS = cvCreateImage(cvSize(image->width / SCALE, image->height / SCALE), 8, 3);
        cvResize(image, imageLS);
        ls.setImage(imageLS);
        ls.setBackgroundPoints(backPts);
        ls.setForegroundPoints(forePts);
        ls.runMaxflow();
        IplImage* mask = ls.getImageMask();
        IplImage* gray = cvCreateImage(cvGetSize(image), 8, 1);
        cvResize(mask, gray);
        // extract the edge of the mask
        cvCanny(gray, gray, 50, 150, 3);

        IplImage* showImg = cvCloneImage(imageDraw);
        for (int h = 0; h < image->height; h++)
        {
            unsigned char* pgray = (unsigned char*)gray->imageData + gray->widthStep * h;
            unsigned char* pimage = (unsigned char*)showImg->imageData + showImg->widthStep * h;
            for (int width = 0; width < image->width; width++)
            {
                if (*pgray++ != 0)
                {
                    // draw the segmentation boundary in green
                    pimage[0] = 0;
                    pimage[1] = 255;
                    pimage[2] = 0;
                }
                pimage += 3;
            }
        }
        cvSaveImage("t.bmp", showImg);
        cvShowImage(winName, showImg);
        cvReleaseImage(&imageLS);
        cvReleaseImage(&mask);
        cvReleaseImage(&showImg);
        cvReleaseImage(&gray);
    }
    else if (event == CV_EVENT_LBUTTONDOWN)
    {
    }
    else if (event == CV_EVENT_MOUSEMOVE && (flags & CV_EVENT_FLAG_LBUTTON))
    {
        CvPoint pt = cvPoint(x, y);
        if (currentMode == 0)
        {   // foreground
            forePts.push_back(cvPoint(x / SCALE, y / SCALE));
        }
        else
        {   // background
            backPts.push_back(cvPoint(x / SCALE, y / SCALE));
        }
        cvCircle(imageDraw, pt, 2, paintColor[currentMode]);
        cvShowImage(winName, imageDraw);
    }
}

int main(int argc, char** argv)
{
    //if (argc != 2)
    //{
    //    cout << "command : lazysnapping inputImage" << endl;
    //    return 0;
    //}
    string image_name;
    cout << "input image name: " << endl;
    cin >> image_name;

    cvNamedWindow(winName, 1);
    cvSetMouseCallback(winName, on_mouse, 0);

    image = cvLoadImage(image_name.c_str(), CV_LOAD_IMAGE_COLOR);
    imageDraw = cvCloneImage(image);
    cvShowImage(winName, image);
    for (;;)
    {
        int c = cvWaitKey(0);
        c = (char)c;
        if (c == 27)
        {   // exit
            break;
        }
        else if (c == 'r')
        {   // reset
            image = cvLoadImage(image_name.c_str(), CV_LOAD_IMAGE_COLOR);
            imageDraw = cvCloneImage(image);
            forePts.clear();
            backPts.clear();
            currentMode = 0;
            cvShowImage(winName, image);
        }
        else if (c == 'b')
        {   // switch to background selection
            currentMode = 1;
        }
        else if (c == 'f')
        {   // switch to foreground selection
            currentMode = 0;
        }
    }
    cvReleaseImage(&image);
    cvReleaseImage(&imageDraw);
    return 0;
}

Generating an image from Chinese characters

AddChinese.cpp

#include "stdafx.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "CvxText.h"

#pragma comment(lib, "freetype255d.lib")
#pragma comment(lib, "opencv_core2410d.lib")
#pragma comment(lib, "opencv_highgui2410d.lib")
#pragma comment(lib, "opencv_imgproc2410d.lib")

using namespace std;
using namespace cv;

#define ROW_BLOCK 2
#define COLUMN_Block 2

// save a Mat as a PNG file with custom compression parameters
int run_test_png(Mat &mat, string image_name)
{
    vector<int> compression_params;
    compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
    compression_params.push_back(9);    // PNG compression level (the default is 3)
    try
    {
        imwrite(image_name, mat, compression_params);
    }
    catch (runtime_error& ex)
    {
        fprintf(stderr, "Exception converting image to PNG format: %s\n", ex.what());
        return 1;
    }
    fprintf(stdout, "Saved PNG file with alpha data.\n");
    waitKey(0);
    return 0;
}

// color the character pixels and make everything else transparent
int coloured(Mat &template_src, Mat &mat_png, CvScalar color)
{
    for (int i = 0; i < template_src.rows; ++i)
    {
        for (int j = 0; j < template_src.cols; ++j)
        {
            Vec4b& bgra = mat_png.at<Vec4b>(i, j);
            if (template_src.at<uchar>(i, j) == 0)
            {
                bgra[0] = color.val[0];     // B channel
                bgra[1] = color.val[1];     // G channel
                bgra[2] = color.val[2];     // R channel
                bgra[3] = 255;              // alpha: 0 is fully transparent, 255 is opaque
            }
            else
            {
                bgra[3] = 0;                // transparent background
            }
        }
    }
    return 0;
}

// binarize a grayscale image with an adaptive threshold (weighted maximum variance)
void ImageBinarization(IplImage *src)
{
    int i, j, width, height, step, chanel, threshold;
    // size is the image size, avg the histogram mean, va the variance
    float size, avg, va, maxVa, p, a, s;
    unsigned char *dataSrc;
    float histogram[256];

    width = src->width;
    height = src->height;
    dataSrc = (unsigned char *)src->imageData;
    step = src->widthStep / sizeof(char);
    chanel = src->nChannels;

    // compute and normalize the histogram
    for (i = 0; i < 256; i++)
        histogram[i] = 0;
    for (i = 0; i < height; i++)
        for (j = 0; j < width * chanel; j++)
        {
            histogram[dataSrc[i * step + j]]++;
        }
    size = width * height;
    for (i = 0; i < 256; i++)
        histogram[i] /= size;

    // mean and variance of the histogram
    avg = 0;
    for (i = 0; i < 256; i++)
        avg += i * histogram[i];
    va = 0;
    for (i = 0; i < 256; i++)
        va += fabs(i * i * histogram[i] - avg * avg);

    // find the threshold by weighted maximum variance
    threshold = 20;
    maxVa = 0;
    p = a = s = 0;
    for (i = 0; i < 256; i++)
    {
        p += histogram[i];
        a += i * histogram[i];
        s = (avg * p - a) * (avg * p - a) / p / (1 - p);
        if (s > maxVa)
        {
            threshold = i;
            maxVa = s;
        }
    }

    // binarize
    for (i = 0; i < height; i++)
        for (j = 0; j < width * chanel; j++)
        {
            if (dataSrc[i * step + j] > threshold)
                dataSrc[i * step + j] = 255;
            else
                dataSrc[i * step + j] = 0;
        }
}

Mat binaryzation(Mat &src)
{
    Mat des_gray(src.size(), CV_8UC1);
    cvtColor(src, des_gray, CV_BGR2GRAY);
    IplImage temp(des_gray);
    ImageBinarization(&temp);
    //threshold(des_gray, des_gray, 150, 255, THRESH_BINARY);
    imshow("binary image", des_gray);
    return des_gray;
}

int generate_chinese(const int size_zi, const char *msg, int number, CvScalar color)
{
    CvSize czSize;      // size of the target image
    float p = 0.5;
    CvScalar fsize;

    // load a TTF font file
    CvxText text("simhei.ttf");
    // font size / blank ratio / spacing ratio / rotation angle
    fsize = cvScalar(size_zi, 1, 0.1, 0);
    text.setFont(NULL, &fsize, NULL, &p);

    czSize.width = size_zi * number;
    czSize.height = size_zi;

    // create the source image and draw the text onto it
    IplImage* ImageSrc = cvCreateImage(czSize, IPL_DEPTH_8U, 3);
    text.putText(ImageSrc, msg, cvPoint(1, size_zi), color);
    // show the source image
    cvShowImage("source", ImageSrc);

    string hanzi = msg;
    hanzi = hanzi + ".png";
    Mat chinese(ImageSrc, true);
    Mat gray = binaryzation(chinese);
    imwrite("chinese_gray.jpg", gray);

    Mat mat_png(chinese.size(), CV_8UC4);
    coloured(gray, mat_png, color);
    run_test_png(mat_png, hanzi);

    // wait for a key press
    cvWaitKey();
    return 0;
}

int main()
{
    CvScalar color = CV_RGB(0, 0, 0);
    int size = 200;
    const char* msg = "你好a";  // for now keep the line of text reasonably short
    int number = 3;             // number of characters
    generate_chinese(size, msg, number, color);
    return 0;
}
