Sunday, 29 November 2015

Digital Negative of an Image in OpenCV

A digital negative, as the name suggests, inverts the pixel values of an image so that bright pixels appear dark and dark pixels appear bright.

Thus the darkest pixel in the original image becomes the brightest pixel in its negative.
A familiar example is an X-ray image.

Now, consider an 8-bit image.
Its pixel values can range from 0 to 255.
Thus, to obtain the negative, we need to subtract each pixel value from 255.

Hence, for a k-bit image,
the pixel values range from 0 to (2^k)-1.
Thus we would have to subtract each pixel value of the image from (2^k)-1.

The code below computes the digital negative of an 8-bit grayscale image in OpenCV:
// OpenCV Digital Negative Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\image_opencv.jpg",CV_LOAD_IMAGE_GRAYSCALE);
 src2 = Mat::zeros(src1.rows,src1.cols, CV_8UC1);

 if( !src1.data ) { printf("Error loadind src1 \n"); return -1;}

 
for (int i=0; i<src1.cols ; i++)
{
for (int j=0 ; j<src1.rows ; j++)
 { 
 Scalar color1 = src1.at<uchar>(Point(i, j));
 Scalar color2 = src1.at<uchar>(Point(i, j));
 color2.val[0] = 255-color1.val[0];
   
 src2.at<uchar>(Point(i,j)) = color2.val[0]; 
 }
 }
namedWindow("Digital Negative Image",CV_WINDOW_AUTOSIZE); 
imshow("Digital Negative Image", src2); 
//imwrite("C:\\Users\\arjun\\Desktop\\digitalnegative.jpg",src1);

namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src1);

 waitKey(0);
 return 0;
}
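For comparison, the same negative can be obtained without an explicit pixel loop by using OpenCV's bitwise_not function or a matrix expression. Below is only a minimal sketch (the file path is a placeholder, not the path used elsewhere in this post):

// Digital negative without an explicit loop (sketch)
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 

using namespace cv;

int main()
{
 Mat src = imread("image_opencv.jpg", CV_LOAD_IMAGE_GRAYSCALE); // placeholder path
 if( !src.data ) return -1;

 Mat negative;
 bitwise_not(src, negative);                 // inverts every pixel (255 - value for 8-bit images)
 // Mat negative = Scalar::all(255) - src;   // equivalent matrix-expression form

 imshow("Original Image", src);
 imshow("Digital Negative Image", negative);
 waitKey(0);
 return 0;
}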

INPUT IMAGE:
OPENCV TEST IMAGE

OUTPUT IMAGE:
opencv digital negative image


Applications:
Digital negatives are widely used in medical imaging, where they help reveal minute details of tissue.
They are also used in astronomy for observing faint, distant stars.

Input Image:
opencv test input
Output:
opencv digital negative

Image Scaling (Image Shrinking) Tutorial

Refer to this code for shrinking the image by a factor of two:
// OpenCV Image Shrinking Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);//Linux.jpg");//atom.jpg");//"Linux.jpg"); //f1.jpg
 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}

 src2 = Mat::zeros((src1.rows/2)+1,(src1.cols/2)+1, CV_8UC3);
 cout<<"Shrinking factor is: 2"<<endl
 
for (int i=0; i<src1.cols ; i++)
{
 for (int j=0 ; j<src1.rows ; j++)
 {
  // Copy each source pixel to (i/2, j/2) of the destination;
  // integer division keeps only one pixel out of every 2x2 block.
  Vec3b color = src1.at<Vec3b>(Point(i,j));
  src2.at<Vec3b>(Point(i/2,j/2)) = color;
 }
 }

namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src1);
//imwrite("C:\\Users\\arjun\\Desktop\\shrinked.jpg",src2);

namedWindow("Scaled Image",CV_WINDOW_AUTOSIZE); 
imshow("Scaled Image", src2); 

 waitKey(0);
 return 0;
}
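As a cross-check, OpenCV's built-in resize function can shrink the image in one call. This is only a sketch (the path is a placeholder); INTER_NEAREST roughly matches the pixel-dropping loop above, while INTER_AREA averages pixels and usually gives a smoother result when shrinking:

// Shrinking with cv::resize (sketch)
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;

int main()
{
 Mat src = imread("input.jpg", CV_LOAD_IMAGE_COLOR);   // placeholder path
 if( !src.data ) return -1;

 Mat dst;
 resize(src, dst, Size(), 0.5, 0.5, INTER_NEAREST);    // fx = fy = 0.5 halves both dimensions

 imshow("Original Image", src);
 imshow("Scaled Image", dst);
 waitKey(0);
 return 0;
}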

Next, we have made slight modifications to the code so that the shrinking factor is taken as input from the user:

// OpenCV Image Shrinking Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);
    if( !src1.data ) { printf("Error loading src1 \n"); return -1;}
 
 int a=-1;
     while(a<=0){
 cout<<"Enter the Shrinking factor greater than 0 "<<endl;
 cin>>a;

  if( a<=0 )
 {
  cout<<"Invalid values... Please re-enter the correct values \n \n";
 }

      }

 src2 = Mat::zeros(src1.rows/a+1,src1.cols/a+1, CV_8UC3);
 cout<<"Shrinking factor is: "<<a<<endl;
for (int i=0; i<src1.cols-1 ; i++)
{
 for (int j=0 ; j<src1.rows-1; j++)
 {
  
     Vec3b color1 = src1.at<Vec3b>(Point(i,j));
   {
  {
  Vec3b color2 = src2.at<Vec3b>(Point(i/a,j/a));
  color2.val[0] = color1.val[0];
  color2.val[1] = color1.val[1];
  color2.val[2] = color1.val[2];
  src2.at<Vec3b>(Point(i/a,j/a)) = color2;
  
  }
   }
 }
 
 }

// imwrite("C:\\Users\\arjun\\Desktop\\shrinked.jpg",src2);
 namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
 imshow("Original Image", src1);

 namedWindow("Scaled Image",CV_WINDOW_AUTOSIZE); 
imshow("Scaled Image", src2); 

 waitKey(0);
 return 0;
}

OpenCV image shrinking tutorial with the help of a trackbar:
// OpenCV Image Shrinking Tutorial with trackbar
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

Mat src1,src2;
int a=1;
const int shrink_slider_max=50;

void shrink_trackbar( int , void* )
{
 if (a < 1) a = 1;   // the trackbar position can be 0; avoid division by zero
 src2 = Mat::zeros(src1.rows/a+1,src1.cols/a+1, CV_8UC3);

 for (int i=0; i<src1.cols-1 ; i++)
 {
  for (int j=0 ; j<src1.rows-1; j++)
  {
   // Copy each source pixel (i, j) to (i/a, j/a) of the destination
   Vec3b color = src1.at<Vec3b>(Point(i,j));
   src2.at<Vec3b>(Point(i/a,j/a)) = color;
  }
 }
imshow("Scaled Image", src2); 
cout<<"Image Got Processed for Shrink factor="<<a<<endl;

}

int main()
{
 
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);
 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}
 
namedWindow("Scaled Image",CV_WINDOW_AUTOSIZE); 
createTrackbar( "Shrinking", "Scaled Image", &a,shrink_slider_max, shrink_trackbar );
shrink_trackbar( a, 0 );
// imwrite("C:\\Users\\arjun\\Desktop\\shrinked.jpg",src2);

namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src1);

 waitKey(0);
 return 0;
}

Thursday, 26 November 2015

Image Blender using addWeighted function

Blending, or mixing two images linearly, can be achieved with the addWeighted function provided by OpenCV.
The syntax of OpenCV addWeighted function goes as:

C++: void addWeighted(InputArray src1, double alpha, InputArray src2, double beta, double gamma, OutputArray dst, int dtype=-1)

Parameters:
src1 – first input array.
alpha – weight of the first array elements.
src2 – second input array of the same size and channel number as src1.
beta – weight of the second array elements.
gamma – scalar added to each sum.
dst – output array that has the same size and number of channels as the input arrays.
dtype – optional depth of the output array; when both input arrays have the same depth, dtype can be set to -1, which will be equivalent to src1.depth().

Linear blending means combining two images pixel by pixel as a weighted sum.
Thus we can use the function
c(x) = (1-α)*a(x) + α*b(x)
where a(x) and b(x) are the two source images and
c(x) is the resultant blended image.

addWeighted( src1, alpha, src2, beta, 0.0, dst);
Thus the addWeighted function computes
dst = α*src1 + β*src2 + γ
Here γ=0 and β=1-α.

Why do we choose β=1-α?
Since we are working with 8-bit images, the pixel values range from 0 to 255.
Thus while adding two images, the pixel values of the resultant image should also lie between 0 and 255.
Hence if we multiply a pixel of one image by α, the corresponding pixel of the other image should be weighted by 1-α,
so that the weights sum to α + (1-α) = 1 and the result stays in the 0-255 range.
E.g.
Consider that the pixel value of src1 at a particular co-ordinate is 230 and that of src2 is 215.
Now we need to blend these two images linearly, i.e. blend the pixel values of the two images.
If we choose α=0.5 and β=0.7, the pixel value at that particular co-ordinate would be
c(x)=α*a(x)+β*b(x)
      =0.5*230+0.7*215
      =265.5
which exceeds 255 and would be clipped.
Thus the β value needs to be less than or equal to 1-α.
Here we have chosen it equal to 1-α to be on the safer side, but it can be less than 1-α too.
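OpenCV stores 8-bit pixels through saturate_cast, so an out-of-range sum like the 265.5 above is simply clipped to 255. A small self-contained sketch to verify this:

// Saturation of out-of-range values (sketch)
#include <opencv2/core/core.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 double blended = 0.5*230 + 0.7*215;                           // 265.5, does not fit in 8 bits
 cout << "Raw weighted sum  : " << blended << endl;
 cout << "Stored pixel value: " << (int)saturate_cast<uchar>(blended) << endl;  // 255
 return 0;
}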

The code for it goes as below:

// OpenCV Image Blending Tutorial using addWeighted function
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 double alpha = 0.5; 
 double beta; 
 double input;

 Mat src1, src2, dst;
 /// Read image ( same size, same type )
 src1 = imread("C:\\Users\\arjun\\Desktop\\green.jpg");
 src2 = imread("C:\\Users\\arjun\\Desktop\\blue.jpg");

 if( !src1.data ) { printf("Error loading src1 \n"); return -1; }
 if( !src2.data ) { printf("Error loading src2 \n"); return -1; }
 
 ///Comparing whether the two images are of same size or not
 int width1 , width2 , height1 , height2;
 width1 =src1.cols; 
 height1=src1.rows; 
 width2 =src2.cols; 
 height2=src2.rows; 
 
 if (width1!=width2 && height1!=height2)
 {
  printf("Error:Images must be of the same size \n");
  return -1;
 }
 /// Ask the user enter alpha
 std::cout<<" Simple Linear Blender "<<std::endl;
 std::cout<<"-----------------------"<<std::endl;
 std::cout<<"* Enter alpha [0-1]: ";
 std::cin>>input;

 /// We use the alpha provided by the user if it is between 0 and 1
 if( input >= 0.0 && input <= 1.0 )
   { 
    alpha = input;
   }

 beta = ( 1.0 - alpha );
 addWeighted( src1, alpha, src2, beta, 0.0, dst);

 /// Create Windows
 namedWindow("Linear Blend", 1);
 imshow( "Linear Blend", dst );

 namedWindow("Original Image1", 1);
 imshow( "Original Image1", src1 );

 namedWindow("Original Image2", 1);
 imshow( "Original Image2", src2 );
 waitKey(0);
 return 0;
}

The same linear blending, this time controlled with a trackbar, is shown below:
// OpenCV Image Blending Tutorial using addWeighted function and trackbar
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

double alpha; 
double beta;
const int blend_slider_max = 100;
int alpha_slider;
Mat src1, src2, dst;

void blend_trackbar( int , void* )
{
 alpha = (double) alpha_slider/blend_slider_max;
 beta = (double)( 1.0 - alpha );
    addWeighted( src1, alpha, src2, beta, 0.0, dst);
    imshow( "Linear Blend", dst );
}

int main()
{
 // Read image ( same size, same type )
 src1 = imread("C:\\Users\\arjun\\Desktop\\opencv_image1.jpg");
 src2 = imread("C:\\Users\\arjun\\Desktop\\opencv_image2.jpg");

 if( !src1.data ) { printf("Error loading src1 \n"); return -1; }
 if( !src2.data ) { printf("Error loading src2 \n"); return -1; }
 
 ///Comparing whether the two images are of same size or not
 int width1 , width2 , height1 , height2;
 width1 =src1.cols; 
 height1=src1.rows; 
 width2 =src2.cols; 
 height2=src2.rows; 
 
 if (width1!=width2 || height1!=height2)
 {
  printf("Error:Images must be of the same size \n");
  return -1;
 }

 // Create Windows 
 namedWindow("Linear Blend",CV_WINDOW_AUTOSIZE); 
 createTrackbar( "Blending", "Linear Blend", &alpha_slider, blend_slider_max, blend_trackbar );
 blend_trackbar( alpha_slider, 0 );

 namedWindow("Original Image1", 1);
 imshow( "Original Image1", src1 );

 namedWindow("Original Image2", 1);
 imshow( "Original Image2", src2 );

 waitKey(0);
 return 0;
}


Input Image1:
opencv image1
Input Image2:
opencv image2

Video of the blended image as the trackbar position is changed:



Wednesday, 25 November 2015

Image Scaling (Image Zooming) Tutorial

There are many times when we need to zoom in on an image or zoom out of it.
This can be done very easily in OpenCV.
The code below zooms the image by a factor of two:

// OpenCV Image Zooming Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);
    if( !src1.data ) { printf("Error loading src1 \n"); return -1;}
 
 cout<<"src1.rows="<<src1.rows<<endl;
 cout<<"src1.cols="<<src1.cols<<endl;

  
 src2 = Mat::zeros(src1.rows*2,src1.cols*2, CV_8UC3);
 cout<<"Magnification factor is 2"<<endl;
for (int i=0; i<src1.cols ; i++)
{
 for (int j=0 ; j<src1.rows ; j++)
 {
  
     Vec3b color1 = src1.at<Vec3b>(Point(i,j));
     for(int p=0;p<2;p++)
   {
  for(int q=0;q<2;q++)
  {
  Vec3b color2 = src2.at<Vec3b>(Point(i,j));
  color2.val[0] = color1.val[0];
  color2.val[1] = color1.val[1];
  color2.val[2] = color1.val[2];

   
  src2.at<Vec3b>(Point(i*2+p,j*2+q)) = color2;
  }
   }
 }
 }

 //imwrite("C:\\Users\\arjun\\Desktop\\zoomed.jpg",src2);
 namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
 imshow("Original Image", src1);

 namedWindow("Scaled Image",CV_WINDOW_AUTOSIZE); 
 imshow("Scaled Image", src2); 

 waitKey(0);
 return 0;
}
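For reference, the built-in resize function can also enlarge an image in one call; a minimal sketch (the path is a placeholder), where INTER_NEAREST reproduces the blocky pixel-replication look of the loop above and INTER_LINEAR or INTER_CUBIC gives a smoother zoom:

// Zooming with cv::resize (sketch)
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;

int main()
{
 Mat src = imread("input.jpg", CV_LOAD_IMAGE_COLOR);   // placeholder path
 if( !src.data ) return -1;

 Mat dst;
 resize(src, dst, Size(), 2, 2, INTER_NEAREST);        // fx = fy = 2 doubles both dimensions

 imshow("Original Image", src);
 imshow("Scaled Image", dst);
 waitKey(0);
 return 0;
}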


Many times we need to zoom the image not just by two, but by some other factor.
The code given below takes the zoom factor as input from the user:
// OpenCV Image Zooming Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);
    if( !src1.data ) { printf("Error loading src1 \n"); return -1;}
 
  int a;
  do
   {
    cout<<"Enter the Zooming factor greater than 0 "<<endl;
    cin>>a;
    if( a<=0 )
    {
     cout<<"Invalid values... Please re-enter the correct values \n";
    }
   } while(a<=0);
  cout<<"Magnification factor is: "<<a<<endl;
  
 src2 = Mat::zeros(src1.rows*a,src1.cols*a, CV_8UC3);

for (int i=0; i<src1.cols ; i++)
{
 for (int j=0 ; j<src1.rows ; j++)
 {
  
     Vec3b color1 = src1.at<Vec3b>(Point(i,j));
     for(int p=0;p<a;p++)
   {
  for(int q=0;q<a;q++)
  {
  Vec3b color2 = src2.at<Vec3b>(Point(i,j));
  color2.val[0] = color1.val[0];
  color2.val[1] = color1.val[1];
  color2.val[2] = color1.val[2];

   
  src2.at<Vec3b>(Point(i*a+p,j*a+q)) = color2;
  }
   }
 }
 }

 //imwrite("C:\\Users\\arjun\\Desktop\\zoomed.jpg",src2);
 namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
 imshow("Original Image", src1);

 namedWindow("Scaled Image",CV_WINDOW_AUTOSIZE); 
 imshow("Scaled Image", src2); 

 waitKey(0);
 return 0;
}

The same zooming can be done by adding a trackbar, so that dragging the slider dynamically changes the zoom factor of the image.
Refer to the code below:
// OpenCV Image Zooming Tutorial with Trackbar
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

Mat src2,src1;
int a=1;
const int zoom_slider_max = 50;

void zoom_trackbar( int , void* )
{
 if (a < 1) a = 1;   // the trackbar position can be 0; avoid an empty output image
 src2 = Mat::zeros(src1.rows*a,src1.cols*a, CV_8UC3); 
 for (int i=0; i<src1.cols ; i++)
 {
  for (int j=0 ; j<src1.rows ; j++)
  {
   // Replicate each source pixel into an a x a block of the destination
   Vec3b color = src1.at<Vec3b>(Point(i,j));
   for(int p=0;p<a;p++)
   {
    for(int q=0;q<a;q++)
    {
     src2.at<Vec3b>(Point(i*a+p,j*a+q)) = color;
    }
   }
  }
 }
 imshow("Scaled Image", src2); 
 cout<<"Image Got Processed for Zoom factor="<<a<<endl;
}
int main()
{
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);
    if( !src1.data ) { printf("Error loading src1 \n"); }

 namedWindow("Scaled Image",CV_WINDOW_AUTOSIZE); 
 createTrackbar( "Magnify", "Scaled Image", &a, zoom_slider_max, zoom_trackbar );
 zoom_trackbar( a, 0 );

 //imwrite("C:\\Users\\arjun\\Desktop\\zoomed.jpg",src2);

 namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
 imshow("Original Image", src1);

 waitKey(0);
 return 0;
}

Output:


Thursday, 19 November 2015

Flip an image in OpenCV by using the flip function


Syntax:
C++: void flip(InputArray src, OutputArray dst, int flipCode)

Flips a 2D array around vertical, horizontal, or both axes.

Parameters:
src – input array.
dst – output array of the same size and type as src.
flipCode – a flag to specify how to flip the array.

 0 means flipping around the x-axis
 A positive value (for example, 1) means flipping around the y-axis
 A negative value (for example, -1) means flipping around both axes

Here is the code for flipping the image:

// OpenCV Image Flipping Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{

 Mat src1,src2;
 // Read image (same size, same type )
 src1 = imread("C:\Users\arjun\Desktop\flipsrc.jpg");//Linux.jpg");//atom.jpg");//"Linux.jpg"); //f1.jpg
 
 if( !src1.data ) { printf("Error loadind src1 n"); return -1;}

 //Create Window
 namedWindow("Original Image", CV_WINDOW_AUTOSIZE );
 imshow( "Original Image", src1 );
 
 flip(src1,src2,0);
 namedWindow("Flip-x-axis", CV_WINDOW_AUTOSIZE );
 imshow( "Flip-x-axis", src2 );
 imwrite( "C:\Users\arjun\Desktop\flipXaxis.jpg",src2);


 flip(src1,src2,1);
 namedWindow("Flip-y-axis", CV_WINDOW_AUTOSIZE );
 imshow( "Flip-y-axis", src2 );
 imwrite( "C:\Users\arjun\Desktop\flipYaxis.jpg",src2);

 flip(src1,src2,-1);
 namedWindow("Flip-both-axes", CV_WINDOW_AUTOSIZE );
 imshow( "Flip-both-axes", src2 );
 imwrite( "C:\\Users\\arjun\\Desktop\\flipBothAxes.jpg",src2);

 waitKey(0);
 return 0;
 
}

Input Image:
opencv image  without flip

Output:
Flipping around the X axis
opencv Flipping around the X axis

Flipping around the Y axis
opencv Flipping around the Y axis

Flipping around both the axis
opencv Flipping around both the axis
We can also put the flip function inside a for loop and obtain all three flips of the image, as shown below:
// OpenCV Image Flipping Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{

 Mat src1,src2;
 // Read image (same size, same type )
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg");
 
 if( !src1.data ) { printf("Error loading src1 n"); return -1;}
 
 //Create Window
 namedWindow("Original Image", CV_WINDOW_AUTOSIZE );
 imshow( "Original Image", src1 );
 for(int i=-1;i<2;i++)
 {
 flip(src1,src2,i);
 namedWindow("Flip", CV_WINDOW_AUTOSIZE );
 imshow( "Flip", src2 );
 waitKey(5000);
  }
 waitKey(0);
 return 0;
 }

Output:

Flip an Image along the X axis, the Y axis and both axes without using the OpenCV flip function

Code to flip an Image along X axis:
// OpenCV Image Flipping along X axis Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);
 src2=src1.clone();

 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}
 if( !src2.data ) { printf("Error loading src2 \n"); return -1;}

 cout<<"src1.rows="<<src1.rows<<endl;
 cout<<"src1.cols="<<src1.cols<<endl;

for (int i=0 ; i<src1.cols ; i++){
for (int j=0 ; j<src1.rows ; j++)
 {
Vec3b color2 = src1.at<Vec3b>(Point(i,j));
Vec3b color1 = src2.at<Vec3b>(Point((src1.cols-1)-i,j));
      color2.val[0] = color1.val[0];
      color2.val[1] = color1.val[1];
      color2.val[2] = color1.val[2];
   
      src1.at<Vec3b>(Point(i,j)) = color1;
 }
 }
 
namedWindow("Display Flipped Image",CV_WINDOW_AUTOSIZE); 
imshow("Display Flipped Image", src1); 
//imwrite( "C:\\Users\\arjun\\Desktop\\X-axis_flip.jpg",src1);

namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src2);

 waitKey(0);
 return 0;
}
Input:
opencv input image

Output:
opencv Flipping around X axis

Code to flip an Image along Y axis:
// OpenCV Image Flipping along Y axis Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);
 src2=src1.clone();

 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}
 if( !src2.data ) { printf("Error loading src2 \n"); return -1;}

 cout<<"src1.rows="<<src1.rows<<endl;
 cout<<"src1.cols="<<src1.cols<<endl;

for (int i=0 ; i<src1.cols ; i++){
for (int j=0 ; j<src1.rows ; j++)
 {
Vec3b color2 = src1.at<Vec3b>(Point(i,j));
Vec3b color1 = src2.at<Vec3b>(Point(i,(src1.rows-1)-j));
      color2.val[0] = color1.val[0];
      color2.val[1] = color1.val[1];
      color2.val[2] = color1.val[2];
   
      src1.at<Vec3b>(Point(i,j)) = color1;
 }
 }
 
namedWindow("Display Flipped Image",CV_WINDOW_AUTOSIZE); 
imshow("Display Flipped Image", src1); 
imwrite( "C:\\Users\\arjun\\Desktop\\Y-axis_flip.jpg",src1);

namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src2);

 waitKey(0);
 return 0;
}

Input:
opencv flip

Output:
opencv Flipping around Y axis

Code to flip an Image along both the  axis:
// OpenCV Image Flipping along both the axis Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);
 src2=src1.clone();

 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}
 if( !src2.data ) { printf("Error loading src2 \n"); return -1;}

 cout<<"src1.rows="<<src1.rows<<endl;
 cout<<"src1.cols="<<src1.cols<<endl;

for (int i=0 ; i<src1.cols ; i++){
for (int j=0 ; j<src1.rows ; j++)
 {
Vec3b color2 = src1.at<Vec3b>(Point(i,j));
Vec3b color1 = src2.at<Vec3b>(Point((src1.cols-1)-i,(src1.rows-1)-j));
      color2.val[0] = color1.val[0];
      color2.val[1] = color1.val[1];
      color2.val[2] = color1.val[2];
   
      src1.at<Vec3b>(Point(i,j)) = color1;
 }
 }
 
namedWindow("Display Flipped Image",CV_WINDOW_AUTOSIZE); 
imshow("Display Flipped Image", src1); 
imwrite( "C:\\Users\\arjun\\Desktop\\Both-axis_flip.jpg",src1);

namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src2);

 waitKey(0);
 return 0;
}

Input:
opencv image flipping

Output:
opencv Flipping around both axis

Wednesday, 18 November 2015

Matlab style Initializers

Mat::zeros
Returns a matrix of the specified size with every element set to zero.
Mat A;
A = Mat::zeros(3, 3, CV_32F);

Mat::ones
Returns a matrix of the specified size with every element set to one.
Mat A;
A = Mat::ones(3, 3, CV_32F);

Mat::eye
Returns an identity matrix of the specified size.
Mat A;
A = Mat::eye(3, 3, CV_32F);

Note:
We can also apply a scale factor to the matrix, e.g.:
A = Mat::ones(3, 3, CV_32F) * 5;
Here each element of the matrix is 5, because each element of the ones matrix is multiplied by 5.
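A quick, self-contained check of that scaled initializer:

// Scaled Mat::ones initializer (sketch)
#include <opencv2/core/core.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat A = Mat::ones(3, 3, CV_32F) * 5;
 cout << "A = \n" << A << endl;    // prints a 3x3 matrix filled with 5
 return 0;
}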

Here is the code below:

#include <opencv2/core/core.hpp>
#include <iostream>
#include <opencv2/highgui/highgui.hpp> 

using namespace cv;
using namespace std;

int main()
{
    Mat imgA = Mat::eye(5, 5, CV_8UC1);
cout << "imgA = \n " << imgA << "\n\n";

Mat imgB = Mat::ones(4, 4, CV_8UC1);
cout << "imgB = \n " << imgB << "\n\n";

Mat imgC = Mat::zeros(3,3, CV_8UC1);
cout << "imgC = \n " << imgC << "\n\n";

return 0;
}

Output:
opencv zeroes ones eyes matrix
Note: Here we have selected a single-channel matrix (CV_8UC1).
For 3-channel matrices:
Code:

#include <opencv2/core/core.hpp>
#include <iostream>
#include <opencv2/highgui/highgui.hpp> 

using namespace cv;
using namespace std;

int main()
{
    Mat imgA = Mat::eye(5, 5, CV_8UC3);
cout << "imgA = \n " << imgA << "\n\n";

Mat imgB = Mat::ones(4, 4, CV_8UC3);
cout << "imgB = \n " << imgB << "\n\n";

Mat imgC = Mat::zeros(3,3, CV_8UC3);
cout << "imgC = \n " << imgC << "\n\n";

return 0;
}

Output:
opencv zeroes ones eyes matrix 3 channel
See the difference in the output. Here the zeros, ones and eye initializers place their value only in the first channel of each element; the remaining channels are taken as 0, which is why two columns of 0 can be seen in between.

Also, if we don't mention the number of channels, it defaults to 1,
i.e. CV_8U is equivalent to CV_8UC1.
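If every channel should carry the value, the Mat constructor with a Scalar can be used instead of Mat::ones; a small sketch:

// Ones in every channel versus Mat::ones (sketch)
#include <opencv2/core/core.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat onesFirstChannel = Mat::ones(3, 3, CV_8UC3);       // 1 only in the first channel
 Mat onesAllChannels(3, 3, CV_8UC3, Scalar::all(1));    // 1 in all three channels

 cout << "Mat::ones(3,3,CV_8UC3) = \n" << onesFirstChannel << "\n\n";
 cout << "Mat(3,3,CV_8UC3,Scalar::all(1)) = \n" << onesAllChannels << "\n";
 return 0;
}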

Sunday, 15 November 2015

Creating Image from a Matrix

Ever wondered how to create an image from a specified matrix?
We know that we can get the matrix of an image file, since an image is just an array of pixels.
Similarly, we can create an image by specifying a matrix and the corresponding values for it.
Thus, depending upon the values inserted in the matrix, the image changes.

Here is the code below:

#include <opencv2/core/core.hpp>
#include <iostream>
#include <opencv2/highgui/highgui.hpp> 

using namespace cv;
using namespace std;

int main( int argc, char** argv )
{
    Mat img(4,5, CV_8UC3,Scalar(125,0,255));
    cout << "img = \n " << img << "\n\n";
 imwrite( "C:\\Users\\arjun\\Desktop\\newpic.jpg",img);
    img =imread("C:\\Users\\arjun\\Desktop\\newpic.jpg",CV_LOAD_IMAGE_COLOR);  
 waitKey(-1);
    return 0;
}

Output Matrix:
opencv zeroes matrix

CV_8UC3:
8U denotes 8-bit unsigned values
C3 denotes that the image has 3 channels

Note:
Any primitive type from the list can be defined by an identifier in the form CV_<bit-depth>{U|S|F}C(<number_of_channels>)
where U is unsigned integer type, S is signed integer type, and F is float type.
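A small sketch showing how such an identifier maps to a depth and a channel count (Mat::depth and Mat::channels are the standard accessors):

// Inspecting depth and channels of a type identifier (sketch)
#include <opencv2/core/core.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat a(2, 2, CV_8UC3);    // 8-bit unsigned, 3 channels
 Mat b(2, 2, CV_32FC1);   // 32-bit float, 1 channel

 cout << "a: depth=" << a.depth() << " (CV_8U=" << CV_8U << "), channels=" << a.channels() << endl;
 cout << "b: depth=" << b.depth() << " (CV_32F=" << CV_32F << "), channels=" << b.channels() << endl;
 return 0;
}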

We can even modify the number of rows from 4 to 6. The syntax goes as shown below:
Mat img(6,5, CV_8UC3, Scalar(125,0,255));

To see how the image created from the matrix would look, we increase the size of the matrix,
making the number of rows 600 and the number of columns 500.
Here is the code below:
#include <opencv2/core/core.hpp>
#include <iostream>
#include <opencv2/highgui/highgui.hpp> 

using namespace cv;
using namespace std;

int main( int argc, char** argv )
{
    Mat img(600,500, CV_8UC3,Scalar(125,0,255));
    cout << "img = \n " << img << "\n\n";
 imwrite( "C:\\Users\\arjun\\Desktop\\newpic.jpg",img);
    img = imread("C:\\Users\\arjun\\Desktop\\newpic.jpg",CV_LOAD_IMAGE_COLOR);
    imshow("Image from Matrix", img);   // display the image read back from disk
 waitKey(-1);
    return 0;
}

The output image would look like:
opencv creating image from matrix

Similarly, we can make the smallest 3-channel image of 1*1 pixels by specifying the number of rows as 1, the number of columns as 1 and Scalar(0,0,0):
Mat img(1,1,CV_8UC3,Scalar(0,0,0));
Note: The size of the resulting JPEG image is just 631 bytes, which is the smallest size for a 3-channel (RGB) JPEG image.
Here is the output image link:

Similarly, we can create the smallest single-channel JPEG image of 1*1 pixel by changing 8UC3 to 8UC1. Thus,
Mat img(1,1,CV_8UC1,Scalar(0,0,0));
Note: The size of the resulting single-channel JPEG image is just 333 bytes.
Here is the output image link:
https://drive.google.com/file/d/0B9Mnn6QWcwVnX3E4Qmo0d2k1TkU/view?usp=sharing
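A small sketch that writes both 1*1 images and reports the size of the files produced (the exact byte counts depend on the JPEG encoder, so treat the numbers above as what this author measured; the file names below are placeholders):

// Writing 1x1 JPEGs and checking their file sizes (sketch)
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <fstream>
#include <iostream>

using namespace cv;
using namespace std;

static long fileSize(const char* path)
{
 ifstream f(path, ios::binary | ios::ate);   // open at the end to read the size
 return (long)f.tellg();
}

int main()
{
 Mat color(1, 1, CV_8UC3, Scalar(0, 0, 0));
 Mat gray (1, 1, CV_8UC1, Scalar(0));

 imwrite("smallest_color.jpg", color);       // placeholder file names
 imwrite("smallest_gray.jpg",  gray);

 cout << "3-channel 1x1 jpeg: " << fileSize("smallest_color.jpg") << " bytes" << endl;
 cout << "1-channel 1x1 jpeg: " << fileSize("smallest_gray.jpg")  << " bytes" << endl;
 return 0;
}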

If we don't specify the scalar value, the matrix is left uninitialized, so it contains whatever values happen to be in memory (205 in the run shown here),
i.e. Mat img(1,1,CV_8UC3);
Code:
#include <opencv2/core/core.hpp>
#include <iostream>
#include <opencv2/highgui/highgui.hpp> 

using namespace cv;
using namespace std;

int main( int argc, char** argv )
{
    Mat img(4,5, CV_8UC3);
    cout << "img = \n " << img << "\n\n";
 waitKey(0);
    return 0;
}
Here is the pic of the output matrix:
opencv matrix default values

This technique is widely used in image steganography, where a message is hidden in matrix form and an image is made out of it.

Difference between copyTo,clone and assignment operator in OpenCV

Since we have to deal with images of large sizes, copying the image data over and over again would decrease the computational efficiency of the program.
Thus we make use of the concept of image headers.

The basic idea is that each Mat object has its own header, but the underlying matrix can be shared between two instances by having their data pointers point to the same memory location.
Thus the copy operator only copies the header, not the actual matrix data, thereby saving computation time.

Mat A, C;                                 // creates just the header parts
A = imread(argv[1], CV_LOAD_IMAGE_COLOR); // here we'll know the method used (allocate matrix)

Mat B(A);                                 // Use the copy constructor

C = A;                                    // Assignment operator

The interesting part here is that Mat objects B and C copy only the matrix header and not the actual matrix data. Thus changing any one of the matrices reflects the change in the others.

// OpenCV copy constructor and assignment operator 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat B,A;
 A = Mat::ones(2, 5, CV_8UC1);
 B = A;
 Mat C(A);
 cout<<"MAT A: \n"<<A<<"\n \n";
 cout<<"MAT B: \n"<<B<<"\n \n";
 cout<<"MAT C: \n"<<C<<"\n \n";
 return 0;
}
Output:
MAT A:
[1, 1, 1, 1, 1;
1, 1, 1, 1, 1]

MAT B:
[1, 1, 1, 1, 1;
 1, 1, 1, 1, 1]

MAT C:
[1, 1, 1, 1, 1;
1, 1, 1, 1, 1]


See the output of the below code:
// OpenCV copy constructor and assignment operator  
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat B,A;
 A = Mat::ones(2, 5, CV_8UC1);
 B = A;
 Mat C(A);
 B=B*5;
 cout<<"MAT A: \n"<<A<<"\n \n";
 cout<<"MAT B: \n"<<B<<"\n \n";
 cout<<"MAT C: \n"<<C<<"\n \n";
 return 0;
}
Output:
MAT A:
[5, 5, 5, 5, 5;
 5, 5, 5, 5, 5]

MAT B:
[5, 5, 5, 5, 5;
 5, 5, 5, 5, 5]

MAT C:
[5, 5, 5, 5, 5;
 5, 5, 5, 5, 5]
Note: Matrices A and C also get changed due to the change in matrix B.

copyTo and clone:

Mat A = B.clone();
Mat C;
B.copyTo(C);

Here, changing the matrix data of Mat object B or C won't reflect the change in the other matrices. For example, if we change the matrix values of A, then B and C are no longer affected by the corresponding change in A, because copyTo and clone copy the actual data of the matrix, not just its header.
So, to copy the underlying matrix of an image we use the copyTo and clone operators.

Analyse the output of the below code:

// OpenCV copyTO,clone Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
Mat A = Mat::ones(2, 5, CV_8UC1);
Mat B=A.clone();
Mat C;
A.copyTo(C);
 cout<<"MAT A: \n"<<A<<"\n \n";
 cout<<"MAT B: \n"<<B<<"\n \n";
 cout<<"MAT C: \n"<<C<<"\n \n";
 return 0;
}

Output:
MAT A:
[1, 1, 1, 1, 1;
 1, 1, 1, 1, 1]

MAT B:
[1, 1, 1, 1, 1;
 1, 1, 1, 1, 1]

MAT C:
[1, 1, 1, 1, 1;
 1, 1, 1, 1, 1]

// OpenCV copyTO,clone Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
Mat A = Mat::ones(2, 5, CV_8UC1);
Mat G = Mat::eye(2, 5, CV_8UC1);
Mat B=A.clone();
B=B*5;
Mat C;
G.copyTo(C);
G=G*6;
 cout<<"MAT A: \n"<<A<<"\n \n";
 cout<<"MAT B: \n"<<B<<"\n \n";
 cout<<"MAT C: \n"<<C<<"\n \n";
 cout<<"MAT G: \n"<<G<<"\n \n";
 return 0;
}

Output:
MAT A:
[1, 1, 1, 1, 1;
 1, 1, 1, 1, 1]

MAT B:
[5, 5, 5, 5, 5;
 5, 5, 5, 5, 5]

MAT C:
[1, 0, 0, 0, 0;
 0, 1, 0, 0, 0]

MAT G:
[6, 0, 0, 0, 0;
 0, 6, 0, 0, 0]

Difference between clone() and copyTo():
Analyse the output of the two codes.

clone():
// OpenCV clone Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
Mat A = Mat::ones(2, 5, CV_8UC1);
cout<<"MAT A before clone opeation: \n"<<A<<"\n \n";
Mat B = A;
Mat C = Mat::eye(2, 5, CV_8UC1);
A=C.clone();
 cout<<"MAT A: \n"<<A<<"\n \n";
 cout<<"MAT B: \n"<<B<<"\n \n";
 cout<<"MAT C: \n"<<C<<"\n \n";
 return 0;
}

Output:
MAT A before clone operation:
[1, 1, 1, 1, 1;
 1, 1, 1, 1, 1]

MAT A:
[1, 0, 0, 0, 0;
 0, 1, 0, 0, 0]

MAT B:
[1, 1, 1, 1, 1;
 1, 1, 1, 1, 1]

MAT C:
[1, 0, 0, 0, 0;
 0, 1, 0, 0, 0]

copyTo:
// OpenCV copyTo Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
Mat A = Mat::ones(2, 5, CV_8UC1);
cout<<"MAT A before copyTo operation: \n"<<A<<"\n \n";
Mat B = A;
Mat C = Mat::eye(2, 5, CV_8UC1);
C.copyTo(A);
 cout<<"MAT A: \n"<<A<<"\n \n";
 cout<<"MAT B: \n"<<B<<"\n \n";
 cout<<"MAT C: \n"<<C<<"\n \n";
 return 0;
}
Output:
MAT A before copyTo operation:
[1, 1, 1, 1, 1;
 1, 1, 1, 1, 1]

MAT A:
[1, 0, 0, 0, 0;
 0, 1, 0, 0, 0]

MAT B:
[1, 0, 0, 0, 0;
 0, 1, 0, 0, 0]

MAT C:
[1, 0, 0, 0, 0;
 0, 1, 0, 0, 0]

Thus, by comparing the output of copyTo() and clone(), we see that when the destination matrix and the source matrix have the same type and size, copyTo writes into the existing buffer of the destination matrix (so B, which shares its data with A, also changes), while clone always allocates a new buffer for the destination matrix (so B keeps the old data).
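The difference can be made visible by printing the Mat::data pointers before and after each operation; a minimal sketch (the actual addresses will differ from run to run):

// copyTo reuses the destination buffer, clone allocates a new one (sketch)
#include <opencv2/core/core.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat A = Mat::ones(2, 5, CV_8UC1);
 Mat C = Mat::eye(2, 5, CV_8UC1);

 cout << "A.data before copyTo: " << (void*)A.data << endl;
 C.copyTo(A);                        // same size and type, so the buffer is reused
 cout << "A.data after  copyTo: " << (void*)A.data << endl;

 A = C.clone();                      // clone always allocates a fresh buffer
 cout << "A.data after  clone : " << (void*)A.data << endl;
 return 0;
}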

Thursday, 12 November 2015

Blending two Images/Merging two Images/Adding two image

Images are basically matrices of pixel values. Thus image blending or image merging, in layman's terms, simply means adding the pixel values of two images at each co-ordinate.
Hence the images should be of the same size.

For example, if the pixel values of two gray-scale images at a co-ordinate are 120 and 35 respectively, then after blending the pixel value at that co-ordinate becomes 155.

Note:
A grayscale image is one where each pixel is stored as a single byte (8 bits).
Thus the pixel values can range from 0 to 255,
where 0 denotes black and 255 denotes white.

So what happens if the sum of the pixel values of the two gray-scale images exceeds 255 when they are merged?
For example, let the pixel value of one image at a particular co-ordinate be 250 (almost white) and that of the other image at the same co-ordinate be 120 (a mid gray).
After merging, the pixel at that co-ordinate of the merged image would simply be 255 (pure white), because 250+120 > 255 and the result saturates at 255.

Similarly, the lowest possible pixel value is 0, so if we multiply a pixel value by -1,
the pixel value in the modified image becomes 0.
i.e. if the pixel value at a particular co-ordinate is 255 (white) and we multiply it by -1,
      then the pixel at that point of the image becomes 0 (black).
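Both effects can be verified with a tiny experiment on 1*1 matrices; a minimal sketch relying only on the standard saturating Mat arithmetic:

// Saturation of 8-bit pixel arithmetic (sketch)
#include <opencv2/core/core.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat a(1, 1, CV_8UC1, Scalar(250));
 Mat b(1, 1, CV_8UC1, Scalar(120));

 Mat sum = a + b;        // 250 + 120 = 370, stored as 255
 Mat neg = a * (-1);     // -250, stored as 0

 cout << "a + b    = " << sum << endl;
 cout << "a * (-1) = " << neg << endl;
 return 0;
}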

We can check the above concept by accessing the pixel value of the merged image at a particular point. Refer:
http://opencvhub.blogspot.in/2015/06/accessing-pixel-value-of-image-using-vec3b-function-obtain-coordinates-get-location-point-display-opencv.html

Note: We can also merge more than 2 images.

The code for merging/blending two images is shown below:

#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>


using namespace cv;
using namespace std;

int main()
{
 
 Mat src1, src2, src3;
 /// Read image ( same size, same type )
 src1 = imread("C:\\Users\\arjun\\Desktop\\red.jpg");
 src2 = imread("C:\\Users\\arjun\\Desktop\\green.jpg");
 
 //Checking whether the images are loaded or not
 if( !src1.data ) { printf("Error loading src1 \n"); return -1; }
 if( !src2.data ) { printf("Error loading src2 \n"); return -1; }

 ///Comparing whether the two images are of same size or not
 int width1 , width2 , height1 , height2;
 width1 =src1.cols; 
 height1=src1.rows; 
 width2 =src2.cols; 
 height2=src2.rows; 
 
 if (width1!=width2 || height1!=height2)
 {
  printf("Error:Images must be of the same size \n");
  return -1;
 }
 
 //Merging two images
 src3=src1 + src2;
  
 /// Create Windows
 namedWindow("First Image", 1);
 imshow( "First Image", src1 );

 namedWindow("Second Image", 1);
 imshow( "Second Image", src2 );

 namedWindow("Blend1 Image", 1);
 imshow( "Blend1 Image", src3 );

 waitKey(0);
 return 0;
}

Input Images:
Red
opencv red image

Green
opencv green image

Output Image:
opencv image blending red and green

For 3 images:
The modified code for merging the images is shown below, where we have taken 3 images of blue, green and red colour and mixed them with each other.
Note: Here we have assumed that the images are of the same size, hence we have not included the code for comparing the sizes of the images to be merged.

#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>


using namespace cv;
using namespace std;

int main()
{
 
 Mat src1, src2, src3,src4, src5, src6, src7;
 /// Read image ( same size, same type )
 src1 = imread("C:\\Users\\arjun\\Desktop\\red.jpg");
 src2 = imread("C:\\Users\\arjun\\Desktop\\green.jpg");
 src3  = imread("C:\\Users\\arjun\\Desktop\\blue.jpg");


 //Checking whether the images are loaded or not
 if( !src1.data ) { printf("Error loading src1 \n"); return -1; }
 if( !src2.data ) { printf("Error loading src2 \n"); return -1; }
 if( !src3.data ) { printf("Error loading src3 \n"); return -1; }

 //Merging the images in pairs and all three together
 src4=src1 + src2;
 src5=src2 + src3;
 src6=src1 + src3;
 src7=src1 + src2 + src3;
  
 //src4 = imwrite( "C:\\Users\\arjun\\Desktop\\new1.jpg",src4);
 //src5 = imwrite( "C:\\Users\\arjun\\Desktop\\new2.jpg",src5);
 //src6 = imwrite( "C:\\Users\\arjun\\Desktop\\new3.jpg",src6);
 //src7 = imwrite( "C:\\Users\\arjun\\Desktop\\new4.jpg",src7);
 //src4  = imread("C:\\Users\\arjun\\Desktop\\new1.jpg");
 //src5  = imread("C:\\Users\\arjun\\Desktop\\new2.jpg");
 //src6  = imread("C:\\Users\\arjun\\Desktop\\new3.jpg");
 //src7  = imread("C:\\Users\\arjun\\Desktop\\new4.jpg");
 
 /// Create Windows
 namedWindow("First Image", 1);
 imshow( "First Image", src1 );

 namedWindow("Second Image", 1);
 imshow( "Second Image", src2 );

 namedWindow("Third Image", 1);
 imshow( "Third Image", src3 );

 namedWindow("Blend1 Image", 1);
 imshow( "Blend1 Image", src4 );

 namedWindow("Blend2 Image", 1);
 imshow( "Blend2 Image", src5 );

 namedWindow("Blend3 Image", 1);
 imshow( "Blend3 Image", src6 );

 namedWindow("Blend4 Image", 1);
 imshow( "Blend4 Image", src7 );

 waitKey(0);
 return 0;
}

Input Image:
Red
opencv blending red

Green
opencv blending green

Blue
opencv blending blue


Red+Green
opencv image blending tutorial red and green

Green+Blue
opencv image blending tutorial blue and green

Red+Blue
opencv image blending tutorial red and blue

Multiplying Pixel Value By -1:

What happens if we multiply the pixels by -1?
Since the minimum value of a pixel is 0, the whole image would appear black.
Note: Here again we have assumed that the images are of the same size, hence we have not included the code for comparing their sizes.

#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>


using namespace cv;
using namespace std;

int main()
{
 
 Mat src1, src2;
 /// Read image ( same size, same type )
 src1 = imread("C:\\Users\\arjun\\Desktop\\red.jpg");
 
 //Multiplying the image by -1
 src2=src1 * (-1);
 
 if( !src1.data ) { printf("Error loading src1 \n"); return -1; }
 if( !src2.data ) { printf("Error loading src2 \n"); return -1; }
 
 //src2 = imwrite( "C:\\Users\\arjun\\Desktop\\new1.jpg",src2);
 //src2  = imread("C:\\Users\\arjun\\Desktop\\new1.jpg");

 /// Create Windows
 namedWindow("First Image", 1);
 imshow( "First Image", src1 );

 namedWindow("Second Image", 1);
 imshow( "Second Image", src2 );

 waitKey(0);
 return 0;
}

Output:
opencv image result when multiplied by -1 is black

Think:
By including a for loop we can achieve a smooth transition effect between two images. Can't we?
Here is the code for it:

#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>


using namespace cv;
using namespace std;

int main()
{
 
 Mat src1, src2,src3;
 /// Read image ( same size, same type )
 src1 = imread("C:\\Users\\arjun\\Desktop\\red.jpg");
 src2 = imread("C:\\Users\\arjun\\Desktop\\green.jpg");

 //Checking whether images are loaded or not
 if( !src1.data ) { printf("Error loading src1 \n"); return -1; }
 if( !src2.data ) { printf("Error loading src2 \n"); return -1; }

 //Blending the two images with gradually changing weights
 for (double i=0; i<=255;i=i+5)
 {
 src3=(i*src1)/255 + ((255-i)*src2)/255;

 /// Create Windows
 namedWindow("Blend Image", 1);
 imshow( "Blend Image", src3 );
 
 waitKey(250);
 }
 return 0;
}

Here is the output:
Note the smooth blending between the two images.