Monday 28 December 2015

How to Add a Trackbar to a Specified Window in OpenCV


This OpenCV tutorial is about adding a trackbar to a specified window:

Tracking means following the movement of something in order to find it or know its route.
Bar here refers to the slider (counter).

Thus in OpenCV we add a trackbar to a window so that we can follow (and change) the value of a particular variable interactively.

createTrackbar: creates a trackbar (slider) and attaches it to the specified window.

Syntax:
C++: int createTrackbar(const string& trackbarname, const string& winname, int* value, int count, TrackbarCallback onChange=0, void* userdata=0)

Parameters:
trackbarname – Name of the created trackbar.
winname – Name of the window that will be used as a parent of the created trackbar.
value – Optional pointer to an integer variable whose value reflects the position of the slider. Upon creation, the slider position is defined by this variable.
count – Maximal position of the slider. The minimal position is always 0.
onChange – Pointer to the function to be called every time the slider changes position. This function should be prototyped as void Foo(int,void*); , where the first parameter is the trackbar position and the second parameter is the user data (see the next parameter). If the callback is the NULL pointer, no callbacks are called, but only value is updated.
userdata – User data that is passed as is to the callback. It can be used to handle trackbar events without using global variables.

The function createTrackbar creates a trackbar with the name and range specified by the user and keeps the variable value synchronized with the slider's position; whenever the slider is moved, the onChange callback is invoked. The created trackbar is displayed in the specified window winname.
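Before the full program below, here is a minimal sketch (the struct name, window name and image path are only illustrative) of how the optional userdata parameter can be used to pass data to the callback without global variables:

// Minimal sketch: passing user data to the trackbar callback (struct, window name and path are illustrative)
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

struct TrackbarData { Mat img; };                // whatever the callback needs

void on_change( int pos, void* userdata )
{
 TrackbarData* data = (TrackbarData*)userdata;   // recover the struct passed to createTrackbar
 if( pos < 1 ) pos = 1;                          // avoid dividing by zero
 Mat shown = data->img / pos;                    // darken the image as the slider moves
 imshow("Demo", shown);
}

int main()
{
 TrackbarData data;
 data.img = imread("C:\\Users\\arjun\\Desktop\\opencv-logo.jpg", CV_LOAD_IMAGE_COLOR);
 if( !data.img.data ) return -1;

 int pos = 1;
 namedWindow("Demo", 1);
 createTrackbar( "Divide", "Demo", &pos, 10, on_change, &data );  // last argument is the userdata pointer
 on_change( pos, &data );                        // draw the initial state
 waitKey(0);
 return 0;
}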

Here is the Opencv Code for Adding a Trackbar to the Specified window for changing the intensity value of an Image:


//Trackbar for changing Intensity Values of an Image
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
 
/// Global Variables
const int intensity_slider_max = 10;
int slider_value=1;

/// Matrices to store images
Mat src1,dst;

//Callback Function for Trackbar
void on_trackbar( int, void* )
{
 if( slider_value < 1 ) slider_value = 1;   // avoid division by zero when the slider is at 0
 dst = src1/slider_value;
 imshow("Intensity Change", dst);
}

int main( int argc, char** argv )
{
 /// Read image ( same size, same type )
 src1 = imread("C:\\Users\\arjun\\Desktop\\opencv-logo.jpg",CV_LOAD_IMAGE_COLOR);
 
 if( !src1.data ) { printf("Error loading src1 \n"); return -1; };

 /// Create Windows
 namedWindow("Intensity Change", 1);

 namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
 imshow("Original Image", src1);

 ///Create Trackbar
 createTrackbar( "Intensity", "Intensity Change", &slider_value, intensity_slider_max, on_trackbar );

 /// trackbar on_change function
 on_trackbar( slider_value, 0 );

 /// Wait until user press some key
 waitKey(0);
 return 0;
}

Sunday 27 December 2015

Opencv code for Drawing Prime Spiral-Amazing effects

This OpenCV tutorial is about drawing a square spiral (prime spiral):
In the previous tutorial we learnt about drawing a line:
http://opencv-hub.blogspot.in/2015/12/opencv-code-for-drawing-line-cplusplus.html
Thus this opencv tutorial will be an extension of that tutorial with some added mathematical logic for drawing a square spiral.
Here is the Opencv Code Below:
//Drawing a Square Spiral
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main( )
{
  int count = 0, a, b = 250, i, j;
  // Create a black empty image
  Mat image = Mat::zeros( 500, 500, CV_8UC3 );
  int p = 0; int q = 1;

  // Start at the centre (250,250) and keep drawing segments until we leave the image
  for( a = 250; a < 500 && a > 0; )
  {
    count++;
    if( count % 2 != 0 )
    {
      // Odd step: move horizontally, alternating right/left, with length growing as 5*count
      p++;
      j = b;
      if( p % 2 != 0 )
        { i = a + 5 * count; }
      else
        { i = a - 5 * count; }
    }
    else
    {
      // Even step: move vertically, alternating down/up, with length growing as 5*count
      q++;
      i = a;
      if( q % 2 == 0 )
        { j = b + 5 * count; }
      else
        { j = b - 5 * count; }
    }

    // Draw the current segment and show the partial spiral
    line( image, Point( a, b ), Point( i, j ), Scalar( 255, 255, 0 ), 2, 8 );
    imshow("Image", image);
    waitKey( 100 );

    // The end point of this segment becomes the start point of the next one
    a = i;
    b = j;
  }
  waitKey( 0 );
  return(0);
}

Output:
Opencv Tutorial to draw Square Spiral

Monday 21 December 2015

Opencv Code for Drawing a Star

This opencv tutorial is about drawing a Star.

In the previous tutorial we learnt about drawing a LINE:
http://opencv-hub.blogspot.in/2015/12/opencv-code-for-drawing-line-cplusplus.html

Thus this opencv tutorial will be an extension of that tutorial with some added mathematical logic for drawing a star.
To begin with, we first draw a pentagon
and name its vertices a, b, c, d, e.
Their coordinates can be obtained by the mathematical rules explained before:

a=( 2*r*cos(36)*cos(72) , x )
b=( x-2*r*cos(36)*cos(72) , x )
c=( x , 2*r*cos(36)*sin(72) )
d=( x/2 , 0 )
e=( 0 , 2*r*cos(36)*sin(72) )



Opencv Star


Now, The magic begins.
1. Join vertex a with d.
2. Join vertex d with b.
3. Join vertex b with e.
4. Join vertex e with c.
5. Join vertex c with a.
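Before the full listing, here is a compact sketch of the same star (assuming a 500x500 canvas, so x = 500, and the same scale factor a as in the code below, which plays the role of 2*r*cos(36) in the formulas above). Note that OpenCV's y axis points downwards, hence the x - ... terms:

// Sketch: the star as a vertex list joined in the order a-d-b-e-c (OpenCV's y axis points down)
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <math.h>
using namespace cv;

int main( )
{
  const double pi = 3.14159;
  const int x = 500;                                         // canvas size
  double a = x / (1 + cos(pi * 54 / 180));                   // scale factor, as in the full code below

  Mat image = Mat::zeros( x, x, CV_8UC3 );

  Point pts[5] = {
    Point( (int)(a * cos(pi * 72 / 180)), x ),               // vertex a
    Point( x / 2, 0 ),                                       // vertex d
    Point( x - (int)(a * cos(pi * 72 / 180)), x ),           // vertex b
    Point( 0, x - (int)(a * sin(pi * 72 / 180)) ),           // vertex e
    Point( x, x - (int)(a * sin(pi * 72 / 180)) )            // vertex c
  };

  for( int k = 0; k < 5; k++ )                               // joins a-d, d-b, b-e, e-c and c-a
    line( image, pts[k], pts[(k + 1) % 5], Scalar( 255, 255, 0 ), 2, 8 );

  imshow("Image", image);
  waitKey( 0 );
  return 0;
}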
Here is the opencv code for drawing a Star:

//Opencv Example of drawing a Star 
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <math.h>
using namespace cv;
using namespace std;
 
int main( )
{    
  double pi=3.14;
  int a=500/(1+cos(pi*54/180));
 
  // Create black empty images
  Mat image = Mat::zeros( 500, 500, CV_8UC3 );
  
  line( image, Point((a*cos(pi*72/180)), 500),  Point(250, 0), Scalar( 255, 255, 0 ), 2, 8 );
  imshow("Image",image); 
  waitKey( 500 );

  line( image, Point(250, 0), Point(500-(a*cos(pi*72/180)),500), Scalar( 255, 255, 0 ), 2, 8 );
  imshow("Image",image); 
  waitKey( 500 ); 

  line( image, Point(500-(a*cos(pi*72/180)),500), Point(0, 500-(a*sin(pi*72/180))), Scalar( 255, 255, 0 ), 2, 8 );
  imshow("Image",image); 
  waitKey( 500 );

  line( image, Point(0, 500-(a*sin(pi*72/180))), Point( 500, 500-(a*sin(pi*72/180)) ), Scalar( 255, 255, 0 ), 2, 8 );
  imshow("Image",image); 
  waitKey( 500 );

  line( image, Point( 500, 500-(a*sin(pi*72/180)) ), Point((a*cos(pi*72/180)), 500), Scalar( 255, 255, 0 ), 2, 8 );
  imshow("Image",image); 
  waitKey( 0 );
  return(0);
}

Resultant Star Image:
Opencv Code Drawing Star

Sunday 20 December 2015

OpenCV Code for Drawing a Line (C++)

This opencv tutorial is about drawing a Line:

Syntax:
void line(Mat& img, Point pt1, Point pt2, const Scalar& color, int thickness=1, int lineType=8, int shift=0)
Parameters:
img – Image.
pt1 – First point of the line segment.
pt2 – Second point of the line segment.
color – Line color.
thickness – Line thickness.
lineType – Type of the line:
8 (or omitted) - 8-connected line.
4 - 4-connected line.
CV_AA - antialiased line.
shift – Number of fractional bits in the point coordinates.

Here is the opencv code for drawing a line:
//Opencv Code for drawing a Line
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
 
int main( )
{    
  // Create black empty images
  Mat image = Mat::zeros( 400, 400, CV_8UC3 );
   
  // Draw a line 
  line( image, Point( 15, 20 ), Point( 70, 50), Scalar( 110, 220, 0 ),  2, 8 );
  imshow("Image",image);
 
  waitKey( 0 );
  return(0);
}

OpenCV Code for Drawing an Ellipse (C++)

This opencv tutorial is about drawing an ellipse

Syntax:
C++:
 void ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, const Scalar& color, int thickness=1, int lineType=8, int shift=0)

Parameters:
img – Image.
center – Center of the ellipse.
axes – Half of the size of the ellipse main axes.
angle – Ellipse rotation angle in degrees.
startAngle – Starting angle of the elliptic arc in degrees.
endAngle – Ending angle of the elliptic arc in degrees.
box – Alternative ellipse representation via RotatedRect or CvBox2D (used by the other overload of ellipse()). In that case the function draws an ellipse inscribed in the rotated rectangle.
color – Ellipse color.
thickness – Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that a filled ellipse sector is to be drawn.
lineType – Type of the ellipse boundary. See the line() description.
shift – Number of fractional bits in the coordinates of the center and values of axes.

Note: If you want to draw the whole ellipse and not an arc, choose startAngle 0 and endAngle 360. 

Note: A CIRCLE is a special case of an ELLIPSE whose ECCENTRICITY is equal to ZERO,
i.e. both of its axes are of equal length.
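As a quick illustration of this note (a minimal sketch, separate from the example below), an ellipse with two equal half-axes draws exactly the same shape as the dedicated circle() function:

// Sketch: an ellipse with equal half-axes is a circle
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main( )
{
 Mat image = Mat::zeros( 400, 400, CV_8UC3 );
 // Both calls draw a circle of radius 100 centred at (200,200)
 ellipse( image, Point( 200, 200 ), Size( 100, 100 ), 0, 0, 360, Scalar( 255, 255, 255 ), 1, 8 );
 circle ( image, Point( 200, 200 ), 100, Scalar( 0, 0, 255 ), 1, 8 );
 imshow("Image", image);
 waitKey( 0 );
 return(0);
}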

Here is the opencv code for drawing an ellipse :
//Code for drawing an ellipse
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
int main( )
{
Mat image = Mat::zeros( 400, 400, CV_8UC3 );
//void ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, const Scalar& color, int thickness=1, int lineType=8, int shift=0);
ellipse( image, Point( 200, 200 ), Size( 100.0, 150.0 ), 45, 0, 360, Scalar( 255, 0, 0 ), 3, 8 );
ellipse( image, Point( 200, 200 ), Size( 100.0, 150.0 ), 90, 0, 360, Scalar( 0,255, 0 ), 3, 8 );
ellipse( image, Point( 200, 200 ), Size( 100.0, 150.0 ), 135, 0, 360, Scalar( 0, 0, 255 ), 3, 8 );
ellipse( image, Point( 200, 200 ), Size( 100.0, 150.0 ), 180, 0, 360, Scalar( 255,255, 0 ), 3, 8 );
imshow("Image",image);
waitKey( 0 );
return(0);
}

Thursday 17 December 2015

Scanning Barcodes / QR Codes with OpenCV using ZBar

This OpenCV tutorial is about scanning or reading barcodes with OpenCV using the ZBar library.

A barcode can best be described as an "optical Morse code"; in other words, it is a machine-readable representation of human-readable characters. The mapping between messages and barcodes is called a symbology.

Barcodes can be subdivided into two types: 1-Dimensional and 2-Dimensional.

1-Dimensional (1D) barcodes
These comprise parallel lines (bars) of varying widths (thickness) and spaces used to encode information about the object they label. The various types of 1-Dimensional barcodes are:
  • UPC Code (Universal Product Code)
  • Variations: UPC-A and UPC-E
    1. UPC-A: uses 12-digit codes and no characters other than numbers.
    2. UPC-E: a compressed variation that uses only 6-digit codes, again with no characters other than numbers.
  •  EAN Code (European Article Number, now renamed International Article Number)
  • Variations: EAN-13, EAN-8, JAN-13, ISBN, ISSN
    EAN-13 barcode: similar to the UPC-A barcode, with the only visual difference lying in the position of the last digit. In the EAN-13 system the check digit lies below the barcode rather than to the right of it. An EAN-13 barcode consists of 2 groups of 6 digits each.
    EAN-8 barcode: similar to the EAN-13 system, except that it was introduced for small packages where an EAN-13 barcode is not practical.
  • Code 39:
  • Also known as Code 3 of 9; the initial version could encode only 39 characters, while the current version can encode up to 43: A to Z, 0 to 9, and some special symbols like "$", "/", ",", "%" and space. Each character is encoded with five bars and four spaces.
  • Code 128:
  • It can encode all 128 ASCII characters.
  • ITF-14:
  • Also known as "Interleaved 2 of 5" is a high density bar-code symbology used for encoding only numeric digits from  0 to 9.It always codes digit pairs for e.g 01 is regarded as one pair and coded by 1 set of bars similarly for 02 and so on.Thus it should always consist of even number of digits.Whenever the data to be encoded is of odd numbers a leading 0 is added to data.
  • Codabar
  • Variations: Codeabar, Ames Code, NW-7, Monarch, Code 2 of 7, Rationalized Codabar, ANSI/AIM BC3-1995, USD-4
    It is a self-checking barcode symbology which can encode 16 different characters (the numbers 0 to 9 plus "$", "+", "-", ":", ".", "/") and an additional 4 start/stop characters (A, B, C, D).
  • GS1 Databar
  • Variations: GS1 DataBar Omnidirectional, Truncated, Stacked, Stacked Omnidirectional, Expanded, Expanded Stacked.
  • MSI Plessey: also known as Modified Plessey, it is not a self-checking symbology and can encode only the digits 0 to 9.
2-Dimensional (2D) Barcode:
2D barcodes are similar to 1D ones, except that they can store more data per unit area (since they encode information not only horizontally, as 1D barcodes do, but also vertically).
  • QR code:
  • DATAMATRIX Code:
  • PDF 417:
  • AZTEC:
Now,

How to scan/read these Barcodes using OPENCV?

Scanning barcodes with the ZBar library is quite simple. So, what is ZBar and how do we use ZBar with OpenCV? ZBar is an open source library for reading barcodes from various sources, such as video streams, image files and raw intensity sensors. It supports many popular symbologies (types of barcodes) including EAN-13/UPC-A, UPC-E, EAN-8, Code 128, Code 39, Interleaved 2 of 5 and QR Code. So to read these barcodes we use ZBar along with OpenCV. The steps to configure ZBar with OpenCV are as follows:

1. Install Z-bar(Windows installer) from the link below:
http://sourceforge.net/projects/zbar/files/zbar/0.10/zbar-0.10-setup.exe/download
 zbar-installation-link





2. Now Open your Visual Studio : Go to File >> New>>Project.
 i. In Installed Templates Select Visual C++  
ii. Win32 Console Application
iii. Name of Project as BarcodeScanner.


Click on Next
In Additional Options : Tick Empty Project 
Finish

3. Right Click on File BarcodeScanner in Solution Explorer.

Click on Properties: The Property Pages Dialog Box would pop-up.

In "Configuration Properties" goto "VC++ Directories" then to "Include Directories"

Double click the empty box
4. Browse and Add the Path of OpenCV Include folder and ZBar Include folder

For the OpenCV include folder, here it is ==> D:\opencv2410\build\include

Similarly, add the ZBar include folder, which is located inside the ZBar installation directory (alongside the lib folder used in the next step).
Click OK and then Apply.

5. Go to Library Directories. Add the path of the ZBar lib folder.

Here it is==> C:\Program Files\ZBar\lib

6. Go to Linker ==> Input
Then to Additional Dependencies
Add:
libzbar-0.lib
opencv_calib3d2410d.lib
opencv_contrib2410d.lib
opencv_core2410d.lib
opencv_features2d2410d.lib
opencv_flann2410d.lib
opencv_gpu2410d.lib
opencv_highgui2410d.lib
opencv_imgproc2410d.lib
opencv_legacy2410d.lib
opencv_ml2410d.lib
opencv_nonfree2410d.lib
opencv_objdetect2410d.lib
opencv_photo2410d.lib
opencv_stitching2410d.lib
opencv_superres2410d.lib
opencv_ts2410d.lib
opencv_video2410d.lib
opencv_videostab2410d.lib

Click OK

Click OK and APPLY on Property Page Dialog Box
(Note: If it doesn't work also copy all the library files under the debug folder of the current project)

7. Right-click Source Files in BarcodeScanner, go to Add, then New Item.

Go to Visual C++, select C++ File (.cpp). Name it "main" (without the quotes).


8. Copy the Opencv Code for Barcode Scanning as shown below:
// OpenCV Barcode Scanner Tutorial
// OpenCV Barcode Reader Tutorial
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/opencv.hpp"
#include <iostream>
#include "zbar.h"
using namespace cv;
using namespace std;
using namespace zbar;

int main(void)
{
    ImageScanner scanner;
    scanner.set_config(ZBAR_NONE, ZBAR_CFG_ENABLE, 1);

    // obtain image data
    char file[256];
    cin >> file;
    Mat img = imread(file, 0);           // load as grayscale
    Mat imgout;
    cvtColor(img, imgout, CV_GRAY2RGB);  // colour copy used for drawing the results
    int width  = img.cols;
    int height = img.rows;
    uchar *raw = (uchar *)img.data;

    // wrap image data
    Image image(width, height, "Y800", raw, width * height);

    // scan the image for barcodes
    int n = scanner.scan(image);

    // extract results
    for(Image::SymbolIterator symbol = image.symbol_begin();
        symbol != image.symbol_end();
        ++symbol)
    {
        vector<Point> vp;
        // do something useful with results
        cout << "decoded " << symbol->get_type_name()
             << " symbol \"" << symbol->get_data() << '"' << " " << endl;

        int n = symbol->get_location_size();
        for(int i = 0; i < n; i++)
        {
            vp.push_back(Point(symbol->get_location_x(i), symbol->get_location_y(i)));
        }

        // draw the rotated bounding box of the detected symbol
        RotatedRect r = minAreaRect(vp);
        Point2f pts[4];
        r.points(pts);
        for(int i = 0; i < 4; i++)
        {
            line(imgout, pts[i], pts[(i+1)%4], Scalar(255, 0, 0), 3);
        }
        cout << "Angle: " << r.angle << endl;
    }

    imshow("imgout.jpg", imgout);
    // clean up
    image.set_data(NULL, 0);
    waitKey();
    return 0;
}

Thursday 10 December 2015

Splitting and Merging various channels of a color image without using opencv split and merge functions.


This opencv tutorial is about splitting and merging the channels of an image without using opencv split and merge functions:

In the previous tutorial we split the R,G,B channels of a color image using opencv's function called split().



First of all, why is there a need to split the channels of a color image?
As explained in the previous articles, it helps us to see the individual contribution of each channel to the color image. It also has other applications, such as object detection based on color recognition, e.g. we can select a green object from the background and track it.
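For reference, the built-in split() and merge() functions mentioned above can be used roughly as follows (a minimal sketch; the image path is just an example):

// Sketch: the built-in split() and merge() functions, shown only for comparison
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>
using namespace cv;

int main()
{
 Mat src = imread("C:\\Users\\arjun\\Desktop\\opencv.png", CV_LOAD_IMAGE_COLOR);  // example path
 if( !src.data ) return -1;

 std::vector<Mat> channels;
 split(src, channels);              // channels[0]=Blue, channels[1]=Green, channels[2]=Red (single-channel Mats)

 imshow("Blue",  channels[0]);
 imshow("Green", channels[1]);
 imshow("Red",   channels[2]);

 Mat merged;
 merge(channels, merged);           // recombine the three planes into one BGR image
 imshow("Merged", merged);

 waitKey(0);
 return 0;
}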

So the process of splitting color images without using split() function in opencv is as shown below:

// OpenCV Channel Splitting  Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 const float pi=3.14;
 Mat src1,src2,src3,src4,src5;
 src1 = imread("C:\\Users\\arjun\\Desktop\\opencv.png",CV_LOAD_IMAGE_COLOR);
 src2 = Mat::eye(src1.rows,src1.cols, CV_8UC1);
 src3 = Mat::eye(src1.rows,src1.cols, CV_8UC1);
 src4 = Mat::eye(src1.rows,src1.cols, CV_8UC1);
 
 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}

for (int i=0; i<src1.cols ; i++){
for (int j=0 ; j<src1.rows ; j++)
 { 
Vec3b color1 = src1.at<Vec3b>(Point(i,j));
Scalar color2 = src2.at<uchar>(Point(i,j));
Scalar color3 = src3.at<uchar>(Point(i,j));
Scalar color4 = src4.at<uchar>(Point(i,j));

      color2.val[0]=color1.val[0]; //Blue channel
   
   color3.val[0]=color1.val[1];  //Green Channel

   color4.val[0]=color1.val[2];  //Red Channel
    
   src2.at<uchar>(Point(i,j)) = color2.val[0];
   src3.at<uchar>(Point(i,j)) = color3.val[0];
   src4.at<uchar>(Point(i,j)) = color4.val[0];
  }
 }
namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src1);

namedWindow("Red Channel Image",CV_WINDOW_AUTOSIZE); 
imshow("Red Channel Image", src4);
imwrite("C:\\Users\\arjun\\Desktop\\opencv-red.png",src4);

namedWindow("Green Channel Image",CV_WINDOW_AUTOSIZE); 
imshow("Green Channel Image", src3); 
imwrite("C:\\Users\\arjun\\Desktop\\opencv-green.png",src3);

namedWindow("Blue Channel Image",CV_WINDOW_AUTOSIZE); 
imshow("Blue Channel Image", src2); 
imwrite("C:\\Users\\arjun\\Desktop\\opencv-blue.png",src2);

 waitKey(0);
 return 0;
}
Input Image:
opencv-rgb-image

Output Image:
opencv red channel split image

opencv green channel split image

opencv-blue-channel-split-image

Similarly, the channels can be merged back in OpenCV so that each output image displays only an individual color channel, as shown below:

// OpenCV Channel Merging  Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 const float pi=3.14;
 Mat src1,src2,src3,src4,src5;
 src1 = imread("C:\\Users\\arjun\\Desktop\\opencv.png",CV_LOAD_IMAGE_COLOR);
 src2 = Mat::eye(src1.rows,src1.cols, CV_8UC3);
 src3 = Mat::eye(src1.rows,src1.cols, CV_8UC3);
 src4 = Mat::eye(src1.rows,src1.cols, CV_8UC3);
 
 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}

for (int i=0; i<src1.cols ; i++){
for (int j=0 ; j<src1.rows ; j++)
 { 
Vec3b color1 = src1.at<Vec3b>(Point(i,j));
Vec3b color2 = src2.at<Vec3b>(Point(i,j));
Vec3b color3 = src3.at<Vec3b>(Point(i,j));
Vec3b color4 = src4.at<Vec3b>(Point(i,j));

      color2.val[0]=color1.val[0]; //Blue channel
   color2.val[1]=0;
   color2.val[2]=0;

   color3.val[0]=0;             //Green Channel
   color3.val[1]=color1.val[1];
   color3.val[2]=0;

   color4.val[0]=0;             //Red Channel
   color4.val[1]=0;
   color4.val[2]=color1.val[2];
    
   src2.at<Vec3b>(Point(i,j)) = color2;
   src3.at<Vec3b>(Point(i,j)) = color3;
   src4.at<Vec3b>(Point(i,j)) = color4;
  }
 }
namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src1);

namedWindow("Red Channel Image",CV_WINDOW_AUTOSIZE); 
imshow("Red Channel Image", src4);
imwrite("C:\\Users\\arjun\\Desktop\\opencv-red.png",src4);

namedWindow("Green Channel Image",CV_WINDOW_AUTOSIZE); 
imshow("Green Channel Image", src3); 
imwrite("C:\\Users\\arjun\\Desktop\\opencv-green.png",src3);

namedWindow("Blue Channel Image",CV_WINDOW_AUTOSIZE); 
imshow("Blue Channel Image", src2); 
imwrite("C:\\Users\\arjun\\Desktop\\opencv-blue.png",src2);

 waitKey(0);
 return 0;
}

Input Image:
opencv rgb color-mix image

Output Image:
opencv red_channel merge function

opencv green_channel merge function

opencv blue_channel merge function

Note the difference between the two codes:
In channel splitting we have used CV_8UC1, i.e. an 8-bit unsigned single-channel image.
In channel merging we have used CV_8UC3, i.e. an 8-bit unsigned three-channel image.
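A tiny sketch of that difference (purely illustrative):

// Sketch: CV_8UC1 vs CV_8UC3
#include <opencv2/core/core.hpp>
#include <iostream>
using namespace cv;

int main()
{
 Mat plane = Mat::zeros(4, 4, CV_8UC1);   // one 8-bit channel, as used for splitting
 Mat color = Mat::zeros(4, 4, CV_8UC3);   // three 8-bit channels, as used for merging
 std::cout << plane.channels() << " channel vs " << color.channels() << " channels" << std::endl;  // prints "1 channel vs 3 channels"
 return 0;
}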

Wednesday 2 December 2015

How to convert a color image into a grayscale image without using CV_LOAD_IMAGE_GRAYSCALE

A grayscale (greyscale) image, as the name suggests, contains various shades of grey, ranging from black to white.
Each pixel in a grayscale image is stored as a byte, i.e. 8 bits.
Thus the pixel value can range from 0 to 255,
where 0 represents black and 255 represents white.

All the intermediate values are shades of grey, getting darker as the pixel value approaches 0.
Thus the pixel value carries only intensity information, where black is the weakest intensity and white the strongest.
Hence greyscale images are said to have only 1 channel.

The basic parameter is
                                 X=(a*R+b*G+c*B)
Depending upon the values of a,b,c we have two main methods of converting an RGB image into GreyScale.
1. Weighted average method:
Here a=b=c=1/3
         X=( R + G + B )/3
   i.e. X=(0.33*R+0.33*G+0.33*B)
Thus here we have taken a 33% contribution from each of the red, green and blue colors.
But in reality, based on the human visual system, we do not perceive each color with equal weight:
our eye is more sensitive to green than to red or blue.
This leads to the second method, called the luminosity method.
The code and output by weighted sum method is as shown below:
// OpenCV Grayscale Image Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 const float pi=3.14;
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\abcd.png",CV_LOAD_IMAGE_COLOR);
 src2 = Mat::zeros(src1.rows,src1.cols, CV_8UC1);

 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}

for (int i=0; i<src1.cols ; i++){
for (int j=0 ; j<src1.rows ; j++)
 { 
Vec3b color1 = src1.at<Vec3b>(Point(i,j));
Scalar color2 = src2.at<uchar>(Point(i,j));
      color2 = (color1.val[0]+color1.val[1]+color1.val[2])/3;

   src2.at<uchar>(Point(i,j)) = color2.val[0];
  }
 }

//imwrite("C:\\Users\\arjun\\Desktop\\greyscale.jpg",src2);

namedWindow("GRAYSCALE_IMAGE",CV_WINDOW_AUTOSIZE); 
imshow("GRAYSCALE_IMAGE", src2); 

namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src1);

 waitKey(0);
 return 0;
}

Input Image:

Output Image:


2. Luminosity method:
X=0.3*R+0.59*G+0.11*B
Note: In OpenCV the default color ordering is BGR rather than RGB.
Refer the code below:
// OpenCV GrayScale Image  Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 const float pi=3.14;
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\abcd.png",CV_LOAD_IMAGE_COLOR);
 src2 = Mat::zeros(src1.rows,src1.cols, CV_8UC1);

 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}

for (int i=0; i<src1.cols ; i++){
for (int j=0 ; j<src1.rows ; j++)
 { 
Vec3b color1 = src1.at<Vec3b>(Point(i,j));
Scalar color2 = src2.at<uchar>(Point(i,j));
      color2 = (0.11*color1.val[0]+0.59*color1.val[1]+0.3*color1.val[2]);

   src2.at<uchar>(Point(i,j)) = color2.val[0];
  }
 }

//imwrite("C:\\Users\\arjun\\Desktop\\greyscale.jpg",src2);

namedWindow("GRAYSCALE_IMAGE",CV_WINDOW_AUTOSIZE); 
imshow("GRAYSCALE_IMAGE", src2); 

namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src1);

 waitKey(0);
 return 0;
}

Input Image:
opencv CV_LOAD_IMAGE_COLOR


Output Image:
OPENCV CV_LOAD_IMAGE_GRAYSCALE

Note: Compare the two greyscale images and observe the difference between their outputs.
In the luminosity method the green component of the image appears much lighter than the other two color components, since the green channel is multiplied by 0.59, which is considerably larger than the constants used for the other two channels.

In general, the larger the weight given to a channel, the brighter that channel appears in the greyscale output; if a channel were given a weight of 1, a fully saturated pixel of that color would map to pure white in the greyscale image.
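As a small illustration of these weights (a sketch using the same coefficients as the two programs above), consider a pure-green pixel:

// Sketch: grey value of a pure-green pixel (B=0, G=255, R=0) under both conversions
#include <iostream>

int main()
{
 int B = 0, G = 255, R = 0;
 double average    = (B + G + R) / 3.0;               // weighted-average method
 double luminosity = 0.11*B + 0.59*G + 0.3*R;         // luminosity method
 std::cout << "average    : " << average    << std::endl;   // 85
 std::cout << "luminosity : " << luminosity << std::endl;   // 150.45
 return 0;
}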

Significance:
It gives us the luminosity (intensity component) of the image.
The size of a greyscale image is smaller than that of its corresponding color version (if they use the same encoding and compression factor), since it uses only a single channel to represent the information.

Sunday 29 November 2015

Digital Negative of an Image in OpenCV

A digital negative, as the name suggests, is obtained by inverting the pixel values of an image such that bright pixels appear dark and dark pixels appear bright.

Thus the darkest pixel in the original image would be the brightest in that of its negative.
A good example of it can be an X-ray image.

Now, consider an 8-bit image.
The pixel values can range from 0 to 255.
Thus, to obtain the negative, we need to subtract each pixel value of the image from 255.

Hence, for a k-bit image,
the pixel values range from 0 to (2^k)-1,
and we would have to subtract each pixel value from (2^k)-1.
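For reference, OpenCV can also do this in a single expression; a minimal sketch (not the method used in this tutorial, and the image path is just an example):

// Sketch: digital negative using a single Mat expression (or bitwise_not)
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main()
{
 Mat src = imread("C:\\Users\\arjun\\Desktop\\image_opencv.jpg", CV_LOAD_IMAGE_GRAYSCALE);  // example path
 if( !src.data ) return -1;

 Mat negative = 255 - src;          // subtract every pixel from 255
 // bitwise_not(src, negative);     // equivalent for 8-bit images

 imshow("Digital Negative Image", negative);
 waitKey(0);
 return 0;
}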

The below code is in opencv for digital negative of an 8-bit grayscale image:
// OpenCV Digital Negative Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 const float pi=3.14;
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\image_opencv.jpg",CV_LOAD_IMAGE_GRAYSCALE);
 src2 = Mat::zeros(src1.rows,src1.cols, CV_8UC1);

 if( !src1.data ) { printf("Error loadind src1 \n"); return -1;}

 
for (int i=0; i<src1.cols ; i++)
{
for (int j=0 ; j<src1.rows ; j++)
 { 
 Scalar color1 = src1.at<uchar>(Point(i, j));
 Scalar color2 = src2.at<uchar>(Point(i, j));
 color2.val[0] = 255-color1.val[0];
   
 src2.at<uchar>(Point(i,j)) = color2.val[0]; 
 }
 }
namedWindow("Digital Negative Image",CV_WINDOW_AUTOSIZE); 
imshow("Digital Negative Image", src2); 
//imwrite("C:\\Users\\arjun\\Desktop\\digitalnegative.jpg",src1);

namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src1);

 waitKey(0);
 return 0;
}

INPUT IMAGE:
OPENCV TEST IMAGE

OUTPUT IMAGE:
opencv digital negative image


Applications:
It has immense applications in the medical field, for finding minute details of a tissue,
and also in the field of astronomy, for observing distant stars.

Input Image:
opencv test input
Output:
opencv digital negative

Image Scaling(Image Shrinking) Tutorial

Refer this code for shrinking the image by a factor of two:
// OpenCV Image Shrinking Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);//Linux.jpg");//atom.jpg");//"Linux.jpg"); //f1.jpg
 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}

 src2 = Mat::zeros((src1.rows/2)+1,(src1.cols/2)+1, CV_8UC3);
 cout<<"Shrinking factor is: 2"<<endl
 
for (float i=0; i<src1.cols ; i++)
{
  for (float j=0 ; j<src1.rows ; j++)
 { 
     Vec3b color1 = src1.at<Vec3b>(Point(i,j));
     Vec3b color2 = src2.at<Vec3b>(Point(i/2,j/2));
      color2.val[0] = color1.val[0];
      color2.val[1] = color1.val[1];
      color2.val[2] = color1.val[2];
 
      src2.at<Vec3b>(Point(i/2,j/2)) = color2;
 }
 }

namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src1);
//imwrite("C:\\Users\\arjun\\Desktop\\shrinked.jpg",src2);

namedWindow("Scaled Image",CV_WINDOW_AUTOSIZE); 
imshow("Scaled Image", src2); 

 waitKey(0);
 return 0;
}

Next we have done slight modifications in the code, where we take input from the user for shrinking an image by a particular factor:

// OpenCV Image Shrinking Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);
    if( !src1.data ) { printf("Error loading src1 \n"); return -1;}
 
 int a=-1;
     while(a<=0){
 cout<<"Enter the Shrinking factor greater than 0 "<<endl;
 cin>>a;

  if( a<=0 )
 {
  cout<<"Invalid values... Please re-enter the correct values \n \n";
 }

      }

 src2 = Mat::zeros(src1.rows/a+1,src1.cols/a+1, CV_8UC3);
 cout<<"Shrinking factor is: "<<a<<endl;
for (int i=0; i<src1.cols-1 ; i++)
{
 for (int j=0 ; j<src1.rows-1; j++)
 {
  
     Vec3b color1 = src1.at<Vec3b>(Point(i,j));
   {
  {
  Vec3b color2 = src2.at<Vec3b>(Point(i/a,j/a));
  color2.val[0] = color1.val[0];
  color2.val[1] = color1.val[1];
  color2.val[2] = color1.val[2];
  src2.at<Vec3b>(Point(i/a,j/a)) = color2;
  
  }
   }
 }
 
 }

// imwrite("C:\\Users\\arjun\\Desktop\\shrinked.jpg",src2);
 namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
 imshow("Original Image", src1);

 namedWindow("Scaled Image",CV_WINDOW_AUTOSIZE); 
imshow("Scaled Image", src2); 

 waitKey(0);
 return 0;
}

OpenCV image shrinking tutorial with the help of a trackbar:
// OpenCV Image Shrinking Tutorial with trackbar
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

Mat src1,src2;
int a=1;
const int shrink_slider_max=50;

void shrink_trackbar( int , void* )
{
 if( a < 1 ) a = 1;   // guard against division by zero when the trackbar is at 0
 src2 = Mat::zeros(src1.rows/a+1,src1.cols/a+1, CV_8UC3);
 
for (int i=0; i<src1.cols-1 ; i++)
{
 for (int j=0 ; j<src1.rows-1; j++)
 {
  
     Vec3b color1 = src1.at<Vec3b>(Point(i,j));
   {
  {
  Vec3b color2 = src2.at<Vec3b>(Point(i/a,j/a));
  color2.val[0] = color1.val[0];
  color2.val[1] = color1.val[1];
  color2.val[2] = color1.val[2];
  src2.at<Vec3b>(Point(i/a,j/a)) = color2;
  
  }
   }
 }
 
 }
imshow("Scaled Image", src2); 
cout<<"Image Got Processed for Shrink factor="<<a<<endl;

}

int main()
{
 
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);
 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}
 
namedWindow("Scaled Image",CV_WINDOW_AUTOSIZE); 
createTrackbar( "Shrinking", "Scaled Image", &a,shrink_slider_max, shrink_trackbar );
shrink_trackbar( a, 0 );
// imwrite("C:\\Users\\arjun\\Desktop\\shrinked.jpg",src2);

namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src1);

 waitKey(0);
 return 0;
}

Thursday 26 November 2015

Image Blender using addWeighted function

Blending or mixing two images linearly can be achieved with the addWeighted function provided by OpenCV.
The syntax of the OpenCV addWeighted function is:

C++: void addWeighted(InputArray src1, double alpha, InputArray src2, double beta, double gamma, OutputArray dst, int dtype=-1)

Parameters:
src1 – first input array.
alpha – weight of the first array elements.
src2 – second input array of the same size and channel number as src1.
beta – weight of the second array elements.
dst – output array that has the same size and number of channels as the input arrays.
gamma – scalar added to each sum.
dtype – optional depth of the output array; when both input arrays have the same depth, dtype can be set to -1, which will be equivalent to src1.depth().

Linear Blending means adding two images pixel by pixel.
Thus we can use the function
c(x)=(1-α)*a(x)+α*b(x)
where a(x) and b(x) are the two source images.
c(x) is the resultant blended image.

addWeighted( src1, alpha, src2, beta, 0.0, dst);
Thus addWeighted function performs the same thing as
dst = α*src1+β*src2+γ
Here γ=0  and   β=1-α

Why do we choose β=1-α?
Since we are working with 8-bit images, pixel values can range from 0 to 255.
Thus, while adding two images, the pixel values of the resultant image should also lie between 0 and 255.
Hence, if we multiply a pixel of one image by α, the corresponding pixel of the other image should be weighted by 1-α,
so that the sum of the weights is α+(1-α)=1
and the resulting pixel value stays within the 0-255 range.
E.g.
Consider that the pixel value of src1 at a particular coordinate is 230,
and that of src2 is 215.
Now we need to blend these two images linearly, and for that we need to blend the pixel values of the two images.
If we choose α=0.5 and β=0.7,
the pixel value at that coordinate would be
c(x)=α*a(x)+β*b(x)
    =0.5*230+0.7*215
    =265.5,
which is outside the 8-bit range (OpenCV would saturate it to 255).
Thus β should be less than or equal to 1-α.
Here we have chosen it equal to 1-α to be on the safe side, but it can be less than 1-α too.
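A tiny sketch of that clipping behaviour, using OpenCV's saturate_cast (the same saturation applied when 8-bit results are stored):

// Sketch: what happens when alpha + beta > 1 (the sum exceeds 255 and gets clipped)
#include <opencv2/core/core.hpp>
#include <iostream>
using namespace cv;

int main()
{
 double sum = 0.5*230 + 0.7*215;                                    // = 265.5, outside the 8-bit range
 std::cout << "raw sum        : " << sum << std::endl;
 std::cout << "stored as uchar: " << (int)saturate_cast<uchar>(sum) << std::endl;  // prints 255
 return 0;
}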

The code for it goes as below:

// OpenCV Image Blending Tutorial using addWeighted function
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 double alpha = 0.5; 
 double beta; 
 double input;

 Mat src1, src2, dst,src3;
 /// Read image ( same size, same type )
 src1 = imread("C:\\Users\\arjun\\Desktop\\green.jpg");
 src2 = imread("C:\\Users\\arjun\\Desktop\\blue.jpg");

 if( !src1.data ) { printf("Error loading src1 \n"); return -1; }
 if( !src2.data ) { printf("Error loading src2 \n"); return -1; }
 
 ///Comparing whether the two images are of same size or not
 int width1 , width2 , height1 , height2;
 width1 =src1.cols; 
 height1=src1.rows; 
 width2 =src2.cols; 
 height2=src2.rows; 
 
 if (width1!=width2 || height1!=height2)   // error out if either dimension differs
 {
  printf("Error:Images must be of the same size \n");
  return -1;
 }
 /// Ask the user enter alpha
 std::cout<<" Simple Linear Blender "<<std::endl;
 std::cout<<"-----------------------"<<std::endl;
 std::cout<<"* Enter alpha [0-1]: ";
 std::cin>>input;

 /// We use the alpha provided by the user if it is between 0 and 1
 if( input >= 0.0 && input <= 1.0 )
   { 
    alpha = input;
   }

 beta = ( 1.0 - alpha );
 addWeighted( src1, alpha, src2, beta, 0.0, dst);

 /// Create Windows
 namedWindow("Linear Blend", 1);
 imshow( "Linear Blend", dst );

 namedWindow("Original Image1", 1);
 imshow( "Original Image1", src1 );

 namedWindow("Original Image2", 1);
 imshow( "Original Image2", src2 );
 waitKey(0);
 return 0;
}

The same linear blending of two images, this time controlled by a trackbar, is shown below:
// OpenCV Image blending Tutorial using addWeighted function and trackbar
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

double alpha; 
double beta;
const int blend_slider_max = 100;
int alpha_slider;
Mat src1, src2, dst,src3;

void blend_trackbar( int , void* )
{
 alpha = (double) alpha_slider/blend_slider_max;
 beta = (double)( 1.0 - alpha );
    addWeighted( src1, alpha, src2, beta, 0.0, dst);
    imshow( "Linear Blend", dst );
}

int main()
{
 // Read image ( same size, same type )
 src1 = imread("C:\\Users\\arjun\\Desktop\\opencv_image1.jpg");
 src2 = imread("C:\\Users\\arjun\\Desktop\\opencv_image2.jpg");

 if( !src1.data ) { printf("Error loading src1 \n"); return -1; }
 if( !src2.data ) { printf("Error loading src2 \n"); return -1; }
 
 ///Comparing whether the two images are of same size or not
 int width1 , width2 , height1 , height2;
 width1 =src1.cols; 
 height1=src1.rows; 
 width2 =src2.cols; 
 height2=src2.rows; 
 
 if (width1!=width2 || height1!=height2)   // error out if either dimension differs
 {
  printf("Error:Images must be of the same size \n");
  return -1;
 }

 // Create Windows 
 namedWindow("Linear Blend",CV_WINDOW_AUTOSIZE); 
 createTrackbar( "Blending", "Linear Blend", &alpha_slider, blend_slider_max, blend_trackbar );
 blend_trackbar( alpha_slider, 0 );

 namedWindow("Original Image1", 1);
 imshow( "Original Image1", src1 );

 namedWindow("Original Image2", 1);
 imshow( "Original Image2", src2 );

 waitKey(0);
 return 0;
}


Input Image1:
opencv image1
Input Image2:
opencv image2

Blended Image Video by changing the position on the trackbar:



Wednesday 25 November 2015

Image Scaling(Image Zooming) Tutorial

There are many times when we actually need to zoom-in an image or zoom-out an image.
The same can be done very easily in opencv.
The code below zooms the image by a factor of two.
Refer the code below:

// OpenCV Image Zooming Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);
    if( !src1.data ) { printf("Error loading src1 \n"); return -1;}
 
 cout<<"src1.rows="<<src1.rows<<endl;
 cout<<"src1.cols="<<src1.cols<<endl;

  
 src2 = Mat::zeros(src1.rows*2,src1.cols*2, CV_8UC3);
 cout<<"Magnification factor is 2"<<endl;
for (int i=0; i<src1.cols ; i++)
{
 for (int j=0 ; j<src1.rows ; j++)
 {
  
     Vec3b color1 = src1.at<Vec3b>(Point(i,j));
     for(int p=0;p<2;p++)
   {
  for(int q=0;q<2;q++)
  {
  Vec3b color2 = src2.at<Vec3b>(Point(i,j));
  color2.val[0] = color1.val[0];
  color2.val[1] = color1.val[1];
  color2.val[2] = color1.val[2];

   
  src2.at<Vec3b>(Point(i*2+p,j*2+q)) = color2;
  }
   }
 }
 }

 //imwrite("C:\\Users\\arjun\\Desktop\\zoomed.jpg",src2);
 namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
 imshow("Original Image", src1);

 namedWindow("Scaled Image",CV_WINDOW_AUTOSIZE); 
 imshow("Scaled Image", src2); 

 waitKey(0);
 return 0;
}
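As an aside, OpenCV's built-in resize() function (from the imgproc module, not used in this tutorial) can do the same kind of scaling in one call; a minimal sketch, with an example image path:

// Sketch: scaling with the built-in resize() function
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;

int main()
{
 Mat src = imread("C:\\Users\\arjun\\Desktop\\arj.jpg", CV_LOAD_IMAGE_COLOR);  // example path
 if( !src.data ) return -1;

 Mat zoomed, shrunk;
 resize(src, zoomed, Size(), 2.0, 2.0, INTER_NEAREST);  // zoom in by 2 (nearest neighbour, like the manual code)
 resize(src, shrunk, Size(), 0.5, 0.5, INTER_AREA);     // shrink by 2

 imshow("Zoomed", zoomed);
 imshow("Shrunk", shrunk);
 waitKey(0);
 return 0;
}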


Many times we need to zoom the image not just by a factor of two, but by an arbitrary factor.
The code given below takes the zoom factor as input from the user.
Refer the code below:
// OpenCV Image Zooming Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);
    if( !src1.data ) { printf("Error loading src1 \n"); return -1;}
 
  int a;
  do
     //while(a<=0)
   {
 cout<<"Enter the Zooming factor greater than 0 "<<endl;
 cin>>a;
 cout<<"Magnification factor is: "<<a<<endl;
  if( a<=0 )
 {
  cout<<"Invalid values... Please re-enter the correct values \n";
 }
   }while(a<=0);
  
 src2 = Mat::zeros(src1.rows*a,src1.cols*a, CV_8UC3);

for (int i=0; i<src1.cols ; i++)
{
 for (int j=0 ; j<src1.rows ; j++)
 {
  
     Vec3b color1 = src1.at<Vec3b>(Point(i,j));
     for(int p=0;p<a;p++)
   {
  for(int q=0;q<a;q++)
  {
  Vec3b color2 = src2.at<Vec3b>(Point(i,j));
  color2.val[0] = color1.val[0];
  color2.val[1] = color1.val[1];
  color2.val[2] = color1.val[2];

   
  src2.at<Vec3b>(Point(i*a+p,j*a+q)) = color2;
  }
   }
 }
 }

 //imwrite("C:\\Users\\arjun\\Desktop\\zoomed.jpg",src2);
 namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
 imshow("Original Image", src1);

 namedWindow("Scaled Image",CV_WINDOW_AUTOSIZE); 
 imshow("Scaled Image", src2); 

 waitKey(0);
 return 0;
}

The same zooming can be done by adding a trackbar. By dragging the slider on the trackbar we can dynamically change the zoom factor of the image.
Refer the code below:
// OpenCV Image Zooming Tutorial with Trackbar
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

Mat src2,src1;
int a=1;
const int zoom_slider_max = 50;

void zoom_trackbar( int , void* )
{
 if( a < 1 ) a = 1;   // guard against an empty output image when the trackbar is at 0
 src2 = Mat::zeros(src1.rows*a,src1.cols*a, CV_8UC3);
 for (int i=0; i<src1.cols ; i++)
  {
 for (int j=0 ; j<src1.rows ; j++)
 {
  
     Vec3b color1 = src1.at<Vec3b>(Point(i,j));
     for(int p=0;p<a;p++)
   {
  for(int q=0;q<a;q++)
  {
  Vec3b color2 = src2.at<Vec3b>(Point(i,j));
  color2.val[0] = color1.val[0];
  color2.val[1] = color1.val[1];
  color2.val[2] = color1.val[2];

  src2.at<Vec3b>(Point(i*a+p,j*a+q)) = color2;
   
  }
   }
 }
 }
 imshow("Scaled Image", src2); 
 cout<<"Image Got Processed for Zoom factor="<<a<<endl;
}
int main()
{
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);
    if( !src1.data ) { printf("Error loading src1 \n"); return -1; }

 namedWindow("Scaled Image",CV_WINDOW_AUTOSIZE); 
 createTrackbar( "Magnify", "Scaled Image", &a, zoom_slider_max, zoom_trackbar );
 zoom_trackbar( a, 0 );

 //imwrite("C:\\Users\\arjun\\Desktop\\zoomed.jpg",src2);

 namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
 imshow("Original Image", src1);

 waitKey(0);
 return 0;
}

Output:


Thursday 19 November 2015

Flip an image in opencv by using flip function


Syntax:
C++: void flip(InputArray src, OutputArray dst, int flipCode)

Flips a 2D array around vertical, horizontal, or both axes.

Parameters:
src – input array.
dst – output array of the same size and type as src.
flipCode – a flag to specify how to flip the array.

 0 means flipping around the x-axis
 positive value (for example, 1) means flipping around y-axis
 Negative value (for example, -1) means flipping around both axes

Here is the code of flipping the image:

// OpenCV Image Flipping Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
  double alpha, beta; 

 Mat src1,src2;
 // Read image (same size, same type )
 src1 = imread("C:\Users\arjun\Desktop\flipsrc.jpg");//Linux.jpg");//atom.jpg");//"Linux.jpg"); //f1.jpg
 
 if( !src1.data ) { printf("Error loadind src1 n"); return -1;}

 //Create Window
 namedWindow("Original Image", CV_WINDOW_AUTOSIZE );
 imshow( "Original Image", src1 );
 
 flip(src1,src2,0);
 namedWindow("Flip-x-axis", CV_WINDOW_AUTOSIZE );
 imshow( "Flip-x-axis", src2 );
 imwrite( "C:\Users\arjun\Desktop\flipXaxis.jpg",src2);


 flip(src1,src2,1);
 namedWindow("Flip-y-axis", CV_WINDOW_AUTOSIZE );
 imshow( "Flip-y-axis", src2 );
 imwrite( "C:\Users\arjun\Desktop\flipYaxis.jpg",src2);

 flip(src1,src2,-1);
 namedWindow("Flip-z-axis", CV_WINDOW_AUTOSIZE );
 imshow( "Flip-z-axis", src2 );
 imwrite( "C:\Users\arjun\Desktop\flipZaxis.jpg",src2);

 waitKey(0);
 return 0;
 
}

Input Image:
opencv image  without flip

Output:
Flipping around the X axis
opencv Flipping around the X axis

Flipping around the Y axis
opencv Flipping around the Y axis

Flipping around both the axis
opencv Flipping around both the axis
We can also put the flip function inside a for loop and obtain all three flips of the image, as shown below:
// OpenCV Image Flipping Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
  double alpha, beta; 

 Mat src1,src2;
 // Read image (same size, same type )
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg");
 
 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}
 
 //Create Window
 namedWindow("Original Image", CV_WINDOW_AUTOSIZE );
 imshow( "Original Image", src1 );
 for(int i=-1;i<2;i++)
 {
 flip(src1,src2,i);
 namedWindow("Flip", CV_WINDOW_AUTOSIZE );
 imshow( "Flip", src2 );
 waitKey(5000);
  }
 waitKey(0);
 return 0;
 }

Output:

Flip an image along the X axis, the Y axis, and both axes without using the OpenCV flip function

Code to flip an Image along X axis:
// OpenCV Image Flipping along X axis Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);
 src2=src1.clone();

 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}
 if( !src2.data ) { printf("Error loading src2 \n"); return -1;}

 cout<<"src1.rows="<<src1.rows<<endl;
 cout<<"src1.cols="<<src1.cols<<endl;

for (int i=0 ; i<src1.cols ; i++){
for (int j=0 ; j<src1.rows ; j++)
 {
Vec3b color2 = src1.at<Vec3b>(Point(i,j));
Vec3b color1 = src2.at<Vec3b>(Point((src1.cols-1)-i,j));
      color2.val[0] = color1.val[0];
      color2.val[1] = color1.val[1];
      color2.val[2] = color1.val[2];
   
      src1.at<Vec3b>(Point(i,j)) = color1;
 }
 }
 
namedWindow("Display Flipped Image",CV_WINDOW_AUTOSIZE); 
imshow("Display Flipped Image", src1); 
//imwrite( "C:\\Users\\arjun\\Desktop\\X-axis_flip.jpg",src1);

namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src2);

 waitKey(0);
 return 0;
}
Input:
opencv input image

Output:
opencv Flipping around X axis

Code to flip an Image along Y axis:
// OpenCV Image Flipping along Y axis Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);
 src2=src1.clone();

 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}
 if( !src2.data ) { printf("Error loading src2 \n"); return -1;}

 cout<<"src1.rows="<<src1.rows<<endl;
 cout<<"src1.cols="<<src1.cols<<endl;

for (int i=0 ; i<src1.cols ; i++){
for (int j=0 ; j<src1.rows ; j++)
 {
Vec3b color2 = src1.at<Vec3b>(Point(i,j));
Vec3b color1 = src2.at<Vec3b>(Point(i,(src1.rows-1)-j));
      color2.val[0] = color1.val[0];
      color2.val[1] = color1.val[1];
      color2.val[2] = color1.val[2];
   
      src1.at<Vec3b>(Point(i,j)) = color1;
 }
 }
 
namedWindow("Display Flipped Image",CV_WINDOW_AUTOSIZE); 
imshow("Display Flipped Image", src1); 
imwrite( "C:\\Users\\arjun\\Desktop\\Y-axis_flip.jpg",src1);

namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src2);

 waitKey(0);
 return 0;
}

Input:
opencv flip

Output:
opencv Flipping around Y axis

Code to flip an image along both the axes:
// OpenCV Image Flipping along both the axis Tutorial 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
 Mat src1,src2;
 src1 = imread("C:\\Users\\arjun\\Desktop\\arj.jpg",CV_LOAD_IMAGE_COLOR);
 src2=src1.clone();

 if( !src1.data ) { printf("Error loading src1 \n"); return -1;}
 if( !src2.data ) { printf("Error loading src2 \n"); return -1;}

 cout<<"src1.rows="<<src1.rows<<endl;
 cout<<"src1.cols="<<src1.cols<<endl;

for (int i=0 ; i<src1.cols ; i++){
for (int j=0 ; j<src1.rows ; j++)
 {
Vec3b color2 = src1.at<Vec3b>(Point(i,j));
Vec3b color1 = src2.at<Vec3b>(Point((src1.cols-1)-i,(src1.rows-1)-j));
      color2.val[0] = color1.val[0];
      color2.val[1] = color1.val[1];
      color2.val[2] = color1.val[2];
   
      src1.at<Vec3b>(Point(i,j)) = color1;
 }
 }
 
namedWindow("Display Flipped Image",CV_WINDOW_AUTOSIZE); 
imshow("Display Flipped Image", src1); 
imwrite( "C:\\Users\\arjun\\Desktop\\Both-axis_flip.jpg",src1);

namedWindow("Original Image",CV_WINDOW_AUTOSIZE); 
imshow("Original Image", src2);

 waitKey(0);
 return 0;
}

Input:
opencv image flipping

Output:
opencv Flipping around both axis