-1

Apologies if the problem is that my English isn't good. I am new to OpenCV. I want to know the area where the Stitcher merges (overlaps) the images, like this↓

image

to

merged image

Spektre
  • 49,595
  • 11
  • 110
  • 380
  • 1
    Provide more details please. – Piglet Mar 04 '16 at 14:46
  • see [Wiki: autostitch](https://en.wikipedia.org/wiki/AutoStitch) how it works. When you run the stitching algo you will obtain transform matrix for each image so convert that to a polygon and compute the intersection/overlap of them as geometric problem. – Spektre Mar 07 '16 at 08:04

1 Answer

0

If you know the order in which your images were taken, then you may follow this code for stitching them together. If the order is not known, the solution becomes more complex. Also, this code is designed for images of the same size; if your camera is shifted it may produce erroneous results, so implement some sanity checks. You may refer to this article, http://ramsrigoutham.com/2012/11/22/panorama-image-stitching-in-opencv/, for a much better understanding of the stitching function that has been called twice in main.

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
void stitching( cv::Mat&,cv::Mat& ,cv::Mat& );
 int main()
{        
 Mat image1= imread("image1.jpg");
 Mat image2= imread("image2.jpg");
 Mat image3= imread("image3.jpg");
 Mat gray_image1;
 Mat gray_image2;
 Mat gray_image3;
 Mat result1,result2;
 // Convert to Grayscale
 cvtColor( image1, gray_image1, CV_RGB2GRAY );
 cvtColor( image2, gray_image2, CV_RGB2GRAY );
 cvtColor( image3, gray_image3, CV_RGB2GRAY );

 stitching(gray_image1,gray_image2,result1);
 stitching(result1,gray_image3,result2);
 cv::imshow("stitched image"result2);
cv::WaitKey(0);


 }


  // Stitch im2 and im1 into stitch_im: detects SURF features in both images,
  // matches them, estimates a homography H mapping im1 -> im2, warps im1
  // onto a canvas wide enough for both, then pastes im2 at the origin.
  // Inputs are expected to be single-channel (grayscale) images of equal size.
  void stitching( cv::Mat& im1,cv::Mat& im2,cv::Mat& stitch_im)
  {
    // SURF Hessian threshold: higher -> fewer, stronger keypoints.
    int minHessian = 400;

    SurfFeatureDetector detector( minHessian );

    std::vector< KeyPoint > keypoints_object, keypoints_scene;

    detector.detect(im1, keypoints_object );
    detector.detect(im2, keypoints_scene );

    // Compute SURF descriptors for the detected keypoints.
    SurfDescriptorExtractor extractor;

    Mat descriptors_object, descriptors_scene;

    extractor.compute( im1, keypoints_object, descriptors_object );
    extractor.compute( im2, keypoints_scene, descriptors_scene );

    // Nearest-neighbour descriptor matching (FLANN = approximate KD-tree).
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_object, descriptors_scene, matches );

    // Find the distance range of the matches to derive a quality threshold.
    double max_dist = 0; double min_dist = 100;

    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }

    // Keep only "good" matches: distance below 3x the best distance.
    std::vector< DMatch > good_matches;

    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        if( matches[i].distance < 3*min_dist )
        {
            good_matches.push_back( matches[i]);
        }
    }

    // Collect matched point pairs (fixed signed/unsigned loop index).
    std::vector< Point2f > obj;
    std::vector< Point2f > scene;

    for( size_t i = 0; i < good_matches.size(); i++ )
    {
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }

    // findHomography needs at least 4 point correspondences; guard against
    // a crash when the images share too few features.
    if( good_matches.size() < 4 )
    {
        std::cout << "Not enough good matches to compute a homography." << std::endl;
        return;
    }

    // H maps points of im1 into im2's coordinate frame (RANSAC-robust).
    Mat H = findHomography( obj, scene, CV_RANSAC );

    // Warp im1 onto a canvas wide enough to hold both images side by side.
    warpPerspective(im1, stitch_im, H, cv::Size(im1.cols+im2.cols, im1.rows));

    // BUG FIX: the original never copied im2 into the result, so the output
    // contained only the warped first image. Paste im2 at the origin to
    // complete the panorama (as in the referenced article).
    cv::Mat half(stitch_im, cv::Rect(0, 0, im2.cols, im2.rows));
    im2.copyTo(half);
  }
Nikita Chopra
  • 440
  • 9
  • 22