
I am struggling with the following problem: in an image that I have captured from the camera, I need to find the existence and location of certain patterns.

For this I found OpenCV's matchTemplate method. I used the images from the OpenCV sample and wrote the code below, but the result is different from what I expected.

http://opencv.itseez.com/doc/tutorials/imgproc/histograms/template_matching/template_matching.html

This is the link that explains matchTemplate.

When I run it, it shows this result:

[screenshot of the incorrect matching result]

My code is below:

-(void)matchPatchNet
{
    IplImage    *res;
    CvPoint     minloc, maxloc;
    double      minval, maxval;
    int         img_width, img_height;
    int         tpl_width, tpl_height;
    int         res_width, res_height;


    NSString *pathPatron = [[NSBundle mainBundle] pathForResource:@"timage" ofType:@"jpg"];

    UIImage *tim = [UIImage imageWithContentsOfFile:pathPatron];
    NSString *pathPatron2 = [[NSBundle mainBundle] pathForResource:@"simage" ofType:@"jpg"];

    UIImage *tim2 = [UIImage imageWithContentsOfFile:pathPatron2];

    IplImage *img = [self CreateIplImageFromUIImage:tim2];

    IplImage *tpl = [self CreateIplImageFromUIImage:tim];
    cv::Mat forground1 = [tim2 CVMat];

    cv::Mat forground2 = [tim CVMat];
    img_width  = img->width;
    img_height = img->height;
    tpl_width  = tpl->width;
    tpl_height = tpl->height;
    res_width  = img_width - tpl_width + 1;
    res_height = img_height - tpl_height + 1;    
    res = cvCreateImage( cvSize( res_width, res_height ), IPL_DEPTH_32F, 1 );

    cvMatchTemplate( img, tpl, res, CV_TM_CCOEFF_NORMED );

    UIImage *ipala=[self UIImageFromIplImage:res];
    cv::Mat forground3 = [ipala CVMat];
    cv::normalize(forground3, forground3, 0, 1, cv::NORM_MINMAX, CV_8UC1);

    cvMinMaxLoc( res, &minval, &maxval, &minloc, &maxloc, 0 );    

    cvRectangle( img, 
                cvPoint( maxloc.x, maxloc.y ), 
                cvPoint( maxloc.x + tpl_width, maxloc.y + tpl_height ),
                cvScalar( 0, 255, 0, 0 ), 1, 0, 0 ); 
    /* display images */
    self.imageView.image = [self UIImageFromIplImage:img]; 
    cvReleaseImage(&img);
    cvReleaseImage(&tpl);
    cvReleaseImage(&res);
}

Please tell me what I am doing wrong. Please help me.

Thanks in advance

Gypsa

1 Answer


I strongly suggest you use the C++ interface and the current docs, which you'll find here: OpenCV v2.4.2 documentation

Get the latest version of OpenCV for iOS here: OpenCV for iOS. Drop it into your project and include this in your project prefix header:

ExampleApp-Prefix.pch:

#ifdef __cplusplus
    #import <opencv2/opencv.hpp>
#endif
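
Don't forget that every implementation file using C++ code must be renamed from .m to .mm so it compiles as Objective-C++ (see also my comment below). As a quick sanity check, with a hypothetical file name, assuming the prefix header above is in place:

// SanityCheck.mm (the .mm extension is required so the C++ headers from the prefix compile)
#import <Foundation/Foundation.h>

void opencvSanityCheck()
{
    // create a small matrix just to prove OpenCV compiles and links
    cv::Mat m = cv::Mat::zeros( 10, 10, CV_8UC1 );
    NSLog( @"OpenCV is linked: created a %d x %d matrix", m.rows, m.cols );
}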

Use this to "convert" UIImages to cv::Mats:

UIImageCVMatConverter.h:

//
//  UIImageCVMatConverter.h
//

#import <Foundation/Foundation.h>

@interface UIImageCVMatConverter : NSObject {

}

+ (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat;
+ (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat withUIImage:(UIImage*)image;
+ (cv::Mat)cvMatFromUIImage:(UIImage *)image;
+ (cv::Mat)cvMatGrayFromUIImage:(UIImage *)image;
+ (UIImage *)scaleAndRotateImageFrontCamera:(UIImage *)image;
+ (UIImage *)scaleAndRotateImageBackCamera:(UIImage *)image;

@end

UIImageCVMatConverter.mm:

//
//  UIImageCVMatConverter.mm
//

#import "UIImageCVMatConverter.h"

@implementation UIImageCVMatConverter

+ (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat withUIImage:(UIImage *)image
{
    CGColorSpaceRef colorSpace = CGImageGetColorSpace( image.CGImage );
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;
    CGFloat widthStep = image.size.width;
    CGContextRef contextRef = CGBitmapContextCreate( NULL, cols, rows, 8, widthStep*4, colorSpace, kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault );
    CGContextDrawImage( contextRef, CGRectMake(0, 0, cols, rows), image.CGImage );
    CGContextSetRGBStrokeColor( contextRef, 1, 0, 0, 1 );
    CGImageRef cgImage = CGBitmapContextCreateImage( contextRef );
    UIImage* result = [UIImage imageWithCGImage:cgImage];
    CGImageRelease( cgImage );
    CGContextRelease( contextRef );
    CGColorSpaceRelease( colorSpace );
    return result;
}

+ (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
    NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()];
    CGColorSpaceRef colorSpace;
    if ( cvMat.elemSize() == 1 ) {
        colorSpace = CGColorSpaceCreateDeviceGray();
    }
    else {
        colorSpace = CGColorSpaceCreateDeviceRGB();
    }
    CGDataProviderRef provider = CGDataProviderCreateWithCFData( (__bridge CFDataRef)data );
    CGImageRef imageRef = CGImageCreate( cvMat.cols, cvMat.rows, 8, 8 * cvMat.elemSize(), cvMat.step[0], colorSpace, kCGImageAlphaNone|kCGBitmapByteOrderDefault, provider, NULL, false, kCGRenderingIntentDefault );
    UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
    CGImageRelease( imageRef );
    CGDataProviderRelease( provider );
    CGColorSpaceRelease( colorSpace );
    return finalImage;
}

+ (cv::Mat)cvMatFromUIImage:(UIImage *)image
{
    CGColorSpaceRef colorSpace = CGImageGetColorSpace( image.CGImage );
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;
    cv::Mat cvMat( rows, cols, CV_8UC4 );
    CGContextRef contextRef = CGBitmapContextCreate( cvMat.data, cols, rows, 8, cvMat.step[0], colorSpace, kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault );
    CGContextDrawImage( contextRef, CGRectMake(0, 0, cols, rows), image.CGImage );
    CGContextRelease( contextRef );
    CGColorSpaceRelease( colorSpace );
    return cvMat;
}

+ (cv::Mat)cvMatGrayFromUIImage:(UIImage *)image
{
    cv::Mat cvMat = [UIImageCVMatConverter cvMatFromUIImage:image];
    cv::Mat grayMat;
    if ( cvMat.channels() == 1 ) {
        grayMat = cvMat;
    }
    else {
        grayMat = cv::Mat( cvMat.rows, cvMat.cols, CV_8UC1 );
        cv::cvtColor( cvMat, grayMat, CV_BGR2GRAY );
    }
    return grayMat;
}

+ (UIImage *)scaleAndRotateImageBackCamera:(UIImage *)image
{
  static int kMaxResolution = 640;
  CGImageRef imgRef = image.CGImage;
  CGFloat width = CGImageGetWidth( imgRef );
  CGFloat height = CGImageGetHeight( imgRef );
  CGAffineTransform transform = CGAffineTransformIdentity;
  CGRect bounds = CGRectMake( 0, 0, width, height );
  if ( width > kMaxResolution || height > kMaxResolution ) {
    CGFloat ratio = width/height;
    if ( ratio > 1 ) {
      bounds.size.width = kMaxResolution;
      bounds.size.height = bounds.size.width / ratio;
    }
    else {
      bounds.size.height = kMaxResolution;
      bounds.size.width = bounds.size.height * ratio;
    }
  }
  CGFloat scaleRatio = bounds.size.width / width;
  CGSize imageSize = CGSizeMake( CGImageGetWidth(imgRef), CGImageGetHeight(imgRef) );
  CGFloat boundHeight;
  UIImageOrientation orient = image.imageOrientation;
  switch( orient ) {
    case UIImageOrientationUp:
      transform = CGAffineTransformIdentity;
      break;
    case UIImageOrientationUpMirrored:
      transform = CGAffineTransformMakeTranslation(imageSize.width, 0.0);
      transform = CGAffineTransformScale(transform, -1.0, 1.0);
      break;
    case UIImageOrientationDown:
      transform = CGAffineTransformMakeTranslation(imageSize.width, imageSize.height);
      transform = CGAffineTransformRotate(transform, M_PI);
      break;
    case UIImageOrientationDownMirrored:
      transform = CGAffineTransformMakeTranslation(0.0, imageSize.height);
      transform = CGAffineTransformScale(transform, 1.0, -1.0);
      break;
    case UIImageOrientationLeftMirrored:
      boundHeight = bounds.size.height;
      bounds.size.height = bounds.size.width;
      bounds.size.width = boundHeight;
      transform = CGAffineTransformMakeTranslation(imageSize.height, imageSize.width);
      transform = CGAffineTransformScale(transform, -1.0, 1.0);
      transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
      break;
    case UIImageOrientationLeft:
      boundHeight = bounds.size.height;
      bounds.size.height = bounds.size.width;
      bounds.size.width = boundHeight;
      transform = CGAffineTransformMakeTranslation(0.0, imageSize.width);
      transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
      break;
    case UIImageOrientationRightMirrored:
      boundHeight = bounds.size.height;
      bounds.size.height = bounds.size.width;
      bounds.size.width = boundHeight;
      transform = CGAffineTransformMakeScale(-1.0, 1.0);
      transform = CGAffineTransformRotate(transform, M_PI / 2.0);
      break;
    case UIImageOrientationRight:
      boundHeight = bounds.size.height;
      bounds.size.height = bounds.size.width;
      bounds.size.width = boundHeight;
      transform = CGAffineTransformMakeTranslation(imageSize.height, 0.0);
      transform = CGAffineTransformRotate(transform, M_PI / 2.0);
      break;
    default:
      [NSException raise:NSInternalInconsistencyException format:@"Invalid image orientation"];
  }
  UIGraphicsBeginImageContext( bounds.size );
  CGContextRef context = UIGraphicsGetCurrentContext();
  if ( orient == UIImageOrientationRight || orient == UIImageOrientationLeft ) {
    CGContextScaleCTM( context, -scaleRatio, scaleRatio );
    CGContextTranslateCTM( context, -height, 0 );
  }
  else {
    CGContextScaleCTM( context, scaleRatio, -scaleRatio );
    CGContextTranslateCTM( context, 0, -height );
  }
  CGContextConcatCTM( context, transform );
  CGContextDrawImage( UIGraphicsGetCurrentContext(), CGRectMake(0, 0, width, height), imgRef );
  UIImage *returnImage = UIGraphicsGetImageFromCurrentImageContext();
  UIGraphicsEndImageContext();
  return returnImage;
}

+ (UIImage *)scaleAndRotateImageFrontCamera:(UIImage *)image
{
  static int kMaxResolution = 640;
  CGImageRef imgRef = image.CGImage;
  CGFloat width = CGImageGetWidth(imgRef);
  CGFloat height = CGImageGetHeight(imgRef);
  CGAffineTransform transform = CGAffineTransformIdentity;
  CGRect bounds = CGRectMake( 0, 0, width, height);
  if (width > kMaxResolution || height > kMaxResolution) {
    CGFloat ratio = width/height;
    if (ratio > 1) {
      bounds.size.width = kMaxResolution;
      bounds.size.height = bounds.size.width / ratio;
    } else {
      bounds.size.height = kMaxResolution;
      bounds.size.width = bounds.size.height * ratio;
    }
  }

  CGFloat scaleRatio = bounds.size.width / width;
  CGSize imageSize = CGSizeMake(CGImageGetWidth(imgRef), CGImageGetHeight(imgRef));
  CGFloat boundHeight;
  UIImageOrientation orient = image.imageOrientation;
  switch(orient) {
    case UIImageOrientationUp:
      transform = CGAffineTransformIdentity;
      break;
    case UIImageOrientationUpMirrored:
      transform = CGAffineTransformMakeTranslation(imageSize.width, 0.0);
      transform = CGAffineTransformScale(transform, -1.0, 1.0);
      break;
    case UIImageOrientationDown:
      transform = CGAffineTransformMakeTranslation(imageSize.width, imageSize.height);
      transform = CGAffineTransformRotate(transform, M_PI);
      break;
    case UIImageOrientationDownMirrored:
      transform = CGAffineTransformMakeTranslation(0.0, imageSize.height);
      transform = CGAffineTransformScale(transform, 1.0, -1.0);
      break;
    case UIImageOrientationLeftMirrored:
      boundHeight = bounds.size.height;
      bounds.size.height = bounds.size.width;
      bounds.size.width = boundHeight;
      transform = CGAffineTransformMakeTranslation(imageSize.height, imageSize.width);
      transform = CGAffineTransformScale(transform, -1.0, 1.0);
      transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
      break;
    case UIImageOrientationLeft:
      boundHeight = bounds.size.height;
      bounds.size.height = bounds.size.width;
      bounds.size.width = boundHeight;
      transform = CGAffineTransformMakeTranslation(0.0, imageSize.width);
      transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
      break;
    case UIImageOrientationRight:
    case UIImageOrientationRightMirrored:
      boundHeight = bounds.size.height;
      bounds.size.height = bounds.size.width;
      bounds.size.width = boundHeight;
      transform = CGAffineTransformMakeScale(-1.0, 1.0);
      transform = CGAffineTransformRotate(transform, M_PI / 2.0);
      break;
    default:
      [NSException raise:NSInternalInconsistencyException format:@"Invalid image orientation"];
  }
  UIGraphicsBeginImageContext( bounds.size );
  CGContextRef context = UIGraphicsGetCurrentContext();
  if ( orient == UIImageOrientationRight || orient == UIImageOrientationLeft ) {
    CGContextScaleCTM(context, -scaleRatio, scaleRatio);
    CGContextTranslateCTM(context, -height, 0);
  }
  else {
    CGContextScaleCTM(context, scaleRatio, -scaleRatio);
    CGContextTranslateCTM(context, 0, -height);
  }
  CGContextConcatCTM( context, transform );
  CGContextDrawImage( UIGraphicsGetCurrentContext(), CGRectMake(0, 0, width, height), imgRef );
  UIImage *returnImage = UIGraphicsGetImageFromCurrentImageContext();
  UIGraphicsEndImageContext();
  return returnImage;
}

@end
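
To quickly verify the converter works, here's a minimal usage sketch (not from my original project; "photo.jpg" is a placeholder asset name):

#import "UIImageCVMatConverter.h"

// hypothetical round-trip: UIImage -> cv::Mat -> UIImage
UIImage *input = [UIImage imageNamed:@"photo.jpg"];                   // placeholder asset
cv::Mat rgba   = [UIImageCVMatConverter cvMatFromUIImage:input];      // CV_8UC4
cv::Mat gray   = [UIImageCVMatConverter cvMatGrayFromUIImage:input];  // CV_8UC1
UIImage *shown = [UIImageCVMatConverter UIImageFromCVMat:gray];       // back for display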

This is the code I used to find several markers inside an image and push their center points into a std::vector:

#import "UIImageCVMatConverter.h"

...

cv::Mat src_img;
cv::Mat result_mat;
cv::Mat debug_img;
cv::Mat template_img;

NSArray *markerImages = [NSArray arrayWithObjects:@"marker-1.png", nil];

std::vector<cv::Point> markerPoints;

// input image: convert to single-channel grayscale for matching
// (cvMatFromUIImage returns CV_8UC4, which CV_GRAY2BGR would reject,
// so go through cvMatGrayFromUIImage first; debug_img keeps a BGR copy for drawing)
src_img = [UIImageCVMatConverter cvMatGrayFromUIImage:cameraInputImage];
cv::cvtColor(src_img, debug_img, CV_GRAY2BGR);

for (NSString *marker in markerImages) {
    // template image, also single-channel so its type matches src_img
    template_img = [UIImageCVMatConverter cvMatGrayFromUIImage:[UIImage imageNamed:marker]];

    int match_method = CV_TM_CCORR_NORMED;

    cv::matchTemplate(src_img, template_img, result_mat, match_method);

    cv::normalize(result_mat, result_mat, 0, 1, cv::NORM_MINMAX, -1, cv::Mat());

    double minVal;
    double maxVal;

    cv::Point minLoc, maxLoc, matchLoc;
    cv::minMaxLoc(result_mat, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat() );
    if ( match_method  == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED ) {
        matchLoc = minLoc;
    }
    else {
        matchLoc = maxLoc;
    }

    cv::Point top_left = matchLoc;
    cv::Point bottom_right = cv::Point(matchLoc.x + template_img.cols , matchLoc.y + template_img.rows);
    cv::Point center = cv::Point(0,0);

    center.x = (bottom_right.x + top_left.x) / 2;
    center.y = (bottom_right.y + top_left.y) / 2;

    markerPoints.push_back(center);
}
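
If you want to see the matches, one way (just a sketch; self.imageView is assumed to be an image view in your own view controller) is to mark each collected center on the debug copy and display it:

// sketch: draw each matched center onto the BGR debug image and display it
for ( size_t i = 0; i < markerPoints.size(); i++ ) {
    cv::circle( debug_img, markerPoints[i], 10, cv::Scalar( 0, 255, 0 ), 2 );
}
self.imageView.image = [UIImageCVMatConverter UIImageFromCVMat:debug_img];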

I hope that helps …

dom
  • Thanks for the answer. I tried this by creating a new project, adding the OpenCV framework plus the other frameworks (ImageIO, AVFoundation, CoreVideo, CoreMedia) and libz, then created the files as you have given, but it shows 37 issues. Can you give me an id to which I can send the project, so that you can help me a bit more? – Gypsa Aug 22 '12 at 06:43
  • I think OpenCV is not working properly. Do I need to do something else in the settings or in the C++ code to make it work? – Gypsa Aug 22 '12 at 06:43
  • Don't forget to rename your implementation files that use C++ code from .m to .mm. To really get started, please read the OpenCV presentation slides about iOS programming, which you'll find on their website. And at least upvote if the answer was useful ;) – dom Aug 22 '12 at 10:15
  • I am still not able to run the application. Yes, I have changed to .mm, but it still shows 17 errors in the core.hpp, operation.hpp and mat.hpp files. Anyway, I have upvoted it. – Gypsa Aug 22 '12 at 10:48
  • Update your question with the errors you get, so somebody can help you ;) The code is working; I've tested it twice! – dom Aug 22 '12 at 21:30