matchTemplate in OpenCV not working as shown in the OpenCV documentation

I am struggling with this problem: in the images I capture from the camera, I need to detect the presence and location of certain patterns.

For this I found the matchTemplate method. I used the same images that the OpenCV samples use and wrote the code below, but the result is different.

http://opencv.itseez.com/doc/tutorials/imgproc/histograms/template_matching/template_matching.html

This is the tutorial that explains matchTemplate.
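For context, the tutorial's pipeline boils down to a handful of calls. Here is a minimal standalone sketch of that flow (the file names "scene.jpg" and "template.jpg" are placeholders, not the actual sample images):

    // Minimal sketch of the tutorial's matching flow (file names are placeholders).
    #include <opencv2/opencv.hpp>

    int main()
    {
        cv::Mat img = cv::imread("scene.jpg");     // search image (hypothetical file)
        cv::Mat tpl = cv::imread("template.jpg");  // template patch (hypothetical file)
        cv::Mat result;

        // result is (W-w+1) x (H-h+1): one similarity score per candidate position
        cv::matchTemplate(img, tpl, result, CV_TM_CCOEFF_NORMED);

        double minVal, maxVal;
        cv::Point minLoc, maxLoc;
        cv::minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);

        // for the CCOEFF/CCORR methods the best match is at the maximum
        cv::rectangle(img, maxLoc,
                      cv::Point(maxLoc.x + tpl.cols, maxLoc.y + tpl.rows),
                      cv::Scalar(0, 255, 0), 2);
        cv::imwrite("match.jpg", img);
        return 0;
    }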

When I run my implementation, it produces this result:

[screenshot of the incorrect match result]

My code is as follows:

    -(void)matchPatchNet
    {
        IplImage *res;
        CvPoint minloc, maxloc;
        double minval, maxval;
        int img_width, img_height;
        int tpl_width, tpl_height;
        int res_width, res_height;

        NSString *pathPatron = [[NSBundle mainBundle] pathForResource:@"timage" ofType:@"jpg"];
        UIImage *tim = [UIImage imageWithContentsOfFile:pathPatron];

        NSString *pathPatron2 = [[NSBundle mainBundle] pathForResource:@"simage" ofType:@"jpg"];
        UIImage *tim2 = [UIImage imageWithContentsOfFile:pathPatron2];

        IplImage *img = [self CreateIplImageFromUIImage:tim2];
        IplImage *tpl = [self CreateIplImageFromUIImage:tim];

        cv::Mat forground1 = [tim2 CVMat];
        cv::Mat forground2 = [tim CVMat];

        img_width  = img->width;
        img_height = img->height;
        tpl_width  = tpl->width;
        tpl_height = tpl->height;
        res_width  = img_width - tpl_width + 1;
        res_height = img_height - tpl_height + 1;

        res = cvCreateImage( cvSize( res_width, res_height ), IPL_DEPTH_32F, 1 );
        cvMatchTemplate( img, tpl, res, CV_TM_CCOEFF_NORMED );

        UIImage *ipala = [self UIImageFromIplImage:res];
        cv::Mat forground3 = [ipala CVMat];
        cv::normalize(forground3, forground3, 0, 1, cv::NORM_MINMAX, CV_8UC1);

        cvMinMaxLoc( res, &minval, &maxval, &minloc, &maxloc, 0 );

        cvRectangle( img,
                     cvPoint( maxloc.x, maxloc.y ),
                     cvPoint( maxloc.x + tpl_width, maxloc.y + tpl_height ),
                     cvScalar( 0, 255, 0, 0 ), 1, 0, 0 );

        /* display images */
        self.imageView.image = [self UIImageFromIplImage:img];

        cvReleaseImage(&img);
        cvReleaseImage(&tpl);
        cvReleaseImage(&res);
    }

Please tell me what I am doing wrong. Please help me.

Thanks in advance.

I strongly suggest you use the C++ interface and the current documentation, which you can find here: OpenCV v2.4.2 documentation
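For orientation, the C calls used in the question map one-to-one onto the C++ interface; a minimal sketch of the correspondence (variable names are illustrative, not from the question):

    cv::Mat img, tpl, result;  // instead of IplImage* plus cvCreateImage
    cv::matchTemplate(img, tpl, result, CV_TM_CCOEFF_NORMED);  // instead of cvMatchTemplate
    double minVal, maxVal;
    cv::Point minLoc, maxLoc;
    cv::minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc); // instead of cvMinMaxLoc

With cv::Mat you also get automatic memory management, so the cvReleaseImage bookkeeping disappears.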

Get the latest version of OpenCV for iOS here: OpenCV for iOS, drop it into your project, and include it in your project's prefix header:

ExampleApp-Prefix.pch:

    #ifdef __cplusplus
    #import <opencv2/opencv.hpp>
    #endif

Use this to convert UIImages to cv::Mat:

UIImageCVMatConverter.h:

    //
    //  UIImageCVMatConverter.h
    //

    #import <Foundation/Foundation.h>

    @interface UIImageCVMatConverter : NSObject {
    }

    + (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat;
    + (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat withUIImage:(UIImage*)image;
    + (cv::Mat)cvMatFromUIImage:(UIImage *)image;
    + (cv::Mat)cvMatGrayFromUIImage:(UIImage *)image;
    + (UIImage *)scaleAndRotateImageFrontCamera:(UIImage *)image;
    + (UIImage *)scaleAndRotateImageBackCamera:(UIImage *)image;

    @end

UIImageCVMatConverter.mm:

    //
    //  UIImageCVMatConverter.mm
    //

    #import "UIImageCVMatConverter.h"

    @implementation UIImageCVMatConverter

    + (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat withUIImage:(UIImage*)image;
    {
        CGColorSpaceRef colorSpace = CGImageGetColorSpace( image.CGImage );
        CGFloat cols = image.size.width;
        CGFloat rows = image.size.height;
        CGFloat widthStep = image.size.width;
        CGContextRef contextRef = CGBitmapContextCreate( NULL, cols, rows, 8, widthStep*4, colorSpace, kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault );
        CGContextDrawImage( contextRef, CGRectMake(0, 0, cols, rows), image.CGImage );
        CGContextSetRGBStrokeColor( contextRef, 1, 0, 0, 1 );
        CGImageRef cgImage = CGBitmapContextCreateImage( contextRef );
        UIImage* result = [UIImage imageWithCGImage:cgImage];
        CGImageRelease( cgImage );
        CGContextRelease( contextRef );
        CGColorSpaceRelease( colorSpace );
        return result;
    }

    + (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
    {
        NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()];
        CGColorSpaceRef colorSpace;
        if ( cvMat.elemSize() == 1 ) {
            colorSpace = CGColorSpaceCreateDeviceGray();
        }
        else {
            colorSpace = CGColorSpaceCreateDeviceRGB();
        }
        CGDataProviderRef provider = CGDataProviderCreateWithCFData( (__bridge CFDataRef)data );
        CGImageRef imageRef = CGImageCreate( cvMat.cols, cvMat.rows, 8, 8 * cvMat.elemSize(), cvMat.step[0], colorSpace, kCGImageAlphaNone|kCGBitmapByteOrderDefault, provider, NULL, false, kCGRenderingIntentDefault );
        UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
        CGImageRelease( imageRef );
        CGDataProviderRelease( provider );
        CGColorSpaceRelease( colorSpace );
        return finalImage;
    }

    + (cv::Mat)cvMatFromUIImage:(UIImage *)image
    {
        CGColorSpaceRef colorSpace = CGImageGetColorSpace( image.CGImage );
        CGFloat cols = image.size.width;
        CGFloat rows = image.size.height;
        cv::Mat cvMat( rows, cols, CV_8UC4 );
        CGContextRef contextRef = CGBitmapContextCreate( cvMat.data, cols, rows, 8, cvMat.step[0], colorSpace, kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault );
        CGContextDrawImage( contextRef, CGRectMake(0, 0, cols, rows), image.CGImage );
        CGContextRelease( contextRef );
        CGColorSpaceRelease( colorSpace );
        return cvMat;
    }

    + (cv::Mat)cvMatGrayFromUIImage:(UIImage *)image
    {
        cv::Mat cvMat = [UIImageCVMatConverter cvMatFromUIImage:image];
        cv::Mat grayMat;
        if ( cvMat.channels() == 1 ) {
            grayMat = cvMat;
        }
        else {
            grayMat = cv::Mat( cvMat.rows, cvMat.cols, CV_8UC1 );
            cv::cvtColor( cvMat, grayMat, CV_BGR2GRAY );
        }
        return grayMat;
    }

    + (UIImage *)scaleAndRotateImageBackCamera:(UIImage *)image
    {
        static int kMaxResolution = 640;
        CGImageRef imgRef = image.CGImage;
        CGFloat width = CGImageGetWidth( imgRef );
        CGFloat height = CGImageGetHeight( imgRef );
        CGAffineTransform transform = CGAffineTransformIdentity;
        CGRect bounds = CGRectMake( 0, 0, width, height );
        if ( width > kMaxResolution || height > kMaxResolution ) {
            CGFloat ratio = width/height;
            if ( ratio > 1 ) {
                bounds.size.width = kMaxResolution;
                bounds.size.height = bounds.size.width / ratio;
            }
            else {
                bounds.size.height = kMaxResolution;
                bounds.size.width = bounds.size.height * ratio;
            }
        }
        CGFloat scaleRatio = bounds.size.width / width;
        CGSize imageSize = CGSizeMake( CGImageGetWidth(imgRef), CGImageGetHeight(imgRef) );
        CGFloat boundHeight;
        UIImageOrientation orient = image.imageOrientation;
        switch( orient ) {
            case UIImageOrientationUp:
                transform = CGAffineTransformIdentity;
                break;
            case UIImageOrientationUpMirrored:
                transform = CGAffineTransformMakeTranslation(imageSize.width, 0.0);
                transform = CGAffineTransformScale(transform, -1.0, 1.0);
                break;
            case UIImageOrientationDown:
                transform = CGAffineTransformMakeTranslation(imageSize.width, imageSize.height);
                transform = CGAffineTransformRotate(transform, M_PI);
                break;
            case UIImageOrientationDownMirrored:
                transform = CGAffineTransformMakeTranslation(0.0, imageSize.height);
                transform = CGAffineTransformScale(transform, 1.0, -1.0);
                break;
            case UIImageOrientationLeftMirrored:
                boundHeight = bounds.size.height;
                bounds.size.height = bounds.size.width;
                bounds.size.width = boundHeight;
                transform = CGAffineTransformMakeTranslation(imageSize.height, imageSize.width);
                transform = CGAffineTransformScale(transform, -1.0, 1.0);
                transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
                break;
            case UIImageOrientationLeft:
                boundHeight = bounds.size.height;
                bounds.size.height = bounds.size.width;
                bounds.size.width = boundHeight;
                transform = CGAffineTransformMakeTranslation(0.0, imageSize.width);
                transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
                break;
            case UIImageOrientationRightMirrored:
                boundHeight = bounds.size.height;
                bounds.size.height = bounds.size.width;
                bounds.size.width = boundHeight;
                transform = CGAffineTransformMakeScale(-1.0, 1.0);
                transform = CGAffineTransformRotate(transform, M_PI / 2.0);
                break;
            case UIImageOrientationRight:
                boundHeight = bounds.size.height;
                bounds.size.height = bounds.size.width;
                bounds.size.width = boundHeight;
                transform = CGAffineTransformMakeTranslation(imageSize.height, 0.0);
                transform = CGAffineTransformRotate(transform, M_PI / 2.0);
                break;
            default:
                [NSException raise:NSInternalInconsistencyException format:@"Invalid image orientation"];
        }
        UIGraphicsBeginImageContext( bounds.size );
        CGContextRef context = UIGraphicsGetCurrentContext();
        if ( orient == UIImageOrientationRight || orient == UIImageOrientationLeft ) {
            CGContextScaleCTM( context, -scaleRatio, scaleRatio );
            CGContextTranslateCTM( context, -height, 0 );
        }
        else {
            CGContextScaleCTM( context, scaleRatio, -scaleRatio );
            CGContextTranslateCTM( context, 0, -height );
        }
        CGContextConcatCTM( context, transform );
        CGContextDrawImage( UIGraphicsGetCurrentContext(), CGRectMake(0, 0, width, height), imgRef );
        UIImage *returnImage = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();
        return returnImage;
    }

    + (UIImage *)scaleAndRotateImageFrontCamera:(UIImage *)image
    {
        static int kMaxResolution = 640;
        CGImageRef imgRef = image.CGImage;
        CGFloat width = CGImageGetWidth(imgRef);
        CGFloat height = CGImageGetHeight(imgRef);
        CGAffineTransform transform = CGAffineTransformIdentity;
        CGRect bounds = CGRectMake( 0, 0, width, height);
        if (width > kMaxResolution || height > kMaxResolution) {
            CGFloat ratio = width/height;
            if (ratio > 1) {
                bounds.size.width = kMaxResolution;
                bounds.size.height = bounds.size.width / ratio;
            }
            else {
                bounds.size.height = kMaxResolution;
                bounds.size.width = bounds.size.height * ratio;
            }
        }
        CGFloat scaleRatio = bounds.size.width / width;
        CGSize imageSize = CGSizeMake(CGImageGetWidth(imgRef), CGImageGetHeight(imgRef));
        CGFloat boundHeight;
        UIImageOrientation orient = image.imageOrientation;
        switch(orient) {
            case UIImageOrientationUp:
                transform = CGAffineTransformIdentity;
                break;
            case UIImageOrientationUpMirrored:
                transform = CGAffineTransformMakeTranslation(imageSize.width, 0.0);
                transform = CGAffineTransformScale(transform, -1.0, 1.0);
                break;
            case UIImageOrientationDown:
                transform = CGAffineTransformMakeTranslation(imageSize.width, imageSize.height);
                transform = CGAffineTransformRotate(transform, M_PI);
                break;
            case UIImageOrientationDownMirrored:
                transform = CGAffineTransformMakeTranslation(0.0, imageSize.height);
                transform = CGAffineTransformScale(transform, 1.0, -1.0);
                break;
            case UIImageOrientationLeftMirrored:
                boundHeight = bounds.size.height;
                bounds.size.height = bounds.size.width;
                bounds.size.width = boundHeight;
                transform = CGAffineTransformMakeTranslation(imageSize.height, imageSize.width);
                transform = CGAffineTransformScale(transform, -1.0, 1.0);
                transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
                break;
            case UIImageOrientationLeft:
                boundHeight = bounds.size.height;
                bounds.size.height = bounds.size.width;
                bounds.size.width = boundHeight;
                transform = CGAffineTransformMakeTranslation(0.0, imageSize.width);
                transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
                break;
            case UIImageOrientationRight:
            case UIImageOrientationRightMirrored:
                boundHeight = bounds.size.height;
                bounds.size.height = bounds.size.width;
                bounds.size.width = boundHeight;
                transform = CGAffineTransformMakeScale(-1.0, 1.0);
                transform = CGAffineTransformRotate(transform, M_PI / 2.0);
                break;
            default:
                [NSException raise:NSInternalInconsistencyException format:@"Invalid image orientation"];
        }
        UIGraphicsBeginImageContext( bounds.size );
        CGContextRef context = UIGraphicsGetCurrentContext();
        if ( orient == UIImageOrientationRight || orient == UIImageOrientationLeft ) {
            CGContextScaleCTM(context, -scaleRatio, scaleRatio);
            CGContextTranslateCTM(context, -height, 0);
        }
        else {
            CGContextScaleCTM(context, scaleRatio, -scaleRatio);
            CGContextTranslateCTM(context, 0, -height);
        }
        CGContextConcatCTM( context, transform );
        CGContextDrawImage( UIGraphicsGetCurrentContext(), CGRectMake(0, 0, width, height), imgRef );
        UIImage *returnImage = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();
        return returnImage;
    }

    @end
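As a quick sanity check of the converter, here is a hypothetical usage sketch (the asset name @"test.png" is a placeholder, not part of the original code):

    // Hypothetical usage: round-trip a bundled image through the converter.
    UIImage *input = [UIImage imageNamed:@"test.png"];                 // placeholder asset
    cv::Mat mat  = [UIImageCVMatConverter cvMatFromUIImage:input];     // CV_8UC4 (RGBA)
    cv::Mat gray = [UIImageCVMatConverter cvMatGrayFromUIImage:input]; // CV_8UC1
    UIImage *back = [UIImageCVMatConverter UIImageFromCVMat:gray];
    NSLog(@"mat: %dx%d, %d channels", mat.cols, mat.rows, mat.channels());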

Here is the code I use to find several markers in an image and push their center points into a std::vector:

 #import "UIImageCVMatConverter.h" ... cv::Mat src_img; cv::Mat result_mat; cv::Mat debug_img; cv::Mat template_img; NSArray *markerImages = [NSArray arrayWithObjects:@"marker-1.png", nil]; std::vector markerPoints; // input image src_img = [UIImageCVMatConverter cvMatFromUIImage:cameriaInputImage]; cv::cvtColor(src_img, debug_img, CV_GRAY2BGR); for (NSString *marker in markerImages) { template_img = [UIImageCVMatConverter cvMatFromUIImage:[UIImage imageNamed:marker]]; cv::cvtColor(template_img, template_img, CV_GRAY2BGR); int match_method = CV_TM_CCORR_NORMED; cv::matchTemplate(src_img, template_img, result_mat, match_method); cv::normalize(result_mat, result_mat, 0, 1, cv::NORM_MINMAX, -1, cv::Mat()); double minVal; double maxVal; cv::Point minLoc, maxLoc, matchLoc; cv::minMaxLoc(result_mat, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat() ); if ( match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED ) { matchLoc = minLoc; } else { matchLoc = maxLoc; } cv::Point top_left = matchLoc; cv::Point bottom_right = cv::Point(matchLoc.x + template_img.cols , matchLoc.y + template_img.rows); cv::Point center = cv::Point(0,0); center.x = (bottom_right.x + top_left.x) / 2; center.y = (bottom_right.y + top_left.y) / 2; markerPoints.push_back(center); } 

I hope that helps…