1
이 SURF 코드를 사용하여 이미지에서 로고를 감지하고 있습니다. 잘 작동하지만 매우 느립니다. 어떻게 최적화할 수 있습니까? 아래 코드에서 이미지 감지가 매우 느리게 수행됩니다.
// Detects `objectToFind` (a logo) inside `image` using SURF features,
// draws the located quadrilateral onto `output`, estimates distance from
// the quad's edge length, and pushes the result to the UI.
//
// Performance notes (largest costs first):
//   * cvExtractSURF on the full-size `image` dominates the runtime;
//     downscale the scene image before extraction for a large speedup.
//   * cvSURFParams(500, 1) requests extended 128-dim descriptors; pass 0
//     for 64-dim descriptors (roughly 2x faster matching, usually enough).
//   * The object's keypoints/descriptors are recomputed on every call;
//     cache them if `objectToFind` does not change between calls.
- (void)findObject
{
    width = 0;

    // Bail out before allocating any OpenCV resources.
    if (!objectToFind || !image) {
        NSLog(@"Missing object or image");
        return;
    }

    static CvScalar colors[] =
    {
        {{0,0,255}},
        {{0,128,255}},
        {{0,255,255}},
        {{0,255,0}},
        {{255,128,0}},
        {{255,255,0}},
        {{255,0,0}},
        {{255,0,255}},
        {{255,255,255}}
    };

    // Holds every CvSeq produced below (keypoints + descriptors).
    CvMemStorage *storage = cvCreateMemStorage(0);

    CvSeq *objectKeypoints = 0, *objectDescriptors = 0;
    CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
    // Hessian threshold 500; second argument 1 = extended (128-dim) descriptors.
    CvSURFParams params = cvSURFParams(500, 1);

    double tt = (double)cvGetTickCount();
    NSLog(@"Finding object descriptors");
    cvExtractSURF(objectToFind, 0, &objectKeypoints, &objectDescriptors, storage, params);
    NSLog(@"Object Descriptors: %d", objectDescriptors->total);
    cvExtractSURF(image, 0, &imageKeypoints, &imageDescriptors, storage, params);
    NSLog(@"Image Descriptors: %d", imageDescriptors->total);
    tt = (double)cvGetTickCount() - tt;
    NSLog(@"Extraction time = %gms", tt/(cvGetTickFrequency()*1000.));

    CvPoint src_corners[4] = {{0,0}, {objectToFind->width,0},
                              {objectToFind->width, objectToFind->height},
                              {0, objectToFind->height}};
    CvPoint dst_corners[4];

    // Output canvas: object area stacked above the scene image.
    CvSize size = cvSize(image->width > objectToFind->width ? image->width : objectToFind->width,
                         objectToFind->height + image->height);
    // FIX: the previous `output` image was never released, leaking one
    // full-size image per call.
    if (output) {
        cvReleaseImage(&output);
    }
    output = cvCreateImage(size, 8, 1);

    // Copy the scene image into the lower part of the canvas.
    // FIX: the ROI height was `output->height`, which extends past the canvas
    // and does not match `image`'s dimensions; it must be `image->height`.
    cvSetImageROI(output, cvRect(0, objectToFind->height, image->width, image->height));
    cvCopy(image, output);
    cvResetImageROI(output);

    NSLog(@"Locating Planar Object");
#ifdef USE_FLANN
    NSLog(@"Using approximate nearest neighbor search");
#endif
    if (locatePlanarObject(objectKeypoints, objectDescriptors, imageKeypoints,
                           imageDescriptors, src_corners, dst_corners))
    {
        for (int i = 0; i < 4; i++) {
            CvPoint r1 = dst_corners[i % 4];
            CvPoint r2 = dst_corners[(i + 1) % 4];
            // Offset y by the object's height because the scene sits below
            // the object area on the stacked canvas.
            cvLine(output, cvPoint(r1.x, r1.y + objectToFind->height),
                   cvPoint(r2.x, r2.y + objectToFind->height), colors[6], 4);
            // Edge length of the detected quad, used for the distance estimate.
            width = sqrt(((r1.x - r2.x) * (r1.x - r2.x)) + ((r1.y - r2.y) * (r1.y - r2.y)));
        }
    }

    // PERF FIX: the findPairs/flannFindPairs pass only fed a commented-out
    // drawing loop, so its expensive result was discarded. Dropping it
    // removes a full descriptor-matching pass per call. (The unused
    // `object_color` create/convert/release was dead work too and is gone.)

    // 629.0 is a camera-specific calibration constant relating the logo's
    // apparent size in pixels to distance — TODO confirm its derivation.
    // FIX: guard against division by zero when no object was located
    // (`width` stays 0 and the old code displayed "inf").
    if (width > 0) {
        float dist = 629.0 / width;
        [distanceLabel setText:[NSString stringWithFormat:@"%.2f", dist]];
    } else {
        [distanceLabel setText:@"--"];
    }

    NSLog(@"Converting Output");
    UIImage *convertedOutput = [OpenCVUtilities UIImageFromGRAYIplImage:output];
    NSLog(@"Opening Stuff");
    [imageView setImage:convertedOutput];

    // FIX: the keypoint/descriptor storage was never released (leak per call).
    cvReleaseMemStorage(&storage);

    [activityView stopAnimating];
}
내 원래 이미지와 내가 감지할 로고입니다.
제 질문에 명확하지 않은 점이 있으면 알려주십시오.