我試圖使用OpenCV的特徵檢測工具來判斷一個小樣本圖像是否存在於一個較大的場景圖像中。
我參考了這篇文章的代碼（不含單應性部分）：「iOS上的OpenCV：SurfFeatureDetector與FlannBasedMatcher的錯誤匹配」。
// -- Setup: detect SURF features in the scene and the object image, then
// -- match object descriptors (query) against scene descriptors (train).
UIImage *sceneImage, *objectImage1;
cv::Mat sceneImageMat, objectImageMat1;
cv::vector<cv::KeyPoint> sceneKeypoints, objectKeypoints1;
cv::Mat sceneDescriptors, objectDescriptors1;
// Stack-allocated detector: the original `new cv::SurfFeatureDetector(...)`
// stored in a raw pointer was never deleted and leaked. A value type needs
// no cleanup and the detector is only used within this function.
int minHessian = 400;
cv::SurfFeatureDetector surfDetector(minHessian);
// The extractor was used below (`surfExtractor.compute`) but never declared
// anywhere in the fragment — declare it here.
cv::SurfDescriptorExtractor surfExtractor;
cv::FlannBasedMatcher flannMatcher;
cv::vector<cv::DMatch> matches;
// Multiplier applied to the best (smallest) match distance when filtering
// "good" matches further down.
double minDistMultiplier = 3;

sceneImage = [UIImage imageNamed:@"twitter_scene.png"];
objectImage1 = [UIImage imageNamed:@"twitter.png"];
sceneImageMat = cv::Mat(sceneImage.size.height, sceneImage.size.width, CV_8UC1);
objectImageMat1 = cv::Mat(objectImage1.size.height, objectImage1.size.width, CV_8UC1);
// NOTE(review): [UIImage CVMat] on iOS commonly yields an RGBA matrix; if
// conversion output looks wrong, CV_RGBA2GRAY may be needed — TODO confirm
// against the CVMat category implementation.
cv::cvtColor([sceneImage CVMat], sceneImageMat, CV_RGB2GRAY);
cv::cvtColor([objectImage1 CVMat], objectImageMat1, CV_RGB2GRAY);
// This check only logs; execution continues even when data is missing.
if (!sceneImageMat.data || !objectImageMat1.data) {
    NSLog(@"NO DATA");
}

surfDetector.detect(sceneImageMat, sceneKeypoints);
surfDetector.detect(objectImageMat1, objectKeypoints1);
surfExtractor.compute(sceneImageMat, sceneKeypoints, sceneDescriptors);
surfExtractor.compute(objectImageMat1, objectKeypoints1, objectDescriptors1);
// Query = object descriptors, train = scene descriptors; this order fixes
// what queryIdx / trainIdx mean for every match below.
flannMatcher.match(objectDescriptors1, sceneDescriptors, matches);
// -- Filter matches: keep only those close to the best (smallest) distance --
double max_dist = 0; double min_dist = 100;
// Iterate over the matches actually returned rather than assuming
// matches.size() == objectDescriptors1.rows — the original indexed
// `matches` by the descriptor row count, which overruns the vector when
// the matcher returns fewer matches (e.g. empty descriptors).
for (size_t i = 0; i < matches.size(); i++)
{
    double dist = matches[i].distance;
    if (dist < min_dist) min_dist = dist;
    if (dist > max_dist) max_dist = dist;
}
// A match is "good" when its distance is within minDistMultiplier times the
// best distance seen; if no matches exist, goodMatches stays empty.
cv::vector<cv::DMatch> goodMatches;
for (size_t i = 0; i < matches.size(); i++)
{
    if (matches[i].distance < minDistMultiplier * min_dist)
    {
        goodMatches.push_back(matches[i]);
    }
}
NSLog(@"Good matches found: %lu", goodMatches.size());
// -- Visualize matches and collect point pairs for homography estimation ----
cv::Mat imageMatches;
cv::drawMatches(objectImageMat1, objectKeypoints1, sceneImageMat, sceneKeypoints, goodMatches, imageMatches, cv::Scalar::all(-1), cv::Scalar::all(-1),
cv::vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
// These two vectors were used but never declared in the fragment.
cv::vector<cv::Point2f> obj, scn;
for (size_t i = 0; i < goodMatches.size(); i++)
{
    //-- Get the keypoints from the good matches. queryIdx indexes the
    //-- object keypoints and trainIdx the scene keypoints, matching the
    //-- matcher call order match(objectDescriptors1, sceneDescriptors, ...).
    obj.push_back(objectKeypoints1[ goodMatches[i].queryIdx ].pt);
    // BUG FIX: scene points must come from sceneKeypoints — the original
    // read objectKeypoints1[trainIdx], pairing object points with the wrong
    // keypoint set, which corrupts findHomography and the drawn outline.
    scn.push_back(sceneKeypoints[ goodMatches[i].trainIdx ].pt);
}
// -- Estimate the homography and report the RANSAC inlier ratio -------------
cv::vector<uchar> outputMask;
// With CV_RANSAC, outputMask receives 1 for inlier point pairs, 0 for
// outliers. NOTE(review): findHomography needs at least 4 point pairs;
// with fewer good matches it can return an empty matrix or throw — a guard
// on goodMatches.size() upstream would make this robust. TODO confirm.
cv::Mat homography = cv::findHomography(obj, scn, CV_RANSAC, 3, outputMask);
int inlierCounter = 0;
for (size_t i = 0; i < outputMask.size(); i++) {
    if (outputMask[i] == 1) {
        inlierCounter++;
    }
}
// Guard the percentage computation: the original divided by
// outputMask.size() unconditionally, which is a division by zero when the
// mask is empty.
if (!outputMask.empty()) {
    NSLog(@"Inliers percentage: %d", (int)(((float)inlierCounter/(float)outputMask.size()) * 100));
}
// Project the object image's four corners into scene coordinates with the
// estimated homography, then outline the detected region on the composite
// match image. drawMatches places the scene to the right of the object
// image, so every drawn point is shifted right by the object width.
cv::vector<cv::Point2f> objCorners(4);
objCorners[0] = cv::Point(0, 0);
objCorners[1] = cv::Point(objectImageMat1.cols, 0);
objCorners[2] = cv::Point(objectImageMat1.cols, objectImageMat1.rows);
objCorners[3] = cv::Point(0, objectImageMat1.rows);
cv::vector<cv::Point2f> scnCorners(4);
cv::perspectiveTransform(objCorners, scnCorners, homography);

// Draw the quadrilateral edge by edge; (c + 1) % 4 closes the loop back to
// the first corner, replacing four near-identical cv::line calls.
const cv::Point2f shift((float)objectImageMat1.cols, 0);
const cv::Scalar outlineColor(0, 255, 0);
for (int c = 0; c < 4; c++) {
    cv::line(imageMatches, scnCorners[c] + shift, scnCorners[(c + 1) % 4] + shift, outlineColor, 4);
}
[self.mainImageView setImage:[UIImage imageWithCVMat:imageMatches]];
這段代碼可以運行，但即使小樣本圖像並不屬於大圖像的一部分，我仍然會得到大量的匹配。
下面是一個很好的示例輸出:
這裏還有一個壞輸出的例子:
兩個輸出是相同的代碼的結果。小樣本圖像只有不同之處。
像這樣的結果,我不可能知道樣本圖像何時不在較大圖像中。
在做研究時，我發現了這個Stack Overflow問題。我按照那裏給出的答案，嘗試了《OpenCV 2計算機視覺應用程序編程手冊》一書中提出的步驟，但無法使它適用於不同尺寸的圖像（問題似乎出在cv::findFundamentalMat函數）。
我錯過了什麼?有沒有辦法使用SurfFeatureDetector和FlannBasedMatcher來知道一個樣本圖像是一個較大圖像的一部分,而另一個樣本圖像不是?有沒有更好的方法來達到這個目的?
更新:
我更新了上面的代碼，以包含我使用的完整函數，包括嘗試實際繪製單應性的部分。另外，這裏有3張圖像——1張場景圖，以及我想在場景中尋找的兩個小物體。實際不在場景中的爪子圖標得到的內點（inlier）百分比，反而比真正在場景中的Twitter圖標更高。另外，由於某些原因，單應性也沒有被繪製出來：
Twitter Icon
Paw Icon
Scene
感謝您的快速評論。我想我明白如何使用findHomography來查找轉換,但是如何測試匹配以查看它是否與轉換相對應?至於距離,我嘗試了一下,但是在我給出的兩個例子中,我似乎仍然無法消除這個不好的例子中的匹配。 –
cv :: findHomography的最後一個參數是「OutputArray mask」。如果您使用CV_RANSAC或CV_LMEDS,這將爲您提供關於哪些點是內點的信息以及哪些是異常點。 –
謝謝!使用findHomography的輸出效果非常好。你一直在幫助很大。 –