2013-11-03 44 views
1

以下是我的代碼:它使用 SURF 提取特徵,並用 FlannBasedMatcher 匹配關鍵點。

Mat object = imread("S6E0.bmp", CV_LOAD_IMAGE_GRAYSCALE); 

    if(!object.data) 
    { 
    // std::cout<< "Error reading object " << std::endl; 
    return -2; 
    } 

    //Detect the keypoints using SURF Detector 

    int minHessian = 500; 

    SurfFeatureDetector detector(minHessian); 

    std::vector<KeyPoint> kp_object; 

    detector.detect(object, kp_object); 

    //Calculate descriptors (feature vectors) 
    SurfDescriptorExtractor extractor; 

    Mat des_object; 

    extractor.compute(object, kp_object, des_object); 

    FlannBasedMatcher matcher; 
    char key = 'a'; 
    //VideoCapture cap(0); 

    namedWindow("Good Matches"); 

    std::vector<Point2f> obj_corners(4); 

    //Get the corners from the object 
    obj_corners[0] = cvPoint(0,0); 
    obj_corners[1] = cvPoint(object.cols, 0); 
    obj_corners[2] = cvPoint(object.cols, object.rows); 
    obj_corners[3] = cvPoint(0, object.rows); 

    Mat image = imread("S6E0.bmp", CV_LOAD_IMAGE_GRAYSCALE); 
    Mat des_image, img_matches; 

    std::vector<KeyPoint> kp_image; 
    std::vector<vector<DMatch >> matches; 

    std::vector<std::vector<cv::DMatch>> matches1; 
    std::vector<std::vector<cv::DMatch>> matches2; 
    std::vector<cv::DMatch> matches3; 
    std::vector<DMatch > good_matches; 
    std::vector<Point2f> obj; 
    std::vector<Point2f> scene; 

    std::vector<Point2f> scene_corners(4); 

    Mat H; 

    //cvtColor(frame, image, CV_RGB2GRAY); 
    detector.detect(image, kp_image); 
    extractor.compute(image, kp_image, des_image); 


    matcher.knnMatch(des_object, des_image, matches, 2); 



    for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS 
    { 
     if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0)) 
     { 
      good_matches.push_back(matches[i][0]); 
     } 
    } 

     //Draw only "good" matches 

    drawMatches(object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS); 

    if (good_matches.size() >= 4) 
    { 
     printf("Images matching %d , %d", good_matches.size(), kp_object.size()); 

     //return 1; 

     for(int i = 0; i < good_matches.size(); i++) 
     { 

      //Get the keypoints from the good matches 

      obj.push_back(kp_object[ good_matches[i].queryIdx ].pt); 
      scene.push_back(kp_image[ good_matches[i].trainIdx ].pt); 
     } 

     //H = findHomography(obj, scene, CV_RANSAC); 
     //printf("Size : %d", H.size()); 
     //perspectiveTransform(obj_corners, scene_corners, H); 
     //printf("Size : %d --- %d --- %d", H.size(), scene_corners.size()); 

    }else{ 

     printf("Images matching %d , %d", good_matches.size(), kp_object.size()); 
    } 

     //Show detected matches 

    imshow("Good Matches", img_matches); 
    waitKey(0); 
    return 0; 

在這段代碼中,我想知道下面這個方法內部究竟發生了什麼:

matcher.knnMatch(des_object, des_image, matches, 2); 

據我所知,我傳入了兩幅待匹配圖像的描述符,matches 向量會被填充每個描述符的 2 個最近鄰。我想知道該方法內部具體做了什麼、matches 向量是如何被填充的,以及填充的是哪些點。

在下面這個代碼段中使用了最近鄰距離比(nearest neighbour distance ratio, NNDR):

// NNDR (Lowe's ratio) test over the k-NN match lists.
// Guard matches[i].size() BEFORE reading matches[i][1]: the original
// condition dereferenced [0]/[1] first and tested the size afterwards,
// which is exactly why the loop was "SENSITIVE TO SEGFAULTS" whenever
// knnMatch returned fewer than two neighbours for a descriptor.
for (size_t i = 0; i < matches.size(); i++)
{
    if (matches[i].size() >= 2 &&
        matches[i][0].distance < 0.6 * matches[i][1].distance)
    {
        good_matches.push_back(matches[i][0]);
    }
}

我把 NNDR 閾值設爲 0.6。我想知道 good_matches 是如何篩選出來的,以及改變 NNDR 閾值會產生什麼影響。

如果有人能幫我理清這段代碼,將是很大的幫助。謝謝。

回答

3

FlannBasedMatcher 基於 Muja 等人撰寫的論文;你可以在那篇論文中找到確切的算法及其實現思路。

關於 good_matches:從代碼片段本身就能看出,它是根據某個準則(即 NNDR)從匹配結果中篩選出的最佳匹配集合。NNDR 本質上是一個閾值,決定了一個匹配在被捨棄之前允許的距離比。閾值越高,被接受的點越多,正匹配的數量也越多(至於它們是否爲真正的正確匹配,則取決於你的數據集以及你設置 NNDR 閾值的方式)。

希望這有助於。

+0

請你解釋一下knnMatch(x,x,x)方法,我們是否將匹配點保存在匹配向量中? – posha

+1

是的,它將匹配的點保存在'匹配'向量中。 @ scap3y告訴你,算法取自哪裏,你仍然在問這個方法做了什麼。去閱讀那篇論文,找出該方法做什麼以及如何做。不要懶惰。讀。 – guneykayim