2016-05-13 32 views
2

我想通過使用SURF算法來識別對象。由於我在安裝非自由(nonfree)模塊時遇到了一些問題,因此我決定使用舊版本的OpenCV(2.4.11,我在Visual Studio 2013中運行它)。與語法無關的錯誤 - SURF算法

現在,我遇到了一些與語法無關的錯誤(鏈接器錯誤),我不知道該怎麼做。這裏是代碼:

#include <stdio.h> 
#include <iostream> 
#include <fstream> 
#include <string> 
#include "opencv2/core/core.hpp" 
#include "opencv2/features2d/features2d.hpp" 
#include "opencv2/highgui/highgui.hpp" 
#include "opencv2/imgproc/imgproc.hpp" 
#include "opencv2/calib3d/calib3d.hpp" 
#include "opencv2/nonfree/features2d.hpp" 
#include "opencv2/nonfree/nonfree.hpp" 

using namespace cv; 
using namespace std; 

// Last key pressed in the display window; the main loop exits on ESC (27).
char key = 'a'; 
// Frames skipped at startup so the camera can auto-adjust exposure/focus.
int framecount = 0; 

// SURF feature detector (nonfree module); 500 is the Hessian threshold —
// higher values keep fewer, stronger keypoints.
SurfFeatureDetector detector(500); 
SurfDescriptorExtractor extractor; 
// FLANN-based matcher: approximate nearest-neighbour search on descriptors.
FlannBasedMatcher matcher; 

// Working images: current capture, reference descriptors, grayscale frame.
Mat frame, des_object, image; 
// Scene descriptors, match visualization, and the estimated homography.
Mat des_image, img_matches, H; 
// Keypoints of the reference object image.
std::vector<KeyPoint> kp_object; 
// The four corners of the reference image (object coordinate space).
std::vector<Point2f> obj_corners(4); 
// Keypoints of the current camera frame.
std::vector<KeyPoint> kp_image; 
// knnMatch output: for each object descriptor, its 2 nearest scene matches.
std::vector<vector<DMatch > > matches; 
// Matches surviving the ratio test.
std::vector<DMatch > good_matches; 
// Matched point coordinates in the object image / in the scene frame.
std::vector<Point2f> obj; 
std::vector<Point2f> scene; 
// Object corners projected into the scene by the homography H.
std::vector<Point2f> scene_corners(4); 

int main() 
{    
       //reference image 
    Mat object = imread("C:\\Users\\patri\\Desktop\\test.jpg", CV_LOAD_IMAGE_GRAYSCALE); 

    if(!object.data) 
    { 
     std::cout<< "Error reading object " << std::endl; 
     return -1; 
    } 

       //compute detectors and descriptors of reference image 
    detector.detect(object, kp_object); 
    extractor.compute(object, kp_object, des_object);  

       //create video capture object 
    VideoCapture cap(0); 

    //Get the corners from the object 
    obj_corners[0] = cvPoint(0,0); 
    obj_corners[1] = cvPoint(object.cols, 0); 
    obj_corners[2] = cvPoint(object.cols, object.rows); 
    obj_corners[3] = cvPoint(0, object.rows); 

       //wile loop for real time detection 
    while (key != 27) 
    { 
           //capture one frame from video and store it into image object name 'frame' 
     cap >> frame; 
if (framecount < 5) 
     { 
      framecount++; 
      continue; 
     }  

           //converting captured frame into gray scale 
     cvtColor(frame, image, CV_RGB2GRAY); 

           //extract detectors and descriptors of captured frame 
           detector.detect(image, kp_image); 
           extractor.compute(image, kp_image, des_image); 

           //find matching descriptors of reference and captured image 
           matcher.knnMatch(des_object, des_image, matches, 2); 

           //finding matching keypoints with Euclidean distance 0.6 times the distance of next keypoint 
           //used to find right matches 
           for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) 
           { 
               if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0)) 
               { 
                   good_matches.push_back(matches[i][0]); 
               } 
           }        

           //Draw only "good" matches 
           drawMatches(object, kp_object, frame, kp_image, good_matches, img_matches, 
               Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS); 

           //3 good matches are enough to describe an object as a right match. 
           if (good_matches.size() >= 3) 
     {          

      for(int i = 0; i < good_matches.size(); i++) 
      { 
       //Get the keypoints from the good matches 
       obj.push_back(kp_object[ good_matches[i].queryIdx ].pt); 
scene.push_back(kp_image[ good_matches[i].trainIdx ].pt); 
      } 
               try 
               { 
                   H = findHomography(obj, scene, CV_RANSAC); 
               } 
               catch(Exception e){} 

      perspectiveTransform(obj_corners, scene_corners, H); 

      //Draw lines between the corners (the mapped object in the scene image) 
      line(img_matches, scene_corners[0] + Point2f(object.cols, 0), scene_corners[1] + Point2f(object.cols, 0), Scalar(0, 255, 0), 4); 
      line(img_matches, scene_corners[1] + Point2f(object.cols, 0), scene_corners[2] + Point2f(object.cols, 0), Scalar(0, 255, 0), 4); 
      line(img_matches, scene_corners[2] + Point2f(object.cols, 0), scene_corners[3] + Point2f(object.cols, 0), Scalar(0, 255, 0), 4); 
      line(img_matches, scene_corners[3] + Point2f(object.cols, 0), scene_corners[0] + Point2f(object.cols, 0), Scalar(0, 255, 0), 4); 
     } 

     //Show detected matches 
     imshow("Good Matches", img_matches); 

           //clear array 
           good_matches.clear(); 

     key = waitKey(1); 
    } 
    return 0; 
} 

我還附上我得到的錯誤的圖片。請幫我找到問題。 errors

回答

0

關於警告(「conversion from int to float」),問題在於用整數參數構造cv::Point2f;在繪製矩形邊線的那幾行中,您應該使用Point2f(static_cast&lt;float&gt;(object.cols), 0.f)而不是Point2f(object.cols, 0)(注意:寫成Point2f(object.cols, 0.0)仍然會有int到float的收窄轉換警告)。

但只是警告。

真正的問題是鏈接器錯誤:您需要具有cv::SURF::SURF()的庫(由SurfFeatureDetector detector(500);SurfDescriptorExtractor extractor;使用)。

你應該添加對應的庫;在你的情況下(OpenCV 2.4.11),它是opencv_nonfree2411.lib(Debug配置下是opencv_nonfree2411d.lib)。

+0

我已經包含非自由庫,但我仍然有相同的錯誤。 – patri

+0

@patri - 也鏈接'opencv_features2d241.lib'(或類似的名字)? – max66

+0

是的。起初,我嘗試過使用OpenCV 3.1,但在使用CMake構建非自由庫時出現錯誤。關於以前版本的一些文檔後,我已經看到他們有包含非自由庫,並根據這篇文章http://stackoverflow.com/questions/9968993/opencv-undefined-reference-surffeaturedetector-and-bruteforcematcher它應該按照我寫的方式工作。或者至少我認爲是這樣的 – patri