2016-01-18 59 views
1

好吧,這裏是我的源代碼。此代碼將在文件中獲取圖像,並將其與另一個文件中的圖像列表進行比較。在圖像文件中,您必須包含一個.txt文件,其中包含您要比較的文件中所有圖像的名稱。我遇到的問題是這兩張圖片非常相似,但並不完全相同。我需要一種方法來進一步細化這些匹配。也許甚至有一種全新的方式來比較這兩種形狀(大塊,斑點等)。我考慮的一種方法實際上是製作一張完整的關鍵點貼圖,並且只比較keypiont,如果它們處於或接近某個對這兩幅圖像產生反應的點。即:比較(12,200)點的關鍵點,(x,y)的+ 10個像素,並查看另一個圖像上是否有類似的關鍵點。使用opencv和C++找到與複雜形狀最接近的匹配的最佳方法是什麼?

我需要的是一種可能的最佳匹配方式:ActualImplantXrayOfThatSameImplantButASlightlyDifferentSize。謝謝,麻煩您了! PS:你會看到我在試用Sobel Derivatives和其他類似的東西的地方註釋掉了部分內容。我最終只是調整X射線的對比度和亮度以獲得最佳輪廓。在用於匹配任何東西之前,對植入物的圖像也必須做同樣的處理。

#include "opencv2\highgui\highgui.hpp" 
#include "opencv2\features2d\features2d.hpp" 
#include "opencv2\imgproc.hpp" 

#include <iostream> 
#include <fstream> 
#include <string>
#include <vector>
#include <ctime> 

// The code below uses unqualified cv:: and std:: names throughout
// (string, cout, Mat, Ptr, ...); these directives were lost in the paste.
using namespace cv;
using namespace std;
// Default pipeline configuration, used when the program is started with no
// command-line arguments. Detector/descriptor/matcher names follow the
// OpenCV feature2d naming scheme ("ORB" + "BruteForce-Hamming" pair correctly
// because ORB produces binary descriptors).
const string defaultDetector = "ORB";
const string defaultDescriptor = "ORB";
const string defaultMatcher = "BruteForce-Hamming";
// Paths are relative to the build/run directory; override them via argv.
const string defaultXrayImagePath = "../../xray.png";
const string defaultImplantImagesTextListPath = "../../implantImage.txt";
const string defaultPathToResultsFolder = "../../results";

// Print the program banner, the expected command-line format, and a worked
// example invocation built from the compiled-in defaults.
// @param appName  executable name to show in the usage lines (argv[0]).
static void printIntro(const string& appName)
{
    // Fixed banner: the original box had mismatched edges and typos in the
    // printed text ("differnt", "identifywhich").
    cout << "/********************************************************************************************************\n"
     << " * Created by: Alex Gatz. 1/11/12. Created for: Xray Implant Identification                            *\n"
     << " * This code was created to scan a file full of images of different implants, generate keypoint maps   *\n"
     << " * for each image, and identify which image most closely matches a chosen image in another folder      *\n"
     << " ********************************************************************************************************/\n"
     << endl;

    cout << endl << "Format:\n" << endl;
    cout << "./" << appName << " [detector] [descriptor] [matcher] [xrayImagePath] [implantImagesTextListPath] [pathToSaveResults]" << endl;
    cout << endl;

    cout << "\nExample:" << endl
     << "./" << appName << " " << defaultDetector << " " << defaultDescriptor << " " << defaultMatcher << " "
     << defaultXrayImagePath << " " << defaultImplantImagesTextListPath << " " << defaultPathToResultsFolder << endl;
}

// Build a 0/1 mask parallel to `matches`, flagging exactly those matches whose
// train-image index equals `trainImgIdx`. Used by drawMatches() to show only
// the matches belonging to one implant image at a time.
static void maskMatchesByImplantImgIdx(const vector<DMatch>& matches, int trainImgIdx, vector<char>& mask)
{
    mask.assign(matches.size(), 0); // resize + zero-fill in one call
    for (size_t idx = 0; idx < matches.size(); ++idx)
     mask[idx] = (matches[idx].imgIdx == trainImgIdx) ? 1 : 0;
}

// Read the implant image list file: one image filename per line, stopping at
// the first blank line. On success, `dirName` receives the directory portion
// of `filename` (including the trailing delimiter) so callers can prepend it
// to each listed name; `implantFilenames` receives the listed names.
// If the file cannot be opened, `implantFilenames` is left empty and
// `dirName` is left untouched.
static void readImplantFilenames(const string& filename, string& dirName, vector<string>& implantFilenames)
{
    implantFilenames.clear();

    ifstream file(filename.c_str());
    if (!file.is_open())
     return;

    // Determine the path delimiter actually used: try Windows '\\' first,
    // then fall back to '/'.
    size_t pos = filename.rfind('\\');
    char dlmtr = '\\';
    if (pos == string::npos) // BUG FIX: was cv::String::npos (capital S)
    {
     pos = filename.rfind('/');
     dlmtr = '/';
    }
    dirName = pos == string::npos ? "" : filename.substr(0, pos) + dlmtr;

    // BUG FIX: the original `while (!file.eof())` pattern tests eof before
    // reading; testing the getline result itself is the correct idiom.
    string line;
    while (getline(file, line))
    {
     if (line.empty())
      break; // preserve original behavior: stop at the first blank line
     implantFilenames.push_back(line);
    }
    // ifstream closes itself on destruction (RAII).
}

// Create the ORB feature detector/extractor and the descriptor matcher.
// NOTE: detectorType/descriptorType are currently ignored because OpenCV 3.1
// removed the string-based factory for detectors/extractors; ORB is hard-coded.
// @return true when all three components were created successfully.
static bool createDetectorDescriptorMatcher(const string& detectorType, const string& descriptorType, const string& matcherType,
    Ptr<FeatureDetector>& featureDetector,
    Ptr<DescriptorExtractor>& descriptorExtractor,
    Ptr<DescriptorMatcher>& descriptorMatcher)
{
    cout << "< Creating feature detector, descriptor extractor and descriptor matcher ..." << endl;
    featureDetector = ORB::create(//All of these are parameters that can be adjusted to effect match accuracy and process time.
     10000, //int nfeatures = Maximum number of features to retain; higher number takes longer to process.    Default: 500
     1.4f, //float scaleFactor= Pyramid decimation ratio; between 1.00 - 2.00.                                Default: 1.2f
     6, //int nlevels = Number of pyramid levels used; more levels take more time but give more accuracy.     Default: 8
     40, //int edgeThreshold = Size of the border where features are not detected. Should match patchSize.    Default: 31
     0, //int firstLevel = Should remain 0 for now.                                                           Default: 0
     2, //int WTA_K = BUG FIX: was 4, but the comment (correctly) said it should remain 2 — with WTA_K of
        //3 or 4 the descriptors must be matched with "BruteForce-Hamming(2)" (NORM_HAMMING2), while this
        //program pairs ORB with plain "BruteForce-Hamming", so distances were computed incorrectly.  Default: 2
     ORB::HARRIS_SCORE, //int scoreType = ORB::HARRIS_SCORE is the most accurate ranking possible for ORB.    Default: HARRIS_SCORE
     33 //int patchSize = size of patch used by the oriented BRIEF descriptor. Should match edgeThreshold.    Default: 31
     );
    //featureDetector = ORB::create(); // <-- Uncomment this and comment the featureDetector above for default detector-
    //OpenCV 3.1 got rid of the dynamic naming of detectors and extractors.

    //These two are one and the same when using ORB; some detectors and extractors are separate,
    // in which case you would set "descriptorExtractor = descriptorType::create();" or its equivalent.
    descriptorExtractor = featureDetector;

    descriptorMatcher = DescriptorMatcher::create(matcherType);

    cout << ">" << endl;

    bool isCreated = !(featureDetector.empty() || descriptorExtractor.empty() || descriptorMatcher.empty());
    if (!isCreated)
     cout << "Can not create feature detector or descriptor extractor or descriptor matcher of given types." << endl << ">" << endl;

    return isCreated;
}

static void manipulateImage(Mat& image) //Manipulates images into only showing an outline!
{
    // Earlier experiment (kept for reference): Sobel derivative edge finder.
    // It was abandoned in favor of the simpler contrast/brightness stretch below.

    //int scale = 1;
    //int delta = 0;
    //int ddepth = CV_16S;
    ////equalizeHist(image, image); //This will equalize the lighting levels in each image.
    //GaussianBlur(image, image, Size(3, 3), 0, 0, BORDER_DEFAULT);

    //Mat grad_x, grad_y;
    //Mat abs_grad_x, abs_grad_y;
    ////For x
    //Sobel(image, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT);
    //convertScaleAbs(grad_x, abs_grad_x);
    ////For y
    //Sobel(image, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT);
    //convertScaleAbs(grad_y, abs_grad_y);

    //addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, image);

    //Specific Level adjustment (very clean)
    // Linear transform out = alpha*in + beta applied in place; the extreme
    // gain/offset pair crushes mid-tones and leaves a high-contrast outline.
    // Values were tuned by hand against the x-ray test set.
    double alpha = 20; //Best Result: 20
    int beta = -300; //Best Result: -300
    image.convertTo(image, -1, alpha, beta); // -1 keeps the source depth
}

// Load the query x-ray (grayscale, contrast-stretched) and every implant image
// named in the list file. Unreadable implant images are logged and pushed as
// empty Mats so that implantImages stays index-aligned with implantImageNames
// (saveResultImages relies on this alignment).
// @return false when the x-ray, the list file, or every implant image fails to load.
static bool readImages(const string& xrayImageName, const string& implantFilename,
    Mat& xrayImage, vector <Mat>& implantImages, vector<string>& implantImageNames)
{
    //TODO: Add a function call to automatically adjust all images loaded to best settings for matching.
    cout << "< Reading the images..." << endl;
    xrayImage = imread(xrayImageName, CV_LOAD_IMAGE_GRAYSCALE); //Turns the image gray while loading.

    if (xrayImage.empty())
    {
     cout << "Xray image can not be read." << endl << ">" << endl;
     return false;
    }
    // BUG FIX: this manipulation used to run *before* the empty() check above,
    // operating on a potentially unloaded image. Validate first, then adjust.
    manipulateImage(xrayImage);

    string trainDirName;
    readImplantFilenames(implantFilename, trainDirName, implantImageNames);
    if (implantImageNames.empty())
    {
     cout << "Implant image filenames can not be read." << endl << ">" << endl;
     return false;
    }
    int readImageCount = 0;
    for (size_t i = 0; i < implantImageNames.size(); i++)
    {
     string filename = trainDirName + implantImageNames[i];
     Mat img = imread(filename, CV_LOAD_IMAGE_GRAYSCALE); //Turns images gray while loading.
     //manipulateImage(img); //Runs Sobel derivative on implant image.
     if (img.empty())
     {
      cout << "Implant image " << filename << " can not be read." << endl;
     }
     else
     {
      readImageCount++;
     }
     // Push even when empty to keep indices aligned with implantImageNames.
     implantImages.push_back(img);
    }
    if (!readImageCount)
    {
     cout << "All implant images can not be read." << endl << ">" << endl;
     return false;
    }
    else
     cout << readImageCount << " implant images were read." << endl;
    cout << ">" << endl;

    return true;
}

// Detect keypoints in the query x-ray and in every implant (train) image.
// Uses the vector<Mat> overload of detect() for the implant set, producing one
// keypoint vector per image in implantKeypoints.
static void detectKeypoints(const Mat& xrayImage, vector<KeyPoint>& xrayKeypoints,
    const vector<Mat>& implantImages, vector<vector<KeyPoint> >& implantKeypoints,
    Ptr<FeatureDetector>& featureDetector)
{
    cout << endl << "< Extracting keypoints from images..." << endl;
    featureDetector->detect(xrayImage, xrayKeypoints);
    featureDetector->detect(implantImages, implantKeypoints);
    cout << ">" << endl;
}

static void computeDescriptors(const Mat& xrayImage, vector<KeyPoint>& implantKeypoints, Mat& implantDescriptors, 
    const vector<Mat>& implantImages, vector<vector<KeyPoint> >& implantImageKeypoints, vector<Mat>& implantImageDescriptors, 
    Ptr<DescriptorExtractor>& descriptorExtractor) 
{ 
    cout << "< Computing descriptors for keypoints..." << endl; 
    descriptorExtractor->compute(xrayImage, implantKeypoints, implantDescriptors); 
    descriptorExtractor->compute(implantImages, implantImageKeypoints, implantImageDescriptors); 

    int totalTrainDesc = 0; 
    for (vector<Mat>::const_iterator tdIter = implantImageDescriptors.begin(); tdIter != implantImageDescriptors.end(); tdIter++) 
     totalTrainDesc += tdIter->rows; 

    cout << "Query descriptors count: " << implantDescriptors.rows << "; Total train descriptors count: " << totalTrainDesc << endl; 
    cout << ">" << endl; 
} 

// Match the x-ray (query) descriptors against the whole implant descriptor
// collection. The matcher stores one DMatch per query descriptor; each match's
// imgIdx identifies which implant image it came from (used later for masking).
// NOTE: add() must precede train(), and train() must precede match() — the
// order of these three calls is part of the DescriptorMatcher contract.
static void matchDescriptors(const Mat& xrayDescriptors, const vector<Mat>& implantDescriptors,
    vector<DMatch>& matches, Ptr<DescriptorMatcher>& descriptorMatcher)
{
    cout << "< Set implant image descriptors collection in the matcher and match xray descriptors to them..." << endl;
    //time_t timerBegin, timerEnd;

    //time(&timerBegin);
    descriptorMatcher->add(implantDescriptors);
    descriptorMatcher->train();
    //time(&timerEnd);
    //double buildTime = difftime(timerEnd, timerBegin); // seconds, not ms

    //time(&timerBegin);
    descriptorMatcher->match(xrayDescriptors, matches);
    //time(&timerEnd);
    //double matchTime = difftime(timerEnd, timerBegin);

    // Sanity check: match() yields exactly one result per query row (or none).
    CV_Assert(xrayDescriptors.rows == (int)matches.size() || matches.empty());

    cout << "Number of imageMatches: " << matches.size() << endl;
    //cout << "Build time: " << buildTime << " ms; Match time: " << matchTime << " ms" << endl;
    cout << ">" << endl;
}

// For every readable implant image, draw only the matches belonging to that
// image (via the per-image mask) over the x-ray/implant pair and write the
// composite to "<resultDir>/result_<implantName>".
static void saveResultImages(const Mat& xrayImage, const vector<KeyPoint>& xrayKeypoints,
    const vector<Mat>& implantImage, const vector<vector<KeyPoint> >& implantImageKeypoints,
    const vector<DMatch>& matches, const vector<string>& implantImagesName, const string& resultDir)
{
    cout << "< Save results..." << endl;
    Mat drawImg;
    vector<char> mask;
    for (size_t i = 0; i < implantImage.size(); ++i)
    {
     if (implantImage[i].empty())
      continue; // unreadable image — nothing to draw against

     maskMatchesByImplantImgIdx(matches, (int)i, mask);
     // Flag value 4 == DrawMatchesFlags::DRAW_RICH_KEYPOINTS (size + orientation).
     drawMatches(xrayImage, xrayKeypoints, implantImage[i], implantImageKeypoints[i],
      matches, drawImg, Scalar::all(-1), Scalar(0, 0, 255), mask, 4);
     const string filename = resultDir + "/result_" + implantImagesName[i];
     if (!imwrite(filename, drawImg))
      cout << "Image " << filename << " can not be saved (may be because directory " << resultDir << " does not exist)." << endl;
    }
    cout << ">" << endl;

    //After all results have been saved, another function will scan and place the final result in a separate folder.
    //For now this save process is required to manually access each result and determine if the current settings are working well.
}

// Entry point: parse the six optional arguments (detector, descriptor,
// matcher, x-ray path, implant list path, results dir), build the ORB
// pipeline, load the images, detect/describe/match, and save annotated
// result images. Returns -1 on any setup failure.
int main(int argc, char** argv)
{
    //Initialize variables to global defaults.
    string detector = defaultDetector;
    string descriptor = defaultDescriptor;
    string matcher = defaultMatcher;
    string xrayImagePath = defaultXrayImagePath;
    string implantImagesTextListPath = defaultImplantImagesTextListPath;
    string pathToSaveResults = defaultPathToResultsFolder;

    //Either no extra arguments (use defaults) or exactly six are accepted.
    if (argc != 7 && argc != 1)
    {
     //This will be called if the incorrect amount of commands are used to start the program.
     // BUG FIX: was printIntro(argv[1]) — argv[1] is a user argument (and the
     // wrong name to show in usage); argv[0] is the executable name.
     printIntro(argv[0]);
     system("PAUSE");
     return -1;
    }

    //As long as you still have 7 arguments, I will set the variables for this
    // to the arguments you decided on.
    //If testing using XrayID --> Properties --> Debugging --> Command Arguments, remember to start with [detector] as the first command
    // C++ includes the [appName] command as the first argument automatically.

    if (argc != 1) //I suggest placing a break here and stepping through this to ensure the proper commands were sent in. With a
        // GUI this would not matter because the GUI would structure the input and use a default if no input was used.
    {
     detector = argv[1];
     descriptor = argv[2];
     matcher = argv[3];
     xrayImagePath = argv[4];
     implantImagesTextListPath = argv[5];
     pathToSaveResults = argv[6];
    }

    //Set up cv::Ptr's for tools.
    Ptr<FeatureDetector> featureDetector;
    Ptr<DescriptorExtractor> descriptorExtractor;
    Ptr<DescriptorMatcher> descriptorMatcher;

    //Check to see if tools are created, if not true print intro and close program.
    if (!createDetectorDescriptorMatcher(detector, descriptor, matcher, featureDetector, descriptorExtractor, descriptorMatcher))
    {
     printIntro(argv[0]);
     system("PAUSE");
     return -1;
    }

    Mat testImage;
    vector<Mat> implantImages;
    vector<string> implantImagesNames;

    //Check to see if readImages completes properly, if not true print intro and close program.
    if (!readImages(xrayImagePath, implantImagesTextListPath, testImage, implantImages, implantImagesNames))
    {
     printIntro(argv[0]);
     system("PAUSE");
     return -1;
    }

    vector<KeyPoint> xrayKeypoints;
    vector<vector<KeyPoint> > implantKeypoints;
    detectKeypoints(testImage, xrayKeypoints, implantImages, implantKeypoints, featureDetector);

    Mat xrayDescriptors;
    vector<Mat> implantTestImageDescriptors;
    computeDescriptors(testImage, xrayKeypoints, xrayDescriptors, implantImages, implantKeypoints, implantTestImageDescriptors,
     descriptorExtractor);

    vector<DMatch> imageMatches;
    matchDescriptors(xrayDescriptors, implantTestImageDescriptors, imageMatches, descriptorMatcher);
    saveResultImages(testImage, xrayKeypoints, implantImages, implantKeypoints, imageMatches, implantImagesNames, pathToSaveResults);

    system("PAUSE"); //Windows-only pause so the console stays open.
    return 0;
}
+0

[鏈接](http://imgur.com/jxFAvWG)下面是結果的圖象形成該代碼。 – BrokenCode

+0

這取決於很多因素。首先,您可以應用哪些限制條件?例如,你可以假設尺度和旋轉的不變性? (您的示例顯示如此,但情況總是如此?)您可能需要查看互相關係數(標準化後進行縮放)和模板匹配。 – gavinb

+0

比例和旋轉不影響算法或匹配。只要我獲得更好的結果,我不在乎我使用什麼。我現在使用的可以拍攝任何圖像,並從整個場景中挑選出確切的圖像。我需要一些可以選擇接近但不是圖像的完全複製品的東西。我正在使用的圖書館可以完美地用於實時視頻拍攝和追蹤任何事情。如果你給它一張有人臉的圖像,它可以在他們四處走動時跟蹤他們的臉。理論上這應該很好地匹配兩個圖像。我只是無法接近足夠的困難。 – BrokenCode

回答

0

該圖像看起來很像人造髖關節。如果你正在處理醫學圖像,你一定要檢查出The Insight Toolkit(ITK),它有很多特殊功能是爲了這個領域的特殊需要而設計的。您可以在真實圖像和模板數據之間進行簡單的模型 - 圖像配準,以找到最佳結果。我認爲用這種方法比使用上述基於點的測試可以得到更好的結果。

這種配準執行一系列參數(在這種情況下,仿射變換)的迭代優化,其試圖找到模型與圖像數據的最佳映射。

上面的例子採用一個固定的圖像,並試圖找到一個變換的運動圖像映射到其上。變換是一個2D仿射變換(在這種情況下是旋轉和平移),其參數是運行優化器的結果,可以最大化匹配度量。度量指標測量固定圖像和變換運動圖像的匹配程度。內插器是將運動圖像並應用變換將其映射到固定圖像上的內容。

在您的示例圖像中,固定圖像可能是original X-ray,移動圖像actual implant。由於兩者的大小不同,您可能需要添加縮放以進行完整的仿射變換。

該度量標準是對轉換後的運動圖像與固定圖像匹配程度的度量,因此您需要確定容差或最小度量標準以使匹配有效。如果圖像差別很大,則度量標準會很低,可以拒絕。

輸出是一組變換參數,輸出圖像是應用於運動圖像(不是圖像組合)的最終最優變換。結果基本上告訴你在X射線中發現植入物的位置。

+0

也許你可以提供一個工作示例?這個工具包看起來並不像我需要的東西。我可以忽略一些東西,但我只能使用2d圖像,而這似乎沒有辦法存儲和匹配大量的圖像數據。 – BrokenCode

+0

我已經在上面提供的鏈接是一個完整的示例。該工具包適用於2D和3D醫學圖像,絕對可用於處理大量數據。它是專門爲NHS和主要大學和研究團隊合作開發的,用於醫學圖像處理,並且重點關注註冊,這是您似乎正試圖解決的問題。查看ITK手冊,瞭解算法的完整說明。 – gavinb

+0

有趣。我現在已經看過書1;特別是3.6.5中心仿射變換。我不確定你提供的例子的預期目的實際上是什麼。從代碼和文本的插入中,看起來輸出實際上是組合圖像的變換。不幸的是,我沒有找到確定兩幅圖像之間最佳匹配的方法。您建議的示例似乎會生成一張圖像,顯示圖像中不相似的部分。我會測試這個代碼來驗證我的理解。 – BrokenCode

0
  • 請嘗試下面的代碼,希望這能幫助你。

    #include <opencv2/nonfree/nonfree.hpp>
    #include <iostream>
    #include <dirent.h>
    #include <ctime>
    #include <stdio.h>
    using namespace cv;
    using namespace std;

    // Demonstrates a ratio-test + symmetry-test ORB matching pipeline between
    // two images. Replace "Image1_path"/"Image2_path" with real file paths.
    int main(int argc, const char *argv[])
    {
        double ratio = 0.9; // Lowe ratio-test threshold

        // BUG FIX: the original line was `imread("Image1_path);` — missing
        // closing quote, which does not compile.
        Mat image1 = imread("Image1_path");
        Mat image2 = cv::imread("Image2_path");

        Ptr<FeatureDetector> detector;
        Ptr<DescriptorExtractor> extractor;

        // TODO default is 500 keypoints..but we can change
        detector = FeatureDetector::create("ORB");
        extractor = DescriptorExtractor::create("ORB");

        vector<KeyPoint> keypoints1, keypoints2;
        detector->detect(image1, keypoints1);
        detector->detect(image2, keypoints2);

        cout << "# keypoints of image1 :" << keypoints1.size() << endl;
        cout << "# keypoints of image2 :" << keypoints2.size() << endl;

        Mat descriptors1, descriptors2;
        extractor->compute(image1, keypoints1, descriptors1);
        extractor->compute(image2, keypoints2, descriptors2);

        cout << "Descriptors size :" << descriptors1.cols << ":" << descriptors1.rows << endl;

        // BUG FIX: `begin`/`end` were used below but never declared.
        clock_t begin = clock();

        // knnMatch in both directions (k = 2) for ratio + symmetry filtering.
        vector< vector<DMatch> > matches12, matches21;
        Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
        matcher->knnMatch(descriptors1, descriptors2, matches12, 2);
        matcher->knnMatch(descriptors2, descriptors1, matches21, 2);

        clock_t end = clock();

        // Distance statistics over the best (nearest) neighbor of each query.
        // BUG FIX: loop was bounded by descriptors1.rows; bound by the actual
        // match-list size to avoid out-of-range access.
        double max_dist = 0; double min_dist = 100;
        for (size_t i = 0; i < matches12.size(); i++)
        {
            if (matches12[i].empty()) continue;
            double dist = matches12[i][0].distance;
            if (dist < min_dist)
                min_dist = dist;
            if (dist > max_dist)
                max_dist = dist;
        }
        printf("-- Max dist : %f \n", max_dist);
        printf("-- Min dist : %f \n", min_dist);
        cout << "Matches1-2:" << matches12.size() << endl;
        cout << "Matches2-1:" << matches21.size() << endl;

        // Lowe ratio test in each direction.
        // BUG FIX: guard against queries with fewer than 2 neighbors before
        // indexing [1] (knnMatch may return short lists for tiny train sets).
        std::vector<DMatch> good_matches1, good_matches2;
        for (size_t i = 0; i < matches12.size(); i++)
        {
            if (matches12[i].size() >= 2 && matches12[i][0].distance < ratio * matches12[i][1].distance)
                good_matches1.push_back(matches12[i][0]);
        }

        for (size_t i = 0; i < matches21.size(); i++)
        {
            if (matches21[i].size() >= 2 && matches21[i][0].distance < ratio * matches21[i][1].distance)
                good_matches2.push_back(matches21[i][0]);
        }

        cout << "Good matches1:" << good_matches1.size() << endl;
        cout << "Good matches2:" << good_matches2.size() << endl;

        // Symmetric Test: keep a match only if both directions agree.
        std::vector<DMatch> better_matches;
        for (size_t i = 0; i < good_matches1.size(); i++)
        {
            for (size_t j = 0; j < good_matches2.size(); j++)
            {
                if (good_matches1[i].queryIdx == good_matches2[j].trainIdx && good_matches2[j].queryIdx == good_matches1[i].trainIdx)
                {
                    better_matches.push_back(DMatch(good_matches1[i].queryIdx, good_matches1[i].trainIdx, good_matches1[i].distance));
                    break;
                }
            }
        }

        cout << "Better matches:" << better_matches.size() << endl;
        double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
        cout << "Matching time (s): " << elapsed_secs << endl;

        // show it on an image
        Mat output;
        drawMatches(image1, keypoints1, image2, keypoints2, better_matches, output);
        imshow("Matches result", output);
        waitKey(0);

        return 0;
    }
    
+0

我將整合該代碼。我還沒有嘗試過使用「knnMatch」之前的任何東西,我不知道這將如何工作。謝謝。 – BrokenCode

+0

謝謝。後來我開始學習機器。 :) – BrokenCode

相關問題