
Emgu CV SURF: getting the coordinates of matched points

I am using SURF features from Emgu CV to recognize similar objects in images.

The result image is drawn on the right and shows all the keypoints found in both images, the matching points (which are what I want), and a rectangle (usually a rectangle, sometimes just a line) covering the matched area.

The problem is that the matching points are visible in the image, but they are not saved in the format I want. They are stored in VectorOfKeyPoint objects, which only hold a pointer and other memory data while the points themselves live in native memory (that is my understanding). This means I cannot get the matched points as pairs like:

((img1X,img1Y),(img2X,img2Y))

That is what I am looking for, so that I can use the points later. At the moment I can only see the points in the result image, but I cannot get them as pairs.

The code I am using is the example from Emgu CV.

//---------------------------------------------------------------------------- 
// Copyright (C) 2004-2016 by EMGU Corporation. All rights reserved.  
//---------------------------------------------------------------------------- 
using System; 
using System.Collections.Generic; 
using System.Diagnostics; 
using System.Drawing; 
using System.Runtime.InteropServices; 
using Emgu.CV; 
using Emgu.CV.CvEnum; 
using Emgu.CV.Features2D; 
using Emgu.CV.Structure; 
using Emgu.CV.Util; 
#if !__IOS__ 
using Emgu.CV.Cuda; 
#endif 
using Emgu.CV.XFeatures2D; 

namespace FirstEmgu 
{ 

    public static class DrawMatches 
    { 
    // -------------------------------- 
    // ORIGINAL FUNCTION FROM EXAMPLE 
    // -------------------------------- 
     private static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography) 
     { 
      int k = 2; 
      double uniquenessThreshold = 0.8; 
      double hessianThresh = 300; 

      Stopwatch watch; 
      homography = null; 

      modelKeyPoints = new VectorOfKeyPoint(); 
      observedKeyPoints = new VectorOfKeyPoint(); 

#if !__IOS__ 
      if (CudaInvoke.HasCuda) 
      { 
       CudaSURF surfCuda = new CudaSURF((float)hessianThresh); 
       using (GpuMat gpuModelImage = new GpuMat(modelImage)) 
       //extract features from the object image 
       using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null)) 
       using (GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints)) 
       using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2)) 
       { 
        surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints); 
        watch = Stopwatch.StartNew(); 

        // extract features from the observed image 
        using (GpuMat gpuObservedImage = new GpuMat(observedImage)) 
        using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null)) 
        using (GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints)) 
        //using (GpuMat tmp = new GpuMat()) 
        //using (Stream stream = new Stream()) 
        { 
         matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k); 

         surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints); 

         mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1); 
         mask.SetTo(new MCvScalar(255)); 
         Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask); 

         int nonZeroCount = CvInvoke.CountNonZero(mask); 
         if (nonZeroCount >= 4) 
         { 
          nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, 
           matches, mask, 1.5, 20); 
          if (nonZeroCount >= 4) 
           homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, 
            observedKeyPoints, matches, mask, 2); 
         } 
        } 
        watch.Stop(); 
       } 
      } 
      else 
#endif 
      { 
       using (UMat uModelImage = modelImage.ToUMat(AccessType.Read)) 
       using (UMat uObservedImage = observedImage.ToUMat(AccessType.Read)) 
       { 
        SURF surfCPU = new SURF(hessianThresh); 
        //extract features from the object image 
        UMat modelDescriptors = new UMat(); 
        surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false); 

        watch = Stopwatch.StartNew(); 

        // extract features from the observed image 
        UMat observedDescriptors = new UMat(); 
        surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false); 
        BFMatcher matcher = new BFMatcher(DistanceType.L2); 
        matcher.Add(modelDescriptors); 

        matcher.KnnMatch(observedDescriptors, matches, k, null); 
        mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1); 
        mask.SetTo(new MCvScalar(255)); 
        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask); 

        int nonZeroCount = CvInvoke.CountNonZero(mask); 
        if (nonZeroCount >= 4) 
        { 
         nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, 
          matches, mask, 1.5, 20); 
         if (nonZeroCount >= 4) 
          homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, 
           observedKeyPoints, matches, mask, 2); 
        } 

        watch.Stop(); 
       } 
      } 
      matchTime = watch.ElapsedMilliseconds; 
     } 
     // -------------------------------- 
     // ORIGINAL FUNCTION FROM EXAMPLE 
     // -------------------------------- 
     /// <summary> 
     /// Draw the model image and observed image, the matched features and homography projection. 
     /// </summary> 
     /// <param name="modelImage">The model image</param> 
     /// <param name="observedImage">The observed image</param> 
     /// <param name="matchTime">The output total time for computing the homography matrix.</param> 
     /// <returns>The model image and observed image, the matched features and homography projection.</returns> 
     public static Mat Draw(Mat modelImage, Mat observedImage, out long matchTime) 
     { 
      Mat homography; 
      VectorOfKeyPoint modelKeyPoints; 
      VectorOfKeyPoint observedKeyPoints; 
      using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch()) 
      { 
       Mat mask; 
       FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches, 
        out mask, out homography); 

       //Draw the matched keypoints 
       Mat result = new Mat(); 
       Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints, 
        matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask); 

       #region draw the projected region on the image 

       if (homography != null) 
       { 
        //draw a rectangle along the projected model 
        Rectangle rect = new Rectangle(Point.Empty, modelImage.Size); 
        PointF[] pts = new PointF[] 
       { 
        new PointF(rect.Left, rect.Bottom), 
        new PointF(rect.Right, rect.Bottom), 
        new PointF(rect.Right, rect.Top), 
        new PointF(rect.Left, rect.Top) 
       }; 
        pts = CvInvoke.PerspectiveTransform(pts, homography); 

        Point[] points = Array.ConvertAll<PointF, Point>(pts, Point.Round); 
        using (VectorOfPoint vp = new VectorOfPoint(points)) 
        { 
         CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 5); 
        } 

       } 

       #endregion 

       return result; 

      } 
     } 

     // ---------------------------------- 
     // WRITTEN BY MYSELF 
     // ---------------------------------- 
     // Returns 4 points (usually rectangle) of similar points 
     // but can't be used, since sometimes this is a line (negative 
     // points) 
     public static Point[] FindPoints(Mat modelImage, Mat observedImage, out long matchTime) 
     { 
      Mat homography; 
      VectorOfKeyPoint modelKeyPoints; 
      VectorOfKeyPoint observedKeyPoints; 
      using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch()) 
      { 
       Mat mask; 
       FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches, 
        out mask, out homography); 

       //Draw the matched keypoints 
       Mat result = new Mat(); 
       Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints, 
        matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask); 

       Point[] points = null; 
       if (homography != null) 
       { 
        //draw a rectangle along the projected model 
        Rectangle rect = new Rectangle(Point.Empty, modelImage.Size); 
        PointF[] pts = new PointF[] 
       { 
        new PointF(rect.Left, rect.Bottom), 
        new PointF(rect.Right, rect.Bottom), 
        new PointF(rect.Right, rect.Top), 
        new PointF(rect.Left, rect.Top) 
       }; 
        pts = CvInvoke.PerspectiveTransform(pts, homography); 

        points = Array.ConvertAll<PointF, Point>(pts, Point.Round); 

       } 

       return points; 
      } 
     } 
    } 
} 

EDIT

I have managed to get some points out of the matches object like this:

Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints, 
        matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask); 

       for (int i = 0; i < matches.Size; i++) 
       { 
        var a = matches[i].ToArray(); 
        foreach (var e in a) 
        { 
         Point p = new Point(e.TrainIdx, e.QueryIdx); 
         Console.WriteLine(string.Format("Point: {0}", p)); 
        } 
        Console.WriteLine("-----------------------"); 
       } 

I thought this should get me my points. I managed to get it working in Python, and the code is not much different. The problem is that far too many points are returned. In fact, this returns all the points on Y:

(45,1),(67,1)

(656,2),(77,2)

...

Even though I might be close, this does not get me the point pairs I want. Any suggestions are appreciated.

EDIT 2

The question Find interest point in surf Detector Algorithm is very similar to what I need. There is only one answer, but it does not explain how to get the coordinates of the matched points. That is what I need: if the same object appears in both images, get the coordinates of the object's points from both images.

Answers

Answer 1:

In the FindMatch function, every pair of points is validated by the VoteForUniqueness function. The result of this validation is stored in mask.

So all you need to do is check whether the match has been validated or not:

for (int i = 0; i < matches.Size; i++) 
{ 
    var a = matches[i].ToArray(); 
    if (mask.GetData(i)[0] == 0) 
     continue; 
    foreach (var e in a) 
    { 
     Point p = new Point(e.TrainIdx, e.QueryIdx); 
     Console.WriteLine(string.Format("Point: {0}", p)); 
    } 
    Console.WriteLine("-----------------------"); 
} 

Answer 2:

The coordinates are not TrainIdx and QueryIdx; those are the indices of the keypoints. This will give the pixel coordinates of the matches between the model image and the observed image:

for (int i = 0; i < matches.Size; i++) 
{ 
    var arrayOfMatches = matches[i].ToArray(); 
    if (mask.GetData(i)[0] == 0) continue; 
    foreach (var match in arrayOfMatches) 
    { 
     var matchingModelKeyPoint = modelKeyPoints[match.TrainIdx]; 
     var matchingObservedKeyPoint = observedKeyPoints[match.QueryIdx]; 
     Console.WriteLine("Model coordinate '" + matchingModelKeyPoint.Point + "' matches observed coordinate '" + matchingObservedKeyPoint.Point + "'."); 
    } 
} 

The number of items in arrayOfMatches is equal to the value of K. My understanding is that the match with the lowest distance is the best one.
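
Putting the two answers together, here is a minimal sketch of how the ((img1X, img1Y), (img2X, img2Y)) pairs asked for in the question could be collected. It reuses the matches, modelKeyPoints, observedKeyPoints and mask produced by FindMatch above; the helper name GetMatchedPointPairs and the use of Tuple<PointF, PointF> are illustrative choices, not part of the Emgu CV example, and it keeps only the lowest-distance candidate of the K matches returned for each query point.

// Sketch, not from the Emgu CV example: collect matched coordinates as
// ((img1X, img1Y), (img2X, img2Y)) pairs, using the outputs of FindMatch.
public static List<Tuple<PointF, PointF>> GetMatchedPointPairs(
    VectorOfVectorOfDMatch matches,
    VectorOfKeyPoint modelKeyPoints,
    VectorOfKeyPoint observedKeyPoints,
    Mat mask)
{
    var pairs = new List<Tuple<PointF, PointF>>();
    for (int i = 0; i < matches.Size; i++)
    {
        // skip matches rejected by VoteForUniqueness (same mask check as above)
        if (mask.GetData(i)[0] == 0)
            continue;

        var arrayOfMatches = matches[i].ToArray();
        if (arrayOfMatches.Length == 0)
            continue;

        // of the K candidates, keep the one with the lowest distance
        var best = arrayOfMatches[0];
        foreach (var match in arrayOfMatches)
            if (match.Distance < best.Distance)
                best = match;

        PointF modelPoint = modelKeyPoints[best.TrainIdx].Point;       // point in the model image
        PointF observedPoint = observedKeyPoints[best.QueryIdx].Point; // point in the observed image
        pairs.Add(Tuple.Create(modelPoint, observedPoint));
    }
    return pairs;
}

TrainIdx indexes modelKeyPoints and QueryIdx indexes observedKeyPoints because FindMatch adds the model descriptors to the matcher and calls KnnMatch with the observed descriptors as the query.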