2012-03-16

I have been using the SURF feature detection example from the EMGU CV library: EMGU CV SURF image matching.

So far it has worked surprisingly well; I can detect a matching object between two given images. The problem comes when the images do not match.

I looked for support on the forums, but they are down from where I am. Does anyone know which parameters determine whether two images match? When I test with two images that do not match, the code still carries on as if there were a match, and draws blurry thick red lines at random locations on the image even though no match exists.

If there is no match, I want to break out of the code and not continue.

Appendix:

    static void Run()
    {
        Image<Gray, Byte> modelImage = new Image<Gray, byte>("HatersGonnaHate.png");
        Image<Gray, Byte> observedImage = new Image<Gray, byte>("box_in_scene.png");
        Stopwatch watch;
        HomographyMatrix homography = null;

        SURFDetector surfCPU = new SURFDetector(500, false);

        VectorOfKeyPoint modelKeyPoints;
        VectorOfKeyPoint observedKeyPoints;
        Matrix<int> indices;
        Matrix<float> dist;
        Matrix<byte> mask;

        if (GpuInvoke.HasCuda)
        {
            GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
            // extract features from the object (model) image
            using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte>(modelImage))
            using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
            using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
            using (GpuBruteForceMatcher matcher = new GpuBruteForceMatcher(GpuBruteForceMatcher.DistanceType.L2))
            {
                modelKeyPoints = new VectorOfKeyPoint();
                surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                watch = Stopwatch.StartNew();

                // extract features from the observed image
                using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte>(observedImage))
                using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                using (GpuMat<int> gpuMatchIndices = new GpuMat<int>(gpuObservedDescriptors.Size.Height, 2, 1))
                using (GpuMat<float> gpuMatchDist = new GpuMat<float>(gpuMatchIndices.Size, 1))
                {
                    observedKeyPoints = new VectorOfKeyPoint();
                    surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                    // k-nearest-neighbour matching with k = 2, so the uniqueness vote below
                    // can compare the best match against the second best
                    matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, 2, null);

                    indices = new Matrix<int>(gpuMatchIndices.Size);
                    dist = new Matrix<float>(indices.Size);
                    gpuMatchIndices.Download(indices);
                    gpuMatchDist.Download(dist);

                    // start with every match accepted; the voting steps clear mask entries
                    mask = new Matrix<byte>(dist.Rows, 1);
                    mask.SetValue(255);

                    // ratio test: reject matches whose best distance is not clearly
                    // smaller than the second-best distance
                    Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                    // a homography needs at least 4 point correspondences
                    int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                            homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                    }

                    watch.Stop();
                }
            }
        }
        else
        {
            // extract features from the object (model) image
            modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
            //MKeyPoint[] kpts = modelKeyPoints.ToArray();
            Matrix<float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

            watch = Stopwatch.StartNew();

            // extract features from the observed image
            observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
            Matrix<float> observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);

            BruteForceMatcher matcher = new BruteForceMatcher(BruteForceMatcher.DistanceType.L2F32);
            matcher.Add(modelDescriptors);
            int k = 2;
            indices = new Matrix<int>(observedDescriptors.Rows, k);
            dist = new Matrix<float>(observedDescriptors.Rows, k);
            matcher.KnnMatch(observedDescriptors, indices, dist, k, null);

            mask = new Matrix<byte>(dist.Rows, 1);
            mask.SetValue(255);

            // ratio test: reject ambiguous matches
            Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

            int nonZeroCount = CvInvoke.cvCountNonZero(mask);
            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                    homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
            }

            watch.Stop();
        }

        // draw the matched keypoints
        Image<Bgr, Byte> result = Features2DTracker.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
            indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DTracker.KeypointDrawType.NOT_DRAW_SINGLE_POINTS);

        #region draw the projected region on the image
        if (homography != null)
        {
            // a homography was found: project the model's corners into the
            // observed image and outline the projected region
            Rectangle rect = modelImage.ROI;
            PointF[] pts = new PointF[] {
                new PointF(rect.Left, rect.Bottom),
                new PointF(rect.Right, rect.Bottom),
                new PointF(rect.Right, rect.Top),
                new PointF(rect.Left, rect.Top)};
            homography.ProjectPoints(pts);

            result.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);
        }
        #endregion

        ImageViewer.Show(result, String.Format("Matched using {0} in {1} milliseconds", GpuInvoke.HasCuda ? "GPU" : "CPU", watch.ElapsedMilliseconds));
    }

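Roughly, the guard I am after would look like this (a sketch only; homography and mask come from the code above, and the cut-off of 10 surviving matches is an arbitrary value that would need tuning):

    // Sketch of a "no match" guard (not in the original sample).
    // Treat a missing homography, or too few surviving matches, as a failed match.
    int survivingMatches = CvInvoke.cvCountNonZero(mask);
    if (homography == null || survivingMatches < 10) // 10 is an arbitrary cut-off to tune
    {
        Console.WriteLine("No reliable match - skipping this image pair.");
        return;
    }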

Additional note: to be clearer, when the 2 images do not match I want to stop execution and check another image. – user1246856 2012-03-16 22:01:45

Update: I think I solved it. I just lowered the uniqueness threshold in Features2DTracker.VoteForUniqueness(dist, 0.8, mask); from 0.8 to 0.5 and it works fine (a short sketch of what this threshold does follows these comments). – user1246856 2012-03-17 09:42:24

Could you write up how you solved it as an answer? Thanks – 2012-03-28 08:20:29
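For context on why lowering that threshold helps: VoteForUniqueness applies a Lowe-style ratio test to the two nearest-neighbour distances returned by KnnMatch, and a lower threshold rejects more ambiguous matches. A simplified sketch of the assumed behaviour (this is not the library's actual implementation):

    // Simplified sketch of the assumed behaviour of VoteForUniqueness (not the exact library code).
    // dist holds, for each observed keypoint, the distances to its two nearest model
    // descriptors, as produced by KnnMatch with k = 2 in the code above.
    double uniquenessThreshold = 0.5; // stricter than the 0.8 originally used
    for (int i = 0; i < dist.Rows; i++)
    {
        // Reject the match when the best distance is not clearly smaller than the second best.
        if (dist[i, 0] > uniquenessThreshold * dist[i, 1])
            mask[i, 0] = 0;
    }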

Answer

I don't know of a single method that works for every image sequence or for all geometric deformations.

I suggest you compute the PSNR between the two images and work out a tolerance threshold on your image sequence.
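A minimal PSNR helper might look like this (a hypothetical sketch, assuming two grayscale images of identical size; ComputePsnr is not an EMGU function):

    // Hypothetical helper (not part of EMGU): PSNR between two same-size grayscale images.
    static double ComputePsnr(Image<Gray, Byte> a, Image<Gray, Byte> b)
    {
        // Mean squared error over all pixels.
        double mse = 0;
        for (int y = 0; y < a.Height; y++)
            for (int x = 0; x < a.Width; x++)
            {
                double d = (double)a.Data[y, x, 0] - b.Data[y, x, 0];
                mse += d * d;
            }
        mse /= (double)a.Height * a.Width;

        if (mse == 0)
            return double.PositiveInfinity; // identical images

        // PSNR in dB for 8-bit images (peak value 255).
        return 10.0 * Math.Log10(255.0 * 255.0 / mse);
    }

Higher PSNR means more similar images; the rejection threshold would have to be chosen empirically from your own sequence.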