Aligning the depth image with the RGB image

I have been trying for a long time to align the depth stream from my Kinect with the RGB stream. I have read several articles about it, but I must be missing something key, because I cannot get it to work.

Here is an image of what I have managed to do so far, with a few marks drawn in to make the misalignment easy to spot: Kinect Depth misalignment

I have tried to break this down into as little code as possible, but it is still a fair chunk, so please bear with me. The snippet below is the handler the Kinect SDK calls every time a depth frame and an RGB frame are ready.

As you can see, I have been trying to map each depth point with

ColorImagePoint colorpoint = _Sensor.CoordinateMapper.MapDepthPointToColorPoint(DepthImageFormat.Resolution640x480Fps30, depthpoint, ColorImageFormat.RgbResolution640x480Fps30); 

I would rather use CoordinateMapper.MapDepthFrameToColorFrame (since that should solve this problem), but I cannot get it to work. I am probably just not using it correctly, though.
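To be clear about which call I mean, this is roughly the shape I have been trying to get working (just a sketch with the 640x480 formats hard-coded and placeholder array sizes, not my real code):

// depthPixels would be filled with depthFrame.CopyDepthImagePixelDataTo(depthPixels) first
DepthImagePixel[] depthPixels = new DepthImagePixel[640 * 480];

// one ColorImagePoint per depth pixel, filled in a single call
ColorImagePoint[] mappedPoints = new ColorImagePoint[640 * 480];
_Sensor.CoordinateMapper.MapDepthFrameToColorFrame(
    DepthImageFormat.Resolution640x480Fps30,
    depthPixels,
    ColorImageFormat.RgbResolution640x480Fps30,
    mappedPoints);

// mappedPoints[x + y * 640] should then be where depth pixel (x, y) lands in the color image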

I am using Microsoft's Kinect SDK 1.6.

private void EventAllFramesReady(Object Sender, AllFramesReadyEventArgs e)
{
    System.Drawing.Color color;

    Bitmap image = null;
    Bitmap depth = null;

    using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
    {
        using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
        {
            // color
            image = new Bitmap(colorFrame.Width, colorFrame.Height);
            byte[] colorPixels = new byte[colorFrame.PixelDataLength];
            colorFrame.CopyPixelDataTo(colorPixels);

            // lock bitmap, and work with BitmapData (way faster than SetPixel())
            BitmapData imageBitmapData = image.LockBits(new Rectangle(0, 0, image.Width, image.Height),
                                                        ImageLockMode.WriteOnly,
                                                        image.PixelFormat);
            IntPtr IptrImage = imageBitmapData.Scan0;
            byte[] PixelsImage = new byte[image.Width * image.Height * 4];

            // depth
            depth = new Bitmap(depthFrame.Width, depthFrame.Height);
            DepthImagePixel[] depthData = new DepthImagePixel[depthFrame.PixelDataLength];
            depthFrame.CopyDepthImagePixelDataTo(depthData);

            // lock bitmap, and work with BitmapData (way faster than SetPixel())
            BitmapData depthBitmapData = depth.LockBits(new Rectangle(0, 0, depth.Width, depth.Height),
                                                        ImageLockMode.WriteOnly,
                                                        depth.PixelFormat);
            IntPtr IptrDepth = depthBitmapData.Scan0;
            byte[] PixelsDepth = new byte[depth.Width * depth.Height * 4];

            DepthImagePoint depthpoint = new DepthImagePoint();

            for (int x = 1; x < colorFrame.Width; x++)
            {
                for (int y = 1; y < colorFrame.Height; y++)
                {
                    int i = ((y * image.Width) + x) * 4;

                    short depthdistanceRAW = (depthData[x + y * depth.Width]).Depth;

                    // convert distance value into a color
                    color = System.Drawing.Color.Pink;
                    if (depthdistanceRAW > 0 && depthdistanceRAW <= 4000)
                    {
                        int depthdistance = (int)((depthdistanceRAW / 4090f) * 255f);
                        color = System.Drawing.Color.FromArgb((int)(depthdistance / 2f), depthdistance, (int)(depthdistance * 0.7f));
                    }

                    depthpoint.X = x;
                    depthpoint.Y = y;
                    depthpoint.Depth = depthdistanceRAW;

                    ColorImagePoint colorpoint = _Sensor.CoordinateMapper.MapDepthPointToColorPoint(DepthImageFormat.Resolution640x480Fps30, depthpoint, ColorImageFormat.RgbResolution640x480Fps30);

                    //if (colorpoint.X > 0 && colorpoint.X <= 640 && colorpoint.Y > 0 && colorpoint.Y <= 480)
                    //{
                    int adjustedposition = ((colorpoint.Y * image.Width) + colorpoint.X) * 4;
                    //if (adjustedposition < depthData.Length)
                    //{
                    PixelsDepth[i] = color.B;
                    PixelsDepth[i + 1] = color.G;
                    PixelsDepth[i + 2] = color.R;
                    PixelsDepth[i + 3] = DepthTransparency;
                    //}
                    //}

                    PixelsImage[i] = colorPixels[i];
                    PixelsImage[i + 1] = colorPixels[i + 1];
                    PixelsImage[i + 2] = colorPixels[i + 2];
                    PixelsImage[i + 3] = 255;
                }
            }

            Marshal.Copy(PixelsImage, 0, IptrImage, PixelsImage.Length);
            image.UnlockBits(imageBitmapData);

            Marshal.Copy(PixelsDepth, 0, IptrDepth, PixelsDepth.Length);
            depth.UnlockBits(depthBitmapData);
        }
    }

    _kf.UpdateImage(image); // update the RGB picture in the form
    _kf.UpdateDepth(depth); // update the Depth picture in the form
}

I edited your title. See "[Should questions include "tags" in their titles?](http://meta.stackexchange.com/questions/19190/)", where the consensus is "no, they should not". –


Duly noted, thank you :) – JensB

Answer


I posted an answer to a similar question here.

The Coordinate Mapping Basics-WPF C# Sample shows how to use this method. You can find the important parts in the following code:

// Intermediate storage for the depth data received from the sensor 
private DepthImagePixel[] depthPixels; 
// Intermediate storage for the color data received from the camera 
private byte[] colorPixels; 
// Intermediate storage for the depth to color mapping 
private ColorImagePoint[] colorCoordinates; 
// Inverse scaling factor between color and depth 
private int colorToDepthDivisor; 
// Format we will use for the depth stream 
private const DepthImageFormat DepthFormat = DepthImageFormat.Resolution320x240Fps30; 
// Format we will use for the color stream 
private const ColorImageFormat ColorFormat = ColorImageFormat.RgbResolution640x480Fps30; 

//... 

// Initialization 
this.colorCoordinates = new ColorImagePoint[this.sensor.DepthStream.FramePixelDataLength]; 
this.depthWidth = this.sensor.DepthStream.FrameWidth; 
this.depthHeight = this.sensor.DepthStream.FrameHeight; 
int colorWidth = this.sensor.ColorStream.FrameWidth; 
int colorHeight = this.sensor.ColorStream.FrameHeight; 
this.colorToDepthDivisor = colorWidth / this.depthWidth; 
this.sensor.AllFramesReady += this.SensorAllFramesReady; 

//... 

private void SensorAllFramesReady(object sender, AllFramesReadyEventArgs e)
{
    // in the middle of shutting down, so nothing to do
    if (null == this.sensor)
    {
        return;
    }

    bool depthReceived = false;
    bool colorReceived = false;

    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        if (null != depthFrame)
        {
            // Copy the pixel data from the image to a temporary array
            depthFrame.CopyDepthImagePixelDataTo(this.depthPixels);

            depthReceived = true;
        }
    }

    using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
    {
        if (null != colorFrame)
        {
            // Copy the pixel data from the image to a temporary array
            colorFrame.CopyPixelDataTo(this.colorPixels);

            colorReceived = true;
        }
    }

    if (true == depthReceived)
    {
        this.sensor.CoordinateMapper.MapDepthFrameToColorFrame(
            DepthFormat,
            this.depthPixels,
            ColorFormat,
            this.colorCoordinates);

        // loop over each row and column of the depth image
        for (int y = 0; y < this.depthHeight; ++y)
        {
            for (int x = 0; x < this.depthWidth; ++x)
            {
                int depthIndex = x + (y * this.depthWidth);
                DepthImagePixel depthPixel = this.depthPixels[depthIndex];

                // retrieve the depth-to-color mapping for the current depth pixel
                ColorImagePoint colorImagePoint = this.colorCoordinates[depthIndex];

                // scale color coordinates to depth resolution
                int X = colorImagePoint.X / this.colorToDepthDivisor;
                int Y = colorImagePoint.Y / this.colorToDepthDivisor;

                // depthPixel.Depth is the depth for the color pixel at colorImagePoint;
                // (X, Y) is that same position scaled back down to depth resolution
            }
        }
    }
}
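
Not tested, but to map this onto the handler in your question: with both streams at 640x480 (so the divisor is 1) you can replace the per-point MapDepthPointToColorPoint call with one MapDepthFrameToColorFrame call per frame, and then write each depth sample at the position it maps to in the color image. A rough sketch, reusing your own depthData, PixelsDepth, DepthTransparency and _Sensor:

// Sketch only: depthData is your DepthImagePixel[] copied from the depth frame,
// PixelsDepth is your 640*480*4 output buffer, DepthTransparency your alpha value.
ColorImagePoint[] colorCoordinates = new ColorImagePoint[640 * 480];
_Sensor.CoordinateMapper.MapDepthFrameToColorFrame(
    DepthImageFormat.Resolution640x480Fps30, depthData,
    ColorImageFormat.RgbResolution640x480Fps30, colorCoordinates);

for (int y = 0; y < 480; y++)
{
    for (int x = 0; x < 640; x++)
    {
        int depthIndex = x + (y * 640);

        // color the depth sample the same way you already do
        short raw = depthData[depthIndex].Depth;
        System.Drawing.Color c = System.Drawing.Color.Pink;
        if (raw > 0 && raw <= 4000)
        {
            int d = (int)((raw / 4090f) * 255f);
            c = System.Drawing.Color.FromArgb((int)(d / 2f), d, (int)(d * 0.7f));
        }

        // where does this depth pixel land in the color image?
        ColorImagePoint p = colorCoordinates[depthIndex];
        if (p.X < 0 || p.X >= 640 || p.Y < 0 || p.Y >= 480)
        {
            continue; // maps outside the color frame, skip it
        }

        // write at the mapped color position instead of at (x, y)
        int adjustedposition = ((p.Y * 640) + p.X) * 4;
        PixelsDepth[adjustedposition] = c.B;
        PixelsDepth[adjustedposition + 1] = c.G;
        PixelsDepth[adjustedposition + 2] = c.R;
        PixelsDepth[adjustedposition + 3] = DepthTransparency;
    }
}

The important difference from your current loop is that the color is written at the mapped position (adjustedposition) instead of at i; writing at i applies no correction at all, which would explain why the overlay never lines up.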