2011-10-02

After realizing that there was no real-time image effects library for MonoTouch, I decided to write my own. After some research I wrote a convolution method that works perfectly, but even with unsafe code it is terribly slow. What am I doing wrong? Is there some optimization I have missed? How can I optimize this image convolution filter method in MonoTouch?

Here is my C# class. Any suggestion, no matter how small, is welcome!

using System; 
using System.Drawing; 
using MonoTouch.CoreGraphics; 
using System.Runtime.InteropServices; 
using MonoTouch.UIKit; 
using MonoTouch; 

namespace FilterLibrary 
{ 

public class ConvMatrix 
{ 

    public int Factor { get; set; } 
    public int Offset { get; set; } 

    private int[,] _matrix = { {0, 0, 0, 0, 0}, 
           {0, 0, 0, 0, 0}, 
           {0, 0, 1, 0, 0}, 
           {0, 0, 0, 0, 0}, 
           {0, 0, 0, 0, 0} 
          }; 

    public int[,] Matrix 
    { 
     get { return _matrix; } 
     set 
     { 
      _matrix = value; 

      Factor = 0; 
      for (int i = 0; i < Size; i++) 
       for (int j = 0; j < Size; j++) 
        Factor += _matrix[i, j]; 

      if (Factor == 0) 
       Factor = 1; 
     } 
    } 

    private int _size = 5; 
    public int Size 
    { 
     get { return _size; } 
     set 
     { 
      if (value != 1 && value != 3 && value != 5 && value != 7) 
       _size = 5; 
      else 
       _size = value; 
     } 
    } 

    public ConvMatrix() 
    { 
     Offset = 0; 
     Factor = 1; 
    } 
} 

public class ConvolutionFilter 
{ 
    public ConvolutionFilter() 
    { 
    } 

    public static CGImage GaussianSmooth (CGImage image) 
    { 
     ConvMatrix matr = new ConvMatrix(); 
     matr.Matrix = new int[5, 5] { 
           { 1 , 4 , 7 , 4 , 1 }, 
           { 4 ,16 ,26 ,16 , 4 }, 
           { 7 ,26 ,41 ,26 , 7 }, 
           { 4 ,16 ,26 ,16 , 4 }, 
           { 1 , 4 , 7 , 4 , 1 } 
           }; 

     return ImageConvolution (image, matr); 

    } 


    public static CGImage MotionBlur (CGImage image) 
    { 
     ConvMatrix matr = new ConvMatrix(); 
     matr.Size = 7; 
     matr.Matrix = new int[7, 7] { 
             { 1 , 0 , 0 , 0 , 0 , 0 , 0}, 
             { 0 , 1 , 0 , 0 , 0 , 0 , 0}, 
             { 0 , 0 , 1 , 0 , 0 , 0 , 0}, 
             { 0 , 0 , 0 , 1 , 0 , 0 , 0}, 
             { 0 , 0 , 0 , 0 , 1 , 0 , 0}, 
             { 0 , 0 , 0 , 0 , 0 , 1 , 0}, 
             { 0 , 0 , 0 , 0 , 0 , 0 , 1} 
            }; 

     return ImageConvolution (image, matr); 
    } 

    public static CGBitmapContext ConvertToBitmapRGBA8 (CGImage imageRef) 
    { 
     // Create an empty bitmap context to draw the uiimage into 
     CGBitmapContext context = NewEmptyBitmapRGBA8ContextFromImage (imageRef); 
     if (context == null) { 
      Console.WriteLine ("ERROR: failed to create bitmap context"); 
      return null; 

     } 
     RectangleF rect = new RectangleF (0.0f, 0.0f, imageRef.Width, imageRef.Height); 
     context.ClearRect (rect); //Clear memory area from old garbage 
     context.DrawImage (rect, imageRef); // Draw image into the context to get the raw image data in our format 
     return context; 
    } 

    public static CGBitmapContext NewEmptyBitmapRGBA8ContextFromImage (CGImage image) 
    { 
     CGBitmapContext context = null; 
     CGColorSpace colorSpace; 
     IntPtr bitmapData; 

     int bitsPerComponent = 8; //Forcing only 8 bit formats for now... 

     int width = image.Width; 
     int height = image.Height; 

     int bytesPerRow = image.BytesPerRow; 
     int bufferLength = bytesPerRow * height; 

     colorSpace = CGColorSpace.CreateDeviceRGB(); 

     if (colorSpace == null) { 
      Console.WriteLine ("Error allocating color space RGB"); 
      return null; 
     } 

     // Allocate memory for image data 
     bitmapData = Marshal.AllocHGlobal (bufferLength); 

     //Create bitmap context forcing Premultiplied Alpha as required by Apple iOS 
     if (image.AlphaInfo == CGImageAlphaInfo.PremultipliedFirst || image.AlphaInfo == CGImageAlphaInfo.First) { 

      context = new CGBitmapContext (bitmapData, 
           width, 
           height, 
           bitsPerComponent, 
           bytesPerRow, 
           colorSpace, 
           CGImageAlphaInfo.PremultipliedFirst); // ARGB 
     } else { 

      if (image.AlphaInfo == CGImageAlphaInfo.PremultipliedLast || image.AlphaInfo == CGImageAlphaInfo.Last) { 

       context = new CGBitmapContext (bitmapData, 
           width, 
           height, 
           bitsPerComponent, 
           bytesPerRow, 
           colorSpace, 
           CGImageAlphaInfo.PremultipliedLast); //RGBA 
      } else { 
       Console.WriteLine ("ERROR image format non supported: " + image.AlphaInfo); 
       throw new Exception ("ERROR image format non supported: " + image.AlphaInfo); 
      } 


     } 

     if (context == null) { 

      Console.WriteLine ("Bitmap context from BitmapData not created"); 
     } 
     return context; 
    } 

    public static CGImage ImageConvolution (CGImage image, ConvMatrix fmat) 
    { 


     //Avoid division by 0 
     if (fmat.Factor == 0) 
      return image; 

     //Create a clone of the original image 
     CGImage srcImage = image.Clone(); 

     //init some temporary vars 
     int x, y, filterx, filtery, tempx, tempy; 
     int s = fmat.Size/2; 
     int a, r, g, b, tr, tg, tb, ta; 
     int a_div; 
     float a_mul; 

     //Compute pixel size (bytes per pixel) 
     int pixelSize = image.BitsPerPixel/image.BitsPerComponent; 

     //Create bitmap contexts 
     CGBitmapContext imageData = ConvertToBitmapRGBA8 (image); 
     CGBitmapContext srcImageData = ConvertToBitmapRGBA8 (srcImage); 

     // Scan0 is the memory address where pixel-array begins. 
     IntPtr scan0 = srcImageData.Data; 
     // Stride is the width of each row of pixels. 
     int stride = srcImageData.BytesPerRow; 


     unsafe { 
      byte* tempPixel; 
      for (y = s; y < srcImageData.Height - s; y++) { 
       for (x = s; x < srcImageData.Width - s; x++) { 
        a = r = g = b = 0; 
        a_div = 0; 
        a_mul = 0.0f; 

        //Convolution 
        for (filtery = 0; filtery < fmat.Size; filtery++) { 
         for (filterx = 0; filterx < fmat.Size; filterx++) { 

          // Get nearby pixel's position 
          tempx = x + filterx - s; 
          tempy = y + filtery - s; 

          // Go to that pixel in pixel-array 
          tempPixel = (byte*)scan0 + (tempy * stride) + (tempx * pixelSize); 

          if (srcImageData.AlphaInfo == CGImageAlphaInfo.First) { 
           // The format is ARGB (1 byte each). 
           ta = (int)*tempPixel; 
           tr = (int)*(tempPixel + 1); 
           tg = (int)*(tempPixel + 2); 
           tb = (int)*(tempPixel + 3); 

           a += fmat.Matrix [filtery, filterx] * ta; 
           r += fmat.Matrix [filtery, filterx] * (tr); 
           g += fmat.Matrix [filtery, filterx] * (tg); 
           b += fmat.Matrix [filtery, filterx] * (tb); 
          } 


          if (srcImageData.AlphaInfo == CGImageAlphaInfo.Last) { 
           // The format is RGBA (1 byte each). 
           tr = (int)*tempPixel; 
           tg = (int)*(tempPixel + 1); 
           tb = (int)*(tempPixel + 2); 
           ta = (int)*(tempPixel + 3); 

           a += fmat.Matrix [filtery, filterx] * ta; 
           r += fmat.Matrix [filtery, filterx] * (tr); 
           g += fmat.Matrix [filtery, filterx] * (tg); 
           b += fmat.Matrix [filtery, filterx] * (tb); 
          } 

          if (srcImageData.AlphaInfo == CGImageAlphaInfo.PremultipliedFirst) { 
           // The format is premultiplied ARGB (1 byte each). 
           ta = (int)*tempPixel; 
           tr = (int)*(tempPixel + 1); 
           tg = (int)*(tempPixel + 2); 
           tb = (int)*(tempPixel + 3); 

           // Computing alpha 
           a += fmat.Matrix [filtery, filterx] * ta; 
           a_div = (ta/255); 

           // Computing rgb 
           if (a_div == 0) { 
            r += fmat.Matrix [filtery, filterx] * (tr); 
            g += fmat.Matrix [filtery, filterx] * (tg); 
            b += fmat.Matrix [filtery, filterx] * (tb); 
           } else { 
            r += fmat.Matrix [filtery, filterx] * (tr/a_div); // "Dividing the premultiplied value by the 
            g += fmat.Matrix [filtery, filterx] * (tg/a_div); // alpha value to get the original color 
            b += fmat.Matrix [filtery, filterx] * (tb/a_div); // value before matrix multiplication" 
           } 

          } 


          if (srcImageData.AlphaInfo == CGImageAlphaInfo.PremultipliedLast) { 
           // The format is premultiplied RGBA (1 byte each). Get em 
           tr = (int)*tempPixel; 
           tg = (int)*(tempPixel + 1); 
           tb = (int)*(tempPixel + 2); 
           ta = (int)*(tempPixel + 3); 

           // Computing alpha 
           a += fmat.Matrix [filtery, filterx] * ta; 
           a_div = (ta/255); 

           // Computing rgb 
           if (a_div == 0) { 
            r += fmat.Matrix [filtery, filterx] * (tr); 
            g += fmat.Matrix [filtery, filterx] * (tg); 
            b += fmat.Matrix [filtery, filterx] * (tb); 
           } else { 

            r += fmat.Matrix [filtery, filterx] * (tr/a_div); // "Dividing the premultiplied value by the 
            g += fmat.Matrix [filtery, filterx] * (tg/a_div); // alpha value to get the original color 
            b += fmat.Matrix [filtery, filterx] * (tb/a_div); // value before matrix multiplication" 
           } 


          } 

         } 
        } 

        // Remove values out of [0,255] 
        a = Math.Min (Math.Max ((a/fmat.Factor) + fmat.Offset, 0), 255); 
        r = Math.Min (Math.Max ((r/fmat.Factor) + fmat.Offset, 0), 255); 
        g = Math.Min (Math.Max ((g/fmat.Factor) + fmat.Offset, 0), 255); 
        b = Math.Min (Math.Max ((b/fmat.Factor) + fmat.Offset, 0), 255); 

        // Premultiplying color value by alpha value if needed by image format 
        if (srcImageData.AlphaInfo == CGImageAlphaInfo.PremultipliedFirst || srcImageData.AlphaInfo == CGImageAlphaInfo.PremultipliedLast) { 
         a_mul = (a/255.0f); 
         r = (int)(r * a_mul); 
         g = (int)(g * a_mul);  
         b = (int)(b * a_mul); 
        } 

        // Finally compute new pixel position (in new image) and write the pixels. 
        if (srcImageData.AlphaInfo == CGImageAlphaInfo.PremultipliedFirst || srcImageData.AlphaInfo == CGImageAlphaInfo.First) { 
         // The format is ARGB (1 byte each) 
         byte* newpixel = (byte*)imageData.Data + (y * imageData.BytesPerRow) + (x * pixelSize); 
         *newpixel = (byte)a; 
         *(newpixel + 1) = (byte)r; 
         *(newpixel + 2) = (byte)g; 
         *(newpixel + 3) = (byte)b; 
        } 


        if (srcImageData.AlphaInfo == CGImageAlphaInfo.PremultipliedLast || srcImageData.AlphaInfo == CGImageAlphaInfo.Last) { 
         // The format is RGBA (1 byte each) 
         byte* newpixel = (byte*)imageData.Data + (y * imageData.BytesPerRow) + (x * pixelSize); 
         *newpixel = (byte)r; 
         *(newpixel + 1) = (byte)g; 
         *(newpixel + 2) = (byte)b; 
         *(newpixel + 3) = (byte)a; 
        } 
       } 
      } 
     } 

     return imageData.ToImage(); 
    } 

} 
} 

Answer


Well, there is a lot that could be done to improve the code, such as moving all of those decisions out of the main loop: make them once, up front, and use them to provide the method that the loop calls.
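
As an illustration of that idea, here is a rough, hedged sketch of the hot loop with the layout decisions hoisted out. It assumes the same 4-byte-per-pixel ARGB/RGBA buffers that ConvertToBitmapRGBA8 produces, and it omits the un-premultiply/re-premultiply handling to stay short; the method name ConvolveFast and the aOff/rOff/gOff/bOff offsets are illustrative, not part of the original code.

static unsafe CGImage ConvolveFast (CGBitmapContext src, CGBitmapContext dst, ConvMatrix fmat)
{
    // Decide the channel layout ONCE, outside the per-pixel loops.
    bool alphaFirst = src.AlphaInfo == CGImageAlphaInfo.PremultipliedFirst ||
                      src.AlphaInfo == CGImageAlphaInfo.First;
    int aOff = alphaFirst ? 0 : 3;   // byte offset of alpha within a pixel
    int rOff = alphaFirst ? 1 : 0;   // red
    int gOff = alphaFirst ? 2 : 1;   // green
    int bOff = alphaFirst ? 3 : 2;   // blue

    int size = fmat.Size, s = size / 2;
    int stride = src.BytesPerRow;
    byte* srcBase = (byte*)src.Data;
    byte* dstBase = (byte*)dst.Data;

    for (int y = s; y < src.Height - s; y++) {
        for (int x = s; x < src.Width - s; x++) {
            int a = 0, r = 0, g = 0, b = 0;

            for (int fy = 0; fy < size; fy++) {
                // Pointer to the first sample of this kernel row.
                byte* p = srcBase + (y + fy - s) * stride + (x - s) * 4;
                for (int fx = 0; fx < size; fx++, p += 4) {
                    int k = fmat.Matrix [fy, fx];   // no AlphaInfo branches in here
                    a += k * p [aOff];
                    r += k * p [rOff];
                    g += k * p [gOff];
                    b += k * p [bOff];
                }
            }

            byte* o = dstBase + y * dst.BytesPerRow + x * 4;
            o [aOff] = (byte)Math.Min (Math.Max (a / fmat.Factor + fmat.Offset, 0), 255);
            o [rOff] = (byte)Math.Min (Math.Max (r / fmat.Factor + fmat.Offset, 0), 255);
            o [gOff] = (byte)Math.Min (Math.Max (g / fmat.Factor + fmat.Offset, 0), 255);
            o [bOff] = (byte)Math.Min (Math.Max (b / fmat.Factor + fmat.Offset, 0), 255);
        }
    }
    return dst.ToImage ();
}

With the pixel format reduced to four integer offsets, the inner loop is pure pointer arithmetic, which is what matters for speed.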

But the main thing would be to find out whether you can wrap Core Image; that should be very fast, because it would do the work with shaders on the GPU.
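
For reference, once the iOS 5 CoreImage bindings are available in MonoTouch (per the comments below, they had not shipped when this was asked), a GPU-backed blur might look roughly like this hedged sketch; the method name GaussianSmoothCI is hypothetical, while the filter and key strings ("CIGaussianBlur", "inputImage", "inputRadius", "outputImage") are the stock Core Image ones.

using MonoTouch.CoreGraphics;
using MonoTouch.CoreImage;
using MonoTouch.Foundation;

// Hypothetical Core Image version (iOS 5+ / MonoTouch 5.0+); Core Image runs the filter on the GPU.
public static CGImage GaussianSmoothCI (CGImage image)
{
    using (var input = CIImage.FromCGImage (image))
    using (var blur = CIFilter.FromName ("CIGaussianBlur"))
    using (var context = CIContext.FromOptions ((CIContextOptions)null)) {   // default GPU-backed context
        blur.SetValueForKey (input, new NSString ("inputImage"));
        blur.SetValueForKey (NSNumber.FromFloat (2.0f), new NSString ("inputRadius"));

        var output = (CIImage)blur.ValueForKey (new NSString ("outputImage"));
        // Render the filtered result back into a CGImage.
        return context.CreateCGImage (output, output.Extent);
    }
}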


Thanks for the answer. Unfortunately Core Image is only available on iOS 5.0, which MonoTouch doesn't support yet. :( I'll try moving those ifs out of the loop –


MonoTouch 4.9.x supports iOS 5 beta 7, and we hope to ship the MonoTouch 5.0 release this week, right after Apple's release – jstedfast


Thanks! Great news! –