2012-06-26 71 views
0

這裏是我在 iPhone 上實現樣本緩衝區(sample buffer)委託方法的代碼:

// AVCaptureVideoDataOutput delegate callback: converts the incoming frame
// to a UIImage and displays it in the image view.
// This is invoked on the capture output's dispatch queue, so the UIKit
// update is bounced back to the main queue.
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    // Convert the raw frame while still on the capture queue.
    UIImage *frameImage = [self imageFromSampleBuffer:sampleBuffer];

    // UIKit must only be touched on the main queue.
    dispatch_async(dispatch_get_main_queue(), ^{
        self.imgView.image = frameImage;
    });
}

從樣本緩衝區中的數據創建一個UIImage

// Converts a BGRA video sample buffer into a UIImage.
//
// The original version handed a malloc'd pixel copy to
// CGDataProviderCreateWithData and then free()d it before the returned image
// was ever drawn — a use-after-free that is why the image view stayed blank.
// It also reported the provider's size as CVPixelBufferGetDataSize(), which
// can be larger than the bytesPerRow * height copy (row padding), risking an
// out-of-bounds read, and its early error return left the pixel buffer locked.
//
// This version renders through a CGBitmapContext instead:
// CGBitmapContextCreateImage() copies the pixels, so the resulting image owns
// its own backing store and no manual buffer management is needed.
- (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer
{
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);

    // Lock the buffer so its base address stays valid while we read it.
    CVPixelBufferLockBaseAddress(imageBuffer, 0);

    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);

    // Device-dependent RGB color space; created and released per call — no
    // need for the static cache the original kept (and never released).
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    if (colorSpace == NULL) {
        // Never return with the pixel buffer still locked.
        CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
        return nil;
    }

    // BGRA layout (kCVPixelFormatType_32BGRA): little-endian, alpha skipped.
    CGContextRef context = CGBitmapContextCreate(baseAddress, width, height, 8,
                                                 bytesPerRow, colorSpace,
                                                 kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipFirst);
    CGColorSpaceRelease(colorSpace);

    if (context == NULL) {
        CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
        return nil;
    }

    // CGBitmapContextCreateImage copies the pixel data, so the image stays
    // valid after the buffer is unlocked and the context released.
    CGImageRef cgImage = CGBitmapContextCreateImage(context);
    CGContextRelease(context);
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);

    // Wrap in a UIImage (retains the CGImage), then balance our +1 reference.
    UIImage *image = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage);

    return image;
}

不過我的圖像視圖中並沒有顯示出圖像,誰能幫我找出錯誤?

在此先感謝。

回答

3

這是使我的工作代碼:

// Sample-buffer delegate callback: builds a UIImage for the incoming
// frame and hands it to the image view on the main queue.
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    // Conversion happens here, on the capture output's queue.
    UIImage *frame = [self imageFromSampleBuffer:sampleBuffer];

    // All UIKit access is funneled to the main queue.
    dispatch_async(dispatch_get_main_queue(), ^{
        [[self imgView] setImage:frame];
    });
}
// Create a UIImage from sample buffer data 
// Builds a UIImage from a BGRA sample buffer by copying the pixel data into
// a private buffer and rendering it through a bitmap graphics context.
// The result is rotated right to match the camera's natural orientation.
- (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer
{
    // Core Video pixel buffer backing this sample.
    CVImageBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(pixelBuffer, 0);

    // Frame geometry.
    size_t rowBytes = CVPixelBufferGetBytesPerRow(pixelBuffer);
    size_t pixelWidth = CVPixelBufferGetWidth(pixelBuffer);
    size_t pixelHeight = CVPixelBufferGetHeight(pixelBuffer);

    // Take a private copy of the pixels for the bitmap context to draw from.
    size_t byteCount = rowBytes * pixelHeight;
    u_int8_t *pixelCopy = (u_int8_t *)malloc(byteCount);
    memcpy(pixelCopy, CVPixelBufferGetBaseAddress(pixelBuffer), byteCount);

    // Device RGB with BGRA component order (little-endian, alpha skipped) —
    // the layout delivered by kCVPixelFormatType_32BGRA capture output.
    CGColorSpaceRef rgb = CGColorSpaceCreateDeviceRGB();
    CGContextRef bitmapContext = CGBitmapContextCreate(pixelCopy,
                                                       pixelWidth,
                                                       pixelHeight,
                                                       8,
                                                       rowBytes,
                                                       rgb,
                                                       kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipFirst);

    // Snapshot the context into a CGImage (this copies the pixels again,
    // so the malloc'd buffer can be freed once we are done).
    CGImageRef frame = CGBitmapContextCreateImage(bitmapContext);

    // Release everything the image no longer depends on.
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
    CGContextRelease(bitmapContext);
    CGColorSpaceRelease(rgb);

    // Rotate right so the portrait preview appears upright.
    UIImage *result = [UIImage imageWithCGImage:frame
                                          scale:1.0
                                    orientation:UIImageOrientationRight];

    free(pixelCopy);
    CGImageRelease(frame);

    return result;
}
-2

嘗試使用

UIImage *image = [[UIImage alloc] initWithCGImage:]; 

,而不是

UIImage *image = [UIImage imageWithCGImage:cgImage]; 
+0

我有點困惑這個建議 - 你會做什麼改變? – Monolo

0

試試這個:

// Converts a BGRA sample buffer into a UIImage via a bitmap context.
//
// The original placed CGImageRelease() and CVPixelBufferUnlockBaseAddress()
// AFTER the return statement, so they never executed: every frame leaked a
// CGImage and the pixel buffer was left permanently locked. The cleanup now
// runs before returning.
- (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer
{
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    // Lock so the base address is valid while the context reads from it.
    CVPixelBufferLockBaseAddress(imageBuffer, 0);

    uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8,
                                                    bytesPerRow, colorSpace,
                                                    kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);

    // CGBitmapContextCreateImage copies the pixels, so unlocking the buffer
    // afterwards is safe.
    CGImageRef newImage = CGBitmapContextCreateImage(newContext);

    CGContextRelease(newContext);
    CGColorSpaceRelease(colorSpace);

    // Wrap first (UIImage retains the CGImage), then balance our +1
    // reference and unlock the pixel buffer — this code was previously
    // unreachable.
    UIImage *image = [UIImage imageWithCGImage:newImage];
    CGImageRelease(newImage);
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);

    return image;
}
0

使用GLKView加快渲染;然後,將此代碼插入您的代理方法中:

// GLKView-based preview: renders each captured frame through a Core Image
// filter chain directly into an EAGL-backed view (GPU path), instead of
// round-tripping through a CPU-side UIImage.
//
// NOTE(review): relies on ivars not visible in this excerpt
// (_currentVideoDimensions, _videoPreviewViewBounds, _videoPreviewView,
// _eaglContext, _ciContext) — presumably configured elsewhere in the class;
// confirm they are set up before capture starts.
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection 
{ 
    CMFormatDescriptionRef formatDesc = CMSampleBufferGetFormatDescription(sampleBuffer); 

    // update the video dimensions information 
    _currentVideoDimensions = CMVideoFormatDescriptionGetDimensions(formatDesc); 

    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer); 

// If you want to apply filters to your UIImage before you display it... 

    // Wrap the pixel buffer as a CIImage (no pixel copy), then chain three
    // filters: exposure adjust -> linear-to-sRGB tone curve -> gamma adjust.
    // NOTE(review): self.tag drives both inputEV and inputPower — assumes
    // the view's tag stores the user-selected adjustment value; confirm.
    CIImage *tempImage = [CIImage imageWithCVPixelBuffer:(CVPixelBufferRef)imageBuffer options:nil]; 
    CIFilter *filter = [CIFilter filterWithName: @"CIExposureAdjust" keysAndValues:@"inputImage", tempImage, nil]; 
    [filter setValue:@(self.tag) forKey:@"inputEV"]; 
    CIFilter *filter2 = [CIFilter filterWithName: @"CILinearToSRGBToneCurve" keysAndValues:@"inputImage", [filter valueForKey: @"outputImage"], nil]; 
    CIFilter *filter3 = [CIFilter filterWithName: @"CIGammaAdjust" keysAndValues:@"inputImage", [filter2 valueForKey: @"outputImage"], nil]; 
    [filter3 setValue:@(self.tag) forKey:@"inputPower"]; 
    CIImage *sourceImage = [filter3 valueForKey: @"outputImage"]; 

    CGRect sourceExtent = sourceImage.extent; 

    // Aspect ratios of the filtered frame and of the on-screen preview.
    CGFloat sourceAspect = sourceExtent.size.width/sourceExtent.size.height; 
    CGFloat previewAspect = _videoPreviewViewBounds.size.width/_videoPreviewViewBounds.size.height; 

    // we want to maintain the aspect radio of the screen size, so we clip the video image 
    CGRect drawRect = sourceExtent; 
    if (sourceAspect > previewAspect) 
    { 
     // use full height of the video image, and center crop the width 
     drawRect.origin.x += (drawRect.size.width - drawRect.size.height * previewAspect)/2.0; 
     drawRect.size.width = drawRect.size.height * previewAspect; 
    } 
    else 
    { 
     // use full width of the video image, and center crop the height 
     drawRect.origin.y += (drawRect.size.height - drawRect.size.width/previewAspect)/2.0; 
     drawRect.size.height = drawRect.size.width/previewAspect; 
    } 

    // Make the GLKView's framebuffer the current render target.
    [_videoPreviewView bindDrawable]; 

    if (_eaglContext != [EAGLContext currentContext]) 
     [EAGLContext setCurrentContext:_eaglContext]; 

    // clear eagl view to grey 
    glClearColor(0.5, 0.5, 0.5, 1.0); 
    glClear(GL_COLOR_BUFFER_BIT); 

    // set the blend mode to "source over" so that CI will use that 
    glEnable(GL_BLEND); 
    glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA); 

    // Draw the center-cropped region of the filtered frame into the view.
    [_ciContext drawImage:sourceImage inRect:_videoPreviewViewBounds fromRect:drawRect]; 

    // Present the rendered frame on screen.
    [_videoPreviewView display]; 
} 

您隨時可以輕鬆地將 CIImage 轉換爲 UIImage;但爲什麼要這樣做?通過把圖像渲染到 EAGLContext,你可以獲得 GPU 加速的圖像顯示。我想不出你爲什麼還需要 UIImage,除非那是你唯一熟悉的方式。

注意,這段代碼其實可以精簡到只有幾行;但在處理來自視頻文件的樣本緩衝區時,很少有人能同時正確處理好幀的方向和顏色。如果你正是這種情況,上面的代碼涵蓋了你所需的一切。(我想你會發現,想自己從樣本緩衝區生成方向正確的 UIImage 是很困難的……)