I did something similar - I grabbed the pixels in the delegate method, made a CGImageRef of them, then dispatched that off to a normal-priority queue and did the modifications there. Since AVFoundation must be using a CADisplayLink for its callback, the callback runs at the highest priority. In my particular case I was not grabbing all the pixels, so it worked on an iPhone 4 at 30fps. Depending on the devices you want to run on, you have trade-offs between pixel count, fps, and so on.
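In outline, the pattern is: copy what you need out of the pixel buffer inside the delegate callback, unlock it right away, and push the expensive work onto a default-priority queue. A minimal sketch (not the exact code from my app; it assumes 32-bit BGRA frames and copies the whole frame for simplicity):

- (void)captureOutput:(AVCaptureOutput *)output didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(imageBuffer, 0);

    size_t width       = CVPixelBufferGetWidth(imageBuffer);
    size_t height      = CVPixelBufferGetHeight(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    uint8_t *base      = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);

    // Copy the pixels out so the sample buffer can be recycled right away
    NSMutableData *pixels = [NSMutableData dataWithBytes:base length:height * bytesPerRow];

    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);

    dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
        // The heavy work runs here, off the capture callback's high-priority thread
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        CGContextRef context = CGBitmapContextCreate([pixels mutableBytes], width, height, 8, bytesPerRow,
                                                     colorSpace, kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Little);
        CGImageRef image = CGBitmapContextCreateImage(context);
        // ... analyze or modify the image here ...
        CGImageRelease(image);
        CGContextRelease(context);
        CGColorSpaceRelease(colorSpace);
    });
}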
Another idea is to grab a power-of-2 subset of the pixels - for example, every 4th pixel in every 4th row. Again, I did something similar in an app running at 20-30fps. You can then operate further on that smaller image in the dispatched block.
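A sketch of that subsampling, again assuming 32-bit BGRA frames (SubsampledBGRA is just an illustrative helper name; call it with step = 4 to keep every 4th pixel of every 4th row):

// Hypothetical helper: copy every step-th BGRA pixel of every step-th row into a smaller buffer
static NSData *SubsampledBGRA(CVPixelBufferRef buffer, size_t step)
{
    CVPixelBufferLockBaseAddress(buffer, kCVPixelBufferLock_ReadOnly);
    size_t width       = CVPixelBufferGetWidth(buffer);
    size_t height      = CVPixelBufferGetHeight(buffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(buffer);
    uint8_t *base      = (uint8_t *)CVPixelBufferGetBaseAddress(buffer);

    size_t outWidth  = width  / step;
    size_t outHeight = height / step;
    NSMutableData *out = [NSMutableData dataWithLength:outWidth * outHeight * 4];
    uint32_t *dst = (uint32_t *)out.mutableBytes;

    for(size_t y = 0; y < outHeight; ++y) {
        uint32_t *srcRow = (uint32_t *)(base + (y * step) * bytesPerRow);
        for(size_t x = 0; x < outWidth; ++x) {
            *dst++ = srcRow[x * step];   // copy one 4-byte BGRA pixel
        }
    }

    CVPixelBufferUnlockBaseAddress(buffer, kCVPixelBufferLock_ReadOnly);
    return out;
}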
If this seems daunting, offer a bounty for working code.
CODE:
// Image is oriented with bottle neck to the left and the bottle bottom on the right
- (void)captureOutput:(AVCaptureVideoDataOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
#if 1
    AVCaptureDevice *camera = [(AVCaptureDeviceInput *)[captureSession.inputs lastObject] device];
    if(camera.adjustingWhiteBalance || camera.adjustingExposure) NSLog(@"GOTCHA: %d %d", camera.adjustingWhiteBalance, camera.adjustingExposure);
    printf("foo\n");
#endif
    if(saveState != saveOne && saveState != saveAll) return;

    @autoreleasepool {
        CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        //NSLog(@"PE: value=%lld timeScale=%d flags=%x", prStamp.value, prStamp.timescale, prStamp.flags);

        /*Lock the image buffer*/
        CVPixelBufferLockBaseAddress(imageBuffer,0);
        NSRange captureRange;
        if(saveState == saveOne) {
#if 0   // B G R A MODE ! (debug: dump the pixel format and first pixel, then quit)
            NSLog(@"PIXEL_TYPE: 0x%lx", (unsigned long)CVPixelBufferGetPixelFormatType(imageBuffer));
            uint8_t *newPtr = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
            NSLog(@"ONE VAL %x %x %x %x", newPtr[0], newPtr[1], newPtr[2], newPtr[3]);
            exit(0);
#endif
            [edgeFinder setupImageBuffer:imageBuffer];
            BOOL success = [edgeFinder delineate:1];
            if(!success) {
                dispatch_async(dispatch_get_main_queue(), ^{ edgeFinder = nil; [delegate error]; });
                saveState = saveNone;
                // Edge detection failed: unlock the buffer and skip this frame
                CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
                return;
            }
            bottleRange = edgeFinder.sides;
            xRange.location = edgeFinder.shoulder;
            xRange.length = edgeFinder.bottom - xRange.location;
            NSLog(@"bottleRange 1: %@ neck=%d bottom=%d", NSStringFromRange(bottleRange), edgeFinder.shoulder, edgeFinder.bottom);
            //searchRows = [edgeFinder expandRange:bottleRange];
            rowsPerSwath = lrintf((bottleRange.length*NUM_DEGREES_TO_GRAB)*(float)M_PI/360.0f);
            NSLog(@"rowsPerSwath = %d", rowsPerSwath);
            saveState = saveIdling;

            captureRange = NSMakeRange(0, [WLIPBase numRows]);
            dispatch_async(dispatch_get_main_queue(), ^
                {
                    [delegate focusDone];
                    edgeFinder = nil;
                    captureOutput.alwaysDiscardsLateVideoFrames = YES;
                });
        } else {
            NSInteger rows = rowsPerSwath;
            NSInteger newOffset = bottleRange.length - rows;
            if(newOffset & 1) {
                --newOffset;
                ++rows;
            }
            captureRange = NSMakeRange(bottleRange.location + newOffset/2, rows);
        }
//NSLog(@"captureRange=%u %u", captureRange.location, captureRange.length);
/*Get information about the image*/
uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
// Note Apple sample code cheats big time - the phone is big endian so this reverses the "apparent" order of bytes
CGContextRef newContext = CGBitmapContextCreate(NULL, width, captureRange.length, 8, bytesPerRow, colorSpace, kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Little); // Video in ARGB format
assert(newContext);
uint8_t *newPtr = (uint8_t *)CGBitmapContextGetData(newContext);
size_t offset = captureRange.location * bytesPerRow;
memcpy(newPtr, baseAddress + offset, captureRange.length * bytesPerRow);
        CVPixelBufferUnlockBaseAddress(imageBuffer, 0);

        OSAtomicIncrement32(&totalImages);
        int32_t curDepth = OSAtomicIncrement32(&queueDepth);
        if(curDepth > maxDepth) maxDepth = curDepth;

#define kImageContext @"kImageContext"
#define kState        @"kState"
#define kPresTime     @"kPresTime"

        CMTime prStamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer); // when it was taken?
        //CMTime deStamp = CMSampleBufferGetDecodeTimeStamp(sampleBuffer); // now?

        NSDictionary *dict = [NSDictionary dictionaryWithObjectsAndKeys:
            [NSValue valueWithBytes:&saveState objCType:@encode(saveImages)], kState,
            [NSValue valueWithNonretainedObject:(__bridge id)newContext], kImageContext,
            [NSValue valueWithBytes:&prStamp objCType:@encode(CMTime)], kPresTime,
            nil ];
        dispatch_async(imageQueue, ^
            {
                // could be on any thread now
                OSAtomicDecrement32(&queueDepth);

                if(!isCancelled) {
                    saveImages state;     [(NSValue *)[dict objectForKey:kState] getValue:&state];
                    CGContextRef context; [(NSValue *)[dict objectForKey:kImageContext] getValue:&context];
                    CMTime stamp;         [(NSValue *)[dict objectForKey:kPresTime] getValue:&stamp];

                    CGImageRef newImageRef = CGBitmapContextCreateImage(context);
                    CGContextRelease(context);
                    UIImageOrientation orient = state == saveOne ? UIImageOrientationLeft : UIImageOrientationUp;
                    UIImage *image = [UIImage imageWithCGImage:newImageRef scale:1.0 orientation:orient]; // imageWithCGImage: UIImageOrientationUp UIImageOrientationLeft
                    CGImageRelease(newImageRef);
                    NSData *data = UIImagePNGRepresentation(image);
                    // NSLog(@"STATE:[%d]: value=%lld timeScale=%d flags=%x", state, stamp.value, stamp.timescale, stamp.flags);
                    {
                        NSString *name = [NSString stringWithFormat:@"%d.png", num];
                        NSString *path = [[wlAppDelegate snippetsDirectory] stringByAppendingPathComponent:name];
                        BOOL ret = [data writeToFile:path atomically:NO];
                        //NSLog(@"WROTE %d err=%d w/time %f path:%@", num, ret, (double)stamp.value/(double)stamp.timescale, path);
                        if(!ret) {
                            ++errors;
                        } else {
                            dispatch_async(dispatch_get_main_queue(), ^
                                {
                                    if(num) [delegate progress:(CGFloat)num/(CGFloat)(MORE_THAN_ONE_REV * SNAPS_PER_SEC) file:path];
                                });
                        }
                        ++num;
                    }
                } else NSLog(@"CANCELLED");
            });
    }
}
If you're asking how to get smaller preview frames while still being able to capture full-resolution still photos, you can add both an AVCaptureStillImageOutput and an AVCaptureVideoDataOutput to your capture session (if targeting iOS 4.3+). You'll get the smaller preview frames for normal video operation, and then when you trigger the still-photo capture method it switches over to the full photo resolution. This is what I do internally in my GPUImage framework: https://github.com/BradLarson/GPUImage - you can look at the SimplePhotoFilter example application to see this in action.
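A minimal setup sketch of that idea (assuming iOS 4.3+, ARC, and BGRA video frames; the variable names are placeholders, and the delegate class must adopt AVCaptureVideoDataOutputSampleBufferDelegate):

AVCaptureSession *session = [[AVCaptureSession alloc] init];
session.sessionPreset = AVCaptureSessionPresetPhoto;

NSError *error = nil;
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:device error:&error];
if(input) [session addInput:input];

// Small live frames for preview/processing
AVCaptureVideoDataOutput *videoOutput = [[AVCaptureVideoDataOutput alloc] init];
videoOutput.videoSettings = @{ (__bridge id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA) };
dispatch_queue_t captureQueue = dispatch_queue_create("com.example.videoQueue", DISPATCH_QUEUE_SERIAL);
[videoOutput setSampleBufferDelegate:self queue:captureQueue];
if([session canAddOutput:videoOutput]) [session addOutput:videoOutput];

// Full-resolution stills on demand
AVCaptureStillImageOutput *stillOutput = [[AVCaptureStillImageOutput alloc] init];
if([session canAddOutput:stillOutput]) [session addOutput:stillOutput];

[session startRunning];

// Later, to grab a full-resolution photo:
AVCaptureConnection *connection = [stillOutput connectionWithMediaType:AVMediaTypeVideo];
[stillOutput captureStillImageAsynchronouslyFromConnection:connection
                                         completionHandler:^(CMSampleBufferRef imageSampleBuffer, NSError *err) {
    NSData *jpeg = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageSampleBuffer];
    // ... save or display the photo ...
}];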