iOS：獲取實時（或接近實時）的視頻數據

我正在嘗試編寫一個程序，作爲其功能的一部分，它需要不斷捕獲視頻，並實時（或儘可能接近實時地）計算給定視頻幀數據的平均亮度。這是我第一次接觸視頻 / iOS 相機相關的開發，所以除了我自己寫的部分之外，很多內容都是我從網上各處蒐集、拼湊而來的。目前這段代碼位於我的 ViewController.m 文件中，可以編譯並在我的設備上運行，但它似乎沒有做任何事情。
// Configures the capture pipeline: front camera input -> YCbCr video data
// output -> sample-buffer delegate on a private queue, then starts the session.
- (void)viewDidLoad{
    [super viewDidLoad];
    _val = 0;

    // Set up the video capture session.
    NSLog(@"Setting up the capture session...\n");
    captureSession = [[AVCaptureSession alloc] init];

    // Add input.
    NSLog(@"Adding video input...\n");
    AVCaptureDevice *captureDevice = [self frontFacingCameraIfAvailable];
    if (captureDevice) {
        NSError *error = nil;
        videoInputDevice = [AVCaptureDeviceInput deviceInputWithDevice:captureDevice error:&error];
        // Check the returned object, not the error pointer: Cocoa only
        // guarantees the error is meaningful when the call returns nil.
        if (videoInputDevice) {
            if ([captureSession canAddInput:videoInputDevice]) {
                [captureSession addInput:videoInputDevice];
            } else {
                NSLog(@"Couldn't add video input.\n");
            }
        } else {
            NSLog(@"Couldn't create video input.\n");
        }
    } else {
        NSLog(@"Couldn't create capture device.\n");
    }

    // Add output, requesting bi-planar full-range YCbCr frames so the
    // delegate can read the luma plane (plane 0) directly.
    NSLog(@"Adding video data output...\n");
    vidOutput = [[AVCaptureVideoDataOutput alloc] init];
    vidOutput.alwaysDiscardsLateVideoFrames = YES;
    if ([captureSession canAddOutput:vidOutput]) {
        [captureSession addOutput:vidOutput];
    } else {
        NSLog(@"Couldn't add video output.\n");
    }
    NSString *key = (NSString *)kCVPixelBufferPixelFormatTypeKey;
    NSNumber *value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_420YpCbCr8BiPlanarFullRange];
    NSDictionary *videoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
    [vidOutput setVideoSettings:videoSettings];

    // Deliver frames on a private serial queue so per-frame processing never
    // blocks the main thread.
    dispatch_queue_t queue = dispatch_queue_create("MyQueue", NULL);
    [vidOutput setSampleBufferDelegate:self queue:queue];

    // BUG FIX: the session was configured but never started, so
    // captureOutput:didOutputSampleBuffer:fromConnection: was never invoked.
    // Starting the session begins the continuous flow of frames to the
    // delegate — this is the missing piece the original code needed.
    [captureSession startRunning];
}
// Legacy lifecycle override (deprecated since iOS 6). Calls super only; no
// retained subviews are torn down here.
- (void)viewDidUnload{
[super viewDidUnload];
// Release any retained subviews of the main view.
}
// Returns the front-facing camera when the hardware has one, otherwise the
// system's default video capture device (which may be nil on devices with no
// camera at all).
-(AVCaptureDevice *)frontFacingCameraIfAvailable{
    for (AVCaptureDevice *candidate in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
        if (candidate.position == AVCaptureDevicePositionFront) {
            return candidate;
        }
    }
    // No front camera found — fall back to whatever the system considers the
    // default video device (usually the back camera).
    return [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
}
// Delegate callback for every captured frame: converts the luma plane to a
// grayscale UIImage, then computes and logs the frame's average luminance.
// Runs on the private capture queue, not the main queue.
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection: (AVCaptureConnection *)connection{
    // Create autorelease pool because we are not on the main queue.
    @autoreleasepool {
        CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        // Lock the pixel buffer while reading its planes.
        CVPixelBufferLockBaseAddress(imageBuffer, 0);
        uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
        size_t width = CVPixelBufferGetWidth(imageBuffer);
        size_t height = CVPixelBufferGetHeight(imageBuffer);
        size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
        // For a bi-planar buffer the base address points at the plane-info
        // struct describing the Y and CbCr planes.
        CVPlanarPixelBufferInfo_YCbCrBiPlanar *bufferInfo = (CVPlanarPixelBufferInfo_YCbCrBiPlanar *)baseAddress;
        // Plane 0 holds the luma (Y) samples.
        baseAddress = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);
        // Convert the luma plane to a grayscale UIImage.
        UIImage *image = [self makeImage:baseAddress bufferInfo:bufferInfo width:width height:height bytesPerRow:bytesPerRow];
        // BUG FIX: the buffer was locked but never unlocked; unlock as soon as
        // we are done reading from it.
        CVPixelBufferUnlockBaseAddress(imageBuffer, 0);

        // Update the display with the captured image for DEBUG purposes
        //dispatch_async(dispatch_get_main_queue(), ^{
        //[self.vImage setImage:image];
        //});

        CGImageRef cgImage = [image CGImage];
        CGDataProviderRef provider = CGImageGetDataProvider(cgImage);
        CFDataRef bitmapData = CGDataProviderCopyData(provider);
        const UInt8 *data = CFDataGetBytePtr(bitmapData);
        // BUG FIX: index the bitmap with the CGImage's own dimensions and row
        // stride — the pixel buffer's bytesPerRow generally differs from the
        // CGImage's, and the old width-1/height-1 bounds skipped the last
        // row and column of every frame.
        size_t imageBytesPerRow = CGImageGetBytesPerRow(cgImage);
        size_t cols = CGImageGetWidth(cgImage);
        size_t rows = CGImageGetHeight(cgImage);
        float avgLuminance = 0.0;
        for (size_t i = 0; i < cols; i++) {
            for (size_t j = 0; j < rows; j++) {
                const UInt8 *pixel = data + j * imageBytesPerRow + i * 4;
                // BUG FIX: byte 0 of each pixel is the alpha channel (always
                // 0xff, as written by makeImage); the gray value lives in
                // bytes 1-3. The old code folded alpha into the average.
                // Rec. 601 luma weights applied to the three color channels.
                avgLuminance += pixel[1] * 0.299 + pixel[2] * 0.587 + pixel[3] * 0.114;
            }
        }
        if (cols > 0 && rows > 0) {
            avgLuminance /= (cols * rows);
        }
        NSLog(@"Average Luminance: %f\n", avgLuminance);
        // BUG FIX: CGDataProviderCopyData follows the Copy rule — we own
        // bitmapData and must release it, or every frame leaks a full bitmap.
        CFRelease(bitmapData);
    }
}
// Builds a grayscale UIImage from the luma (Y) plane of a bi-planar YCbCr
// pixel buffer. Each luma byte is expanded to a 4-byte pixel: alpha first,
// then the same gray value in all three color channels.
//   inBaseAddress - base address of the luma plane (plane 0)
//   inBufferInfo  - plane-info struct from the pixel buffer's base address
//   Width/Height  - visible pixel dimensions of the frame
//   BytesPerRow   - pixel buffer row stride (unused; kept for interface compat)
-(UIImage *)makeImage:(uint8_t *)inBaseAddress bufferInfo:(CVPlanarPixelBufferInfo_YCbCrBiPlanar *)inBufferInfo width: (size_t)Width height:(size_t)Height bytesPerRow:(size_t)BytesPerRow{
    // Row stride of the luma plane; stored big-endian in the buffer info and
    // often wider than Width because of row padding.
    NSUInteger yPitch = EndianU32_BtoN(inBufferInfo->componentInfoY.rowBytes);
    const int bytesPerPixel = 4;
    uint8_t *rgbBuffer = (uint8_t *)malloc(Width * Height * bytesPerPixel);
    uint8_t *yBuffer = (uint8_t *)inBaseAddress;
    // BUG FIX: walk the luma plane row by row using yPitch. The old code read
    // Width*Height contiguous bytes, so whenever yPitch > Width every row
    // after the first was skewed by the padding bytes.
    for (size_t row = 0; row < Height; row++) {
        const uint8_t *srcRow = yBuffer + row * yPitch;
        uint8_t *dstRow = rgbBuffer + row * Width * bytesPerPixel;
        for (size_t col = 0; col < Width; col++) {
            uint8_t val = srcRow[col];
            dstRow[col * bytesPerPixel] = 0xff; // alpha
            dstRow[col * bytesPerPixel + 1] = val;
            dstRow[col * bytesPerPixel + 2] = val;
            dstRow[col * bytesPerPixel + 3] = val;
        }
    }
    // Create a device-dependent RGB color space.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    // BUG FIX: the context's width and stride must describe the buffer we
    // actually filled (Width pixels per row). The old code passed yPitch and
    // yPitch*4, making CoreGraphics read past the end of rgbBuffer whenever
    // yPitch > Width.
    CGContextRef context = CGBitmapContextCreate(rgbBuffer, Width, Height, 8, Width * bytesPerPixel, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast);
    CGImageRef quartzImage = CGBitmapContextCreateImage(context);
    CGContextRelease(context);
    // BUG FIX: colorSpace comes from a Create function and was never
    // released, leaking once per frame.
    CGColorSpaceRelease(colorSpace);
    UIImage *image = [UIImage imageWithCGImage:quartzImage];
    CGImageRelease(quartzImage);
    free(rgbBuffer);
    return image;
}
我已在 .h 文件中將該類設置爲 AVCaptureVideoDataOutputSampleBufferDelegate，但我感覺自己並沒有完全理解：要在代碼中持續從攝影機獲取數據，我還需要做些什麼？因爲 captureOutput 方法在任何地方都沒有被調用。我應該如何、在哪裏觸發它，才能獲得持續不斷的數據流？
那麼我會做的是：創建一個 AVCaptureVideoPreviewLayer，對它截取快照（參考 http://stackoverflow.com/questions/5002789/get-uiimage-from-views-layers ），然後從快照中獲取數據。不過，這種做法可能不如你期望的那樣乾淨。 –