2013-04-15 105 views
1

我想在拍攝的圖像上做實時疊加(類似一些相機濾鏡應用的做法)。我參考了這裡的代碼 http://www.musicalgeometry.com/?p=1681 來學習如何使用覆蓋層。

我知道如何覆蓋在預覽圖像圖層和捕捉圖像,我看着蘋果示例代碼,如果在相機膠捲中檢測到臉部,它將保存一個紅色方框。

編輯:

我想以後置攝像頭的 1920×1080(或 1280×960)分辨率保存圖像。下面的代碼可以把實時疊加層和圖像一起保存下來,但兩者的對齊有偏差,我不清楚原因,有人可以幫忙嗎?

歡呼

enter image description here 這是捕捉到的結果。

// Designated initializer.
// Creates the AVCaptureSession and configures it for the highest-quality
// preset the current hardware supports. Returns nil if [super init] fails.
- (instancetype)init {
    if ((self = [super init])) {
        [self setCaptureSession:[[AVCaptureSession alloc] init]];
        // PresetHigh picks the best still/video quality available on the device.
        [self.captureSession setSessionPreset:AVCaptureSessionPresetHigh];
    }
    return self;
}


// Captures a still image and composites `overlay` (a UIImage) on top of it.
//
// `overlayRect` is expressed in screen points (the coordinates the overlay
// occupied in the on-screen preview); it is scaled up to the captured image's
// pixel dimensions so the saved composite matches what the user saw.
//
// The result is stored via -setStillImage:, and kImageCapturedSuccessfully
// is posted whether the capture succeeded or failed.
//
// FIX: the scale factors were previously hard-coded against a 320x568 screen,
// which misaligned the overlay on any other device size. They are now derived
// from the actual screen bounds.
- (void)takePictureWithOverlay:(UIImage *)overlay andRect:(CGRect)overlayRect
{
    // Configure orientation and zoom on the still-image connection so they
    // are baked into the captured frame.
    AVCaptureConnection *stillImageConnection =
        [self.stillImageOutput connectionWithMediaType:AVMediaTypeVideo];
    [stillImageConnection setVideoOrientation:AVCaptureVideoOrientationPortrait];
    [stillImageConnection setVideoScaleAndCropFactor:self.effectiveScale];

    [self.stillImageOutput captureStillImageAsynchronouslyFromConnection:stillImageConnection
                                                       completionHandler:^(CMSampleBufferRef imageDataSampleBuffer, NSError *error) {
        if (error) {
            [self displayErrorOnMainQueue:error withMessage:@"Take picture failed"];
        } else {
            // Trivial simple JPEG case.
            NSData *jpegData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageDataSampleBuffer];
            UIImage *image = [[UIImage alloc] initWithData:jpegData];

            CGSize imageSize = [image size];
            CGSize overlaySize = [overlay size];

            UIGraphicsBeginImageContext(imageSize);
            [image drawInRect:CGRectMake(0, 0, imageSize.width, imageSize.height)];

            // Map the overlay from screen points to captured-image pixels.
            // Derive the ratio from the real screen bounds instead of the
            // hard-coded 320x568 that caused the misalignment.
            CGSize screenSize = [[UIScreen mainScreen] bounds].size;
            CGFloat xScaleFactor = imageSize.width / screenSize.width;
            CGFloat yScaleFactor = imageSize.height / screenSize.height;

            [overlay drawInRect:CGRectMake(overlayRect.origin.x * xScaleFactor,
                                           overlayRect.origin.y * yScaleFactor,
                                           overlaySize.width * xScaleFactor,
                                           overlaySize.height * yScaleFactor)];

            UIImage *combinedImage = UIGraphicsGetImageFromCurrentImageContext();
            [self setStillImage:combinedImage];
            UIGraphicsEndImageContext();
        }
        [[NSNotificationCenter defaultCenter] postNotificationName:kImageCapturedSuccessfully object:nil];
    }];
}
+0

通過實時你的意思是編程需要,並保存圖像(無需用戶動作)? –

+0

嗨斯里卡, 我的意思是,我想保存它,一旦用戶點擊快門按鈕並保存在一起的覆蓋 – Desmond

回答

1

找到我的答案從這裏經過預覽層

enter image description here 這是最終結果。參考:http://developer.apple.com/library/ios/#qa/qa1714/_index.html

// Render the UIView into the CGContextRef using the 
// CALayer/-renderInContext: method 
// Renders `view`'s layer hierarchy into `context`, first applying the view's
// own geometry (center, transform, anchor point) so the rendered output lands
// at the same position it occupies on screen. Based on Apple Technical Q&A
// QA1714: -renderInContext: draws in the layer's own coordinate space, so the
// layer's placement must be replayed onto the context's CTM beforehand.
- (void)renderView:(UIView *)view inContext:(CGContextRef)context
{
    CALayer *layer = [view layer];
    CGPoint anchor = [layer anchorPoint];
    CGSize boundsSize = [view bounds].size;

    CGContextSaveGState(context);

    // Move the origin to the view's anchor point (in superview coordinates),
    // apply the view's transform about that point, then shift back by the
    // portion of the bounds left of / above the anchor.
    CGContextTranslateCTM(context, [view center].x, [view center].y);
    CGContextConcatCTM(context, [view transform]);
    CGContextTranslateCTM(context,
                          -boundsSize.width * anchor.x,
                          -boundsSize.height * anchor.y);

    [layer renderInContext:context];

    CGContextRestoreGState(context);
}

// Captures a still image and composites the live on-screen overlay view on
// top of it by rendering the overlay's layer hierarchy into the same context
// (the Apple QA1714 approach), which keeps the overlay aligned with what the
// user saw in the preview.
//
// `overlayRect` is currently unused by this variant — the overlay view's own
// geometry (applied in -renderView:inContext:) determines its placement.
// The composite is stored via -setStillImage:, and kImageCapturedSuccessfully
// is posted whether the capture succeeded or failed.
- (void)takePictureWithOverlay:(UIView *)overlay andRect:(CGRect)overlayRect
{
    // Configure orientation and zoom on the still-image connection so they
    // are baked into the captured frame.
    self.videoConnection = [self.stillImageOutput connectionWithMediaType:AVMediaTypeVideo];
    [self.videoConnection setVideoOrientation:AVCaptureVideoOrientationPortrait];
    [self.videoConnection setVideoScaleAndCropFactor:self.effectiveScale];

    [self.stillImageOutput captureStillImageAsynchronouslyFromConnection:self.videoConnection
                                                       completionHandler:^(CMSampleBufferRef imageDataSampleBuffer, NSError *error) {
        if (error) {
            [self displayErrorOnMainQueue:error withMessage:@"Take picture failed"];
        } else {
            // Trivial simple JPEG case.
            NSData *jpegData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageDataSampleBuffer];
            UIImage *image = [[UIImage alloc] initWithData:jpegData];

            // Compose at screen-point size; passing scale 0 uses the device's
            // main-screen scale so the bitmap is still full Retina resolution.
            CGSize imageSize = [[UIScreen mainScreen] bounds].size;
            UIGraphicsBeginImageContextWithOptions(imageSize, NO, 0);
            CGContextRef context = UIGraphicsGetCurrentContext();

            // Draw the camera frame into the same rectangle the preview
            // occupied on screen. (UIGraphicsBeginImageContextWithOptions
            // already made `context` current, so no push/pop is needed.)
            [image drawInRect:CGRectMake(0, 0, imageSize.width, imageSize.height)];

            // Render the camera overlay view on top of the camera frame.
            [self renderView:overlay inContext:context];

            // Retrieve the composite containing both the camera content and
            // the overlay view.
            UIImage *screenshot = UIGraphicsGetImageFromCurrentImageContext();
            [self setStillImage:screenshot];
            UIGraphicsEndImageContext();
        }
        [[NSNotificationCenter defaultCenter] postNotificationName:kImageCapturedSuccessfully object:nil];
    }];
}