2017-07-19 81 views
1

我試圖使用 AVVideoComposition 在視頻頂部添加一些文本並保存視頻。這是我使用的代碼。(問題標題:Swift 3:如何使用 AVVideoComposition 導出帶有文本的視頻)

創建AVMutableComposition and AVVideoComposition

// Composition that receives the source video track; exported by saveAsset().
var mutableComp =   AVMutableComposition() 
// Video composition that carries the Core Animation overlay (animationTool).
var mutableVidComp =  AVMutableVideoComposition() 
// Natural size of the source video track; set in configureAsset(), read when
// sizing the overlay layers. Force-unwrapped later, so configureAsset() must
// run first.
var compositionSize :  CGSize? 

/// Loads the bundled "Car.mp4", copies its video track into `mutableComp`,
/// records its natural size in `compositionSize`, and builds `mutableVidComp`
/// from the asset's properties.
func configureAsset(){ 

    // Precise timing makes insertTimeRange land exactly on the intended frames.
    let assetOptions = [AVURLAssetPreferPreciseDurationAndTimingKey : "true"]
    let sourceAsset = AVURLAsset(url: Bundle.main.url(forResource: "Car", withExtension: "mp4")! , options : assetOptions)
    let sourceVideoTrack = sourceAsset.tracks(withMediaType: AVMediaTypeVideo).first! as AVAssetTrack

    compositionSize = sourceVideoTrack.naturalSize

    let compositionVideoTrack = mutableComp.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid)
    let wholeClipRange = CMTimeRangeMake(kCMTimeZero, sourceAsset.duration)

    do {
        try compositionVideoTrack.insertTimeRange(wholeClipRange, of: sourceVideoTrack, at: kCMTimeZero)
        // Carry the source orientation transform over to the composition track.
        compositionVideoTrack.preferredTransform = sourceVideoTrack.preferredTransform
    } catch {
        print(error)
    }

    snapshot = mutableComp
    mutableVidComp = AVMutableVideoComposition(propertiesOf: sourceAsset)
} 

II. 設置各層

/// Builds the layer tree (video layer + text overlay) and attaches it to
/// `mutableVidComp` via an AVVideoCompositionCoreAnimationTool so the text is
/// rendered into the exported video.
/// Requires `configureAsset()` to have run first (`compositionSize` is
/// force-unwrapped here).
func applyVideoEffectsToComposition() { 

    // 1 - Set up the text layer.
    // Fix: the original set only the font name. Without an explicit fontSize
    // and contentsScale a CATextLayer can render blank or blurry when it is
    // rasterized into the export — the exact symptom described above.
    let subTitle1Text =   CATextLayer() 
    subTitle1Text.font =   "Helvetica-Bold" as CFTypeRef 
    subTitle1Text.fontSize = 36 
    subTitle1Text.contentsScale = UIScreen.main.scale 
    // NOTE(review): this frame is derived from view coordinates, but the layer
    // lives in composition (pixel) coordinates — for large videos the text may
    // land far from where it appears on screen. Consider scaling by
    // compositionSize / view size.
    subTitle1Text.frame =   CGRect(x: self.view.frame.midX - 60 , y: self.view.frame.midY - 50, width: 120, height: 100) 
    subTitle1Text.string =   "Bench" 
    subTitle1Text.foregroundColor = UIColor.black.cgColor 
    subTitle1Text.alignmentMode = kCAAlignmentCenter 

    // 2 - The usual overlay: a container sized to the composition that clips
    // the text layer to the video bounds.
    let overlayLayer = CALayer() 
    overlayLayer.addSublayer(subTitle1Text) 
    overlayLayer.frame = CGRect(x: 0, y: 0, width: compositionSize!.width, height: compositionSize!.height) 
    overlayLayer.masksToBounds = true 

    // 3 - Set up the parent layer: video at the bottom, overlay on top.
    let parentLayer = CALayer() 
    let videoLayer = CALayer() 
    parentLayer.frame = CGRect(x: 0, y: 0, width: compositionSize!.width, height: compositionSize!.height) 
    videoLayer.frame = CGRect(x: 0, y: 0, width: compositionSize!.width, height: compositionSize!.height) 

    parentLayer.addSublayer(videoLayer) 
    parentLayer.addSublayer(overlayLayer) 

    // The animation tool composites parentLayer over the video frames during
    // export (it has no effect during AVPlayer playback).
    mutableVidComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer) 

} 

III. 使用 AVMutableVideoComposition 保存視頻

/// Exports `mutableComp` with `mutableVidComp` applied to
/// Documents/rendered-audio.mp4, deleting any previous output first.
func saveAsset(){ 

    /// Removes an existing file at `filePath` so the export session can write
    /// a fresh one (AVAssetExportSession fails if the output URL exists).
    func deleteFile(_ filePath: URL) { 
        guard FileManager.default.fileExists(atPath: filePath.path) else { return } 
        do { 
            try FileManager.default.removeItem(atPath: filePath.path) 
        } catch { 
            fatalError("Unable to delete file: \(error) : \(#function).") 
        } 
    } 

    let documentsDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0] as URL 
    let filePath = documentsDirectory.appendingPathComponent("rendered-audio.mp4") 
    deleteFile(filePath) 

    if let exportSession = AVAssetExportSession(asset: mutableComp, presetName: AVAssetExportPresetHighestQuality) { 

        exportSession.videoComposition = mutableVidComp 
        // exportSession.canPerformMultiplePassesOverSourceMediaData = true 
        exportSession.outputURL = filePath 
        exportSession.shouldOptimizeForNetworkUse = true 
        exportSession.timeRange = CMTimeRangeMake(kCMTimeZero, mutableComp.duration) 
        // Fix: write an MPEG-4 container to match the ".mp4" extension — the
        // original wrote a QuickTime (.mov) container into a .mp4 path.
        exportSession.outputFileType = AVFileTypeMPEG4 

        exportSession.exportAsynchronously { 
            print("finished: \(filePath) : \(exportSession.status.rawValue) ") 

            // Fix: compare against the typed status instead of the magic
            // number 4, and avoid force-unwrapping `error`.
            if exportSession.status == .failed { 
                if let error = exportSession.error { 
                    print("Export failed -> Reason: \(error.localizedDescription)") 
                    print(error) 
                } 
            } 
        } 
    } 
} 

然後我在 viewDidLoad 方法中依次運行這三個方法進行快速測試。問題是,當我運行應用程序時,導出的結果是沒有標題文字的原始視頻。我在這裏錯過了什麼?

UPDATE

我注意到,在第 II 部分代碼中添加 subTitle1Text.backgroundColor 屬性後,導出時會有一個與 subTitle1Text.frame 對應的彩色矩形出現在視頻頂部。(See Image)而當此代碼修改爲使用 AVSynchronizedLayer 進行播放時,可以在視頻頂部看到所需的圖層及其上的文字。所以也許這是 AVFoundation 本身的錯誤。我想我只剩下使用 customVideoCompositorClass 這個選擇了。問題在於,這樣渲染視頻需要很長時間。這是一個使用 AVVideoCompositing 的示例:https://github.com/samsonjs/LayerVideoCompositor

回答

1

這是我在項目中使用的完整工作代碼。它會在底部 (0,0) 顯示 CATextLayer。在導出會話完成時,它會用導出文件的新路徑替換播放器的當前項目。我使用 Objective-C 代碼中的一個模型類來獲取視頻方向。請在真機上測試:AVPlayer 在模擬器中不會正確顯示文本圖層。

// Build an editable composition holding one video and one audio track.
let composition = AVMutableComposition.init() 

    // Video composition: 30 fps output at 1:1 render scale.
    let videoComposition = AVMutableVideoComposition() 
    videoComposition.frameDuration = CMTimeMake(1, 30) 
    videoComposition.renderScale = 1.0 

    let compositionCommentaryTrack: AVMutableCompositionTrack? = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid) 


    let compositionVideoTrack: AVMutableCompositionTrack? = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid) 


    // NOTE(review): both [0] subscripts and the audioTrack! below crash if the
    // asset lacks a video or audio track — consider guarding on .first.
    let clipVideoTrack:AVAssetTrack = self.currentAsset.tracks(withMediaType: AVMediaTypeVideo)[0] 

    let audioTrack: AVAssetTrack? = self.currentAsset.tracks(withMediaType: AVMediaTypeAudio)[0] 

    // Copy the full duration of both source tracks into the composition.
    // NOTE(review): try? silently swallows insertion failures.
    try? compositionCommentaryTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, self.currentAsset.duration), of: audioTrack!, at: kCMTimeZero) 

    try? compositionVideoTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, self.currentAsset.duration), of: clipVideoTrack, at: kCMTimeZero) 

    // Orientation is derived from the track's preferredTransform by the
    // Objective-C VideoModel helper (defined below).
    let orientation = VideoModel.videoOrientation(self.currentAsset) 
    var isPortrait = false 

    switch orientation { 
    case .landscapeRight: 
     isPortrait = false 
    case .landscapeLeft: 
     isPortrait = false 
    case .portrait: 
     isPortrait = true 
    case .portraitUpsideDown: 
     isPortrait = true 
    } 

    var naturalSize = clipVideoTrack.naturalSize 

    // Portrait clips store their naturalSize in landscape terms; swap so the
    // render canvas matches the upright video.
    if isPortrait 
    { 
     naturalSize = CGSize.init(width: naturalSize.height, height: naturalSize.width) 
    } 

    videoComposition.renderSize = naturalSize 

    let scale = CGFloat(1.0) 

    var transform = CGAffineTransform.init(scaleX: CGFloat(scale), y: CGFloat(scale)) 

    // Rotate/translate the track so it renders upright inside renderSize.
    // NOTE(review): .portraitUpsideDown gets no transform here — TODO confirm
    // upside-down clips export correctly.
    switch orientation { 
    case .landscapeRight: break 
    // isPortrait = false 
    case .landscapeLeft: 
     transform = transform.translatedBy(x: naturalSize.width, y: naturalSize.height) 
     transform = transform.rotated(by: .pi) 
    case .portrait: 
     transform = transform.translatedBy(x: naturalSize.width, y: 0) 
     transform = transform.rotated(by: CGFloat(M_PI_2)) 
    case .portraitUpsideDown:break 
    } 

    // A layer instruction carrying the orientation transform, wrapped in a
    // single instruction spanning the whole composition. This is the piece the
    // question's code was missing.
    let frontLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compositionVideoTrack!) 
    frontLayerInstruction.setTransform(transform, at: kCMTimeZero) 

    let MainInstruction = AVMutableVideoCompositionInstruction() 
    MainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration) 
    MainInstruction.layerInstructions = [frontLayerInstruction] 
    videoComposition.instructions = [MainInstruction] 

    // Layer tree: parent holds the video layer plus the text overlay, all in
    // composition pixel coordinates.
    let parentLayer = CALayer.init() 
    parentLayer.frame = CGRect.init(x: 0, y: 0, width: naturalSize.width, height: naturalSize.height) 

    let videoLayer = CALayer.init() 
    videoLayer.frame = parentLayer.frame 


    // Text overlay at the bottom-left (layer origin (0,0)).
    let layer = CATextLayer() 
    layer.string = "HELLO ALL" 
    layer.foregroundColor = UIColor.white.cgColor 
    layer.backgroundColor = UIColor.orange.cgColor 
    layer.fontSize = 32 
    layer.frame = CGRect.init(x: 0, y: 0, width: 300, height: 100) 

    // NOTE(review): rct is computed below but never used — presumably it was
    // meant to scale layer.frame from on-screen to video coordinates. Dead
    // code as written.
    var rct = layer.frame; 

    let widthScale = self.playerView.frame.size.width/naturalSize.width 

    rct.size.width /= widthScale 
    rct.size.height /= widthScale 
    rct.origin.x /= widthScale 
    rct.origin.y /= widthScale 



    parentLayer.addSublayer(videoLayer) 
    parentLayer.addSublayer(layer) 

    // Composite parentLayer over the video frames during export.
    videoComposition.animationTool = AVVideoCompositionCoreAnimationTool.init(postProcessingAsVideoLayer: videoLayer, in: parentLayer) 

    // Delete any previous output so the export session can write the file.
    let documentsPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0] 
    let videoPath = documentsPath+"/cropEditVideo.mov" 

    let fileManager = FileManager.default 

    if fileManager.fileExists(atPath: videoPath) 
    { 
     try! fileManager.removeItem(atPath: videoPath) 
    } 

    print("video path \(videoPath)") 

    var exportSession = AVAssetExportSession.init(asset: composition, presetName: AVAssetExportPresetHighestQuality) 
    exportSession?.videoComposition = videoComposition 
    exportSession?.outputFileType = AVFileTypeQuickTimeMovie 
    exportSession?.outputURL = URL.init(fileURLWithPath: videoPath) 
    exportSession?.videoComposition = videoComposition 
    var exportProgress: Float = 0 
    let queue = DispatchQueue(label: "Export Progress Queue") 
    // Poll progress once per second while the session exists.
    // NOTE(review): exportSession is only set to nil on COMPLETION below, so
    // this loop spins forever if the export fails or stalls — which matches
    // the "current progress == 0" symptom reported in the comments.
    queue.async(execute: {() -> Void in 
     while exportSession != nil { 
      //    int prevProgress = exportProgress; 
      exportProgress = (exportSession?.progress)! 
      print("current progress == \(exportProgress)") 
      sleep(1) 
     } 
    }) 

    exportSession?.exportAsynchronously(completionHandler: { 


     if exportSession?.status == AVAssetExportSessionStatus.failed 
     { 
      print("Failed \(exportSession?.error)") 
     }else if exportSession?.status == AVAssetExportSessionStatus.completed 
     { 
      // Releasing the session also terminates the progress loop above.
      exportSession = nil 

      let asset = AVAsset.init(url: URL.init(fileURLWithPath: videoPath)) 
      // UI work (player swap, slider, labels) must happen on the main queue.
      DispatchQueue.main.async { 
       let item = AVPlayerItem.init(asset: asset) 


       self.player.replaceCurrentItem(with: item) 

       let assetDuration = CMTimeGetSeconds(composition.duration) 
       self.progressSlider.maximumValue = Float(assetDuration) 

       self.syncLayer.removeFromSuperlayer() 
       self.lblIntro.isHidden = true 

       self.player.play() 
       //     let url = URL.init(fileURLWithPath: videoPath) 
       //     let activityVC = UIActivityViewController(activityItems: [url], applicationActivities: []) 
       //     self.present(activityVC, animated: true, completion: nil) 
      } 

     } 
    }) 

下面是我的VideoModel類的代碼

// Determines the capture orientation of the asset's first video track by
// pattern-matching its preferredTransform against the four canonical
// rotation matrices.
//
// Fix: declared as a class method (+) to match the Swift call site above
// (`VideoModel.videoOrientation(self.currentAsset)`); the original `-` made
// this an instance method, which that call would not resolve.
//
// NOTE(review): when no pattern matches (or the asset has no video track),
// `result` stays 0, which is not a valid AVCaptureVideoOrientation
// (Portrait == 1) — callers should treat 0 as "unknown".
+(AVCaptureVideoOrientation)videoOrientation:(AVAsset *)asset 
{ 
    AVCaptureVideoOrientation result = 0; 
    NSArray *tracks = [asset tracksWithMediaType:AVMediaTypeVideo]; 
    if ([tracks count] > 0) { 
     AVAssetTrack *videoTrack = [tracks objectAtIndex:0]; 
     CGAffineTransform t = videoTrack.preferredTransform; 
     // 90° rotation -> Portrait 
     if (t.a == 0 && t.b == 1.0 && t.c == -1.0 && t.d == 0) 
     { 
      result = AVCaptureVideoOrientationPortrait; 
     } 
     // -90° rotation -> PortraitUpsideDown 
     if (t.a == 0 && t.b == -1.0 && t.c == 1.0 && t.d == 0) { 

      result = AVCaptureVideoOrientationPortraitUpsideDown; 
     } 
     // Identity -> LandscapeRight 
     if (t.a == 1.0 && t.b == 0 && t.c == 0 && t.d == 1.0) 
     { 
      result = AVCaptureVideoOrientationLandscapeRight; 
     } 
     // 180° rotation -> LandscapeLeft 
     if (t.a == -1.0 && t.b == 0 && t.c == 0 && t.d == -1.0) 
     { 
      result = AVCaptureVideoOrientationLandscapeLeft; 
     } 
    } 
    return result; 
} 

讓我知道,如果你需要在這個任何更多的幫助。

+0

感謝您的回答Amrit。我用你的答案更新了我的代碼,但不幸的是,在輸出視頻中仍然沒有字幕。顯然,添加'AVMutableVideoCompositionInstruction'似乎無法解決問題。這令人非常痛心。 – 6994

+0

查看我更新的答案,它會幫助你解決你的問題。 –

+0

我改變了我的代碼以再次匹配您的答案。該視頻在模擬器中輸出正常,但在CATextLayer中沒有任何標題。因此,根據您的建議,我嘗試在設備上運行項目,但視頻根本不導出,並持續等待在調試區域持續打印消息「當前進度== 0」。我也找不到解決這個問題的方法。 – 6994

相關問題