
I am capturing audio data from the iPhone microphone and sending it to a socket. I have tried using AVAudioEngine to get the audio buffer, but somehow it is not working. Can you suggest a better way to get the recorded buffer data in real time? How do I get the audio recording buffer data?

override func viewDidLoad() {
    super.viewDidLoad()

    // initialize engine
    engine = AVAudioEngine()
    guard nil != engine?.inputNode else {
        // @TODO: error out
        return
    }

    SocketIOManager.sharedInstance.socket.on("listen") { data, ack in
        let bufferData = data[0] as! Data

        // let playData = self?.audioBufferToNSData(PCMBuffer: bufferData as! AVAudioPCMBuffer)
        do {
            // let data = NSData(bytes: &bufferData, length: bufferData.count)
            let player = try AVAudioPlayer(data: bufferData)
            player.play()
        } catch let error as NSError {
            print(error.description)
        }

        print("socket connected \(data)")
    }
}

func installTap() {
    engine = AVAudioEngine()
    guard let engine = engine, let input = engine.inputNode else {
        // @TODO: error out
        return
    }

    let format = input.inputFormat(forBus: 0)
    input.installTap(onBus: 0, bufferSize: 4096, format: format, block: { [weak self] buffer, when in
        guard let this = self else {
            return
        }

        // writing to file: for testing purposes only
        do {
            try this.file!.write(from: buffer)
        } catch {
        }

        if let channel1Buffer = buffer.floatChannelData?[0] {
            let test = self?.copyAudioBufferBytes(buffer)
            let stram = self?.toNSData(PCMBuffer: buffer)
            SocketIOManager.sharedInstance.socket.emit("talk", stram!)

            // socket.on("listen", function (data)

            /*! @property floatChannelData
                @abstract Access the buffer's float audio samples.
                @discussion
                floatChannelData returns pointers to the buffer's audio samples if the buffer's format is
                32-bit float, or nil if it is another format.

                The returned pointer is to format.channelCount pointers to float. Each of these pointers
                is to "frameLength" valid samples, which are spaced by "stride" samples.

                If format.interleaved is false (as with the standard deinterleaved float format), then
                the pointers will be to separate chunks of memory. "stride" is 1.

                If format.interleaved is true, then the pointers will refer into the same chunk of interleaved
                samples, each offset by 1 frame. "stride" is the number of interleaved channels.
            */

            // @TODO: send data, better to pass into separate queue for processing
        }
    })

    engine.prepare()

    do {
        try engine.start()
    } catch {
        // @TODO: error out
    }
}

What is the problem with AVAudioEngine? –


See my updated code. I convert the buffer to data and send it to the socket, but when I receive some data back over the socket I cannot play it. See the viewDidLoad function, where I try to play it from the socket response. –


You should use AVAudioPlayerNode to play the buffer. First convert the data into a buffer, then play it. –

Answer


Try this code:

var audioPlayerQueue = DispatchQueue(label: "audioPlayerQueue", qos: DispatchQoS.userInteractive)

var peerAudioEngine = AVAudioEngine()
var peerAudioPlayer: AVAudioPlayerNode = AVAudioPlayerNode()
var peerInputFormat: AVAudioFormat?

override func viewDidLoad() {
    super.viewDidLoad()

    // attach the player node and connect it to the main mixer using the
    // format of the PCM data that will arrive over the socket
    self.peerAudioEngine.attach(self.peerAudioPlayer)
    self.peerInputFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: false)
    self.peerAudioEngine.connect(self.peerAudioPlayer, to: self.peerAudioEngine.mainMixerNode, format: self.peerInputFormat)

    do {
        self.peerAudioEngine.prepare()
        try self.peerAudioEngine.start()
    } catch let error {
        print(error.localizedDescription)
    }

    SocketIOManager.sharedInstance.socket.on("listen") { data, ack in
        // convert the received bytes back into a PCM buffer and schedule it for playback
        let pcmBuffer = self.toPCMBuffer(data: data[0] as! NSData)
        self.audioPlayerQueue.async {
            self.peerAudioPlayer.scheduleBuffer(pcmBuffer, completionHandler: nil)
            if self.peerAudioEngine.isRunning {
                self.peerAudioPlayer.play()
            } else {
                do {
                    try self.peerAudioEngine.start()
                    self.peerAudioPlayer.play()
                } catch {
                    print(error.localizedDescription)
                }
            }
        }
        print("socket connected \(data)")
    }
}


func toPCMBuffer(data: NSData) -> AVAudioPCMBuffer {
    // format must match what the sender used: 32-bit float, 44.1 kHz, mono, non-interleaved
    let audioFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: false)
    let PCMBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: UInt32(data.length) / audioFormat.streamDescription.pointee.mBytesPerFrame)
    PCMBuffer.frameLength = PCMBuffer.frameCapacity
    let channels = UnsafeBufferPointer(start: PCMBuffer.floatChannelData, count: Int(PCMBuffer.format.channelCount))
    data.getBytes(UnsafeMutableRawPointer(channels[0]), length: data.length)
    return PCMBuffer
}
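
Both installTap variants call toNSData(PCMBuffer:), which is not shown in the question or in the answer. A minimal sketch of such a helper, assuming the same mono, non-interleaved Float32 format that toPCMBuffer above expects, could look like this:

func toNSData(PCMBuffer: AVAudioPCMBuffer) -> Data {
    // Hypothetical inverse of toPCMBuffer: copy the first (and only) channel's
    // Float32 samples into a Data value that can be emitted over the socket.
    let channels = UnsafeBufferPointer(start: PCMBuffer.floatChannelData, count: Int(PCMBuffer.format.channelCount))
    let byteCount = Int(PCMBuffer.frameLength * PCMBuffer.format.streamDescription.pointee.mBytesPerFrame)
    return Data(bytes: channels[0], count: byteCount)
}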




func installTap() {
    engine = AVAudioEngine()
    guard let engine = engine, let input = engine.inputNode else {
        // @TODO: error out
        return
    }

    let format = input.inputFormat(forBus: 0)
    input.installTap(onBus: 0, bufferSize: 4410, format: format, block: { [weak self] (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
        guard let this = self else {
            return
        }
        // convert the tapped buffer to raw bytes and send it over the socket
        let stram = this.toNSData(PCMBuffer: buffer)
        SocketIOManager.sharedInstance.socket.emit("talk", stram)
    })

    do {
        engine.prepare()
        try engine.start()
    } catch {
        // @TODO: error out
    }
}

Edit: for enabling the loudspeaker

func speakerEnabled(_ enabled: Bool) -> Bool {
    let session = AVAudioSession.sharedInstance()
    var options = session.categoryOptions

    if enabled {
        options.insert(AVAudioSessionCategoryOptions.defaultToSpeaker)
    } else {
        options.remove(AVAudioSessionCategoryOptions.defaultToSpeaker)
    }

    do {
        try session.setCategory(AVAudioSessionCategoryPlayAndRecord, with: options)
        return true
    } catch {
        print(error.localizedDescription)
        return false
    }
}
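
A possible call site is before starting the engine; the session also has to be activated for the option to take effect (the activation call below is an assumption, not something shown in the answer):

// Hypothetical usage: route playback to the built-in speaker, then activate the session.
_ = speakerEnabled(true)
do {
    try AVAudioSession.sharedInstance().setActive(true)
} catch {
    print(error.localizedDescription)
}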

What is peerInput? –


It is just the format, an AVAudioFormat. @DipenChudasama –


Yes, but it crashes at self.peerAudioPlayer.play() with the message "required condition is false: _engine != nil". –
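
That assertion usually means play() was called on a player node that was never attached to, and connected inside, the engine that is actually running. A minimal ordering sketch, reusing the names from the answer above:

// Attach and connect the node before the engine starts; only call play()
// once the engine is running, otherwise "_engine != nil" fails.
peerAudioEngine.attach(peerAudioPlayer)
peerAudioEngine.connect(peerAudioPlayer, to: peerAudioEngine.mainMixerNode, format: peerInputFormat)
do {
    try peerAudioEngine.start()
    peerAudioPlayer.play()
} catch {
    print(error.localizedDescription)
}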
