
I need to play raw audio data that arrives over a socket in small chunks. I've read that I should use a circular buffer, and I found a few Objective-C solutions, but I couldn't get any of them working, especially in Swift 3.
Can anyone help me? How do I play raw audio data from a socket in Swift?

Do you know the format of the incoming audio data? – dave234

@Dave I just get bytes, so the format should be PCM –

Answer

First, implement a ring buffer like this:

public struct RingBuffer<T> {
    private var array: [T?]
    private var readIndex = 0
    private var writeIndex = 0

    public init(count: Int) {
        array = [T?](repeating: nil, count: count)
    }

    /* Returns false if out of space. */
    @discardableResult public mutating func write(element: T) -> Bool {
        if !isFull {
            array[writeIndex % array.count] = element
            writeIndex += 1
            return true
        } else {
            return false
        }
    }

    /* Returns nil if the buffer is empty. */
    public mutating func read() -> T? {
        if !isEmpty {
            let element = array[readIndex % array.count]
            readIndex += 1
            return element
        } else {
            return nil
        }
    }

    fileprivate var availableSpaceForReading: Int {
        return writeIndex - readIndex
    }

    public var isEmpty: Bool {
        return availableSpaceForReading == 0
    }

    fileprivate var availableSpaceForWriting: Int {
        return array.count - availableSpaceForReading
    }

    public var isFull: Bool {
        return availableSpaceForWriting == 0
    }
}
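
A quick sanity check of how the buffer behaves (the sample values here are arbitrary):

var ring = RingBuffer<Float32>(count: 4)
ring.write(element: 0.25)        // true: space was available
ring.write(element: -0.5)        // true
print(ring.read() ?? "empty")    // 0.25 (FIFO order)
print(ring.isFull)               // false: one element is still queued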

After that, implement the audio unit like this (modify as needed):

import AudioToolbox

class ToneGenerator {
    fileprivate var toneUnit: AudioUnit? = nil

    init() {
        setupAudioUnit()
    }

    deinit {
        stop()
    }

    func setupAudioUnit() {

        // Configure the description of the output audio component we want to find:
        let componentSubtype: OSType
        #if os(OSX)
            componentSubtype = kAudioUnitSubType_DefaultOutput
        #else
            componentSubtype = kAudioUnitSubType_RemoteIO
        #endif
        var defaultOutputDescription = AudioComponentDescription(componentType: kAudioUnitType_Output,
                                                                 componentSubType: componentSubtype,
                                                                 componentManufacturer: kAudioUnitManufacturer_Apple,
                                                                 componentFlags: 0,
                                                                 componentFlagsMask: 0)
        let defaultOutput = AudioComponentFindNext(nil, &defaultOutputDescription)

        var err: OSStatus

        // Create a new instance of it in the form of our audio unit:
        err = AudioComponentInstanceNew(defaultOutput!, &toneUnit)
        assert(err == noErr, "AudioComponentInstanceNew failed")

        // Set the render callback as the input for our audio unit.
        // renderCallback is a top-level function whose signature matches
        // AURenderCallback exactly, so it can be passed directly as a
        // C function pointer (no cast needed):
        var renderCallbackStruct = AURenderCallbackStruct(inputProc: renderCallback,
                                                          inputProcRefCon: nil)
        err = AudioUnitSetProperty(toneUnit!,
                                   kAudioUnitProperty_SetRenderCallback,
                                   kAudioUnitScope_Input,
                                   0,
                                   &renderCallbackStruct,
                                   UInt32(MemoryLayout<AURenderCallbackStruct>.size))
        assert(err == noErr, "AudioUnitSetProperty SetRenderCallback failed")

        // Set the stream format for the audio unit, i.e. the format of the data
        // our render callback will provide: mono, 32-bit float, non-interleaved PCM.
        var streamFormat = AudioStreamBasicDescription(mSampleRate: Float64(sampleRate),
                                                       mFormatID: kAudioFormatLinearPCM,
                                                       mFormatFlags: kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved,
                                                       mBytesPerPacket: 4, // four bytes per Float32 sample
                                                       mFramesPerPacket: 1,
                                                       mBytesPerFrame: 4,
                                                       mChannelsPerFrame: 1,
                                                       mBitsPerChannel: 4 * 8,
                                                       mReserved: 0)
        err = AudioUnitSetProperty(toneUnit!,
                                   kAudioUnitProperty_StreamFormat,
                                   kAudioUnitScope_Input,
                                   0,
                                   &streamFormat,
                                   UInt32(MemoryLayout<AudioStreamBasicDescription>.size))
        assert(err == noErr, "AudioUnitSetProperty StreamFormat failed")

    }

    func start() {
        var status: OSStatus
        status = AudioUnitInitialize(toneUnit!)
        assert(status == noErr, "AudioUnitInitialize failed")
        status = AudioOutputUnitStart(toneUnit!)
        assert(status == noErr, "AudioOutputUnitStart failed")
    }

    func stop() {
        AudioOutputUnitStop(toneUnit!)
        AudioUnitUninitialize(toneUnit!)
    }

}
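
A minimal usage sketch, assuming the fixed values and render callback shown below live in the same file:

let generator = ToneGenerator()
generator.start()   // the render callback now runs on the audio thread
// ... later, when the stream is finished:
// generator.stop()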

These are the fixed values:

private let sampleRate = 16000 
private let amplitude: Float = 1.0 
private let frequency: Float = 440 

/// Theta is changed over time as each sample is provided. 
private var theta: Float = 0.0 


private func renderCallback(_ inRefCon: UnsafeMutableRawPointer,
                            ioActionFlags: UnsafeMutablePointer<AudioUnitRenderActionFlags>,
                            inTimeStamp: UnsafePointer<AudioTimeStamp>,
                            inBusNumber: UInt32,
                            inNumberFrames: UInt32,
                            ioData: UnsafeMutablePointer<AudioBufferList>?) -> OSStatus {
    // ioData is optional in the AURenderCallback signature; bail out if it is missing.
    guard let ioData = ioData else { return noErr }
    let abl = UnsafeMutableAudioBufferListPointer(ioData)
    let buffer = abl[0]
    let pointer: UnsafeMutableBufferPointer<Float32> = UnsafeMutableBufferPointer(buffer)
    // Fill the buffer with a sine wave, advancing the phase for each sample.
    for frame in 0..<inNumberFrames {
        let pointerIndex = pointer.startIndex.advanced(by: Int(frame))
        pointer[pointerIndex] = sin(theta) * amplitude
        theta += 2.0 * Float.pi * frequency / Float(sampleRate)
    }
    return noErr
}

You need to push the data arriving from the socket into the circular buffer, then read from the buffer in the render callback to play the sound, as sketched below.
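
A minimal sketch of that glue, assuming the socket delivers 16-bit little-endian PCM; didReceive(data:) and audioBuffer are hypothetical names, not part of the code above:

import Foundation

// Hypothetical shared buffer between the socket thread and the audio thread.
// Note: this RingBuffer struct is not thread-safe; production code needs a
// lock-free ring buffer, since the render callback runs on the real-time
// audio thread.
var audioBuffer = RingBuffer<Float32>(count: 16000)

// Socket side: convert each incoming 16-bit little-endian PCM sample to
// Float32 in [-1, 1] and enqueue it. didReceive(data:) stands in for
// whatever your socket library calls when bytes arrive.
func didReceive(data: Data) {
    data.withUnsafeBytes { (samples: UnsafePointer<Int16>) in
        for i in 0..<(data.count / MemoryLayout<Int16>.size) {
            audioBuffer.write(element: Float32(Int16(littleEndian: samples[i])) / Float32(Int16.max))
        }
    }
}

// Render side: instead of synthesizing a sine wave, the loop in
// renderCallback would dequeue samples, padding with silence on underrun:
//
//     for frame in 0..<inNumberFrames {
//         let pointerIndex = pointer.startIndex.advanced(by: Int(frame))
//         pointer[pointerIndex] = audioBuffer.read() ?? 0.0
//     }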

P.S. This is the code for raw audio from UDP: PCM, 16000 Hz sample rate, 440 Hz frequency. –

How do you read from the buffer and send it to playback here? I am trying to understand your code. –
