我需要以小塊的方式播放通過套接字收到的原始音頻數據。我讀到可以使用循環緩衝區(ring buffer),也在 Objective-C 中找到了一些解決方案,但無法讓它們在 Swift 3 中正常工作。
任何人都可以幫助我嗎?如何在Swift中播放來自套接字的原始音頻數據
1
A
回答
0
首先,你可以像這樣實現一個 ring buffer(循環緩衝區)。
/// A fixed-capacity FIFO ring buffer backed by an array.
///
/// The original version incremented `readIndex`/`writeIndex` forever; for a
/// continuously streaming audio source that eventually overflows `Int` and
/// traps. Both indices are now rebased into `0..<capacity` as elements are
/// consumed, which preserves `writeIndex - readIndex` (the element count)
/// while keeping the values bounded.
public struct RingBuffer<T> {
    private var array: [T?]
    private var readIndex = 0
    private var writeIndex = 0

    /// Creates a buffer that can hold up to `count` elements.
    public init(count: Int) {
        array = [T?](repeating: nil, count: count)
    }

    /// Appends `element` to the buffer.
    /// Returns `false` (and drops the element) if the buffer is full.
    @discardableResult public mutating func write(element: T) -> Bool {
        guard !isFull else { return false }
        array[writeIndex % array.count] = element
        writeIndex += 1
        return true
    }

    /// Removes and returns the oldest element, or `nil` if the buffer is
    /// empty. The vacated slot is reset to `nil` so reference-typed elements
    /// are released promptly instead of staying retained until overwritten.
    public mutating func read() -> T? {
        guard !isEmpty else { return nil }
        let slot = readIndex % array.count
        let element = array[slot]
        array[slot] = nil
        readIndex += 1
        // Rebase both indices once the read index passes the capacity.
        // Subtracting the same amount from both leaves the element count
        // (writeIndex - readIndex) unchanged.
        if readIndex >= array.count {
            readIndex -= array.count
            writeIndex -= array.count
        }
        return element
    }

    /// Number of elements currently stored.
    fileprivate var availableSpaceForReading: Int {
        return writeIndex - readIndex
    }

    public var isEmpty: Bool {
        return availableSpaceForReading == 0
    }

    /// Number of free slots remaining.
    fileprivate var availableSpaceForWriting: Int {
        return array.count - availableSpaceForReading
    }

    public var isFull: Bool {
        return availableSpaceForWriting == 0
    }
}
之後,像這樣實現音頻單元(Audio Unit),並根據需要自行修改。
/// Owns an output audio unit (RemoteIO on iOS, DefaultOutput on macOS)
/// whose input bus is driven by the file-level `renderCallback` function.
class ToneGenerator {
    fileprivate var toneUnit: AudioUnit? = nil

    init() {
        setupAudioUnit()
    }

    deinit {
        // toneUnit is set by setupAudioUnit() in init, so stop() is safe here.
        stop()
    }

    /// Creates the output audio unit, installs the render callback on input
    /// bus 0, and configures the stream format the callback will provide
    /// (mono, non-interleaved 32-bit float linear PCM at `sampleRate`).
    func setupAudioUnit() {
        // Configure the description of the output audio component we want to find:
        let componentSubtype: OSType
        #if os(OSX)
        componentSubtype = kAudioUnitSubType_DefaultOutput
        #else
        componentSubtype = kAudioUnitSubType_RemoteIO
        #endif
        var defaultOutputDescription = AudioComponentDescription(componentType: kAudioUnitType_Output,
                                                                 componentSubType: componentSubtype,
                                                                 componentManufacturer: kAudioUnitManufacturer_Apple,
                                                                 componentFlags: 0,
                                                                 componentFlagsMask: 0)
        let defaultOutput = AudioComponentFindNext(nil, &defaultOutputDescription)
        assert(defaultOutput != nil, "no matching output audio component found")

        var err: OSStatus
        // Create a new instance of it in the form of our audio unit:
        err = AudioComponentInstanceNew(defaultOutput!, &toneUnit)
        assert(err == noErr, "AudioComponentInstanceNew failed")

        // Set the render callback as the input for our audio unit.
        // NOTE(review): `as? AURenderCallback` silently produces nil when the
        // function's signature does not exactly match AURenderCallback (e.g.
        // a non-optional ioData parameter), leaving the unit with no input.
        // The assert below surfaces that misconfiguration immediately.
        var renderCallbackStruct = AURenderCallbackStruct(inputProc: renderCallback as? AURenderCallback,
                                                          inputProcRefCon: nil)
        assert(renderCallbackStruct.inputProc != nil, "renderCallback does not match AURenderCallback")
        err = AudioUnitSetProperty(toneUnit!,
                                   kAudioUnitProperty_SetRenderCallback,
                                   kAudioUnitScope_Input,
                                   0,
                                   &renderCallbackStruct,
                                   UInt32(MemoryLayout<AURenderCallbackStruct>.size))
        assert(err == noErr, "AudioUnitSetProperty SetRenderCallback failed")

        // Set the stream format for the audio unit. That is, the format of the
        // data that our render callback will provide.
        var streamFormat = AudioStreamBasicDescription(mSampleRate: Float64(sampleRate),
                                                       mFormatID: kAudioFormatLinearPCM,
                                                       mFormatFlags: kAudioFormatFlagsNativeFloatPacked|kAudioFormatFlagIsNonInterleaved,
                                                       mBytesPerPacket: 4 /*four bytes per float*/,
                                                       mFramesPerPacket: 1,
                                                       mBytesPerFrame: 4,
                                                       mChannelsPerFrame: 1,
                                                       mBitsPerChannel: 4*8,
                                                       mReserved: 0)
        err = AudioUnitSetProperty(toneUnit!,
                                   kAudioUnitProperty_StreamFormat,
                                   kAudioUnitScope_Input,
                                   0,
                                   &streamFormat,
                                   UInt32(MemoryLayout<AudioStreamBasicDescription>.size))
        assert(err == noErr, "AudioUnitSetProperty StreamFormat failed")
    }

    /// Initializes and starts the audio unit; rendering begins immediately.
    func start() {
        // The original overwrote the AudioUnitInitialize status before the
        // single assert, so initialization failures went unnoticed. Check
        // each call separately.
        var status = AudioUnitInitialize(toneUnit!)
        assert(status == noErr, "AudioUnitInitialize failed")
        status = AudioOutputUnitStart(toneUnit!)
        assert(status == noErr, "AudioOutputUnitStart failed")
    }

    /// Stops rendering and uninitializes the unit (called from deinit).
    func stop() {
        AudioOutputUnitStop(toneUnit!)
        AudioUnitUninitialize(toneUnit!)
    }
}
這是固定值
// Fixed playback parameters for the example (the answer hard-codes
// 16 kHz mono PCM — adjust to match your incoming stream).
private let sampleRate = 16000
// Peak amplitude of the generated sine wave (1.0 == full scale Float32).
private let amplitude: Float = 1.0
// Tone frequency in Hz (A4 concert pitch).
private let frequency: Float = 440
/// Theta is changed over time as each sample is provided.
private var theta: Float = 0.0
/// AURenderCallback that fills the first buffer of `ioData` with a mono sine
/// wave at `frequency` Hz scaled by `amplitude`. The file-level `theta`
/// carries the oscillator phase across callback invocations.
///
/// The `ioData` parameter is now optional to match the AURenderCallback
/// typealias exactly — the original's non-optional parameter is why the
/// `as? AURenderCallback` cast in setupAudioUnit() is fragile.
private func renderCallback(_ inRefCon: UnsafeMutableRawPointer,
                            ioActionFlags: UnsafeMutablePointer<AudioUnitRenderActionFlags>,
                            inTimeStamp: UnsafePointer<AudioTimeStamp>,
                            inBusNumber: UInt32,
                            inNumberFrames: UInt32,
                            ioData: UnsafeMutablePointer<AudioBufferList>?) -> OSStatus {
    // Core Audio may legally pass a nil buffer list; nothing to render then.
    guard let ioData = ioData else { return noErr }
    let abl = UnsafeMutableAudioBufferListPointer(ioData)
    let buffer = abl[0]
    let samples: UnsafeMutableBufferPointer<Float32> = UnsafeMutableBufferPointer(buffer)
    // Phase advance per frame is loop-invariant; hoist it. Float.pi replaces
    // the deprecated C constant M_PI.
    let thetaIncrement = 2.0 * Float.pi * frequency / Float(sampleRate)
    for frame in 0..<Int(inNumberFrames) {
        samples[samples.startIndex + frame] = sin(theta) * amplitude
        theta += thetaIncrement
    }
    return noErr
}
你需要先把收到的數據放進循環緩衝區,然後再從緩衝區讀出來播放聲音。
+0
p.s.這是來自udp的原始音頻的代碼 PCM 16000 頻率440 –
+0
你如何在這裏讀取緩衝區併發送播放?我正試圖理解你的代碼 –
相關問題
- 1. IO直接播放來自UDP流(NSData)的原始音頻
- 2. 如何在iOS中使用原始數據播放音頻?
- 3. 原始音頻播放5
- 4. iPhone,以字節形式播放原始數據作爲音頻
- 5. 將音頻播放到套接字node.js
- 6. 音頻標記中的原始緩衝區數據播放
- 7. 如何在iPhone中播放原始音頻? (使用ffmpeg)
- 8. Android從C++端播放原始音頻
- 9. 無法播放原始音頻文件
- 10. 用QPython audiostream原始PCM音頻播放
- 11. 如何使用AVCaptureSession中收到的原始數據播放音頻?
- 12. 播放mp3原始音頻數據而不寫入文件
- 13. 如何在播放時處理ipod庫音頻文件原始數據
- 14. 如何播放基本原始的端口音頻
- 15. Qt來自Axis Camera的音頻播放
- 16. 在原始linux套接字上接收來自任何協議的數據包
- 17. 原始聲音播放
- 18. 在Swift中使用AV音頻播放器進行條件音頻播放
- 19. iPhone中的原生音頻播放器
- 20. 播放字符串數據。 (播放字符串值爲音頻)
- 21. 在Win8中播放原始PCM波形音頻(用戶預覽)
- 22. 如何播放音頻先完成音頻文件,然後開始播放?
- 23. 如何將通過套接字接收的數據放入音頻隊列並播放它
- 24. 在swift中自動播放聲音
- 25. 跳轉來播放音頻
- 26. 音頻播放器不自動播放?
- 27. 如何自動播放音頻文件?
- 28. 如何製作音頻自動播放
- 29. 如何停止播放重複播放的音頻來源
- 30. 如何在MPMoviePlayer播放視頻流時訪問原始視頻數據
您是否知道傳入音頻數據的格式? – dave234
@Dave我只是得到字節,所以格式應該是PCM –