我正在做我的語音識別項目。錄製代碼運行良好,但我們不知道如何編寫代碼以將音頻存儲在Firebase中。用於語音識別的IOS Swift 3
你能幫我一下嗎?
import UIKit
import AVFoundation
import Firebase
import Speech
import FirebaseStorage
class SpeechViewController: UIViewController,AVAudioPlayerDelegate,AVAudioRecorderDelegate {
var usersStorageRef: FIRStorageReference!
var audioPlayer: AVAudioPlayer?
var audioRecorder: AVAudioRecorder?
private let speechRecognizer = SFSpeechRecognizer(locale:
Locale(identifier: "en-US"))!
private var speechRecognitionRequest:
SFSpeechAudioBufferRecognitionRequest?
private var speechRecognitionTask: SFSpeechRecognitionTask?
private let audioEngine = AVAudioEngine()
@IBOutlet weak var myTextView: UITextView!
@IBOutlet weak var StopButton: UIButton!
@IBOutlet weak var StartButton: UIButton!
override func viewDidLoad() {
super.viewDidLoad()
authorizeSR()
let storage = FIRStorage.storage()
let storageRef = storage.reference(forURL: "gs://login-92e0b.appspot.com")
//let localFile = URL(string: "sound.caf")!
usersStorageRef = storageRef.child("users")
StopButton.isEnabled = false
let fileMgr = FileManager.default
let dirPaths = fileMgr.urls(for: .documentDirectory,
in: .userDomainMask)
let soundFileURL = dirPaths[0].appendingPathComponent("sound.caf")
let recordSettings =
[AVEncoderAudioQualityKey: AVAudioQuality.min.rawValue,
AVEncoderBitRateKey: 16,
AVNumberOfChannelsKey: 2,
AVSampleRateKey: 44100.0] as [String : Any]
let audioSession = AVAudioSession.sharedInstance()
do {
try audioSession.setCategory(
AVAudioSessionCategoryPlayAndRecord)
} catch let error as NSError {
print("audioSession error: \(error.localizedDescription)")
}
do {
try audioRecorder = AVAudioRecorder(url: soundFileURL,
settings: recordSettings as [String : AnyObject])
audioRecorder?.prepareToRecord()
} catch let error as NSError {
print("audioSession error: \(error.localizedDescription)")
}
// Do any additional setup after loading the view.
}
func authorizeSR() {
SFSpeechRecognizer.requestAuthorization { authStatus in
OperationQueue.main.addOperation {
switch authStatus {
case .authorized:
self.StartButton.isEnabled = true
case .denied:
self.StartButton.isEnabled = false
self.StartButton.setTitle("Speech recognition access denied by user", for: .disabled)
case .restricted:
self.StartButton.isEnabled = false
self.StartButton.setTitle("Speech recognition restricted on device", for: .disabled)
case .notDetermined:
self.StartButton.isEnabled = false
self.StartButton.setTitle("Speech recognition not authorized", for: .disabled)
}
}
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
@IBAction func StartTranscribing(_ sender: Any) {
if audioRecorder?.isRecording == false {
StopButton.isEnabled = true
audioRecorder?.record()
}
StopButton.isEnabled = true
StartButton.isEnabled = false
try! startSession()
}
func startSession() throws {
if let recognitionTask = speechRecognitionTask {
recognitionTask.cancel()
self.speechRecognitionTask = nil
}
let audioSession = AVAudioSession.sharedInstance()
try audioSession.setCategory(AVAudioSessionCategoryRecord)
speechRecognitionRequest = SFSpeechAudioBufferRecognitionRequest()
guard let recognitionRequest = speechRecognitionRequest else { fatalError("SFSpeechAudioBufferRecognitionRequest object creation failed") }
guard let inputNode = audioEngine.inputNode else { fatalError("Audio engine has no input node") }
recognitionRequest.shouldReportPartialResults = true
speechRecognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest) { result, error in
var finished = false
if let result = result {
self.myTextView.text =
result.bestTranscription.formattedString
finished = result.isFinal
}
if error != nil || finished {
self.audioEngine.stop()
inputNode.removeTap(onBus: 0)
self.speechRecognitionRequest = nil
self.speechRecognitionTask = nil
self.StartButton.isEnabled = true
}
}
let recordingFormat = inputNode.outputFormat(forBus: 0)
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
self.speechRecognitionRequest?.append(buffer)
}
audioEngine.prepare()
try audioEngine.start()
}
@IBAction func StopTranscribing(_ sender: Any) {
if audioEngine.isRunning {
audioEngine.stop()
speechRecognitionRequest?.endAudio()
StartButton.isEnabled = true
StopButton.isEnabled = false
}
StopButton.isEnabled = false
StartButton.isEnabled = true
if audioRecorder?.isRecording == true {
audioRecorder?.stop()
} else {
audioPlayer?.stop()
}
displayAlertMessage(messageToDisplay: "SUCESSFUL")
}
func displayAlertMessage(messageToDisplay: String)
{
let alertController = UIAlertController(title: "Alert", message: messageToDisplay, preferredStyle: .alert)
let OKAction = UIAlertAction(title: "OK", style: .default) { (action:UIAlertAction!) in
// Code in this block will trigger when OK button tapped.
let storyBoard: UIStoryboard = UIStoryboard(name: "Main", bundle: nil)
let newView1 = storyBoard.instantiateViewController(withIdentifier: "loggedVC") as! loggedViewController
self.present(newView1, animated: true, completion: nil)
print("Ok button tapped");
}
alertController.addAction(OKAction)
self.present(alertController, animated: true, completion:nil)
}
func audioPlayerDidFinishPlaying(_ player: AVAudioPlayer, successfully flag: Bool) {
StartButton.isEnabled = true
StopButton.isEnabled = false
}
func audioPlayerDecodeErrorDidOccur(_ player: AVAudioPlayer, error: Error?) {
print("Audio Play Decode Error")
}
func audioRecorderDidFinishRecording(_ recorder: AVAudioRecorder, successfully flag: Bool) {
}
func audioRecorderEncodeErrorDidOccur(_ recorder: AVAudioRecorder, error: Error?) {
print("Audio Record Encode Error")
}
}
評論不延長討論;這個談話已經[轉移到聊天](http://chat.stackoverflow.com/rooms/138404/discussion-on-question-by-kalpana-anandan-ios-swift-3-for-speech-recognition)。 –