2014-02-11 70 views
1

我一直在引用Android Speech Recognition as a service on Android 4.1 & 4.2文章來嘗試和實現服務中的語音識別。將語音發送到文本Android

我想我說得對。在我的設備上運行時,我得到了「準備言語」吐司消息,這是我在onReadyForSpeech()函數中聲明的。

根據在上述帖子中提供答案的Hoan Nguyen所說,只要onReadyForSpeech()函數被回調,我們就可以開始講話了。

我的問題是我不知道如何獲得我們正在講話的講話並將其轉換爲文本以及在哪裏做。

有人知道該怎麼做嗎?我知道這是一個非常蹩腳的問題,但我第一次使用語音識別。所以請耐心等待。

對此非常感謝。感謝提前:)

/**
 * Background {@link Service} that runs continuous speech recognition via
 * {@link SpeechRecognizer}.
 *
 * <p>Listening is driven through an internal {@link Messenger}: send
 * {@link #MSG_RECOGNIZER_START_LISTENING} to (re)start recognition and
 * {@link #MSG_RECOGNIZER_CANCEL} to cancel it. On Jelly Bean (API 16+) the
 * recognizer stops listening after a few seconds of silence, so a
 * {@link CountDownTimer} work-around cancels and restarts it when no speech
 * begins within 5 seconds; the system stream is muted during that window to
 * suppress the repeated start/stop beeps.
 *
 * <p>Recognized text is delivered to {@link SpeechRecognitionListener#onResults(Bundle)}
 * as a string list under {@link SpeechRecognizer#RESULTS_RECOGNITION}.
 */
public class MyService extends Service
{
    private static final String TAG = "MyService";

    protected AudioManager mAudioManager;
    protected SpeechRecognizer mSpeechRecognizer;
    protected Intent mSpeechRecognizerIntent;
    // Handler indirection so start/cancel requests are serialized on the main thread.
    protected final Messenger mServerMessenger = new Messenger(new IncomingHandler(this));

    // True while the recognizer has an active startListening() session.
    protected boolean mIsListening;
    // True while the no-speech countdown (Jelly Bean work-around) is running.
    // Volatile because the CountDownTimer callbacks may observe it from another context.
    protected volatile boolean mIsCountDownOn;

    static final int MSG_RECOGNIZER_START_LISTENING = 1;
    static final int MSG_RECOGNIZER_CANCEL = 2;

    @Override
    public void onCreate()
    {
        super.onCreate();
        mAudioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
        mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
        mSpeechRecognizer.setRecognitionListener(new SpeechRecognitionListener());
        mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
        mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
                this.getPackageName());
        // NOTE: do NOT call startListening() here. onStartCommand() sends
        // MSG_RECOGNIZER_START_LISTENING, and starting twice (once directly,
        // once via the handler, which saw mIsListening == false) made the
        // second call hit a busy recognizer (ERROR_RECOGNIZER_BUSY).
        Log.d(TAG, "onCreate");
    }

    /**
     * Handles start/cancel requests for the recognizer. Static with a
     * WeakReference to the service so the Handler does not leak it.
     */
    protected static class IncomingHandler extends Handler
    {
        private final WeakReference<MyService> mtarget;

        IncomingHandler(MyService target)
        {
            mtarget = new WeakReference<MyService>(target);
            Log.d(TAG, "IncomingHandler created");
        }

        @Override
        public void handleMessage(Message msg)
        {
            final MyService target = mtarget.get();
            if (target == null)
            {
                // Service already collected; nothing to do.
                return;
            }

            switch (msg.what)
            {
                case MSG_RECOGNIZER_START_LISTENING:
                    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
                    {
                        // Turn off the recognizer start beep while the
                        // restart work-around is cycling.
                        target.mAudioManager.setStreamMute(AudioManager.STREAM_SYSTEM, true);
                    }
                    if (!target.mIsListening)
                    {
                        target.mSpeechRecognizer.startListening(target.mSpeechRecognizerIntent);
                        target.mIsListening = true;
                        Log.d(TAG, "message start listening"); //$NON-NLS-1$
                    }
                    break;

                case MSG_RECOGNIZER_CANCEL:
                    target.mSpeechRecognizer.cancel();
                    target.mIsListening = false;
                    Log.d(TAG, "message canceled recognizer"); //$NON-NLS-1$
                    break;
            }
        }
    }

    /**
     * Jelly Bean work-around: if no speech begins within 5 seconds the
     * recognizer is silently cancelled and restarted so the service keeps
     * listening indefinitely.
     */
    protected CountDownTimer mNoSpeechCountDown = new CountDownTimer(5000, 5000)
    {
        @Override
        public void onTick(long millisUntilFinished)
        {
            // Nothing to do per tick; the timer only matters when it finishes.
        }

        @Override
        public void onFinish()
        {
            Log.d(TAG, "no-speech countdown finished, restarting recognizer");
            mIsCountDownOn = false;
            try
            {
                mServerMessenger.send(Message.obtain(null, MSG_RECOGNIZER_CANCEL));
                mServerMessenger.send(Message.obtain(null, MSG_RECOGNIZER_START_LISTENING));
            }
            catch (RemoteException e)
            {
                // Local messenger; should not happen, but never swallow silently.
                Log.e(TAG, "failed to restart recognizer", e);
            }
        }
    };

    @Override
    public int onStartCommand(Intent intent, int flags, int startId)
    {
        // Kick off listening through the handler so mIsListening stays accurate.
        try
        {
            mServerMessenger.send(Message.obtain(null, MSG_RECOGNIZER_START_LISTENING));
        }
        catch (RemoteException e)
        {
            Log.e(TAG, "failed to send start-listening message", e);
        }
        return START_NOT_STICKY;
    }

    @Override
    public void onDestroy()
    {
        super.onDestroy();

        if (mIsCountDownOn)
        {
            mNoSpeechCountDown.cancel();
        }
        if (mSpeechRecognizer != null)
        {
            mSpeechRecognizer.destroy();
        }
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
        {
            // Restore the system stream; otherwise the beep-muting work-around
            // would leave it muted after the service dies.
            mAudioManager.setStreamMute(AudioManager.STREAM_SYSTEM, false);
        }
        Log.d(TAG, "onDestroy");
    }

    /** Receives recognizer callbacks and implements the restart work-around. */
    protected class SpeechRecognitionListener implements RecognitionListener
    {
        @Override
        public void onBeginningOfSpeech()
        {
            // Speech input will be processed, so the no-speech countdown
            // is no longer needed.
            if (mIsCountDownOn)
            {
                mIsCountDownOn = false;
                mNoSpeechCountDown.cancel();
            }
            Log.d(TAG, "onBeginningOfSpeech");
        }

        @Override
        public void onBufferReceived(byte[] buffer)
        {
            // Raw audio buffer; unused.
        }

        @Override
        public void onEndOfSpeech()
        {
            Log.d(TAG, "onEndOfSpeech");
        }

        @Override
        public void onError(int error)
        {
            if (mIsCountDownOn)
            {
                mIsCountDownOn = false;
                mNoSpeechCountDown.cancel();
            }
            // Recognizer session is dead after an error; restart it.
            mIsListening = false;
            try
            {
                mServerMessenger.send(Message.obtain(null, MSG_RECOGNIZER_START_LISTENING));
            }
            catch (RemoteException e)
            {
                Log.e(TAG, "failed to restart recognizer after error", e);
            }
            Log.d(TAG, "onError = " + error); //$NON-NLS-1$
        }

        @Override
        public void onEvent(int eventType, Bundle params)
        {
            // Reserved for future events; unused.
        }

        @Override
        public void onPartialResults(Bundle partialResults)
        {
            // Partial results not requested; unused.
        }

        @Override
        public void onReadyForSpeech(Bundle params)
        {
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
            {
                // Start the no-speech countdown and unmute so the user can
                // hear normal system sounds while actually speaking.
                mIsCountDownOn = true;
                mNoSpeechCountDown.start();
                mAudioManager.setStreamMute(AudioManager.STREAM_SYSTEM, false);
            }
            Toast.makeText(getApplicationContext(), "Ready for Speech", Toast.LENGTH_SHORT).show();
            Log.d(TAG, "onReadyForSpeech"); //$NON-NLS-1$
        }

        @Override
        public void onResults(Bundle results)
        {
            // The recognized utterances, best match first.
            java.util.ArrayList<String> matches =
                    results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
            if (matches != null)
            {
                for (String match : matches)
                {
                    Log.d(TAG, "onResults: " + match);
                }
            }
            // The session ends after delivering results; restart to keep listening.
            mIsListening = false;
            try
            {
                mServerMessenger.send(Message.obtain(null, MSG_RECOGNIZER_START_LISTENING));
            }
            catch (RemoteException e)
            {
                Log.e(TAG, "failed to restart recognizer after results", e);
            }
        }

        @Override
        public void onRmsChanged(float rmsdB)
        {
            // Sound level changes; unused.
        }
    }

    @Override
    public IBinder onBind(Intent intent)
    {
        // Not a bound service.
        return null;
    }
}

回答

1

你可以在onResults(Bundle results)回調中取得結果,然後通過一個ArrayList獲取用戶所說的內容:

ArrayList<String> matches = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION); 
+0

它的工作,並感謝了很多。我只知道你會回答這個問題。雖然你看不見它,但現在我的臉上有一個很大的笑容:P – ik024

+0

現在,我剛剛複製了你的代碼,但沒有理解我真正想了解你所做的每件事情的大部分內容。可以通過提供一些鏈接來幫助我,讓我能理解它。 – ik024

+0

剛剛閱讀RecognitionListener,RecognizerIntent和SpeechRecognizer在 http://developer.android.com/reference/android/speech/package-summary.html –