I found that if I make an audio recording on my Arduino-based robot (I used QtMultimedia for this), I can upload it to Google and it sends me back some JSON.
I then wrote some C++/Qt that turns this into a QML plugin. Here is the (alpha) code. Please note: make sure you replace <YOUR FLAC FILE.flac> with your real FLAC file.
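For completeness, here is roughly what the recording side can look like with QtMultimedia. This is a minimal sketch, assuming Qt 5's QAudioRecorder; the output file name is made up, and the exact FLAC codec string varies by backend (check recorder->supportedAudioCodecs()), so treat it as a starting point rather than the plugin's actual recorder:

#include <QAudioRecorder>
#include <QAudioEncoderSettings>
#include <QUrl>

// Hypothetical helper: record the default audio input to a FLAC file that
// SpeechRecognition::start() can later POST. Codec name and encoder
// availability depend on your platform's multimedia plugins.
void startRecording(QAudioRecorder* recorder)
{
    QAudioEncoderSettings settings;
    settings.setCodec("audio/FLAC");   // verify with recorder->supportedAudioCodecs()
    settings.setSampleRate(8000);      // must match "rate=8000" in kContentType below
    recorder->setEncodingSettings(settings);
    recorder->setOutputLocation(QUrl::fromLocalFile("sample.flac"));
    recorder->record();                // call recorder->stop() once the utterance is done
}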
speechrecognition.cpp
#include <QNetworkReply>
#include <QNetworkRequest>
#include <QSslSocket>
#include <QUrl>
#include <QJsonDocument>
#include <QJsonArray>
#include <QJsonObject>
#include "speechrecognition.h"
#include <QFile>
#include <QDebug>
const char* SpeechRecognition::kContentType = "audio/x-flac; rate=8000";
const char* SpeechRecognition::kUrl = "http://www.google.com/speech-api/v1/recognize?xjerr=1&client=directions&lang=en";
SpeechRecognition::SpeechRecognition(QObject* parent)
    : QObject(parent)
{
    network_ = new QNetworkAccessManager(this);
    connect(network_, SIGNAL(finished(QNetworkReply*)),
            this, SLOT(replyFinished(QNetworkReply*)));
}
void SpeechRecognition::start()
{
    const QUrl url(kUrl);
    QNetworkRequest req(url);
    req.setHeader(QNetworkRequest::ContentTypeHeader, kContentType);
    req.setAttribute(QNetworkRequest::DoNotBufferUploadDataAttribute, false);
    req.setAttribute(QNetworkRequest::CacheLoadControlAttribute,
                     QNetworkRequest::AlwaysNetwork);

    // The FLAC file is streamed as the POST body. It must stay open until
    // the reply finishes, so parent it to the reply for automatic cleanup.
    QFile* compressedFile = new QFile("<YOUR FLAC FILE.flac>");
    if (!compressedFile->open(QIODevice::ReadOnly)) {
        qDebug() << "Cannot open" << compressedFile->fileName();
        delete compressedFile;
        return;
    }
    reply_ = network_->post(req, compressedFile);
    compressedFile->setParent(reply_);
}
void SpeechRecognition::replyFinished(QNetworkReply* reply)
{
    Result result = Result_ErrorNetwork;
    Hypotheses hypotheses;
    if (reply->error() != QNetworkReply::NoError) {
        qDebug() << "ERROR\n" << reply->errorString();
    } else {
        qDebug() << "Running ParseResponse for\n" << reply << result;
        ParseResponse(reply, &result, &hypotheses);
    }
    emit Finished(result, hypotheses);
    reply->deleteLater();
    reply_ = NULL;
}
void SpeechRecognition::ParseResponse(QIODevice* reply, Result* result,
                                      Hypotheses* hypotheses)
{
    const QByteArray response = reply->readAll();
    qDebug() << "The reply" << response;

    QJsonDocument jsonDoc = QJsonDocument::fromJson(response);
    QVariantMap data = jsonDoc.toVariant().toMap();

    // "status" follows the Result enumeration; anything but 0 is an error.
    const int status = data.value("status", Result_ErrorNetwork).toInt();
    *result = static_cast<Result>(status);
    if (status != Result_Success)
        return;

    // Each hypothesis carries the recognised text and a confidence score.
    QVariantList list = data.value("hypotheses", QVariantList()).toList();
    foreach (const QVariant& variant, list) {
        QVariantMap map = variant.toMap();
        if (!map.contains("utterance") || !map.contains("confidence"))
            continue;
        Hypothesis hypothesis;
        hypothesis.utterance = map.value("utterance", QString()).toString();
        hypothesis.confidence = map.value("confidence", 0.0).toReal();
        *hypotheses << hypothesis;
        qDebug() << "confidence =" << hypothesis.confidence
                 << "\nYour results =" << hypothesis.utterance;
        setResults(hypothesis.utterance);
    }
}
void SpeechRecognition::setResults(const QString& results)
{
    if (m_results == results)
        return;
    m_results = results;
    emit resultsChanged();
}

QString SpeechRecognition::results() const
{
    return m_results;
}
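To make the parsing easier to follow: ParseResponse() expects JSON shaped roughly like the example below. The values are illustrative only; the code reads just the status, hypotheses, utterance, and confidence fields and ignores anything else Google may return:

{"status": 0, "hypotheses": [{"utterance": "hello world", "confidence": 0.92}]}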
speechrecognition.h
#ifndef SPEECHRECOGNITION_H
#define SPEECHRECOGNITION_H

#include <QObject>
#include <QList>

class QIODevice;
class QNetworkAccessManager;
class QNetworkReply;

class SpeechRecognition : public QObject {
    Q_OBJECT
    Q_PROPERTY(QString results READ results NOTIFY resultsChanged)

public:
    SpeechRecognition(QObject* parent = 0);

    static const char* kUrl;
    static const char* kContentType;

    struct Hypothesis {
        QString utterance;
        qreal confidence;
    };
    typedef QList<Hypothesis> Hypotheses;

    // This enumeration follows the values described here:
    // http://www.w3.org/2005/Incubator/htmlspeech/2010/10/google-api-draft.html#speech-input-error
    enum Result {
        Result_Success = 0,
        Result_ErrorAborted,
        Result_ErrorAudio,
        Result_ErrorNetwork,
        Result_NoSpeech,
        Result_NoMatch,
        Result_BadGrammar
    };

    Q_INVOKABLE void start();
    void Cancel();

    QString results() const;
    void setResults(const QString& results);

signals:
    void Finished(Result result, const Hypotheses& hypotheses);
    void resultsChanged();

private slots:
    void replyFinished(QNetworkReply* reply);

private:
    void ParseResponse(QIODevice* reply, Result* result, Hypotheses* hypotheses);

    QNetworkAccessManager* network_;
    QNetworkReply* reply_;
    QByteArray buffered_raw_data_;
    int num_samples_recorded_;
    QString m_results;
};
#endif // SPEECHRECOGNITION_H
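To expose this class to QML, one approach is to register it as a QML type. A minimal sketch, assuming a plain Qt 5 application rather than a full plugin project (the module URI "Speech" and the main.qml file name are made up for this example):

#include <QGuiApplication>
#include <QQmlApplicationEngine>
#include <QUrl>
#include <QtQml>
#include "speechrecognition.h"

int main(int argc, char* argv[])
{
    QGuiApplication app(argc, argv);
    // Arbitrary URI/version chosen for this example.
    qmlRegisterType<SpeechRecognition>("Speech", 1, 0, "SpeechRecognition");
    QQmlApplicationEngine engine(QUrl::fromLocalFile("main.qml"));
    return app.exec();
}

In QML you can then import Speech 1.0, declare a SpeechRecognition item, call start() from a button handler, and bind a Text element to the results property.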
Microsoft's API is http://msdn.microsoft.com/en-us/library/ms720151(v=vs.85).aspx – 2013-04-30 09:31:35