List of usage examples for android.speech RecognizerIntent EXTRA_LANGUAGE
String EXTRA_LANGUAGE
EXTRA_LANGUAGE is an optional IETF language tag (as defined by BCP 47), for example "en-US", that tells the recognizer which language to use. Its documented type is String; note that several of the examples below pass a Locale object instead, which the Google recognizer has historically tolerated. The usage examples below are collected from open-source projects.
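Before the project examples, here is a minimal, self-contained sketch of the pattern most of them follow. The class, method, and REQ_CODE_SPEECH_INPUT request code are placeholders, not taken from any project below:

import android.app.Activity;
import android.content.ActivityNotFoundException;
import android.content.Intent;
import android.speech.RecognizerIntent;

public class SpeechDemoActivity extends Activity {
    private static final int REQ_CODE_SPEECH_INPUT = 1; // arbitrary request code

    private void startRecognition() {
        Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
        // EXTRA_LANGUAGE takes a BCP 47 language tag such as "en-US".
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, "en-US");
        try {
            startActivityForResult(intent, REQ_CODE_SPEECH_INPUT);
        } catch (ActivityNotFoundException e) {
            // No activity on this device handles ACTION_RECOGNIZE_SPEECH.
        }
    }
}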
From source file:com.eugene.fithealthmaingit.UI.NavFragments.FragmentSearch.java
private void promptSpeechInput(EditText e) {
    ((InputMethodManager) getActivity().getSystemService(Context.INPUT_METHOD_SERVICE))
            .hideSoftInputFromWindow(e.getWindowToken(), 0);
    Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, Locale.getDefault());
    intent.putExtra(RecognizerIntent.EXTRA_PROMPT, "Say Something");
    try {
        startActivityForResult(intent, REQ_CODE_SPEECH_INPUT);
    } catch (ActivityNotFoundException a) {
        Toast.makeText(getActivity().getApplicationContext(), "Not Supported", Toast.LENGTH_SHORT).show();
    }
}
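None of the snippets on this page show the receiving side of startActivityForResult. A hedged sketch of the usual result handler follows; the request code matches the example above, but the handler itself is an assumption, not part of FragmentSearch.java:

@Override
public void onActivityResult(int requestCode, int resultCode, Intent data) {
    super.onActivityResult(requestCode, resultCode, data);
    if (requestCode == REQ_CODE_SPEECH_INPUT && resultCode == Activity.RESULT_OK && data != null) {
        // EXTRA_RESULTS holds an ArrayList<String> of candidate transcriptions, best match first.
        ArrayList<String> matches = data.getStringArrayListExtra(RecognizerIntent.EXTRA_RESULTS);
        if (matches != null && !matches.isEmpty()) {
            String bestMatch = matches.get(0); // e.g. set this on the search EditText
        }
    }
}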
From source file:root.gast.playground.speech.SpeechRecognitionPlay.java
/**
 * create the {@link RecognizerIntent} based on the many preferences
 */
private Intent readRecognizerIntentFromPreferences() {
    Intent intentToSend;

    // web search handling
    boolean isWebSearchAction = preferences.getBoolean(this, R.string.pref_websearch, R.string.pref_websearch_default);
    boolean isHandsFreeAction = preferences.getBoolean(this, R.string.pref_handsfree, R.string.pref_handsfree_default);
    if (isWebSearchAction) {
        intentToSend = RecognizerIntentFactory.getWebSearchRecognizeIntent();
        final boolean ADD_ORIGIN = true;
        if (ADD_ORIGIN && Build.VERSION.SDK_INT >= 14) {
            intentToSend.putExtra(RecognizerIntent.EXTRA_ORIGIN, true);
        }
    } else {
        if (isHandsFreeAction && Build.VERSION.SDK_INT >= 16) {
            intentToSend = RecognizerIntentFactory.getHandsFreeRecognizeIntent();
        } else {
            intentToSend = RecognizerIntentFactory.getBlankRecognizeIntent();
        }
    }

    // language model
    boolean isFreeFormModel = preferences.getBoolean(this, R.string.pref_languagemodel, R.string.pref_languagemodel_default);
    if (isFreeFormModel) {
        intentToSend.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    } else {
        intentToSend.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_WEB_SEARCH);
    }

    // common extras
    String language = preferences.getString(getResources().getString(R.string.pref_language),
            getResources().getString(R.string.pref_language_default));
    intentToSend.putExtra(RecognizerIntent.EXTRA_LANGUAGE, language);
    String prompt = getResources().getString(R.string.speech_prompt) + ": "
            + whatYouAreTryingToSay.getText().toString();
    intentToSend.putExtra(RecognizerIntent.EXTRA_PROMPT, prompt);
    intentToSend.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS,
            preferences.getInt(this, R.string.pref_maxresults, R.string.pref_maxresults_default));
    intentToSend.putExtra(RecognizerIntent.EXTRA_PARTIAL_RESULTS,
            preferences.getBoolean(this, R.string.pref_partial, R.string.pref_partial_default));
    setIfValueSpecified(RecognizerIntent.EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS,
            R.string.pref_complete_silence, R.string.pref_complete_silence_default, intentToSend);
    setIfValueSpecified(RecognizerIntent.EXTRA_SPEECH_INPUT_MINIMUM_LENGTH_MILLIS,
            R.string.pref_minimum_input_length, R.string.pref_minimum_input_length_default, intentToSend);
    setIfValueSpecified(RecognizerIntent.EXTRA_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS,
            R.string.pref_possibly_complete_silence_length, R.string.pref_possibly_complete_silence_length_default,
            intentToSend);

    // pendingIntent handling
    boolean doPending = preferences.getBoolean(this, R.string.pref_withpendingintent, R.string.pref_withpendingintent);
    if (doPending) {
        Intent pendingIntentSource = new Intent(this, SpeechRecognitionResultsActivity.class);
        PendingIntent pi = PendingIntent.getActivity(this, 0, pendingIntentSource, 0);

        Bundle extraInfoBundle = new Bundle();
        // pass in what you are trying to say so the results activity can show it
        extraInfoBundle.putString(SpeechRecognitionResultsActivity.WHAT_YOU_ARE_TRYING_TO_SAY_INTENT_INPUT,
                whatYouAreTryingToSay.getText().toString());
        // set the variables in the intent this is sending
        intentToSend.putExtra(RecognizerIntent.EXTRA_RESULTS_PENDINGINTENT, pi);
        intentToSend.putExtra(RecognizerIntent.EXTRA_RESULTS_PENDINGINTENT_BUNDLE, extraInfoBundle);
    }

    Log.d(TAG, "sending recognizer intent: " + intentToSend.getExtras().toString());
    return intentToSend;
}
From source file:org.botlibre.sdk.activity.MicConfiguration.java
@TargetApi(23)
private void beginListening() {
    setStreamVolume();
    lastReply = System.currentTimeMillis();
    muteMicBeep(true);
    Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    if (MainActivity.offlineSpeech) {
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, MainActivity.voice.language);
        if (!this.failedOfflineLanguage) {
            // en-US will use the English in offline.
            intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, "en-US");
            // intent.putExtra(RecognizerIntent.EXTRA_PREFER_OFFLINE, true);
        }
        intent.putExtra(RecognizerIntent.EXTRA_PREFER_OFFLINE, true);
    } else {
        if (MainActivity.voice.language != null && !MainActivity.voice.language.isEmpty()) {
            intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, MainActivity.voice.language);
            if (!this.failedOfflineLanguage) {
                intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, MainActivity.voice.language);
            }
        } else {
            intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, "en");
            if (!this.failedOfflineLanguage) {
                intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, "en");
            }
        }
    }
    try {
        Log.d("BeginListening", "StartListening");
        this.speech.startListening(intent);
        setMicIcon(true, false);
    } catch (ActivityNotFoundException a) {
        Log.d("BeginListening", "CatchError: " + a.getMessage());
        Toast t = Toast.makeText(getApplicationContext(), "Your device doesn't support Speech to Text",
                Toast.LENGTH_SHORT);
        t.show();
        txt.setText("Status: Your device doesn't support Speech to text.");
    }
}
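EXTRA_PREFER_OFFLINE, used above, was added in API level 23, which is why the method is annotated @TargetApi(23). A minimal sketch of a runtime version guard as an alternative to the annotation (the intent setup is abbreviated):

Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, "en-US");
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
    // EXTRA_PREFER_OFFLINE was added in API 23; older recognizers do not honor it.
    intent.putExtra(RecognizerIntent.EXTRA_PREFER_OFFLINE, true);
}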
From source file:com.example.michel.facetrack.FaceTrackerActivity.java
/**
 * Start the speech-to-text intent. This opens the Google speech recognition dialog to listen for speech input.
 */
private void startSpeechToText() {
    Log.e("start speech to text", " start speech to text");
    Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, Locale.getDefault());
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    intent.putExtra(RecognizerIntent.EXTRA_PROMPT, "Speak something...");
    try {
        startActivityForResult(intent, SPEECH_RECOGNITION_CODE);
        System.out.println("hello 2");
    } catch (ActivityNotFoundException a) {
        Toast.makeText(getApplicationContext(), "Sorry! Speech recognition is not supported in this device.",
                Toast.LENGTH_SHORT).show();
    }
}
From source file:br.liveo.searchliveo.SearchCardLiveo.java
private void startVoice(EditText editText) {
    ((InputMethodManager) mContext.getSystemService(Context.INPUT_METHOD_SERVICE))
            .hideSoftInputFromWindow(editText.getWindowToken(), 0);
    Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, Locale.getDefault());
    intent.putExtra(RecognizerIntent.EXTRA_PROMPT, mContext.getString(R.string.searchview_voice));
    try {
        mContext.startActivityForResult(intent, REQUEST_CODE_SPEECH_INPUT);
    } catch (ActivityNotFoundException a) {
        Toast.makeText(mContext.getApplicationContext(), R.string.not_supported, Toast.LENGTH_SHORT).show();
    }
}
From source file:com.wizardsofm.deskclock.alarms.AlarmActivity.java
void listenForCommand() {
    // if (speech == null) {
    //     speech = SpeechRecognizer.createSpeechRecognizer(this);
    //     speech.setRecognitionListener(MainActivity.this);
    // }
    // speech = SpeechRecognizer.createSpeechRecognizer(this);
    // speech.setRecognitionListener(this);

    i = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    i.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    i.putExtra(RecognizerIntent.EXTRA_LANGUAGE, Locale.getDefault());
    i.putExtra(RecognizerIntent.EXTRA_PROMPT, "Say something");
    // i.putExtra("android.speech.extra.DICTATION_MODE", true);
    // i.putExtra(RecognizerIntent.EXTRA_PARTIAL_RESULTS, true);
    i.putExtra(RecognizerIntent.EXTRA_SPEECH_INPUT_MINIMUM_LENGTH_MILLIS, 5000);
    try {
        startActivityForResult(i, 100);
        new CountDownTimer(5000, 1000) {
            public void onTick(long millisUntilFinished) {
                // do nothing, just let it tick
            }

            public void onFinish() {
                if (!alarmStopped) {
                    listenForCommand();
                }
            }
        }.start();
        // speech.startListening(i);
    } catch (Exception e) {
    }
}
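Design note: rather than requesting partial results, this example re-launches the recognizer with a five-second CountDownTimer until alarmStopped is set, keeping the alarm effectively listening across the dialog's timeouts. The empty catch (Exception e) block silently swallows launch failures; logging the exception, as the other examples on this page do, would make such failures visible.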
From source file:com.eveningoutpost.dexdrip.Home.java
public void promptSpeechNoteInput(View abc) {
    if (recognitionRunning)
        return;
    recognitionRunning = true;
    Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, Locale.getDefault());
    // intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, "en-US"); // debug voice
    intent.putExtra(RecognizerIntent.EXTRA_PROMPT, getString(R.string.speak_your_note_text));
    try {
        startActivityForResult(intent, REQ_CODE_SPEECH_NOTE_INPUT);
    } catch (ActivityNotFoundException a) {
        Toast.makeText(getApplicationContext(), getString(R.string.speech_recognition_is_not_supported),
                Toast.LENGTH_LONG).show();
    }
}
From source file:com.eveningoutpost.dexdrip.Home.java
/**
 * Show the Google speech input dialog.
 */
private void promptSpeechInput() {
    if (recognitionRunning)
        return;
    recognitionRunning = true;
    Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, Locale.getDefault());
    // intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, "en-US"); // debug voice
    intent.putExtra(RecognizerIntent.EXTRA_PROMPT, getString(R.string.speak_your_treatment));
    try {
        startActivityForResult(intent, REQ_CODE_SPEECH_INPUT);
    } catch (ActivityNotFoundException a) {
        Toast.makeText(getApplicationContext(), R.string.speech_recognition_is_not_supported, Toast.LENGTH_LONG)
                .show();
    }
}
From source file:com.delexus.imitationzhihu.MySearchView.java
/**
 * Create and return an Intent that can launch the voice search activity, perform a specific
 * voice transcription, and forward the results to the searchable activity.
 *
 * @param baseIntent The voice app search intent to start from
 * @return A completely-configured intent ready to send to the voice search activity
 */
private Intent createVoiceAppSearchIntent(Intent baseIntent, SearchableInfo searchable) {
    ComponentName searchActivity = searchable.getSearchActivity();

    // create the necessary intent to set up a search-and-forward operation
    // in the voice search system. We have to keep the bundle separate,
    // because it becomes immutable once it enters the PendingIntent
    Intent queryIntent = new Intent(Intent.ACTION_SEARCH);
    queryIntent.setComponent(searchActivity);
    PendingIntent pending = PendingIntent.getActivity(getContext(), 0, queryIntent,
            PendingIntent.FLAG_ONE_SHOT);

    // Now set up the bundle that will be inserted into the pending intent
    // when it's time to do the search. We always build it here (even if empty)
    // because the voice search activity will always need to insert "QUERY" into
    // it anyway.
    Bundle queryExtras = new Bundle();
    if (mAppSearchData != null) {
        queryExtras.putParcelable(SearchManager.APP_DATA, mAppSearchData);
    }

    // Now build the intent to launch the voice search. Add all necessary
    // extras to launch the voice recognizer, and then all the necessary extras
    // to forward the results to the searchable activity
    Intent voiceIntent = new Intent(baseIntent);

    // Add all of the configuration options supplied by the searchable's metadata
    String languageModel = RecognizerIntent.LANGUAGE_MODEL_FREE_FORM;
    String prompt = null;
    String language = null;
    int maxResults = 1;

    Resources resources = getResources();
    if (searchable.getVoiceLanguageModeId() != 0) {
        languageModel = resources.getString(searchable.getVoiceLanguageModeId());
    }
    if (searchable.getVoicePromptTextId() != 0) {
        prompt = resources.getString(searchable.getVoicePromptTextId());
    }
    if (searchable.getVoiceLanguageId() != 0) {
        language = resources.getString(searchable.getVoiceLanguageId());
    }
    if (searchable.getVoiceMaxResults() != 0) {
        maxResults = searchable.getVoiceMaxResults();
    }

    voiceIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, languageModel);
    voiceIntent.putExtra(RecognizerIntent.EXTRA_PROMPT, prompt);
    voiceIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, language);
    voiceIntent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, maxResults);
    voiceIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
            searchActivity == null ? null : searchActivity.flattenToShortString());

    // Add the values that configure forwarding the results
    voiceIntent.putExtra(RecognizerIntent.EXTRA_RESULTS_PENDINGINTENT, pending);
    voiceIntent.putExtra(RecognizerIntent.EXTRA_RESULTS_PENDINGINTENT_BUNDLE, queryExtras);

    return voiceIntent;
}
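On the receiving end of this PendingIntent, the searchable activity gets an ordinary ACTION_SEARCH intent: the voice search activity inserts the transcription under SearchManager.QUERY (as the comment above notes), and queryExtras is merged into the intent's extras. A hedged sketch of that handler, which is not part of this source file (doSearch is a placeholder):

@Override
protected void onNewIntent(Intent intent) {
    super.onNewIntent(intent);
    if (Intent.ACTION_SEARCH.equals(intent.getAction())) {
        String query = intent.getStringExtra(SearchManager.QUERY); // filled in by voice search
        Bundle appData = intent.getBundleExtra(SearchManager.APP_DATA); // forwarded app data, if any
        doSearch(query, appData);
    }
}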
From source file:com.androzic.vnspeech.MapFragment.java
private void onTalk() {
    Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    Locale locale = new Locale("vi", "VN");
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, locale);
    intent.putExtra(RecognizerIntent.EXTRA_PROMPT, "Đọc lệnh"); // Vietnamese: "Speak a command"
    try {
        startActivityForResult(intent, REQ_CODE_SPEECH_INPUT);
    } catch (ActivityNotFoundException a) {
        // Vietnamese: "This device does not support speech recognition."
        Toast.makeText(getContext(), "Thiết bị này không hỗ trợ nhận dạng giọng nói.", Toast.LENGTH_SHORT)
                .show();
    }
}