List of usage examples for android.speech.tts TextToSpeech SUCCESS
int SUCCESS
To view the source code that uses android.speech.tts TextToSpeech SUCCESS, click the Source Link below each example.
From source file:com.med.fast.ocr.OcrCaptureActivity.java
/**
 * Initializes the UI and creates the detector pipeline.
 *
 * NOTE(review): unlike the sibling OcrCaptureActivity variants, this onCreate
 * never assigns mPreview/mGraphicOverlay via findViewById, yet mGraphicOverlay
 * is passed to Snackbar.make below. Confirm the field is initialized elsewhere
 * (e.g. in a base class), otherwise that call will throw a NullPointerException.
 */
@Override
public void onCreate(Bundle bundle) {
    super.onCreate(bundle);
    setContentView(R.layout.ocr_capture);

    // Set good defaults for capturing text.
    boolean autoFocus = true;
    boolean useFlash = false;

    // Check for the camera permission before accessing the camera. If the
    // permission is not granted yet, request permission.
    int rc = ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA);
    if (rc == PackageManager.PERMISSION_GRANTED) {
        createCameraSource(autoFocus, useFlash);
    } else {
        requestCameraPermission();
    }

    // Tap-to-speak and pinch-to-zoom gesture handling.
    gestureDetector = new GestureDetector(this, new CaptureGestureListener());
    scaleGestureDetector = new ScaleGestureDetector(this, new ScaleListener());

    Snackbar.make(mGraphicOverlay, "Tap to Speak. Pinch/Stretch to zoom", Snackbar.LENGTH_LONG).show();

    // Set up the Text To Speech engine; onInit runs asynchronously once ready.
    TextToSpeech.OnInitListener listener = new TextToSpeech.OnInitListener() {
        @Override
        public void onInit(final int status) {
            if (status == TextToSpeech.SUCCESS) {
                Log.d("TTS", "Text to speech engine started successfully.");
                tts.setLanguage(Locale.US);
            } else {
                Log.d("TTS", "Error starting the text to speech engine.");
            }
        }
    };
    tts = new TextToSpeech(this.getApplicationContext(), listener);
}
From source file:ocr.OcrCaptureActivity.java
/** * Initializes the UI and creates the detector pipeline. *///from w ww . j ava 2 s. c o m @Override public void onCreate(Bundle bundle) { super.onCreate(bundle); setContentView(R.layout.activity_ocr_capture); mPreview = (CameraSourcePreview) findViewById(R.id.preview); mGraphicOverlay = (GraphicOverlay<OcrGraphic>) findViewById(R.id.graphicOverlay); // Set good defaults for capturing text. boolean autoFocus = true; boolean useFlash = false; // Check for the camera permission before accessing the camera. If the // permission is not granted yet, request permission. int rc = ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA); if (rc == PackageManager.PERMISSION_GRANTED) { createCameraSource(autoFocus, useFlash); } else { requestCameraPermission(); } gestureDetector = new GestureDetector(this, new CaptureGestureListener()); scaleGestureDetector = new ScaleGestureDetector(this, new ScaleListener()); Snackbar.make(mGraphicOverlay, "Tap to Speak. Pinch/Stretch to zoom", Snackbar.LENGTH_LONG).show(); // TODO: Set up the Text To Speech engine. TextToSpeech.OnInitListener listener = new TextToSpeech.OnInitListener() { @Override public void onInit(final int status) { if (status == TextToSpeech.SUCCESS) { Log.d("TTS", "Text to speech engine started successfully."); tts.setLanguage(Locale.US); } else { Log.d("TTS", "Error starting the text to speech engine."); } } }; tts = new TextToSpeech(this.getApplicationContext(), listener); }
From source file:com.example.robert.bluetoothnew.BluetoothChatFragment.java
@Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setHasOptionsMenu(true);//from w w w . ja v a 2 s .c o m // Get local Bluetooth adapter mBluetoothAdapter = BluetoothAdapter.getDefaultAdapter(); initSingleTonTemp(); // If the adapter is null, then Bluetooth is not supported intiCallWeb(); toWebPosition(); if (mBluetoothAdapter == null) { FragmentActivity activity = getActivity(); Toast.makeText(activity, "Bluetooth is not available", Toast.LENGTH_LONG).show(); activity.finish(); } // No bluetooth service tts = new TextToSpeech(getActivity(), new TextToSpeech.OnInitListener() { @RequiresApi(api = Build.VERSION_CODES.LOLLIPOP) @Override public void onInit(int status) { Log.v("abasdasd", "status 123"); if (status == TextToSpeech.SUCCESS) { int result = tts.setLanguage(Locale.TAIWAN); if (result == TextToSpeech.LANG_MISSING_DATA || result == TextToSpeech.LANG_NOT_SUPPORTED) { Log.e("TTS", "This Language is not supported"); } else { } } else { Log.e("TTS", "Initilization Failed!"); } } }); }
From source file:argusui.com.argus.OcrCaptureActivity.java
/** * Initializes the UI and creates the detector pipeline. *//*from www .j a v a 2s.c o m*/ @Override public void onCreate(Bundle icicle) { super.onCreate(icicle); setContentView(R.layout.ocr_capture); mPreview = (CameraSourcePreview) findViewById(R.id.preview); mGraphicOverlay = (GraphicOverlay<OcrGraphic>) findViewById(R.id.graphicOverlay); // read parameters from the intent used to launch the activity. boolean autoFocus = getIntent().getBooleanExtra(AutoFocus, false); boolean useFlash = getIntent().getBooleanExtra(UseFlash, false); // Check for the camera permission before accessing the camera. If the // permission is not granted yet, request permission. int rc = ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA); if (rc == PackageManager.PERMISSION_GRANTED) { createCameraSource(autoFocus, useFlash); } else { requestCameraPermission(); } gestureDetector = new GestureDetector(this, new CaptureGestureListener()); scaleGestureDetector = new ScaleGestureDetector(this, new ScaleListener()); Snackbar.make(mGraphicOverlay, "Tap to capture. Pinch/Stretch to zoom", Snackbar.LENGTH_LONG).show(); // TODO: Set up the Text To Speech engine. TextToSpeech.OnInitListener listener = new TextToSpeech.OnInitListener() { @Override public void onInit(final int status) { if (status == TextToSpeech.SUCCESS) { Log.d("TTS", "Text to speech engine started successfully."); tts.setLanguage(Locale.US); } else { Log.d("TTS", "Error starting the text to speech engine."); } } }; tts = new TextToSpeech(this.getApplicationContext(), listener); }
From source file:spinc.spmmvp.google_vision.ocrRead_Complete.OcrCaptureActivity.java
/** * Initializes the UI and creates the detector pipeline. */// w ww . j av a 2 s .c o m @Override public void onCreate(Bundle bundle) { super.onCreate(bundle); setContentView(R.layout.ocr_capture_complete); mPreview = (CameraSourcePreview) findViewById(R.id.preview); mGraphicOverlay = (GraphicOverlay<OcrGraphic>) findViewById(R.id.graphicOverlay); // Set good defaults for capturing text. boolean autoFocus = true; boolean useFlash = false; // Check for the camera permission before accessing the camera. If the // permission is not granted yet, request permission. int rc = ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA); if (rc == PackageManager.PERMISSION_GRANTED) { createCameraSource(autoFocus, useFlash); } else { requestCameraPermission(); } gestureDetector = new GestureDetector(this, new CaptureGestureListener()); scaleGestureDetector = new ScaleGestureDetector(this, new ScaleListener()); Snackbar.make(mGraphicOverlay, "Tap to Speak. Pinch/Stretch to zoom", Snackbar.LENGTH_LONG).show(); // Set up the Text To Speech engine. TextToSpeech.OnInitListener listener = new TextToSpeech.OnInitListener() { @Override public void onInit(final int status) { if (status == TextToSpeech.SUCCESS) { Log.d("OnInitListener", "Text to speech engine started successfully."); tts.setLanguage(Locale.US); } else { Log.d("OnInitListener", "Error starting the text to speech engine."); } } }; tts = new TextToSpeech(this.getApplicationContext(), listener); }
From source file:com.projecttango.examples.java.pointcloud.PointCloudActivity.java
/**
 * Sets up the point-cloud UI, Tango helpers, the renderer, a TTS announcer,
 * and a display listener that re-applies rotation on configuration changes.
 */
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_point_cloud);

    // Bind the point-count / average-depth readouts and the Rajawali GL surface.
    mPointCountTextView = (TextView) findViewById(R.id.point_count_textview);
    mAverageZTextView = (TextView) findViewById(R.id.average_z_textview);
    mSurfaceView = (RajawaliSurfaceView) findViewById(R.id.gl_surface_view);

    mPointCloudManager = new TangoPointCloudManager();
    mTangoUx = setupTangoUxAndLayout();
    mRenderer = new PointCloudRajawaliRenderer(this);
    setupRenderer();

    /* Setup tts: announce readiness once the engine reports SUCCESS.
     * NOTE(review): speak(String, int, HashMap) is deprecated since API 21 —
     * consider speak(CharSequence, int, Bundle, String) if minSdk permits. */
    tts = new TextToSpeech(getApplicationContext(), new TextToSpeech.OnInitListener() {
        @Override
        public void onInit(int status) {
            if (status == TextToSpeech.SUCCESS) {
                tts.setLanguage(Locale.US);
                tts.speak("ICU helper initialized.", TextToSpeech.QUEUE_FLUSH, null);
            }
        }
    });

    // Re-apply the display rotation whenever the display configuration changes.
    DisplayManager displayManager = (DisplayManager) getSystemService(DISPLAY_SERVICE);
    if (displayManager != null) {
        displayManager.registerDisplayListener(new DisplayManager.DisplayListener() {
            @Override
            public void onDisplayAdded(int displayId) {
            }

            @Override
            public void onDisplayChanged(int displayId) {
                synchronized (this) {
                    setDisplayRotation();
                }
            }

            @Override
            public void onDisplayRemoved(int displayId) {
            }
        }, null);
    }
}
From source file:com.example.kathyxu.googlesheetsapi.controller.OcrCaptureActivity.java
/** * Initializes the UI and creates the detector pipeline. *///from w ww .j av a 2 s . c o m @Override public void onCreate(Bundle bundle) { super.onCreate(bundle); setContentView(R.layout.ocr_capture); mPreview = (CameraSourcePreview) findViewById(R.id.preview); mGraphicOverlay = (GraphicOverlay<OcrGraphic>) findViewById(R.id.graphicOverlay); // Set good defaults for capturing text. boolean autoFocus = true; boolean useFlash = false; // Check for the camera permission before accessing the camera. If the // permission is not granted yet, request permission. int rc = ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA); if (rc == PackageManager.PERMISSION_GRANTED) { createCameraSource(autoFocus, useFlash); } else { requestCameraPermission(); } gestureDetector = new GestureDetector(this, new CaptureGestureListener()); scaleGestureDetector = new ScaleGestureDetector(this, new ScaleListener()); Snackbar.make(mGraphicOverlay, "Tap each block to store the text. Press back when you are done", Snackbar.LENGTH_INDEFINITE).show(); // Set up the Text To Speech engine. TextToSpeech.OnInitListener listener = new TextToSpeech.OnInitListener() { @Override public void onInit(final int status) { if (status == TextToSpeech.SUCCESS) { Log.d("OnInitListener", "Text to speech engine started successfully."); tts.setLanguage(Locale.US); } else { Log.d("OnInitListener", "Error starting the text to speech engine."); } } }; tts = new TextToSpeech(this.getApplicationContext(), listener); }
From source file:com.hichinaschool.flashcards.anki.ReadText.java
public static void initializeTts(Context context) { mReviewer = context;/*from w w w . j av a2 s. c o m*/ mTts = new TextToSpeech(context, new TextToSpeech.OnInitListener() { @Override public void onInit(int status) { // TODO: check if properly initialized (does not work yet) if (status != TextToSpeech.SUCCESS) { int result = mTts.setLanguage(Locale.US); if (result == TextToSpeech.LANG_MISSING_DATA || result == TextToSpeech.LANG_NOT_SUPPORTED) { } else { Log.e(AnkiDroidApp.TAG, "TTS initialized and set to US"); } } else { Log.e(AnkiDroidApp.TAG, "Initialization of TTS failed"); } AnkiDroidApp.getCompat().setTtsOnUtteranceProgressListener(mTts); } }); mTtsParams = new HashMap<String, String>(); mTtsParams.put(TextToSpeech.Engine.KEY_PARAM_UTTERANCE_ID, "stringId"); }
From source file:net.bible.service.device.speak.TextToSpeechController.java
/**
 * TextToSpeech.OnInitListener callback, invoked once async engine init finishes.
 * Walks localePreferenceList in priority order and selects the first locale the
 * engine supports; on success it registers the utterance-completed listener,
 * starts speaking, and arms the stop-on-phone-call hook. Any failure along the
 * way records the locale as unsupported, shows an error, and shuts down.
 */
@Override
public void onInit(int status) {
    Log.d(TAG, "Tts initialised");
    // Tracks whether the full init chain succeeded; anything short of full
    // success falls through to shutdown() at the end.
    boolean isOk = false;
    // status can be either TextToSpeech.SUCCESS or TextToSpeech.ERROR.
    if (mTts != null && status == TextToSpeech.SUCCESS) {
        Log.d(TAG, "Tts initialisation succeeded");
        boolean localeOK = false;
        Locale locale = null;
        // Try each preferred locale in order until the engine accepts one.
        for (int i = 0; i < localePreferenceList.size() && !localeOK; i++) {
            locale = localePreferenceList.get(i);
            Log.d(TAG, "Checking for locale:" + locale);
            int result = mTts.setLanguage(locale);
            localeOK = ((result != TextToSpeech.LANG_MISSING_DATA)
                    && (result != TextToSpeech.LANG_NOT_SUPPORTED));
            if (localeOK) {
                Log.d(TAG, "Successful locale:" + locale);
                currentLocale = locale;
            }
        }

        if (!localeOK) {
            Log.e(TAG, "TTS missing or not supported");
            // Language data is missing or the language is not supported.
            // 'locale' here is the last candidate tried.
            ttsLanguageSupport.addUnsupportedLocale(locale);
            showError(R.string.tts_lang_not_available);
        } else {
            // The TTS engine has been successfully initialized.
            ttsLanguageSupport.addSupportedLocale(locale);
            int ok = mTts.setOnUtteranceCompletedListener(this);
            if (ok == TextToSpeech.ERROR) {
                Log.e(TAG, "Error registering onUtteranceCompletedListener");
            } else {
                // everything seems to have succeeded if we get here
                isOk = true;
                // say the text
                startSpeaking();
                // add event listener to stop on call
                stopIfPhoneCall();
            }
        }
    } else {
        Log.d(TAG, "Tts initialisation failed");
        // Initialization failed.
        showError(R.string.error_occurred);
    }

    // Release the engine unless every step above succeeded.
    if (!isOk) {
        shutdown();
    }
}
From source file:com.google.android.gms.samples.vision.ocrreader.OcrCaptureActivity.java
/** * Initializes the UI and creates the detector pipeline. *//*from w w w . j a va 2 s . com*/ @Override public void onCreate(Bundle bundle) { super.onCreate(bundle); setContentView(R.layout.ocr_capture); mPreview = (CameraSourcePreview) findViewById(R.id.preview); mGraphicOverlay = (GraphicOverlay<OcrGraphic>) findViewById(R.id.graphicOverlay); // Set good defaults for capturing text. boolean autoFocus = true; boolean useFlash = false; // Check for the camera permission before accessing the camera. If the // permission is not granted yet, request permission. int rc = ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA); if (rc == PackageManager.PERMISSION_GRANTED) { createCameraSource(autoFocus, useFlash); } else { requestCameraPermission(); } gestureDetector = new GestureDetector(this, new CaptureGestureListener()); scaleGestureDetector = new ScaleGestureDetector(this, new ScaleListener()); Snackbar.make(mGraphicOverlay, "Tap to Speak. Pinch/Stretch to zoom", Snackbar.LENGTH_LONG).show(); // Set up the Text To Speech engine. TextToSpeech.OnInitListener listener = new TextToSpeech.OnInitListener() { @Override public void onInit(final int status) { if (status == TextToSpeech.SUCCESS) { Log.d("OnInitListener", "Text to speech engine started successfully."); tts.setLanguage(Locale.US); } else { Log.d("OnInitListener", "Error starting the text to speech engine."); } } }; tts = new TextToSpeech(this.getApplicationContext(), listener); }