Example usage for android.speech.tts TextToSpeech TextToSpeech

Introduction

On this page you can find example usage for the android.speech.tts TextToSpeech(Context, OnInitListener) constructor.

Prototype

public TextToSpeech(Context context, OnInitListener listener) 

Document

The constructor for the TextToSpeech class, using the default TTS engine.
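
Every example below follows the same two-step pattern: construct the engine with an OnInitListener, then configure and use it only after onInit reports TextToSpeech.SUCCESS. Here is a minimal, self-contained sketch of that pattern (class name, field name, and utterance text are illustrative, not taken from any of the source files below); the engine should also be released with shutdown() when it is no longer needed:

import android.app.Activity;
import android.os.Bundle;
import android.speech.tts.TextToSpeech;

import java.util.Locale;

public class SpeechActivity extends Activity {

    private TextToSpeech tts;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        // The constructor starts connecting to the default TTS engine;
        // onInit is invoked asynchronously once initialization completes.
        tts = new TextToSpeech(getApplicationContext(), new TextToSpeech.OnInitListener() {
            @Override
            public void onInit(int status) {
                if (status == TextToSpeech.SUCCESS) {
                    // Configure and speak only after a successful init.
                    tts.setLanguage(Locale.US);
                    tts.speak("Text to speech is ready.", TextToSpeech.QUEUE_FLUSH, null);
                }
            }
        });
    }

    @Override
    protected void onDestroy() {
        // Release the engine's resources along with the activity.
        if (tts != null) {
            tts.shutdown();
        }
        super.onDestroy();
    }
}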

Usage

From source file: argusui.com.argus.OcrCaptureActivity.java

/**
 * Initializes the UI and creates the detector pipeline.
 */
@Override
public void onCreate(Bundle icicle) {
    super.onCreate(icicle);
    setContentView(R.layout.ocr_capture);

    mPreview = (CameraSourcePreview) findViewById(R.id.preview);
    mGraphicOverlay = (GraphicOverlay<OcrGraphic>) findViewById(R.id.graphicOverlay);

    // read parameters from the intent used to launch the activity.
    boolean autoFocus = getIntent().getBooleanExtra(AutoFocus, false);
    boolean useFlash = getIntent().getBooleanExtra(UseFlash, false);

    // Check for the camera permission before accessing the camera.  If the
    // permission is not granted yet, request permission.
    int rc = ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA);
    if (rc == PackageManager.PERMISSION_GRANTED) {
        createCameraSource(autoFocus, useFlash);
    } else {
        requestCameraPermission();
    }

    gestureDetector = new GestureDetector(this, new CaptureGestureListener());
    scaleGestureDetector = new ScaleGestureDetector(this, new ScaleListener());

    Snackbar.make(mGraphicOverlay, "Tap to capture. Pinch/Stretch to zoom", Snackbar.LENGTH_LONG).show();

    // Set up the Text To Speech engine.
    TextToSpeech.OnInitListener listener = new TextToSpeech.OnInitListener() {
        @Override
        public void onInit(final int status) {
            if (status == TextToSpeech.SUCCESS) {
                Log.d("TTS", "Text to speech engine started successfully.");
                tts.setLanguage(Locale.US);
            } else {
                Log.d("TTS", "Error starting the text to speech engine.");
            }
        }
    };
    tts = new TextToSpeech(this.getApplicationContext(), listener);
}

From source file: com.example.michel.facetrack.FaceTrackerActivity.java

/**
 * Initializes the UI and initiates the creation of a face detector.
 */
@Override
public void onCreate(Bundle icicle) {
    super.onCreate(icicle);
    setContentView(R.layout.main);

    mPreview = (CameraSourcePreview) findViewById(R.id.preview);
    mGraphicOverlay = (GraphicOverlay) findViewById(R.id.faceOverlay);

    // Check for the camera permission before accessing the camera.  If the
    // permission is not granted yet, request permission.
    int rc = ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA);
    if (rc == PackageManager.PERMISSION_GRANTED) {
        createCameraSource();
    } else {
        requestCameraPermission();
    }

    mTTS = new TextToSpeech(getApplicationContext(), new TextToSpeech.OnInitListener() {
        @Override
        public void onInit(int status) {
            if (status != TextToSpeech.ERROR) {
                mTTS.setLanguage(Locale.CANADA);
                //                    mTTS.setOnUtteranceCompletedListener(new TextToSpeech.OnUtteranceCompletedListener() {
                //                        @Override
                //                        public void onUtteranceCompleted(String utteranceId) {
                //                            startSpeechToText();
                //                        }
                //                    });
                String toSpeak = "Blind spot opened. What do you want?";
                mTTS.speak(toSpeak, TextToSpeech.QUEUE_FLUSH, null);
                waitStartSTT(8000);

            }

        }
    });

    /*mPreview.setOnClickListener(new View.OnClickListener() {
    @Override
    public void onClick(View v) {
        mCameraSource.takePicture(new CameraSource.ShutterCallback() {
            @Override
            public void onShutter() {
            
            }
        }, new CameraSource.PictureCallback() {
            @Override
            public void onPictureTaken(byte[] bytes) {
                String file_timestamp = Long.toString(System.currentTimeMillis());
                Log.e("File: ", Environment.getExternalStorageDirectory() + "/" + file_timestamp + ".jpg");
                final File file = new File(Environment.getExternalStorageDirectory() + "/" + file_timestamp + ".jpg");
                try {
                    save(bytes, file);
            
                    String toSpeak = "Image saved";
                    mTTS.speak(toSpeak, TextToSpeech.QUEUE_FLUSH, null);
                    Toast.makeText(FaceTrackerActivity.this, "Saved to " + Environment.getExternalStorageDirectory() + "/" + file_timestamp + ".jpg", Toast.LENGTH_SHORT).show();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
            
            private void save(byte[] bytes, final File file) throws IOException {
                OutputStream output = null;
                try {
                    output = new FileOutputStream(file);
                    output.write(bytes);
                } finally {
                    if (null != output) {
                        output.close();
                    }
                }
                Float happiness = sendPhotoToAzure(file); // Send the photo blob to Azure Storage
                String photo_url = "https://blindspot.blob.core.windows.net/image/" + file.getName();
                Log.e("Photo_url : ", photo_url);
                // Float happiness = getHappiness(photo_url); // Call Microsoft's Emotion API with the photo URL
                Log.e("Happiness: ", Float.toString(happiness));
            }
        });
    }
    });*/

    lastFaceTime = System.currentTimeMillis();

}
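
The setOnUtteranceCompletedListener call commented out above was deprecated in API level 15. A minimal sketch of the modern replacement, UtteranceProgressListener (import android.speech.tts.UtteranceProgressListener), assuming the same mTTS field and startSpeechToText() method as the example; the utterance ID string is illustrative, and on API 21+ an ID must be passed to speak() for the callbacks to fire:

mTTS.setOnUtteranceProgressListener(new UtteranceProgressListener() {
    @Override
    public void onStart(String utteranceId) {
    }

    @Override
    public void onDone(String utteranceId) {
        // Fires when the utterance finishes playing. Note this callback
        // runs on a background thread; post to the main thread if needed.
        startSpeechToText();
    }

    @Override
    public void onError(String utteranceId) {
    }
});
mTTS.speak("Blind spot opened. What do you want?", TextToSpeech.QUEUE_FLUSH, null, "blindspot_greeting");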

From source file: spinc.spmmvp.google_vision.ocrRead_Complete.OcrCaptureActivity.java

/**
 * Initializes the UI and creates the detector pipeline.
 */
@Override
public void onCreate(Bundle bundle) {
    super.onCreate(bundle);
    setContentView(R.layout.ocr_capture_complete);

    mPreview = (CameraSourcePreview) findViewById(R.id.preview);
    mGraphicOverlay = (GraphicOverlay<OcrGraphic>) findViewById(R.id.graphicOverlay);

    // Set good defaults for capturing text.
    boolean autoFocus = true;
    boolean useFlash = false;

    // Check for the camera permission before accessing the camera.  If the
    // permission is not granted yet, request permission.
    int rc = ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA);
    if (rc == PackageManager.PERMISSION_GRANTED) {
        createCameraSource(autoFocus, useFlash);
    } else {
        requestCameraPermission();
    }

    gestureDetector = new GestureDetector(this, new CaptureGestureListener());
    scaleGestureDetector = new ScaleGestureDetector(this, new ScaleListener());

    Snackbar.make(mGraphicOverlay, "Tap to Speak. Pinch/Stretch to zoom", Snackbar.LENGTH_LONG).show();

    // Set up the Text To Speech engine.
    TextToSpeech.OnInitListener listener = new TextToSpeech.OnInitListener() {
        @Override
        public void onInit(final int status) {
            if (status == TextToSpeech.SUCCESS) {
                Log.d("OnInitListener", "Text to speech engine started successfully.");
                tts.setLanguage(Locale.US);
            } else {
                Log.d("OnInitListener", "Error starting the text to speech engine.");
            }
        }
    };
    tts = new TextToSpeech(this.getApplicationContext(), listener);
}

From source file: com.example.kathyxu.googlesheetsapi.controller.OcrCaptureActivity.java

/**
 * Initializes the UI and creates the detector pipeline.
 */
@Override
public void onCreate(Bundle bundle) {
    super.onCreate(bundle);
    setContentView(R.layout.ocr_capture);

    mPreview = (CameraSourcePreview) findViewById(R.id.preview);
    mGraphicOverlay = (GraphicOverlay<OcrGraphic>) findViewById(R.id.graphicOverlay);

    // Set good defaults for capturing text.
    boolean autoFocus = true;
    boolean useFlash = false;

    // Check for the camera permission before accessing the camera.  If the
    // permission is not granted yet, request permission.
    int rc = ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA);
    if (rc == PackageManager.PERMISSION_GRANTED) {
        createCameraSource(autoFocus, useFlash);
    } else {
        requestCameraPermission();
    }

    gestureDetector = new GestureDetector(this, new CaptureGestureListener());
    scaleGestureDetector = new ScaleGestureDetector(this, new ScaleListener());

    Snackbar.make(mGraphicOverlay, "Tap each block to store the text. Press back when you are done",
            Snackbar.LENGTH_INDEFINITE).show();

    // Set up the Text To Speech engine.
    TextToSpeech.OnInitListener listener = new TextToSpeech.OnInitListener() {
        @Override
        public void onInit(final int status) {
            if (status == TextToSpeech.SUCCESS) {
                Log.d("OnInitListener", "Text to speech engine started successfully.");
                tts.setLanguage(Locale.US);
            } else {
                Log.d("OnInitListener", "Error starting the text to speech engine.");
            }
        }
    };
    tts = new TextToSpeech(this.getApplicationContext(), listener);
}

From source file: com.google.android.gms.samples.vision.ocrreader.OcrCaptureActivity.java

/**
 * Initializes the UI and creates the detector pipeline.
 */
@Override
public void onCreate(Bundle bundle) {
    super.onCreate(bundle);
    setContentView(R.layout.ocr_capture);

    mPreview = (CameraSourcePreview) findViewById(R.id.preview);
    mGraphicOverlay = (GraphicOverlay<OcrGraphic>) findViewById(R.id.graphicOverlay);

    // Set good defaults for capturing text.
    boolean autoFocus = true;
    boolean useFlash = false;

    // Check for the camera permission before accessing the camera.  If the
    // permission is not granted yet, request permission.
    int rc = ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA);
    if (rc == PackageManager.PERMISSION_GRANTED) {
        createCameraSource(autoFocus, useFlash);
    } else {
        requestCameraPermission();
    }

    gestureDetector = new GestureDetector(this, new CaptureGestureListener());
    scaleGestureDetector = new ScaleGestureDetector(this, new ScaleListener());

    Snackbar.make(mGraphicOverlay, "Tap to Speak. Pinch/Stretch to zoom", Snackbar.LENGTH_LONG).show();

    // Set up the Text To Speech engine.
    TextToSpeech.OnInitListener listener = new TextToSpeech.OnInitListener() {
        @Override
        public void onInit(final int status) {
            if (status == TextToSpeech.SUCCESS) {
                Log.d("OnInitListener", "Text to speech engine started successfully.");
                tts.setLanguage(Locale.US);
            } else {
                Log.d("OnInitListener", "Error starting the text to speech engine.");
            }
        }
    };
    tts = new TextToSpeech(this.getApplicationContext(), listener);
}

From source file: com.perchtech.humraz.blind.libraryact.java

/**
 * Initializes the UI and creates the detector pipeline.
 */
@Override
public void onCreate(Bundle bundle) {
    super.onCreate(bundle);
    mContext = getApplicationContext();
    setContentView(R.layout.ocr_capture);

    mPreview = (CameraSourcePreview) findViewById(R.id.preview);
    mGraphicOverlay = (GraphicOverlay<OcrGraphic>) findViewById(R.id.graphicOverlay);

    // Set good defaults for capturing text.
    boolean autoFocus = true;
    boolean useFlash = false;

    // Check for the camera permission before accessing the camera.  If the
    // permission is not granted yet, request permission.
    int rc = ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA);
    if (rc == PackageManager.PERMISSION_GRANTED) {
        createCameraSource(autoFocus, useFlash);
    } else {
        requestCameraPermission();
    }

    gestureDetector = new GestureDetector(this, new CaptureGestureListener());
    scaleGestureDetector = new ScaleGestureDetector(this, new ScaleListener());

    Snackbar.make(mGraphicOverlay, "Tap to Speak. Pinch/Stretch to zoom", Snackbar.LENGTH_LONG).show();

    // Set up the Text To Speech engine.
    TextToSpeech.OnInitListener listener = new TextToSpeech.OnInitListener() {
        @Override
        public void onInit(final int status) {
            if (status == TextToSpeech.SUCCESS) {
                Log.d("OnInitListener", "Text to speech engine started successfully.");
                tts.setLanguage(Locale.US);
            } else {
                Log.d("OnInitListener", "Error starting the text to speech engine.");
            }
        }
    };
    tts = new TextToSpeech(this.getApplicationContext(), listener);
    final Handler handler = new Handler();
    handler.postDelayed(new Runnable() {
        @Override
        public void run() {
            speakOut("library mode");
        }
    }, 500);

}

From source file: com.microsoft.AzureIntelligentServicesExample.MainActivity.java

@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);

    this._logText = (EditText) findViewById(R.id.editText1);
    this._startButton = (Button) findViewById(R.id.button1);

    tts = new TextToSpeech(getApplicationContext(), new TextToSpeech.OnInitListener() {
        @Override
        public void onInit(int status) {
            if (status == TextToSpeech.SUCCESS) {

                int result = tts.setLanguage(Locale.US);
                if (result == TextToSpeech.LANG_MISSING_DATA || result == TextToSpeech.LANG_NOT_SUPPORTED) {
                    Log.e("TTS", "This Language is not supported");
                    Intent installIntent = new Intent();
                    installIntent.setAction(TextToSpeech.Engine.ACTION_INSTALL_TTS_DATA);
                    startActivity(installIntent);
                }
            } else {
                Log.e("TTS", "Initialization Failed!");
            }
        }
    });

    ImageView sendBtn = (ImageView) findViewById(R.id.sendBtn);
    final EditText message = (EditText) findViewById(R.id.message);

    sendBtn.setOnClickListener(new OnClickListener() {
        @Override
        public void onClick(View arg0) {

            _logText.append("ME : " + message.getText().toString() + "\n");
            speakOut(message.getText().toString());
            message.setText("");
        }
    });

    if (getString(R.string.primaryKey).startsWith("Please")) {
        new AlertDialog.Builder(this).setTitle(getString(R.string.add_subscription_key_tip_title))
                .setMessage(getString(R.string.add_subscription_key_tip)).setCancelable(false).show();
    }

    // setup the buttons
    final MainActivity This = this;
    this._startButton.setOnClickListener(new OnClickListener() {
        @Override
        public void onClick(View arg0) {
            This.StartButton_Click(arg0);
        }
    });

}
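
The speakOut(...) helper called above (and in the libraryact example earlier) is not part of the excerpt. A plausible minimal implementation, hypothetical and for illustration only, simply forwards to the engine:

// Hypothetical helper; the project's actual implementation is not shown.
private void speakOut(String text) {
    if (tts != null) {
        tts.speak(text, TextToSpeech.QUEUE_FLUSH, null);
    }
}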

From source file: com.app.azza.ocr.OcrCaptureActivity.java

/**
 * Initializes the UI and creates the detector pipeline.
 */
@Override
public void onCreate(Bundle bundle) {
    super.onCreate(bundle);
    setContentView(R.layout.ocr_capture);

    mPreview = (CameraSourcePreview) findViewById(R.id.preview);
    mGraphicOverlay = (GraphicOverlay<OcrGraphic>) findViewById(R.id.graphicOverlay);

    // Set good defaults for capturing text.
    boolean autoFocus = true;
    boolean useFlash = false;

    // Check for the camera permission before accessing the camera.  If the
    // permission is not granted yet, request permission.
    int rc = ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA);
    if (rc == PackageManager.PERMISSION_GRANTED) {
        createCameraSource(autoFocus, useFlash);
    } else {
        requestCameraPermission();
    }

    gestureDetector = new GestureDetector(this, new CaptureGestureListener());
    scaleGestureDetector = new ScaleGestureDetector(this, new ScaleListener());

    Snackbar.make(mGraphicOverlay, "Tap to Speak. Pinch/Stretch to zoom", Snackbar.LENGTH_LONG).show();

    // Set up the Text To Speech engine.
    TextToSpeech.OnInitListener listener = new TextToSpeech.OnInitListener() {
        @Override
        public void onInit(final int status) {
            if (status == TextToSpeech.SUCCESS) {
                Log.d("OnInitListener", "Text to speech engine started successfully.");
                tts.setLanguage(Locale.US);
            } else {
                Log.d("OnInitListener", "Error starting the text to speech engine.");
            }
        }
    };
    tts = new TextToSpeech(this.getApplicationContext(), listener);

}

From source file: at.the.gogo.windig.activities.WindigActivity.java

private void checkTTS() {
    // Fire off an intent to check if a TTS engine is installed
    // final Intent checkIntent = new Intent();
    // checkIntent.setAction(TextToSpeech.Engine.ACTION_CHECK_TTS_DATA);
    // startActivityForResult(checkIntent,
    // WindigActivity.MY_TTS_CHECK_CODE);

    // directly instantiate TTS
    if (wantToUseTTS) {
        CoreInfoHolder.getInstance().setTts(new TextToSpeech(this, this));
    }

}
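
Spelled out, the intent-based availability check referenced in the comments above looks roughly like the sketch below. It is reconstructed from those comments and the standard TextToSpeech.Engine constants, not taken from the project's actual code:

// Ask the platform whether TTS voice data is installed.
private void checkTtsData() {
    Intent checkIntent = new Intent();
    checkIntent.setAction(TextToSpeech.Engine.ACTION_CHECK_TTS_DATA);
    startActivityForResult(checkIntent, WindigActivity.MY_TTS_CHECK_CODE);
}

@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
    super.onActivityResult(requestCode, resultCode, data);
    if (requestCode == WindigActivity.MY_TTS_CHECK_CODE) {
        if (resultCode == TextToSpeech.Engine.CHECK_VOICE_DATA_PASS) {
            // Voice data is present; safe to create the engine directly.
            CoreInfoHolder.getInstance().setTts(new TextToSpeech(this, this));
        } else {
            // Prompt the user to install the missing voice data.
            Intent installIntent = new Intent();
            installIntent.setAction(TextToSpeech.Engine.ACTION_INSTALL_TTS_DATA);
            startActivity(installIntent);
        }
    }
}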

From source file: com.xengar.android.englishverbs.ui.DetailsActivity.java

@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_details);
    Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar);
    setSupportActionBar(toolbar);
    getSupportActionBar().setDisplayHomeAsUpEnabled(true);

    Bundle bundle = getIntent().getExtras();
    demo = bundle.getBoolean(DEMO_MODE, false);
    verbID = bundle.getLong(VERB_ID, -1);
    String title = bundle.getString(VERB_NAME);
    getSupportActionBar().setTitle(title);

    // Invalidate the options menu, so the "Edit" menu option can be hidden.
    invalidateOptionsMenu();

    //Text
    infinitive = (TextView) findViewById(R.id.infinitive);
    simplePast = (TextView) findViewById(R.id.simple_past);
    pastParticiple = (TextView) findViewById(R.id.past_participle);
    pInfinitive = (TextView) findViewById(R.id.phonetic_infinitive);
    pSimplePast = (TextView) findViewById(R.id.phonetic_simple_past);
    pPastParticiple = (TextView) findViewById(R.id.phonetic_past_participle);
    definition = (TextView) findViewById(R.id.definition);
    translation = (TextView) findViewById(R.id.translation);
    sample1 = (TextView) findViewById(R.id.sample1);
    sample2 = (TextView) findViewById(R.id.sample2);
    sample3 = (TextView) findViewById(R.id.sample3);

    // define click listeners
    LinearLayout header = (LinearLayout) findViewById(R.id.play_infinitive);
    header.setOnClickListener(this);
    header = (LinearLayout) findViewById(R.id.play_simple_past);
    header.setOnClickListener(this);
    header = (LinearLayout) findViewById(R.id.play_past_participle);
    header.setOnClickListener(this);

    // initialize Speaker
    tts = new TextToSpeech(this, new TextToSpeech.OnInitListener() {
        @Override
        public void onInit(int status) {
            if (status == TextToSpeech.SUCCESS) {
                int result = tts.setLanguage(Locale.US);
                if (result == TextToSpeech.LANG_MISSING_DATA || result == TextToSpeech.LANG_NOT_SUPPORTED) {
                    if (LOG) {
                        Log.e("TTS", "This Language is not supported");
                    }
                }
            } else {
                if (LOG) {
                    Log.e("TTS", "Initialization Failed!");
                }
            }
        }
    });

    // Initialize a loader to read the verb data from the database and display it
    getLoaderManager().initLoader(EXISTING_VERB_LOADER, null, this);
    showFavoriteButtons();

    // Obtain the FirebaseAnalytics instance.
    mFirebaseAnalytics = FirebaseAnalytics.getInstance(this);
    ActivityUtils.firebaseAnalyticsLogEventSelectContent(mFirebaseAnalytics, PAGE_VERB_DETAILS,
            PAGE_VERB_DETAILS, TYPE_PAGE);

    // create AdMob banner
    listener = new LogAdListener(mFirebaseAnalytics, DETAILS_ACTIVITY);
    mAdView = ActivityUtils.createAdMobBanner(this, listener);

    if (demo) {
        defineDemoMode();
    }
}