Example usage for android.content Intent ACTION_DEVICE_STORAGE_LOW

Introduction

This page collects example usages of the android.content.Intent constant ACTION_DEVICE_STORAGE_LOW.

Prototype

String ACTION_DEVICE_STORAGE_LOW

The constant's value is "android.intent.action.DEVICE_STORAGE_LOW".

Document

Broadcast Action: a sticky broadcast indicating a low storage space condition on the device.

This is a protected intent that can only be sent by the system. Note that this constant was deprecated in API level 26: for apps targeting Android O or above, the broadcast is no longer delivered to receivers declared in the manifest, and apps are encouraged to rely instead on the improved getCacheDir() behavior, which lets the system free cache storage automatically when needed.
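
Because the broadcast is sticky, a caller does not need a long-lived receiver just to test the current state: passing null to registerReceiver() returns the sticky Intent while the condition holds, and null otherwise. That is exactly the check every example below performs. The following minimal sketch, with the illustrative class name StorageWatcher (not taken from any of the sources), factors out that check and also shows a context-registered receiver that observes the condition starting and clearing via the companion ACTION_DEVICE_STORAGE_OK:

import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.util.Log;

// Minimal sketch of the low-storage check used in the examples below.
// StorageWatcher is an illustrative name, not part of any quoted source.
public class StorageWatcher {
    private static final String TAG = "StorageWatcher";

    // Peek at the sticky broadcast: a non-null result means storage is currently low.
    public static boolean isStorageLow(Context context) {
        IntentFilter lowStorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        return context.registerReceiver(null, lowStorageFilter) != null;
    }

    // Register a receiver to observe the condition starting (STORAGE_LOW) and
    // clearing (STORAGE_OK); callers must call unregisterReceiver() when done.
    public static BroadcastReceiver watch(Context context) {
        BroadcastReceiver receiver = new BroadcastReceiver() {
            @Override
            public void onReceive(Context ctx, Intent intent) {
                if (Intent.ACTION_DEVICE_STORAGE_LOW.equals(intent.getAction())) {
                    Log.w(TAG, "Device storage is low");
                } else if (Intent.ACTION_DEVICE_STORAGE_OK.equals(intent.getAction())) {
                    Log.i(TAG, "Device storage is OK again");
                }
            }
        };
        IntentFilter filter = new IntentFilter();
        filter.addAction(Intent.ACTION_DEVICE_STORAGE_LOW);
        filter.addAction(Intent.ACTION_DEVICE_STORAGE_OK);
        context.registerReceiver(receiver, filter);
        return receiver;
    }
}

Since both actions are deprecated from API level 26, this is best read as the legacy pattern that the Mobile Vision samples below rely on.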

Usage

From source file:org.photosynq.android.photosynq.barcode.BarcodeCaptureActivity.java

/**
 * Creates and starts the camera.  Note that this uses a higher resolution in comparison
 * to other detection examples to enable the barcode detector to detect small barcodes
 * at long distances.
 *
 * Suppressing InlinedApi since there is a check that the minimum version is met before using
 * the constant.
 */
@SuppressLint("InlinedApi")
private void createCameraSource(boolean autoFocus, boolean useFlash) {
    Context context = getApplicationContext();

    // A barcode detector is created to track barcodes.  An associated multi-processor instance
    // is set to receive the barcode detection results, track the barcodes, and maintain
    // graphics for each barcode on screen.  The factory is used by the multi-processor to
    // create a separate tracker instance for each barcode.
    BarcodeDetector barcodeDetector = new BarcodeDetector.Builder(context).build();
    BarcodeTrackerFactory barcodeFactory = new BarcodeTrackerFactory(mGraphicOverlay, mTrackerCallback);
    barcodeDetector.setProcessor(new MultiProcessor.Builder<>(barcodeFactory).build());

    if (!barcodeDetector.isOperational()) {
        // Note: The first time that an app using the barcode or face API is installed on a
        // device, GMS will download native libraries to the device in order to do detection.
        // Usually this completes before the app is run for the first time.  But if that
        // download has not yet completed, then the above call will not detect any barcodes
        // and/or faces.
        //
        // isOperational() can be used to check if the required native libraries are currently
        // available.  The detectors will automatically become operational once the library
        // downloads complete on device.
        Log.w(TAG, "Detector dependencies are not yet available.");

        // Check for low storage.  If there is low storage, the native library will not be
        // downloaded, so detection will not become operational.
        IntentFilter lowstorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowstorageFilter) != null;

        if (hasLowStorage) {
            Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
            Log.w(TAG, getString(R.string.low_storage_error));
        }
    }

    // Creates and starts the camera.  Note that this uses a higher resolution in comparison
    // to other detection examples to enable the barcode detector to detect small barcodes
    // at long distances.
    CameraSource.Builder builder = new CameraSource.Builder(getApplicationContext(), barcodeDetector)
            .setFacing(CameraSource.CAMERA_FACING_BACK).setRequestedPreviewSize(1600, 1024)
            .setRequestedFps(15.0f);

    // make sure that auto focus is an available option
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) {
        builder = builder.setFocusMode(autoFocus ? Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE : null);
    }
    mCameraSource = builder.setFlashMode(useFlash ? Camera.Parameters.FLASH_MODE_TORCH : null).build();

}

From source file:com.nice295.fridgeplease.OcrCaptureActivity.java

/**
 * Creates and starts the camera.  Note that this uses a higher resolution in comparison
 * to other detection examples to enable the ocr detector to detect small text samples
 * at long distances.
 * <p>
 * Suppressing InlinedApi since there is a check that the minimum version is met before using
 * the constant.
 */
@SuppressLint("InlinedApi")
private void createCameraSource(boolean autoFocus, boolean useFlash) {
    Context context = getApplicationContext();

    // A text recognizer is created to find text.  An associated processor instance
    // is set to receive the text recognition results and display graphics for each text block
    // on screen.
    TextRecognizer textRecognizer = new TextRecognizer.Builder(context).build();
    // Load the persisted item map and hand it to the OCR processor along with this activity.
    mItems = Paper.book().read("items", new HashMap<String, String>());
    textRecognizer.setProcessor(new OcrDetectorProcessor(mGraphicOverlay, mItems, this));

    if (!textRecognizer.isOperational()) {
        // Note: The first time that an app using a Vision API is installed on a
        // device, GMS will download native libraries to the device in order to do detection.
        // Usually this completes before the app is run for the first time.  But if that
        // download has not yet completed, then the above call will not detect any text,
        // barcodes, or faces.
        //
        // isOperational() can be used to check if the required native libraries are currently
        // available.  The detectors will automatically become operational once the library
        // downloads complete on device.
        Log.w(TAG, "Detector dependencies are not yet available.");

        // Check for low storage.  If there is low storage, the native library will not be
        // downloaded, so detection will not become operational.
        IntentFilter lowstorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowstorageFilter) != null;

        if (hasLowStorage) {
            Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
            Log.w(TAG, getString(R.string.low_storage_error));
        }
    }

    // Creates and starts the camera.  Note that this uses a higher resolution in comparison
    // to other detection examples to enable the text recognizer to detect small pieces of text.
    mCameraSource = new CameraSource.Builder(getApplicationContext(), textRecognizer)
            .setFacing(CameraSource.CAMERA_FACING_BACK).setRequestedPreviewSize(1280, 1024)
            .setRequestedFps(2.0f).setFlashMode(useFlash ? Camera.Parameters.FLASH_MODE_TORCH : null)
            .setFocusMode(autoFocus ? Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE : null).build();
}

From source file:com.masseyhacks.sjam.cheqout.ScannerActivity.java

/**
 * Creates and starts the camera.  Note that this uses a higher resolution in comparison
 * to other detection examples to enable the barcode detector to detect small barcodes
 * at long distances.
 *
 * Suppressing InlinedApi since there is a check that the minimum version is met before using
 * the constant.
 */
@SuppressLint("InlinedApi")
private void createCameraSource(boolean autoFocus, boolean useFlash) {
    Context context = getApplicationContext();

    // A barcode detector is created to track barcodes.  An associated multi-processor instance
    // is set to receive the barcode detection results, track the barcodes, and maintain
    // graphics for each barcode on screen.  The factory is used by the multi-processor to
    // create a separate tracker instance for each barcode.
    BarcodeDetector barcodeDetector = new BarcodeDetector.Builder(context).build();
    BarcodeTrackerFactory barcodeFactory = new BarcodeTrackerFactory(mGraphicOverlay, this);
    barcodeDetector.setProcessor(new MultiProcessor.Builder<>(barcodeFactory).build());

    if (!barcodeDetector.isOperational()) {
        // Note: The first time that an app using the barcode or face API is installed on a
        // device, GMS will download native libraries to the device in order to do detection.
        // Usually this completes before the app is run for the first time.  But if that
        // download has not yet completed, then the above call will not detect any barcodes
        // and/or faces.
        //
        // isOperational() can be used to check if the required native libraries are currently
        // available.  The detectors will automatically become operational once the library
        // downloads complete on device.
        Log.w(TAG, "Detector dependencies are not yet available.");

        // Check for low storage.  If there is low storage, the native library will not be
        // downloaded, so detection will not become operational.
        IntentFilter lowstorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowstorageFilter) != null;

        if (hasLowStorage) {
            Toast.makeText(this, "Get a bigger phone", Toast.LENGTH_LONG).show();
            Log.w(TAG, "Get a bigger phone");
        }
    }

    // Creates and starts the camera.  Note that this uses a higher resolution in comparison
    // to other detection examples to enable the barcode detector to detect small barcodes
    // at long distances.
    CameraSource.Builder builder = new CameraSource.Builder(getApplicationContext(), barcodeDetector)
            .setFacing(CameraSource.CAMERA_FACING_BACK).setRequestedPreviewSize(1600, 1600)
            .setRequestedFps(30.0f);

    // make sure that auto focus is an available option
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) {
        builder = builder.setFocusMode(autoFocus ? Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE : null);
    }

    mCameraSource = builder.setFlashMode(useFlash ? Camera.Parameters.FLASH_MODE_RED_EYE : null).build();
}

From source file:divya.myvision.TessActivity.java

/**
 *
 * Suppressing InlinedApi since there is a check that the minimum version is met before using
 * the constant.
 */
@SuppressLint("InlinedApi")
private void createCameraSource(boolean autoFocus, boolean useFlash, String fps) {
    Context context = getApplicationContext();

    // A text recognizer is created to find text.  An associated processor instance
    // is set to receive the text recognition results and display graphics for each text block
    // on screen.
    TextRecognizer textRecognizer = new TextRecognizer.Builder(context).build();
    textRecognizer.setProcessor(new TessDetector(mGraphicOverlay));

    if (!textRecognizer.isOperational()) {

        Log.w(TAG, "Detector dependencies are not yet available.");

        IntentFilter lowstorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowstorageFilter) != null;

        if (hasLowStorage) {
            Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
            Log.w(TAG, getString(R.string.low_storage_error));
        }
    }

    // Creates and starts the camera.

    mCameraSource = new CameraSource.Builder(getApplicationContext(), textRecognizer)
            .setFacing(CameraSource.CAMERA_FACING_BACK).setRequestedPreviewSize(1280, 1024)
            .setRequestedFps(Float.parseFloat(fps))
            .setFlashMode(useFlash ? Camera.Parameters.FLASH_MODE_TORCH : null)
            .setFocusMode(autoFocus ? Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE : null).build();
}

From source file:br.com.brolam.cloudvision.ui.NoteVisionActivity.java

/**
 * Creates and starts the camera.  Note that this uses a higher resolution in comparison
 * to other detection examples to enable the ocr detector to detect small text samples
 * at long distances.
 *
 * Suppressing InlinedApi since there is a check that the minimum version is met before using
 * the constant.
 */
@SuppressLint("InlinedApi")
private void createCameraSource() {
    Context context = getApplicationContext();

    // A text recognizer is created to find text.  An associated processor instance
    // is set to receive the text recognition results and display graphics for each text block
    // on screen.
    TextRecognizer textRecognizer = new TextRecognizer.Builder(context).build();
    textRecognizer.setProcessor(new OcrDetectorProcessor(mGraphicOverlay));

    if (!textRecognizer.isOperational()) {
        // Note: The first time that an app using a Vision API is installed on a
        // device, GMS will download native libraries to the device in order to do detection.
        // Usually this completes before the app is run for the first time.  But if that
        // download has not yet completed, then the above call will not detect any text,
        // barcodes, or faces.
        //
        // isOperational() can be used to check if the required native libraries are currently
        // available.  The detectors will automatically become operational once the library
        // downloads complete on device.
        Log.w(TAG, "Detector dependencies are not yet available.");

        // Check for low storage.  If there is low storage, the native library will not be
        // downloaded, so detection will not become operational.
        IntentFilter lowstorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowstorageFilter) != null;

        if (hasLowStorage) {
            Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
            Log.w(TAG, getString(R.string.low_storage_error));
        }
    }

    // Creates and starts the camera.  Note that this uses a higher resolution in comparison
    // to other detection examples to enable the text recognizer to detect small pieces of text.
    mCameraSource = new CameraSource.Builder(getApplicationContext(), textRecognizer)
            .setFacing(CameraSource.CAMERA_FACING_BACK).setRequestedPreviewSize(1280, 1024)
            .setRequestedFps(2.0f)
            // This variant takes no useFlash parameter, so the flash stays off.
            .setFlashMode(null)
            .setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE).build();
}

From source file:com.stemsc.bt.BarcodeCaptureActivity.java

/**
 * Creates and starts the camera.  Note that this uses a higher resolution in comparison
 * to other detection examples to enable the barcode detector to detect small barcodes
 * at long distances.
 * <p>
 * Suppressing InlinedApi since there is a check that the minimum version is met before using
 * the constant.
 */
@SuppressLint("InlinedApi")
private void createCameraSource(boolean autoFocus, boolean useFlash) {
    Context context = getApplicationContext();

    // A barcode detector is created to track barcodes.  An associated multi-processor instance
    // is set to receive the barcode detection results, track the barcodes, and maintain
    // graphics for each barcode on screen.  The factory is used by the multi-processor to
    // create a separate tracker instance for each barcode.
    BarcodeDetector barcodeDetector = new BarcodeDetector.Builder(context)
            .setBarcodeFormats(Barcode.EAN_13 | Barcode.QR_CODE) // limit detection to EAN-13 and QR codes
            .build();
    BarcodeTrackerFactory barcodeFactory = new BarcodeTrackerFactory(mGraphicOverlay);
    barcodeDetector.setProcessor(new MultiProcessor.Builder<>(barcodeFactory).build());

    if (!barcodeDetector.isOperational()) {
        // Note: The first time that an app using the barcode or face API is installed on a
        // device, GMS will download native libraries to the device in order to do detection.
        // Usually this completes before the app is run for the first time.  But if that
        // download has not yet completed, then the above call will not detect any barcodes
        // and/or faces.
        //
        // isOperational() can be used to check if the required native libraries are currently
        // available.  The detectors will automatically become operational once the library
        // downloads complete on device.
        Log.w(TAG, "Detector dependencies are not yet available.");

        // Check for low storage.  If there is low storage, the native library will not be
        // downloaded, so detection will not become operational.
        IntentFilter lowstorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowstorageFilter) != null;

        if (hasLowStorage) {
            Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
            Log.w(TAG, getString(R.string.low_storage_error));
        }
    }

    // Creates and starts the camera.  Note that this uses a higher resolution in comparison
    // to other detection examples to enable the barcode detector to detect small barcodes
    // at long distances.
    CameraSource.Builder builder = new CameraSource.Builder(getApplicationContext(), barcodeDetector)
            .setFacing(CameraSource.CAMERA_FACING_BACK).setRequestedPreviewSize(1600, 1024)
            .setRequestedFps(15.0f);

    // make sure that auto focus is an available option
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) {
        builder = builder.setFocusMode(autoFocus ? Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE : null);
    }

    mCameraSource = builder.setFlashMode(useFlash ? Camera.Parameters.FLASH_MODE_TORCH : null).build();
}

From source file:com.itm.oki.oki.GooglyEyesActivity.java

/**
 * Creates the face detector and associated processing pipeline to support either front facing
 * mode or rear facing mode.  Checks if the detector is ready to use, and displays a low storage
 * warning if it was not possible to download the face library.
 */
@NonNull
private FaceDetector createFaceDetector(final Context context) {
    // For both front facing and rear facing modes, the detector is initialized to do landmark
    // detection (to find the eyes), classification (to determine if the eyes are open), and
    // tracking.
    //
    // Use of "fast mode" enables faster detection for frontward faces, at the expense of not
    // attempting to detect faces at more varied angles (e.g., faces in profile).  Therefore,
    // faces that are turned too far won't be detected under fast mode.
    //
    // For front facing mode only, the detector will use the "prominent face only" setting,
    // which is optimized for tracking a single relatively large face.  This setting allows the
    // detector to take some shortcuts to make tracking faster, at the expense of not being able
    // to track multiple faces.
    //
    // Setting the minimum face size not only controls how large faces must be in order to be
    // detected, it also affects performance.  Since it takes longer to scan for smaller faces,
    // we increase the minimum face size for the rear facing mode a little bit in order to make
    // tracking faster (at the expense of missing smaller faces).  But this optimization is less
    // important for the front facing case, because when "prominent face only" is enabled, the
    // detector stops scanning for faces after it has found the first (large) face.
    FaceDetector detector = new FaceDetector.Builder(context).setLandmarkType(FaceDetector.ALL_LANDMARKS)
            .setClassificationType(FaceDetector.ALL_CLASSIFICATIONS).setTrackingEnabled(true)
            .setMode(FaceDetector.FAST_MODE).setProminentFaceOnly(mIsFrontFacing)
            .setMinFaceSize(mIsFrontFacing ? 0.35f : 0.15f).build();

    Detector.Processor<Face> processor;
    if (mIsFrontFacing) {
        // For front facing mode, a single tracker instance is used with an associated focusing
        // processor.  This configuration allows the face detector to take some shortcuts to
        // speed up detection, in that it can quit after finding a single face and can assume
        // that the nextIrisPosition face position is usually relatively close to the last seen
        // face position.
        Tracker<Face> tracker = new GooglyFaceTracker(mGraphicOverlay, context);
        processor = new LargestFaceFocusingProcessor.Builder(detector, tracker).build();

    } else {
        // For rear facing mode, a factory is used to create per-face tracker instances.  A
        // tracker is created for each face and is maintained as long as the same face is
        // visible, enabling per-face state to be maintained over time.  This is used to store
        // the iris position and velocity for each face independently, simulating the motion of
        // the eyes of any number of faces over time.
        //
        // Both the front facing mode and the rear facing mode use the same tracker
        // implementation, avoiding the need for any additional code.  The only difference
        // between these cases is the choice of Processor: one that is specialized for tracking
        // a single face or one that can handle multiple faces.  Here, we use MultiProcessor,
        // which is a standard component of the mobile vision API for managing multiple items.
        MultiProcessor.Factory<Face> factory = new MultiProcessor.Factory<Face>() {
            @Override
            public Tracker<Face> create(Face face) {
                return new GooglyFaceTracker(mGraphicOverlay, context);
            }
        };
        processor = new MultiProcessor.Builder<>(factory).build();
    }

    detector.setProcessor(processor);

    if (!detector.isOperational()) {
        // Note: The first time that an app using the face API is installed on a device, GMS will
        // download a native library to the device in order to do detection.  Usually this
        // completes before the app is run for the first time.  But if that download has not yet
        // completed, then the above call will not detect any faces.
        //
        // isOperational() can be used to check if the required native library is currently
        // available.  The detector will automatically become operational once the library
        // download completes on device.
        Log.w(TAG, "Face detector dependencies are not yet available.");

        // Check for low storage.  If there is low storage, the native library will not be
        // downloaded, so detection will not become operational.
        IntentFilter lowStorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowStorageFilter) != null;

        if (hasLowStorage) {
            Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
            Log.w(TAG, getString(R.string.low_storage_error));
        }
    }
    return detector;
}

From source file:devinxu.mynativemodeulaexample.facerecognize.GooglyEyesActivity.java

/**
 * Creates the face detector and associated processing pipeline to support either front facing
 * mode or rear facing mode.  Checks if the detector is ready to use, and displays a low storage
 * warning if it was not possible to download the face library.
 */
@NonNull
private FaceDetector createFaceDetector(Context context) {
    // For both front facing and rear facing modes, the detector is initialized to do landmark
    // detection (to find the eyes), classification (to determine if the eyes are open), and
    // tracking.
    //
    // Use of "fast mode" enables faster detection for frontward faces, at the expense of not
    // attempting to detect faces at more varied angles (e.g., faces in profile).  Therefore,
    // faces that are turned too far won't be detected under fast mode.
    //
    // For front facing mode only, the detector will use the "prominent face only" setting,
    // which is optimized for tracking a single relatively large face.  This setting allows the
    // detector to take some shortcuts to make tracking faster, at the expense of not being able
    // to track multiple faces.
    //
    // Setting the minimum face size not only controls how large faces must be in order to be
    // detected, it also affects performance.  Since it takes longer to scan for smaller faces,
    // we increase the minimum face size for the rear facing mode a little bit in order to make
    // tracking faster (at the expense of missing smaller faces).  But this optimization is less
    // important for the front facing case, because when "prominent face only" is enabled, the
    // detector stops scanning for faces after it has found the first (large) face.
    FaceDetector detector = new FaceDetector.Builder(context).setLandmarkType(FaceDetector.ALL_LANDMARKS)
            .setClassificationType(FaceDetector.ALL_CLASSIFICATIONS).setTrackingEnabled(true)
            .setMode(FaceDetector.FAST_MODE).setProminentFaceOnly(mIsFrontFacing)
            .setMinFaceSize(mIsFrontFacing ? 0.35f : 0.15f).build();

    Detector.Processor<Face> processor;
    if (mIsFrontFacing) {
        // For front facing mode, a single tracker instance is used with an associated focusing
        // processor.  This configuration allows the face detector to take some shortcuts to
        // speed up detection, in that it can quit after finding a single face and can assume
        // that the nextIrisPosition face position is usually relatively close to the last seen
        // face position.
        Tracker<Face> tracker = new GooglyFaceTracker(mGraphicOverlay, getResources());
        processor = new LargestFaceFocusingProcessor.Builder(detector, tracker).build();
    } else {
        // For rear facing mode, a factory is used to create per-face tracker instances.  A
        // tracker is created for each face and is maintained as long as the same face is
        // visible, enabling per-face state to be maintained over time.  This is used to store
        // the iris position and velocity for each face independently, simulating the motion of
        // the eyes of any number of faces over time.
        //
        // Both the front facing mode and the rear facing mode use the same tracker
        // implementation, avoiding the need for any additional code.  The only difference
        // between these cases is the choice of Processor: one that is specialized for tracking
        // a single face or one that can handle multiple faces.  Here, we use MultiProcessor,
        // which is a standard component of the mobile vision API for managing multiple items.
        MultiProcessor.Factory<Face> factory = new MultiProcessor.Factory<Face>() {
            @Override
            public Tracker<Face> create(Face face) {
                return new GooglyFaceTracker(mGraphicOverlay, getResources());
            }
        };
        processor = new MultiProcessor.Builder<>(factory).build();
    }

    detector.setProcessor(processor);

    if (!detector.isOperational()) {
        // Note: The first time that an app using the face API is installed on a device, GMS will
        // download a native library to the device in order to do detection.  Usually this
        // completes before the app is run for the first time.  But if that download has not yet
        // completed, then the above call will not detect any faces.
        //
        // isOperational() can be used to check if the required native library is currently
        // available.  The detector will automatically become operational once the library
        // download completes on device.
        Log.w(TAG, "Face detector dependencies are not yet available.");

        // Check for low storage.  If there is low storage, the native library will not be
        // downloaded, so detection will not become operational.
        IntentFilter lowStorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowStorageFilter) != null;

        if (hasLowStorage) {
            Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
            Log.w(TAG, getString(R.string.low_storage_error));
        }
    }
    return detector;
}

From source file:com.google.android.gms.samples.vision.face.googlyeyes.GooglyEyesActivity.java

/**
 * Creates the face detector and associated processing pipeline to support either front facing
 * mode or rear facing mode.  Checks if the detector is ready to use, and displays a low storage
 * warning if it was not possible to download the face library.
 */
@NonNull
private FaceDetector createFaceDetector(Context context) {
    // For both front facing and rear facing modes, the detector is initialized to do landmark
    // detection (to find the eyes), classification (to determine if the eyes are open), and
    // tracking.
    //
    // Use of "fast mode" enables faster detection for frontward faces, at the expense of not
    // attempting to detect faces at more varied angles (e.g., faces in profile).  Therefore,
    // faces that are turned too far won't be detected under fast mode.
    //
    // For front facing mode only, the detector will use the "prominent face only" setting,
    // which is optimized for tracking a single relatively large face.  This setting allows the
    // detector to take some shortcuts to make tracking faster, at the expense of not being able
    // to track multiple faces.
    //
    // Setting the minimum face size not only controls how large faces must be in order to be
    // detected, it also affects performance.  Since it takes longer to scan for smaller faces,
    // we increase the minimum face size for the rear facing mode a little bit in order to make
    // tracking faster (at the expense of missing smaller faces).  But this optimization is less
    // important for the front facing case, because when "prominent face only" is enabled, the
    // detector stops scanning for faces after it has found the first (large) face.
    FaceDetector detector = new FaceDetector.Builder(context).setLandmarkType(FaceDetector.ALL_LANDMARKS)
            .setClassificationType(FaceDetector.ALL_CLASSIFICATIONS).setTrackingEnabled(true)
            .setMode(FaceDetector.FAST_MODE).setProminentFaceOnly(mIsFrontFacing)
            .setMinFaceSize(mIsFrontFacing ? 0.35f : 0.15f).build();

    Detector.Processor<Face> processor;
    if (mIsFrontFacing) {
        // For front facing mode, a single tracker instance is used with an associated focusing
        // processor.  This configuration allows the face detector to take some shortcuts to
        // speed up detection, in that it can quit after finding a single face and can assume
        // that the nextIrisPosition face position is usually relatively close to the last seen
        // face position.
        Tracker<Face> tracker = new GooglyFaceTracker(mGraphicOverlay);
        processor = new LargestFaceFocusingProcessor.Builder(detector, tracker).build();
    } else {
        // For rear facing mode, a factory is used to create per-face tracker instances.  A
        // tracker is created for each face and is maintained as long as the same face is
        // visible, enabling per-face state to be maintained over time.  This is used to store
        // the iris position and velocity for each face independently, simulating the motion of
        // the eyes of any number of faces over time.
        //
        // Both the front facing mode and the rear facing mode use the same tracker
        // implementation, avoiding the need for any additional code.  The only difference
        // between these cases is the choice of Processor: one that is specialized for tracking
        // a single face or one that can handle multiple faces.  Here, we use MultiProcessor,
        // which is a standard component of the mobile vision API for managing multiple items.
        MultiProcessor.Factory<Face> factory = new MultiProcessor.Factory<Face>() {
            @Override
            public Tracker<Face> create(Face face) {
                return new GooglyFaceTracker(mGraphicOverlay);
            }
        };
        processor = new MultiProcessor.Builder<>(factory).build();
    }

    detector.setProcessor(processor);

    if (!detector.isOperational()) {
        // Note: The first time that an app using the face API is installed on a device, GMS will
        // download a native library to the device in order to do detection.  Usually this
        // completes before the app is run for the first time.  But if that download has not yet
        // completed, then the above call will not detect any faces.
        //
        // isOperational() can be used to check if the required native library is currently
        // available.  The detector will automatically become operational once the library
        // download completes on device.
        Log.w(TAG, "Face detector dependencies are not yet available.");

        // Check for low storage.  If there is low storage, the native library will not be
        // downloaded, so detection will not become operational.
        IntentFilter lowStorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowStorageFilter) != null;

        if (hasLowStorage) {
            Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
            Log.w(TAG, getString(R.string.low_storage_error));
        }
    }
    return detector;
}

From source file:taylorandtanner.gasscanmk1.OcrCaptureActivity.java

/**
 * Creates and starts the camera.  Note that this uses a higher resolution in comparison
 * to other detection examples to enable the ocr detector to detect small text samples
 * at long distances.
 *
 * Suppressing InlinedApi since there is a check that the minimum version is met before using
 * the constant.
 */
@SuppressLint("InlinedApi")
private void createCameraSource(boolean autoFocus, boolean useFlash) {
    Context context = getApplicationContext();

    // A text recognizer is created to find text.  An associated multi-processor instance
    // is set to receive the text recognition results, track the text, and maintain
    // graphics for each text block on screen.  The factory is used by the multi-processor to
    // create a separate tracker instance for each text block.
    TextRecognizer textRecognizer = new TextRecognizer.Builder(context).build();
    textRecognizer.setProcessor(new OcrDetectorProcessor(mGraphicOverlay));

    if (!textRecognizer.isOperational()) {
        // Note: The first time that an app using a Vision API is installed on a
        // device, GMS will download native libraries to the device in order to do detection.
        // Usually this completes before the app is run for the first time.  But if that
        // download has not yet completed, then the above call will not detect any text,
        // barcodes, or faces.
        //
        // isOperational() can be used to check if the required native libraries are currently
        // available.  The detectors will automatically become operational once the library
        // downloads complete on device.
        Log.w(TAG, "Detector dependencies are not yet available.");

        // Check for low storage.  If there is low storage, the native library will not be
        // downloaded, so detection will not become operational.
        IntentFilter lowstorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowstorageFilter) != null;

        if (hasLowStorage) {
            Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
            Log.w(TAG, getString(R.string.low_storage_error));
        }
    }

    // Creates and starts the camera.  Note that this uses a higher resolution in comparison
    // to other detection examples to enable the text recognizer to detect small pieces of text.
    mCameraSource = new CameraSource.Builder(getApplicationContext(), textRecognizer)
            .setFacing(CameraSource.CAMERA_FACING_BACK).setRequestedPreviewSize(1280, 1024)
            .setRequestedFps(0.5f).setFlashMode(useFlash ? Camera.Parameters.FLASH_MODE_TORCH : null)
            .setFocusMode(autoFocus ? Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE : null).build();
}