Java Code Examples for com.google.android.gms.vision.text.TextRecognizer#isOperational()

The following examples show how to use com.google.android.gms.vision.text.TextRecognizer#isOperational(). You can vote up the examples you like or vote down the ones you don't, and you can go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
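Before looking at the project examples, here is a minimal sketch of the basic pattern, not taken from any project below: build a TextRecognizer, confirm it is operational before detecting, and release it when done. The recognizeText method name and the context and bitmap parameters are placeholders for illustration.

void recognizeText(Context context, Bitmap bitmap) {
    // Building the recognizer does not guarantee the native OCR libraries are present yet.
    TextRecognizer textRecognizer = new TextRecognizer.Builder(context).build();
    try {
        if (!textRecognizer.isOperational()) {
            // Dependencies are still downloading (or blocked, e.g. by low storage); skip detection.
            Log.w("OcrSketch", "TextRecognizer is not operational yet");
            return;
        }
        Frame frame = new Frame.Builder().setBitmap(bitmap).build();
        SparseArray<TextBlock> blocks = textRecognizer.detect(frame);
        for (int i = 0; i < blocks.size(); i++) {
            Log.d("OcrSketch", "Detected text: " + blocks.valueAt(i).getValue());
        }
    } finally {
        // Free the native detector resources.
        textRecognizer.release();
    }
}
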
Example 1
Source File: ImageData.java    From PrivacyStreams with Apache License 2.0 7 votes
List<TextBlock> detectTextBlocks(UQI uqi) {
    List<TextBlock> result = new ArrayList<>();
    Bitmap bitmap = this.getBitmap(uqi);
    if (bitmap == null) return result;
    TextRecognizer textRecognizer = new TextRecognizer.Builder(uqi.getContext()).build();
    if (!textRecognizer.isOperational()) {
        Logging.warn("TextRecognizer is not operational");
        textRecognizer.release();
        return result;
    }
    Frame imageFrame = new Frame.Builder().setBitmap(bitmap).build();
    SparseArray<TextBlock> textBlocks = textRecognizer.detect(imageFrame);
    for (int i = 0; i < textBlocks.size(); i++) {
        TextBlock textBlock = textBlocks.get(textBlocks.keyAt(i));
        result.add(textBlock);
    }
    textRecognizer.release();
    return result;
}
 
Example 2
Source File: OcrCaptureActivity.java    From flutter_mobile_vision with MIT License 5 votes
@SuppressLint("InlinedApi")
protected void createCameraSource() throws MobileVisionException {
    Context context = getApplicationContext();

    TextRecognizer textRecognizer = new TextRecognizer.Builder(context)
            .build();

    OcrTrackerFactory ocrTrackerFactory = new OcrTrackerFactory(graphicOverlay, showText);

    textRecognizer.setProcessor(
            new MultiProcessor.Builder<>(ocrTrackerFactory).build());

    if (!textRecognizer.isOperational()) {
        IntentFilter lowStorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowStorageFilter) != null;

        if (hasLowStorage) {
            throw new MobileVisionException("Low Storage.");
        }
    }

    cameraSource = new CameraSource
            .Builder(getApplicationContext(), textRecognizer)
            .setFacing(camera)
            .setRequestedPreviewSize(previewWidth, previewHeight)
            .setFocusMode(autoFocus ? Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE : null)
            .setFlashMode(useFlash ? Camera.Parameters.FLASH_MODE_TORCH : null)
            .setRequestedFps(fps)
            .build();
}
 
Example 3
Source File: CameraView.java    From camerakit-android with MIT License 5 votes
public boolean setTextDetectionListener(final CameraKitEventCallback<CameraKitTextDetect> callback) throws GooglePlayServicesUnavailableException {
    TextRecognizer textRecognizer = new TextRecognizer.Builder(getContext()).build();
    textRecognizer.setProcessor(new TextProcessor(mEventDispatcher, callback));
    int code = GoogleApiAvailability.getInstance().isGooglePlayServicesAvailable(getContext().getApplicationContext());
    if (code != ConnectionResult.SUCCESS) {
        throw new GooglePlayServicesUnavailableException();
    }

    if (textRecognizer.isOperational()) {
        mCameraImpl.setTextDetector(textRecognizer);
        return true;
    } else {
        return false;
    }
}
 
Example 4
Source File: MainActivity.java    From text-detector with MIT License 5 votes
public void detectText(View view) {
    Bitmap textBitmap = BitmapFactory.decodeResource(getResources(), R.drawable.cat);

    TextRecognizer textRecognizer = new TextRecognizer.Builder(this).build();

    if (!textRecognizer.isOperational()) {
        new AlertDialog.Builder(this)
                .setMessage("Text recognizer could not be set up on your device :(")
                .show();
        return;
    }

    Frame frame = new Frame.Builder().setBitmap(textBitmap).build();
    SparseArray<TextBlock> text = textRecognizer.detect(frame);
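    // Note: each setText() call in the loop below replaces the previous value,
    // so only the last detected block remains visible in detectedTextView.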

    for (int i = 0; i < text.size(); ++i) {
        TextBlock item = text.valueAt(i);
        if (item != null && item.getValue() != null) {
            detectedTextView.setText(item.getValue());
        }
    }
}
 
Example 5
Source File: OcrCaptureActivity.java    From OCR-Reader with MIT License 4 votes
/**
 * Creates and starts the camera.  Note that this uses a higher resolution in comparison
 * to other detection examples to enable the ocr detector to detect small text samples
 * at long distances.
 *
 * Suppressing InlinedApi since there is a check that the minimum version is met before using
 * the constant.
 */
@SuppressLint("InlinedApi")
private void createCameraSource(boolean autoFocus, boolean useFlash) {
    Context context = getApplicationContext();

    // A text recognizer is created to find text.  An associated processor instance
    // is set to receive the text recognition results and display graphics for each text block
    // on screen.
    TextRecognizer textRecognizer = new TextRecognizer.Builder(context).build();
    textRecognizer.setProcessor(new OcrDetectorProcessor(mGraphicOverlay));

    if (!textRecognizer.isOperational()) {
        // Note: The first time that an app using a Vision API is installed on a
        // device, GMS will download native libraries to the device in order to do detection.
        // Usually this completes before the app is run for the first time.  But if that
        // download has not yet completed, then the above call will not detect any text,
        // barcodes, or faces.
        //
        // isOperational() can be used to check if the required native libraries are currently
        // available.  The detectors will automatically become operational once the library
        // downloads complete on device.
        Log.w(TAG, "Detector dependencies are not yet available.");

        // Check for low storage.  If there is low storage, the native library will not be
        // downloaded, so detection will not become operational.
        IntentFilter lowstorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowstorageFilter) != null;

        if (hasLowStorage) {
            Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
            Log.w(TAG, getString(R.string.low_storage_error));
        }
    }

    // Creates and starts the camera.  Note that this uses a higher resolution in comparison
    // to other detection examples to enable the text recognizer to detect small pieces of text.
    mCameraSource =
            new CameraSource.Builder(getApplicationContext(), textRecognizer)
            .setFacing(CameraSource.CAMERA_FACING_BACK)
            .setRequestedPreviewSize(1280, 1024)
            .setRequestedFps(2.0f)
            .setFlashMode(useFlash ? Camera.Parameters.FLASH_MODE_TORCH : null)
            .setFocusMode(autoFocus ? Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE : null)
            .build();
}
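The comments in this example note that a detector becomes operational automatically once GMS finishes downloading its native libraries. For a one-shot detection (rather than a camera pipeline), a simple approach is to poll isOperational() for a short while before giving up. The helper below is a hand-rolled sketch, not part of the OCR-Reader project; the maxAttempts count and the 500 ms delay are arbitrary illustration values.

// Sketch: wait briefly (off the main thread) for the recognizer's native libraries to become available.
boolean waitUntilOperational(TextRecognizer textRecognizer, int maxAttempts) throws InterruptedException {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
        if (textRecognizer.isOperational()) {
            return true;
        }
        Thread.sleep(500);
    }
    return textRecognizer.isOperational();
}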
 
Example 6
Source File: OcrCaptureActivity.java    From Moneycim with MIT License 4 votes
/**
 * Creates and starts the camera.  Note that this uses a higher resolution in comparison
 * to other detection examples to enable the ocr detector to detect small text samples
 * at long distances.
 *
 * Suppressing InlinedApi since there is a check that the minimum version is met before using
 * the constant.
 */
@SuppressLint("InlinedApi")
private void createCameraSource() {
    Context context = getApplicationContext();

    // A text recognizer is created to find text.  An associated processor instance
    // is set to receive the text recognition results and display graphics for each text block
    // on screen.
    TextRecognizer textRecognizer = new TextRecognizer.Builder(context).build();
    textRecognizer.setProcessor(new OcrDetectorProcessor(mGraphicOverlay, this));

    if (!textRecognizer.isOperational()) {
        // Note: The first time that an app using a Vision API is installed on a
        // device, GMS will download native libraries to the device in order to do detection.
        // Usually this completes before the app is run for the first time.  But if that
        // download has not yet completed, then the above call will not detect any text,
        // barcodes, or faces.
        //
        // isOperational() can be used to check if the required native libraries are currently
        // available.  The detectors will automatically become operational once the library
        // downloads complete on device.
        Log.w(TAG, "Detector dependencies are not yet available.");

        // Check for low storage.  If there is low storage, the native library will not be
        // downloaded, so detection will not become operational.
        IntentFilter lowstorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowstorageFilter) != null;

        if (hasLowStorage) {
            Snackbar.make(mGraphicOverlay, R.string.low_storage_error, Snackbar.LENGTH_LONG).show();
            Log.w(TAG, getString(R.string.low_storage_error));
        }
    }

    // Creates and starts the camera.  Note that this uses a higher resolution in comparison
    // to other detection examples to enable the text recognizer to detect small pieces of text.
    mCameraSource =
            new CameraSource.Builder(getApplicationContext(), textRecognizer)
            .setFacing(CameraSource.CAMERA_FACING_BACK)
            .setRequestedPreviewSize(1280, 1024)
            .setRequestedFps(2.0f)
            .setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE)
            .build();
}
 
Example 7
Source File: OcrCaptureActivity.java    From Questor with MIT License 4 votes
/**
 * Creates and starts the camera.  Note that this uses a higher resolution in comparison
 * to other detection examples to enable the ocr detector to detect small text samples
 * at long distances.
 *
 * Suppressing InlinedApi since there is a check that the minimum version is met before using
 * the constant.
 */
@SuppressLint("InlinedApi")
private void createCameraSource(boolean autoFocus, boolean useFlash) {
    Context context = getApplicationContext();

    // A text recognizer is created to find text.  An associated processor instance
    // is set to receive the text recognition results and display graphics for each text block
    // on screen.
    TextRecognizer textRecognizer = new TextRecognizer.Builder(context).build();
    textRecognizer.setProcessor(new OcrDetectorProcessor(mGraphicOverlay));

    if (!textRecognizer.isOperational()) {
        // Note: The first time that an app using a Vision API is installed on a
        // device, GMS will download native libraries to the device in order to do detection.
        // Usually this completes before the app is run for the first time.  But if that
        // download has not yet completed, then the above call will not detect any text,
        // barcodes, or faces.
        //
        // isOperational() can be used to check if the required native libraries are currently
        // available.  The detectors will automatically become operational once the library
        // downloads complete on device.
        Log.w(TAG, "Detector dependencies are not yet available.");

        // Check for low storage.  If there is low storage, the native library will not be
        // downloaded, so detection will not become operational.
        IntentFilter lowstorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowstorageFilter) != null;

        if (hasLowStorage) {
            Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
            Log.w(TAG, getString(R.string.low_storage_error));
        }
    }

    // Creates and starts the camera.  Note that this uses a higher resolution in comparison
    // to other detection examples to enable the text recognizer to detect small pieces of text.
    mCameraSource =
            new CameraSource.Builder(getApplicationContext(), textRecognizer)
            .setFacing(CameraSource.CAMERA_FACING_BACK)
            .setRequestedPreviewSize(1280, 1024)
            .setRequestedFps(2.0f)
            .setFlashMode(useFlash ? Camera.Parameters.FLASH_MODE_TORCH : null)
            .setFocusMode(autoFocus ? Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE : null)
            .build();
}
 
Example 8
Source File: OcrCaptureActivity.java    From Document-Scanner with GNU General Public License v3.0 4 votes
/**
 * Creates and starts the camera.  Note that this uses a higher resolution in comparison
 * to other detection examples to enable the ocr detector to detect small text samples
 * at long distances.
 * <p>
 * Suppressing InlinedApi since there is a check that the minimum version is met before using
 * the constant.
 */
@SuppressLint("InlinedApi")
private void createCameraSource(boolean autoFocus, boolean useFlash) {
    Context context = getApplicationContext();

    // A text recognizer is created to find text.  An associated processor instance
    // is set to receive the text recognition results and display graphics for each text block
    // on screen.
    TextRecognizer textRecognizer = new TextRecognizer.Builder(context).build();
    textRecognizer.setProcessor(new OcrDetectorProcessor(mGraphicOverlay));

    if (!textRecognizer.isOperational()) {
        // Note: The first time that an app using a Vision API is installed on a
        // device, GMS will download native libraries to the device in order to do detection.
        // Usually this completes before the app is run for the first time.  But if that
        // download has not yet completed, then the above call will not detect any text,
        // barcodes, or faces.
        //
        // isOperational() can be used to check if the required native libraries are currently
        // available.  The detectors will automatically become operational once the library
        // downloads complete on device.
        Log.w(TAG, "Detector dependencies are not yet available.");

        // Check for low storage.  If there is low storage, the native library will not be
        // downloaded, so detection will not become operational.
        IntentFilter lowstorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowstorageFilter) != null;

        if (hasLowStorage) {
            Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
            Log.w(TAG, getString(R.string.low_storage_error));
        }
    }

    // Creates and starts the camera.  Note that this uses a higher resolution in comparison
    // to other detection examples to enable the text recognizer to detect small pieces of text.
    mCameraSource =
            new CameraSource.Builder(getApplicationContext(), textRecognizer)
                    .setFacing(CameraSource.CAMERA_FACING_BACK)
                    .setRequestedPreviewSize(1280, 1024)
                    .setRequestedFps(2.0f)
                    .setFlashMode(useFlash ? Camera.Parameters.FLASH_MODE_TORCH : null)
                    .setFocusMode(autoFocus ? Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE : null)
                    .build();
}
 
Example 9
Source File: OcrCaptureActivity.java    From android-vision with Apache License 2.0 4 votes
/**
 * Creates and starts the camera.  Note that this uses a higher resolution in comparison
 * to other detection examples to enable the ocr detector to detect small text samples
 * at long distances.
 *
 * Suppressing InlinedApi since there is a check that the minimum version is met before using
 * the constant.
 */
@SuppressLint("InlinedApi")
private void createCameraSource(boolean autoFocus, boolean useFlash) {
    Context context = getApplicationContext();

    // A text recognizer is created to find text.  An associated multi-processor instance
    // is set to receive the text recognition results, track the text, and maintain
    // graphics for each text block on screen.  The factory is used by the multi-processor to
    // create a separate tracker instance for each text block.
    TextRecognizer textRecognizer = new TextRecognizer.Builder(context).build();
    textRecognizer.setProcessor(new OcrDetectorProcessor(graphicOverlay));

    if (!textRecognizer.isOperational()) {
        // Note: The first time that an app using a Vision API is installed on a
        // device, GMS will download native libraries to the device in order to do detection.
        // Usually this completes before the app is run for the first time.  But if that
        // download has not yet completed, then the above call will not detect any text,
        // barcodes, or faces.
        //
        // isOperational() can be used to check if the required native libraries are currently
        // available.  The detectors will automatically become operational once the library
        // downloads complete on device.
        Log.w(TAG, "Detector dependencies are not yet available.");

        // Check for low storage.  If there is low storage, the native library will not be
        // downloaded, so detection will not become operational.
        IntentFilter lowstorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowstorageFilter) != null;

        if (hasLowStorage) {
            Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
            Log.w(TAG, getString(R.string.low_storage_error));
        }
    }

    // Creates and starts the camera.  Note that this uses a higher resolution in comparison
    // to other detection examples to enable the text recognizer to detect small pieces of text.
    cameraSource =
            new CameraSource.Builder(getApplicationContext(), textRecognizer)
            .setFacing(CameraSource.CAMERA_FACING_BACK)
            .setRequestedPreviewSize(1280, 1024)
            .setRequestedFps(2.0f)
            .setFlashMode(useFlash ? Camera.Parameters.FLASH_MODE_TORCH : null)
            .setFocusMode(autoFocus ? Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO : null)
            .build();
}
 
Example 10
Source File: OcrCaptureActivity.java    From android-vision with Apache License 2.0 4 votes
/**
 * Creates and starts the camera.  Note that this uses a higher resolution in comparison
 * to other detection examples to enable the ocr detector to detect small text samples
 * at long distances.
 *
 * Suppressing InlinedApi since there is a check that the minimum version is met before using
 * the constant.
 */
@SuppressLint("InlinedApi")
private void createCameraSource(boolean autoFocus, boolean useFlash) {
    Context context = getApplicationContext();

    // A text recognizer is created to find text.  An associated processor instance
    // is set to receive the text recognition results and display graphics for each text block
    // on screen.
    TextRecognizer textRecognizer = new TextRecognizer.Builder(context).build();
    textRecognizer.setProcessor(new OcrDetectorProcessor(mGraphicOverlay));

    if (!textRecognizer.isOperational()) {
        // Note: The first time that an app using a Vision API is installed on a
        // device, GMS will download native libraries to the device in order to do detection.
        // Usually this completes before the app is run for the first time.  But if that
        // download has not yet completed, then the above call will not detect any text,
        // barcodes, or faces.
        //
        // isOperational() can be used to check if the required native libraries are currently
        // available.  The detectors will automatically become operational once the library
        // downloads complete on device.
        Log.w(TAG, "Detector dependencies are not yet available.");

        // Check for low storage.  If there is low storage, the native library will not be
        // downloaded, so detection will not become operational.
        IntentFilter lowstorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
        boolean hasLowStorage = registerReceiver(null, lowstorageFilter) != null;

        if (hasLowStorage) {
            Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
            Log.w(TAG, getString(R.string.low_storage_error));
        }
    }

    // Creates and starts the camera.  Note that this uses a higher resolution in comparison
    // to other detection examples to enable the text recognizer to detect small pieces of text.
    mCameraSource =
            new CameraSource.Builder(getApplicationContext(), textRecognizer)
            .setFacing(CameraSource.CAMERA_FACING_BACK)
            .setRequestedPreviewSize(1280, 1024)
            .setRequestedFps(2.0f)
            .setFlashMode(useFlash ? Camera.Parameters.FLASH_MODE_TORCH : null)
            .setFocusMode(autoFocus ? Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE : null)
            .build();
}
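 
None of the camera-based examples above shows the matching teardown. In these OcrCaptureActivity samples the camera source is typically released when the activity is destroyed, which also frees the underlying recognizer. A representative sketch, assuming a field named mCameraSource as in Examples 5 through 8 and 10:

@Override
protected void onDestroy() {
    super.onDestroy();
    if (mCameraSource != null) {
        // Releasing the camera source stops the camera and frees the underlying detector.
        mCameraSource.release();
    }
}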