Example usage for org.opencv.core Mat size

List of usage examples for org.opencv.core Mat size

Introduction

On this page you can find usage examples for the org.opencv.core Mat.size() method.

Prototype

public Size size() 
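
A minimal usage sketch (assuming the OpenCV native library is loaded and the usual org.opencv.core imports; the file name is illustrative):

Mat mat = Imgcodecs.imread("image.png");
Size s = mat.size();                   // width = number of columns, height = number of rows
Mat sameSize = new Mat(s, mat.type()); // allocate a second Mat with matching dimensions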

Usage

From source file:classes.ObjectFinder.java

private void backprojectObjectHistogram() {

    // Converting the current frame to HSV color space
    Mat hsvImage = new Mat(this.objectImage.size(), CvType.CV_8UC3);

    Imgproc.cvtColor(this.inputFrame, hsvImage, Imgproc.COLOR_BGR2HSV);

    // Getting the pixels that are in the specified ranges
    int hmin = this.thresholdsVector.get(0);
    int hmax = this.thresholdsVector.get(1);
    int smin = this.thresholdsVector.get(2);
    int smax = this.thresholdsVector.get(3);
    int vmin = this.thresholdsVector.get(4);
    int vmax = this.thresholdsVector.get(5);

    Mat maskImage = new Mat(this.objectImage.size(), CvType.CV_8UC1);
    Core.inRange(hsvImage, new Scalar(hmin, smin, vmin), new Scalar(hmax, smax, vmax), maskImage);

    // Taking the hue channel of the image
    Mat hueImage = new Mat(hsvImage.size(), hsvImage.depth());

    MatOfInt fromto = new MatOfInt(0, 0);
    Core.mixChannels(Arrays.asList(hsvImage), Arrays.asList(hueImage), fromto);

    // Backprojecting the histogram over that hue channel image
    MatOfFloat ranges = new MatOfFloat(0, 180);
    MatOfInt channels = new MatOfInt(0);

    Imgproc.calcBackProject(Arrays.asList(hueImage), channels, this.objectHistogram, this.backprojectionImage,
            ranges, 1);

    Core.bitwise_and(backprojectionImage, maskImage, backprojectionImage);

}
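
The objectHistogram field used above is not shown in this excerpt. A typical way to build such a hue histogram for back projection (an illustrative sketch, not this project's actual code; objectHueImage stands for the hue channel of the object image) is:

Mat objectHistogram = new Mat();
MatOfInt histSize = new MatOfInt(180);      // one bin per hue value
MatOfFloat ranges = new MatOfFloat(0, 180); // hue range in OpenCV is [0, 180)
MatOfInt channels = new MatOfInt(0);
Imgproc.calcHist(Arrays.asList(objectHueImage), channels, new Mat(), objectHistogram, histSize, ranges);
Core.normalize(objectHistogram, objectHistogram, 0, 255, Core.NORM_MINMAX);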

From source file:classes.TextRecognitionPreparer.java

private static Scalar getFillingColor(Scalar userColor, Mat cutout, Mat labels, Mat centers) {

    double minDistance = 1000000;
    Scalar fillingColor = null;

    centers.convertTo(centers, CvType.CV_8UC1, 255.0);
    // note: reshape returns a new Mat and the result is discarded here, so
    // centers remains an Nx3 single-channel matrix, which the get(row, col)
    // reads below rely on
    centers.reshape(3);

    List<Mat> clusters = new ArrayList<Mat>();
    for (int i = 0; i < centers.rows(); i++) {
        clusters.add(Mat.zeros(cutout.size(), cutout.type()));
    }

    Map<Integer, Integer> counts = new HashMap<Integer, Integer>();
    for (int i = 0; i < centers.rows(); i++) {
        counts.put(i, 0);
    }

    int rows = 0;
    for (int y = 0; y < cutout.rows(); y++) {
        for (int x = 0; x < cutout.cols(); x++) {
            int label = (int) labels.get(rows, 0)[0];
            int r = (int) centers.get(label, 2)[0];
            int g = (int) centers.get(label, 1)[0];
            int b = (int) centers.get(label, 0)[0];
            counts.put(label, counts.get(label) + 1);
            clusters.get(label).put(y, x, b, g, r);
            rows++;
        }
    }

    Set<Integer> keySet = counts.keySet();
    Iterator<Integer> iterator = keySet.iterator();
    while (iterator.hasNext()) {

        int label = (int) iterator.next();
        int r = (int) centers.get(label, 2)[0];
        int g = (int) centers.get(label, 1)[0];
        int b = (int) centers.get(label, 0)[0];

        Scalar currentColor = new Scalar(r, g, b);

        double distance = getColorDistance(currentColor, userColor);

        if (distance < minDistance) {
            minDistance = distance;
            fillingColor = currentColor;
        }

    }

    return fillingColor;
}
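
The labels and centers arguments are typically the output of Core.kmeans run on the cutout pixels. A minimal sketch of producing them (variable names and parameter values are illustrative assumptions):

Mat samples = cutout.reshape(1, (int) cutout.total()); // one row per pixel, 3 columns (BGR)
samples.convertTo(samples, CvType.CV_32F);             // kmeans requires CV_32F input

Mat labels = new Mat();
Mat centers = new Mat();
TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 10, 1.0);
Core.kmeans(samples, 4, labels, criteria, 3, Core.KMEANS_PP_CENTERS, centers);

Scalar fill = getFillingColor(userColor, cutout, labels, centers);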

From source file:classes.TextRecognitionPreparer.java

private static Mat reduceColor(Mat image, int div) {

    Mat result = new Mat(image.size(), image.type());

    int rows = image.rows(); // number of lines
    int cols = image.cols(); // number of elements per line

    for (int j = 0; j < rows; j++) {

        for (int i = 0; i < cols; i++) {

            double[] data = image.get(j, i);

            for (int k = 0; k < 3; k++) {
                data[k] = ((int) data[k] / div) * div + div / 2;

            }

            result.put(j, i, data);

        }
    }

    return result;
}
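
A minimal usage sketch (not part of the original source; the file names and divisor are illustrative):

Mat image = Imgcodecs.imread("input.png"); // 8-bit BGR image
Mat reduced = reduceColor(image, 64);      // quantizes each channel to 4 levels
Imgcodecs.imwrite("reduced.png", reduced);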

From source file:com.astrocytes.core.operationsengine.OperationsImpl.java

License:Open Source License

private List<Mat> showClusters(Mat cutout, Mat labels, Mat centers) {
    centers.convertTo(centers, CvType.CV_8UC1, 255.0);
    // note: reshape returns a new Mat and the discarded result leaves centers
    // as an Nx3 single-channel matrix, which the get(row, col) reads below expect
    centers.reshape(3);

    List<Mat> clusters = new ArrayList<Mat>();
    for (int i = 0; i < centers.rows(); i++) {
        clusters.add(Mat.zeros(cutout.size(), cutout.type()));
    }

    Map<Integer, Integer> counts = new HashMap<Integer, Integer>();
    for (int i = 0; i < centers.rows(); i++) {
        counts.put(i, 0);
    }

    int rows = 0; // running pixel index into labels; kept outside the loop so it is not reset on every row
    for (int y = 0; y < cutout.rows(); y++) {
        for (int x = 0; x < cutout.cols(); x++) {
            int label = (int) labels.get(rows, 0)[0];
            int r = (int) centers.get(label, 2)[0];
            int g = (int) centers.get(label, 1)[0];
            int b = (int) centers.get(label, 0)[0];
            counts.put(label, counts.get(label) + 1);
            clusters.get(label).put(y, x, b, g, r);
            rows++;
        }
    }
    System.out.println(counts);
    return clusters;
}

From source file:com.github.rosjava_catkin_package_a.ARLocROS.ComputePose.java

License:Apache License

public boolean computePose(Mat rvec, Mat tvec, Mat image2) throws NyARException, FileNotFoundException {
    // convert image to NyAR style for processing
    final INyARRgbRaster imageRaster = NyARImageHelper.createFromMat(image2);

    // create new marker system configuration
    i_config = new NyARMarkerSystemConfig(i_param);
    markerSystemState = new NyARMarkerSystem(i_config);
    // Create wrapper that passes cam pictures to marker system
    cameraSensorWrapper = new NyARSensor(i_screen_size);
    ids = new int[markerPatterns.size()];
    patternmap = new HashMap<>();
    for (int i = 0; i < markerPatterns.size(); i++) {
        // create marker description from pattern file and add to marker
        // system
        ids[i] = markerSystemState.addARMarker(arCodes.get(i), 25, markerConfig.getMarkerSize());
        patternmap.put(ids[i], markerPatterns.get(i));
    }

    cameraSensorWrapper.update(imageRaster);
    markerSystemState.update(cameraSensorWrapper);

    // init 3D point list
    final List<Point3> points3dlist = new ArrayList<>();
    final List<Point> points2dlist = new ArrayList<>();

    for (final int id : ids) {
        // process only if this marker has been detected
        if (markerSystemState.isExistMarker(id) && markerSystemState.getConfidence(id) > 0.7) {
            // read and add 2D points
            final NyARIntPoint2d[] vertex2d = markerSystemState.getMarkerVertex2D(id);
            Point p = new Point(vertex2d[0].x, vertex2d[0].y);
            points2dlist.add(p);
            p = new Point(vertex2d[1].x, vertex2d[1].y);
            points2dlist.add(p);
            p = new Point(vertex2d[2].x, vertex2d[2].y);
            points2dlist.add(p);
            p = new Point(vertex2d[3].x, vertex2d[3].y);
            points2dlist.add(p);

            final MatOfPoint mop = new MatOfPoint();
            mop.fromList(points2dlist);
            final List<MatOfPoint> pts = new ArrayList<>();
            pts.add(mop);
            // read and add corresponding 3D points
            points3dlist.addAll(markerConfig.create3dpointlist(patternmap.get(id)));
            // draw red rectangle around detected marker
            Core.rectangle(image2, new Point(vertex2d[0].x, vertex2d[0].y),
                    new Point(vertex2d[2].x, vertex2d[2].y), new Scalar(0, 0, 255));
        }

    }
    // load 2D and 3D points into Mats for solvePnP
    final MatOfPoint3f objectPoints = new MatOfPoint3f();
    objectPoints.fromList(points3dlist);
    final MatOfPoint2f imagePoints = new MatOfPoint2f();
    imagePoints.fromList(points2dlist);

    if (visualization) {
        // show image with markers detected
        Imshow.show(image2);
    }

    // do not call solvePnP with empty input data (no markers detected)
    if (points2dlist.size() == 0) {
        return false;
    }

    // RANSAC-based pose estimation (more robust to outliers, though it can
    // produce a shakier pose than plain solvePnP)
    Mat inliers = new Mat();

    Calib3d.solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, 300, 5, 16,
            inliers, Calib3d.CV_P3P);
    ARLoc.getLog().info("Points detected: " + points2dlist.size() + " inliers: " + inliers.size());
    // avoid publishing a zero pose if localization failed
    if (inliers.rows() == 0) {
        return false;
    }

    return true;
}

From source file:com.jeremydyer.nifi.ZoomImageProcessor.java

License:Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final FlowFile original = session.get();
    if (original == null) {
        return;
    }

    session.transfer(session.clone(original), REL_ORIGINAL);

    FlowFile ff = session.write(original, new StreamCallback() {
        @Override
        public void process(InputStream inputStream, OutputStream outputStream) throws IOException {
            try {
                int zoomingFactor = context.getProperty(ZOOMING_FACTOR).asInteger();

                BufferedImage image = ImageIO.read(inputStream);
                byte[] pixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
                Mat source = new Mat(image.getHeight(), image.getWidth(), CvType.CV_8UC1); // 8-bit single-channel
                source.put(0, 0, pixels);

                Mat destination = new Mat(source.rows() * zoomingFactor, source.cols() * zoomingFactor,
                        source.type());
                Imgproc.resize(source, destination, destination.size(), zoomingFactor, zoomingFactor,
                        Imgproc.INTER_NEAREST);

                MatOfByte bytemat = new MatOfByte();
                Imgcodecs.imencode(".png", destination, bytemat);
                pixels = bytemat.toArray();
                outputStream.write(pixels);

            } catch (Exception ex) {
                getLogger().error(ex.getMessage());
                ex.printStackTrace();
            }
        }
    });

    session.transfer(ff, REL_SUCCESS);

}

From source file:com.mitzuli.core.ocr.OcrPreprocessor.java

License:Open Source License

/**
 * Binarizes and cleans the input image for OCR, saving debugging images in the given directory.
 *
 * @param input the input image, which is recycled by this method, so the caller should make a defensive copy of it if necessary.
 * @param debugDir the directory to write the debugging images to, or null to disable debugging.
 * @return the preprocessed image.
 */
static Image preprocess(final Image input, final File debugDir) {
    // TODO Temporary workaround to allow manually enabling debugging (the global final variable should be used)
    boolean DEBUG = debugDir != null;

    // Initialization
    final Mat mat = input.toGrayscaleMat();
    final Mat debugMat = DEBUG ? input.toRgbMat() : null;
    input.recycle();
    final Mat aux = new Mat(mat.size(), CvType.CV_8UC1);
    final Mat binary = new Mat(mat.size(), CvType.CV_8UC1);
    if (DEBUG)
        Image.fromMat(mat).write(new File(debugDir, "1_input.jpg"));

    // Binarize the input image in mat through adaptive Gaussian thresholding
    Imgproc.adaptiveThreshold(mat, binary, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 51,
            13);
    // Imgproc.adaptiveThreshold(mat, binary, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 31, 7);

    // Edge detection
    Imgproc.morphologyEx(mat, mat, Imgproc.MORPH_OPEN, KERNEL_3X3); // Open
    Imgproc.morphologyEx(mat, aux, Imgproc.MORPH_CLOSE, KERNEL_3X3); // Close
    Core.addWeighted(mat, 0.5, aux, 0.5, 0, mat); // Average
    Imgproc.morphologyEx(mat, mat, Imgproc.MORPH_GRADIENT, KERNEL_3X3); // Gradient
    Imgproc.threshold(mat, mat, 0, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU); // Edge map
    if (DEBUG)
        Image.fromMat(mat).write(new File(debugDir, "2_edges.jpg"));

    // Extract word level connected-components from the dilated edge map
    Imgproc.dilate(mat, mat, KERNEL_3X3);
    if (DEBUG)
        Image.fromMat(mat).write(new File(debugDir, "3_dilated_edges.jpg"));
    final List<MatOfPoint> wordCCs = new ArrayList<MatOfPoint>();
    Imgproc.findContours(mat, wordCCs, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);

    // Filter word level connected-components individually and calculate their average attributes
    final List<MatOfPoint> individuallyFilteredWordCCs = new ArrayList<MatOfPoint>();
    final List<MatOfPoint> removedWordCCs = new ArrayList<MatOfPoint>();
    double avgWidth = 0, avgHeight = 0, avgArea = 0;
    for (MatOfPoint cc : wordCCs) {
        final Rect boundingBox = Imgproc.boundingRect(cc);
        if (boundingBox.height >= 6 // bounding box height >= 6
                && boundingBox.area() >= 50 // bounding box area >= 50
                && (double) boundingBox.width / (double) boundingBox.height >= 0.25 // bounding box aspect ratio >= 1:4
                && boundingBox.width <= 0.75 * mat.width() // bounding box width <= 0.75 image width
                && boundingBox.height <= 0.75 * mat.height()) // bounding box height <= 0.75 image height
        {
            individuallyFilteredWordCCs.add(cc);
            avgWidth += boundingBox.width;
            avgHeight += boundingBox.height;
            avgArea += boundingBox.area();
        } else {
            if (DEBUG)
                removedWordCCs.add(cc);
        }
    }
    wordCCs.clear();
    avgWidth /= individuallyFilteredWordCCs.size();
    avgHeight /= individuallyFilteredWordCCs.size();
    avgArea /= individuallyFilteredWordCCs.size();
    if (DEBUG) {
        Imgproc.drawContours(debugMat, removedWordCCs, -1, BLUE, -1);
        removedWordCCs.clear();
    }

    // Filter word level connected-components in relation to their average attributes
    final List<MatOfPoint> filteredWordCCs = new ArrayList<MatOfPoint>();
    for (MatOfPoint cc : individuallyFilteredWordCCs) {
        final Rect boundingBox = Imgproc.boundingRect(cc);
        if (boundingBox.width >= 0.125 * avgWidth // bounding box width >= 0.125 average width
                && boundingBox.width <= 8 * avgWidth // bounding box width <= 8 average width
                && boundingBox.height >= 0.25 * avgHeight // bounding box height >= 0.25 average height
                && boundingBox.height <= 4 * avgHeight) // bounding box height <= 4 average height
        {
            filteredWordCCs.add(cc);
        } else {
            if (DEBUG)
                removedWordCCs.add(cc);
        }
    }
    individuallyFilteredWordCCs.clear();
    if (DEBUG) {
        Imgproc.drawContours(debugMat, filteredWordCCs, -1, GREEN, -1);
        Imgproc.drawContours(debugMat, removedWordCCs, -1, PURPLE, -1);
        removedWordCCs.clear();
    }

    // Extract paragraph level connected-components
    mat.setTo(BLACK);
    Imgproc.drawContours(mat, filteredWordCCs, -1, WHITE, -1);
    final List<MatOfPoint> paragraphCCs = new ArrayList<MatOfPoint>();
    Imgproc.morphologyEx(mat, aux, Imgproc.MORPH_CLOSE, KERNEL_30X30);
    Imgproc.findContours(aux, paragraphCCs, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);

    // Filter paragraph level connected-components according to the word level connected-components inside
    final List<MatOfPoint> textCCs = new ArrayList<MatOfPoint>();
    for (MatOfPoint paragraphCC : paragraphCCs) {
        final List<MatOfPoint> wordCCsInParagraphCC = new ArrayList<MatOfPoint>();
        aux.setTo(BLACK);
        Imgproc.drawContours(aux, Collections.singletonList(paragraphCC), -1, WHITE, -1);
        Core.bitwise_and(mat, aux, aux);
        Imgproc.findContours(aux, wordCCsInParagraphCC, new Mat(), Imgproc.RETR_EXTERNAL,
                Imgproc.CHAIN_APPROX_SIMPLE);
        final Rect boundingBox = Imgproc.boundingRect(paragraphCC);
        final double center = mat.size().width / 2;
        final double distToCenter = center > boundingBox.x + boundingBox.width
                ? center - boundingBox.x - boundingBox.width
                : center < boundingBox.x ? boundingBox.x - center : 0.0;
        if (DEBUG) {
            System.err.println("****************************************");
            System.err.println("\tArea:                " + boundingBox.area());
            System.err.println("\tDistance to center:  " + distToCenter);
            System.err.println("\tCCs inside:          " + wordCCsInParagraphCC.size());
        }
        if ((wordCCsInParagraphCC.size() >= 10 || wordCCsInParagraphCC.size() >= 0.3 * filteredWordCCs.size())
                && mat.size().width / distToCenter >= 4) {
            textCCs.addAll(wordCCsInParagraphCC);
            if (DEBUG) {
                System.err.println("\tText:                YES");
                Imgproc.drawContours(debugMat, Collections.singletonList(paragraphCC), -1, DARK_GREEN, 5);
            }
        } else {
            if (DEBUG) {
                System.err.println("\tText:                NO");
                Imgproc.drawContours(debugMat, Collections.singletonList(paragraphCC), -1, DARK_RED, 5);
            }
        }
    }
    filteredWordCCs.clear();
    paragraphCCs.clear();
    mat.setTo(WHITE);
    Imgproc.drawContours(mat, textCCs, -1, BLACK, -1);
    textCCs.clear();
    if (DEBUG)
        Image.fromMat(debugMat).write(new File(debugDir, "4_filtering.jpg"));

    // Obtain the final text mask from the filtered connected-components
    Imgproc.erode(mat, mat, KERNEL_15X15);
    Imgproc.morphologyEx(mat, mat, Imgproc.MORPH_OPEN, KERNEL_30X30);
    if (DEBUG)
        Image.fromMat(mat).write(new File(debugDir, "5_text_mask.jpg"));

    // Apply the text mask to the binarized image
    if (DEBUG)
        Image.fromMat(binary).write(new File(debugDir, "6_binary.jpg"));
    binary.setTo(WHITE, mat);
    if (DEBUG)
        Image.fromMat(binary).write(new File(debugDir, "7_binary_text.jpg"));

    // Dewarp the text using Leptonica
    Pix pixs = Image.fromMat(binary).toGrayscalePix();
    Pix pixsDewarp = Dewarp.dewarp(pixs, 0, Dewarp.DEFAULT_SAMPLING, 5, true);
    final Image result = Image.fromGrayscalePix(pixsDewarp);
    if (DEBUG)
        result.write(new File(debugDir, "8_dewarp.jpg"));

    // Clean up
    pixs.recycle();
    mat.release();
    aux.release();
    binary.release();
    if (debugMat != null)
        debugMat.release();

    return result;
}

From source file:com.orange.documentare.core.image.Binarization.java

License:Open Source License

public static Mat getFrom(Mat mat) {
    Mat greyscaleMat = isGreyscale(mat) ? mat : getGreyscaleImage(mat);
    Mat binaryMat = new Mat(greyscaleMat.size(), CvType.CV_8UC1);
    Imgproc.adaptiveThreshold(greyscaleMat, binaryMat, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C,
            Imgproc.THRESH_BINARY, ADAPTIVE_BLOCK_SIZE, ADAPTIVE_MEAN_ADJUSTMENT);
    return binaryMat;
}
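
A minimal usage sketch (file names are illustrative; ADAPTIVE_BLOCK_SIZE and ADAPTIVE_MEAN_ADJUSTMENT are constants defined elsewhere in the class):

Mat page = Imgcodecs.imread("page.png");
Mat binary = Binarization.getFrom(page); // converts to greyscale internally if needed
Imgcodecs.imwrite("page_binary.png", binary);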

From source file:com.orange.documentare.core.image.opencv.OpenCvImage.java

License:Open Source License

public static Mat resize(Mat mat, int bytesSizeTarget) {
    long matBytesCount = matBytesCount(mat);
    float ratio = (float) matBytesCount / bytesSizeTarget;
    double sqrtRatio = Math.sqrt(ratio);
    int newWidth = (int) (mat.size().width / sqrtRatio);
    int newHeight = (int) (mat.size().height / sqrtRatio);
    Mat newMat = new Mat(newHeight, newWidth, mat.type());
    // dsize is non-zero here, so Imgproc.resize ignores the fx/fy arguments
    Imgproc.resize(mat, newMat, newMat.size(), sqrtRatio, sqrtRatio, Imgproc.INTER_LANCZOS4);
    return newMat;
}
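
matBytesCount is not shown in this excerpt; a plausible implementation (an assumption, not the project's actual code) multiplies the element count by the bytes per element:

private static long matBytesCount(Mat mat) {
    return mat.total() * mat.elemSize(); // number of elements times bytes per element
}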

From source file:com.shootoff.camera.autocalibration.AutoCalibrationManager.java

License:Open Source License

private Mat warpPerspective(final Mat frame) {
    if (warpInitialized) {
        final Mat mat = new Mat();
        Imgproc.warpPerspective(frame, mat, perspMat, frame.size(), Imgproc.INTER_LINEAR);

        return mat;
    } else {
        logger.warn("warpPerspective called when warpInitialized is false - {} {} - {}", perspMat, boundingBox,
                isCalibrated);

        return frame;
    }
}
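
The perspMat field applied above is typically produced once during calibration with Imgproc.getPerspectiveTransform; a minimal sketch (the corner coordinates are illustrative assumptions):

MatOfPoint2f srcCorners = new MatOfPoint2f(new Point(50, 40), new Point(590, 60),
        new Point(600, 420), new Point(40, 430));
MatOfPoint2f dstCorners = new MatOfPoint2f(new Point(0, 0), new Point(640, 0),
        new Point(640, 480), new Point(0, 480));
Mat perspMat = Imgproc.getPerspectiveTransform(srcCorners, dstCorners);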