Example usage for org.opencv.core Mat cols

List of usage examples for org.opencv.core Mat cols

Introduction

On this page you can find example usages of org.opencv.core.Mat.cols.

Prototype

public int cols() 

Source Link

Usage

From source file:de.hhn.android.licenseplatedecoder.decoder.DecodingEngine.java

/**
 * Detects the country code of the license plate by running the native
 * CountryExtractor over the input bitmap.
 *
 * @return the recognized country code
 */
public String getCountryCode() {
    // Convert the Android bitmap into an OpenCV Mat (RGBA layout).
    Mat rgba = new Mat();
    org.opencv.android.Utils.bitmapToMat(inputImg, rgba);

    // The native pipeline expects BGR, so convert before handing it over.
    Mat res = new Mat(rgba.rows(), rgba.cols(), rgba.type());
    org.opencv.imgproc.Imgproc.cvtColor(rgba, res, Imgproc.COLOR_RGBA2BGR);
    // Free the native buffer explicitly; merely nulling the Java reference
    // does not release OpenCV's native memory.
    rgba.release();

    // Receives the plate image with the country strip removed (filled in
    // as a side effect of the native call).
    Mat withoutStrip = new Mat();

    CountryExtractor ce = new CountryExtractor(res.nativeObj, withoutStrip.nativeObj);
    return ce.getResult();
}

From source file:de.hhn.android.licenseplatedecoder.decoder.DecodingEngine.java

/**
 * Get the license plate using LPSegmenter
 * @return the license plate/*  ww  w. j  a va  2s .com*/
 */
public String getLicensePlate() {
    Mat nativeOpencv = new Mat();
    org.opencv.android.Utils.bitmapToMat(inputImg, nativeOpencv);
    Mat res = new Mat(nativeOpencv.rows(), nativeOpencv.cols(), nativeOpencv.type());
    org.opencv.imgproc.Imgproc.cvtColor(nativeOpencv, res, Imgproc.COLOR_RGBA2BGR);
    nativeOpencv = null;

    LPSegmenter lp = new LPSegmenter(res.nativeObj, this.countryCodeNonAutomatic);
    return lp.getResult();
}

From source file:de.hhn.android.licenseplatedecoder.decoder.LPSegmenter.java

/**
 * Constructor: initializes the Tesseract OCR engine for the given country,
 * segments the plate image into single characters, and OCRs each character,
 * accumulating the recognized text into {@code this.result}.
 *
 * @param inputImageAddr native address of the input plate image (Mat)
 * @param countryCode country code selecting the OCR dataset and whitelist
 */
public LPSegmenter(long inputImageAddr, int countryCode) {
    this.nativeInputAddr = inputImageAddr;
    this.countryCode = countryCode;
    Log.d("Segmenter", "Country Code: " + countryCode);

    /** OCR ENGINE INIT */
    this.baseApi = new TessBaseAPI();
    // Initialization depending on the country code: pick the trained
    // dataset and the character whitelist for that country.
    String[] datasetWhitelist = getDatasetAndWhiteList(countryCode);
    String languageDataset = datasetWhitelist[0];
    String whitelist = datasetWhitelist[1];

    // NOTE(review): hard-coded data path — "/sdcard/tessdata/<lang>.traineddata"
    // must be present on the device for init to succeed.
    this.baseApi.init("/sdcard/", languageDataset);
    // Plates are OCRed one character at a time.
    this.baseApi.setPageSegMode(TessBaseAPI.PageSegMode.PSM_SINGLE_CHAR);
    this.baseApi.setVariable("tessedit_char_whitelist", whitelist);

    ArrayList<Mat> segChars = getCharacters();

    // StringBuilder suffices: this accumulator is method-local, so the
    // synchronization of StringBuffer is pure overhead.
    StringBuilder strb = new StringBuilder();
    for (Mat elem : segChars) {
        // Tesseract works on grayscale bitmaps.
        Imgproc.cvtColor(elem, elem, Imgproc.COLOR_BGR2GRAY);

        Bitmap pass = Bitmap.createBitmap(elem.cols(), elem.rows(), Bitmap.Config.ARGB_8888);
        Utils.matToBitmap(elem, pass, true);

        baseApi.setImage(pass);
        String recognizedText = baseApi.getUTF8Text();
        strb.append(recognizedText);
        baseApi.clear();
        pass.recycle();
        // Release the native buffer; nulling the list later would not free it.
        elem.release();
    }
    this.result = strb.toString();
    baseApi.end();
    baseApi = null;
}

From source file:de.hu_berlin.informatik.spws2014.mapever.entzerrung.CornerDetector.java

License:Open Source License

/**
 * Guesses the most likly corners of a distorted map within an image.
 * Expects OpenCV to be initialized./*  ww w.  ja va2  s . c  om*/
 * The results are already pretty good but could propably be improved
 * via tweaking the parameters or adding some additional line filtering
 * criteria(like them being kind of parallel for instance...)
 * 
 * @param gray_img A grayscale image in OpenCVs Mat format.
 * @return An array of propable corner points in the following form: {x0,y0,x1,y1,x2,y2,x3,y3} or null on error.
 **/
public static Point[] guess_corners(Mat gray_img) {
    Mat lines = new Mat();
    Imgproc.Canny(gray_img, gray_img, THRESHOLD0, THRESHOLD1, APERTURE_SIZE, false);
    Imgproc.HoughLinesP(gray_img, lines, RHO, THETA, HOUGH_THRESHOLD,
            Math.min(gray_img.cols(), gray_img.rows()) / MIN_LINE_LENGTH_FRACTION, MAX_LINE_GAP);

    double[][] edge_lines = filter_lines(lines, gray_img.size());

    Point[] ret_val = new Point[4];
    ret_val[0] = find_intercept_point(edge_lines[0], edge_lines[2]);
    ret_val[1] = find_intercept_point(edge_lines[0], edge_lines[3]);
    ret_val[2] = find_intercept_point(edge_lines[1], edge_lines[3]);
    ret_val[3] = find_intercept_point(edge_lines[1], edge_lines[2]);

    // do sanity checks and return null on invalid coordinates
    for (int i = 0; i < 4; i++) {
        // check if coordinates are outside image boundaries
        if (ret_val[i].x < 0 || ret_val[i].y < 0 || ret_val[i].x > gray_img.width()
                || ret_val[i].y > gray_img.height()) {
            return null;
        }

        // check if point equal to other point
        for (int j = i + 1; j < 4; j++) {
            if (ret_val[j].x == ret_val[i].x && ret_val[j].y == ret_val[i].y) {
                return null;
            }
        }
    }

    return ret_val;
}

From source file:de.hu_berlin.informatik.spws2014.mapever.entzerrung.CornerDetector.java

License:Open Source License

/**
 * Finds, among the detected Hough lines, the four lines closest to the
 * image borders within a reasonable range of slopes: the top-most and
 * bottom-most near-horizontal lines and the left-most and right-most
 * near-vertical lines.
 *
 * Turns out this is sufficient to give acceptable results.
 *
 * @param lines the lines within the original image, in OpenCV's Mat format
 *              (as returned by HoughLines or HoughLinesP), read one line
 *              per column
 * @param image_dimensions the original image size
 * @return 4 lines with 4 doubles each ({x0,y0,x1,y1}), ordered
 *         {top, bottom, left, right}
 **/
private static double[][] filter_lines(Mat lines, Size image_dimensions) {
    double[][] ret_lines = new double[4][4];
    // BUGFIX: use +/- infinity as sentinels. The previous Double.MIN_VALUE
    // is the smallest POSITIVE double, not a very negative number, so a
    // line lying exactly on x == 0 or y == 0 could never be recorded as a
    // maximum.
    double min_x = Double.POSITIVE_INFINITY, max_x = Double.NEGATIVE_INFINITY,
            min_y = Double.POSITIVE_INFINITY, max_y = Double.NEGATIVE_INFINITY;

    for (int l = 0; l < lines.cols(); ++l) {
        double current_line[] = lines.get(0, l);
        // Skip lines hugging the image border; they are likely artifacts.
        if (too_close(current_line, image_dimensions))
            continue;

        double slope = get_slope(current_line);
        if (Math.abs(slope) <= MAX_SLOPE) {
            // Near-horizontal: candidate for the top or bottom edge.
            double cl_min_y = Math.min(current_line[1], current_line[3]);
            double cl_max_y = Math.max(current_line[1], current_line[3]);
            if (cl_min_y < min_y) {
                ret_lines[0] = current_line;
                min_y = cl_min_y;
            }
            if (cl_max_y > max_y) {
                ret_lines[1] = current_line;
                max_y = cl_max_y;
            }
        } else if (Math.abs(1.0 / slope) <= MAX_SLOPE) {
            // Near-vertical: candidate for the left or right edge.
            double cl_min_x = Math.min(current_line[0], current_line[2]);
            double cl_max_x = Math.max(current_line[0], current_line[2]);
            if (cl_min_x < min_x) {
                ret_lines[2] = current_line;
                min_x = cl_min_x;
            }
            if (cl_max_x > max_x) {
                ret_lines[3] = current_line;
                max_x = cl_max_x;
            }
        }

    }
    return ret_lines;
}

From source file:de.vion.eyetracking.cameracalib.calibration.opencv.CameraCalibrator.java

/**
 * Draws the currently captured corner points onto the frame and overlays
 * a counter showing how many corner sets have been captured so far.
 *
 * @param rgbaFrame the RGBA preview frame to draw on
 */
private void renderFrame(Mat rgbaFrame) {
    drawPoints(rgbaFrame);

    // Place the caption at two thirds of the width, a tenth of the height.
    String caption = "Captured: " + this.mCornersBuffer.size();
    Point anchor = new Point(rgbaFrame.cols() / 3 * 2, rgbaFrame.rows() * 0.1);
    Core.putText(rgbaFrame, caption, anchor, Core.FONT_HERSHEY_SIMPLEX, 1.0,
            new Scalar(255, 255, 0));
}

From source file:depthDataFromStereoCamsOpenCV.ProcessImages.java

/**
 * /*  w w w  .  ja  v  a  2 s .c  om*/
 * @param image
 * @return
 */
public static Mat bringImageToStdSize(Mat image) {
    //create the square container
    int dstWidth = 300;
    int dstHeight = 500;
    Mat dst = new Mat(dstHeight, dstWidth, CvType.CV_8UC3, new Scalar(0, 0, 0));
    //ProcessImages.displayImage(ProcessImages.Mat2BufferedImage(dst),"background");
    //Put the image into the container, roi is the new position
    Rect roi = new Rect((dstWidth - image.cols()) / 2, (dstHeight - image.rows()) / 2, image.cols(),
            image.rows());
    Mat targetROI = new Mat(dst, roi);
    image.copyTo(targetROI);
    //          ProcessImages.displayImage(ProcessImages.Mat2BufferedImage(dst),"Standardized");
    return dst;
}

From source file:depthDataFromStereoCamsOpenCV.ProcessImages.java

/**
 * Converts an OpenCV Mat into an AWT BufferedImage by copying the raw
 * pixel bytes straight into the image's backing buffer.
 *
 * Source: http://answers.opencv.org/question/10344/opencv-java-load-image-to-gui/
 * Fastest approach; the output can be assigned either to a BufferedImage
 * or to an Image.
 *
 * @param m the matrix to convert (single-channel grayscale or multi-channel BGR)
 * @return the resulting BufferedImage
 */
public static BufferedImage Mat2BufferedImage(Mat m) {
    // Multi-channel input maps to 3-byte BGR, single-channel to grayscale.
    int imageType = m.channels() > 1 ? BufferedImage.TYPE_3BYTE_BGR : BufferedImage.TYPE_BYTE_GRAY;

    // Pull every pixel out of the Mat in one bulk read.
    byte[] pixels = new byte[m.channels() * m.cols() * m.rows()];
    m.get(0, 0, pixels);

    // Copy straight into the BufferedImage's underlying byte buffer.
    BufferedImage image = new BufferedImage(m.cols(), m.rows(), imageType);
    final byte[] target = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(pixels, 0, target, 0, pixels.length);
    return image;

}

From source file:detectiontest.ImageDisplayer.java

/**
 * Converts an OpenCV Mat into a BufferedImage for display in Swing/AWT.
 *
 * @param m the matrix to convert (1-channel grayscale or multi-channel BGR)
 * @return the converted image
 */
public static BufferedImage toBufferedImage(Mat m) {
    // Choose the image type from the channel count.
    int type;
    if (m.channels() > 1) {
        type = BufferedImage.TYPE_3BYTE_BGR;
    } else {
        type = BufferedImage.TYPE_BYTE_GRAY;
    }

    // Bulk-read all pixels from the Mat.
    byte[] data = new byte[m.channels() * m.cols() * m.rows()];
    m.get(0, 0, data);

    // Copy the bytes into the image's backing data buffer.
    BufferedImage image = new BufferedImage(m.cols(), m.rows(), type);
    byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(data, 0, targetPixels, 0, data.length);
    return image;
}

From source file:dfmDrone.examples.fitEllipseExample.java

/**
 * Converts an OpenCV Mat into an AWT Image by copying its raw pixel bytes
 * into a freshly allocated BufferedImage of the matching type.
 *
 * @param m the matrix to convert (1-channel grayscale or multi-channel BGR)
 * @return the converted image
 */
public static Image toBufferedImage(Mat m) {
    // Grayscale unless the Mat carries multiple channels (then BGR).
    int imgType = m.channels() > 1 ? BufferedImage.TYPE_3BYTE_BGR : BufferedImage.TYPE_BYTE_GRAY;

    int byteCount = m.channels() * m.cols() * m.rows();
    byte[] raw = new byte[byteCount];
    m.get(0, 0, raw); // bulk-read every pixel

    BufferedImage out = new BufferedImage(m.cols(), m.rows(), imgType);
    // Write directly into the image's backing byte array.
    final byte[] dest = ((DataBufferByte) out.getRaster().getDataBuffer()).getData();
    System.arraycopy(raw, 0, dest, 0, raw.length);
    return out;
}