Example usage for org.opencv.core Mat cols

List of usage examples for org.opencv.core Mat cols

Introduction

In this page you can find the example usage for org.opencv.core Mat cols.

Prototype

public int cols() 

Source Link

Usage

From source file:net.semanticmetadata.lire.imageanalysis.features.local.opencvfeatures.CvSiftExtractor.java

License: Open Source License

@Override
public void extract(BufferedImage img) {
    // Detects SIFT keypoints on a grayscale copy of `img`, computes their
    // descriptors, and stores one CvSiftFeature (x, y, size, descriptor)
    // per keypoint in the `features` list.
    MatOfKeyPoint keypoints = new MatOfKeyPoint();
    Mat descriptors = new Mat();

    // Wrap the image's raw bytes in a 3-channel Mat, then convert to gray.
    // NOTE(review): assumes the BufferedImage raster is backed by a byte
    // buffer in 3-byte BGR layout (e.g. TYPE_3BYTE_BGR) — confirm at callers.
    byte[] data = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
    Mat matRGB = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
    matRGB.put(0, 0, data);
    Mat matGray = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC1);
    Imgproc.cvtColor(matRGB, matGray, Imgproc.COLOR_BGR2GRAY); //TODO: RGB or BGR?
    // (Removed: a dataGray byte[] was allocated and filled here but never read.)

    detector.detect(matGray, keypoints);
    extractor.compute(matGray, keypoints, descriptors);
    List<KeyPoint> myKeys = keypoints.toList();

    features = new LinkedList<CvSiftFeature>();
    // Every descriptor row has the same width, so read it once instead of
    // materialising a row header Mat on each iteration.
    int cols = descriptors.cols();
    int rows = myKeys.size();
    for (int i = 0; i < rows; i++) {
        double[] desc = new double[cols];
        for (int j = 0; j < cols; j++) {
            desc[j] = descriptors.get(i, j)[0];
        }
        KeyPoint key = myKeys.get(i);
        features.add(new CvSiftFeature(key.pt.x, key.pt.y, key.size, desc));
    }
}

From source file:net.semanticmetadata.lire.imageanalysis.features.local.opencvfeatures.CvSiftExtractor.java

License: Open Source License

public LinkedList<CvSiftFeature> computeSiftKeypoints(BufferedImage img) {
    // Detects SIFT keypoints (positions only, no descriptors) and returns
    // them as CvSiftFeature objects with a null descriptor.
    MatOfKeyPoint keypoints = new MatOfKeyPoint();

    // Wrap the image's raw bytes in a 3-channel Mat, then convert to gray.
    // NOTE(review): assumes the BufferedImage raster is backed by a byte
    // buffer in 3-byte BGR layout (e.g. TYPE_3BYTE_BGR) — confirm at callers.
    byte[] data = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
    Mat matRGB = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
    matRGB.put(0, 0, data);
    Mat matGray = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC1);
    Imgproc.cvtColor(matRGB, matGray, Imgproc.COLOR_BGR2GRAY); //TODO: RGB or BGR?
    // (Removed: a dataGray byte[] was allocated and filled here but never read.)

    detector.detect(matGray, keypoints);

    LinkedList<CvSiftFeature> myKeypoints = new LinkedList<CvSiftFeature>();
    for (KeyPoint key : keypoints.toList()) {
        myKeypoints.add(new CvSiftFeature(key.pt.x, key.pt.y, key.size, null));
    }

    return myKeypoints;
}

From source file:net.semanticmetadata.lire.imageanalysis.features.local.opencvfeatures.CvSurfExtractor.java

License: Open Source License

@Override
public void extract(BufferedImage img) {
    // Detects SURF keypoints on a grayscale copy of `img`, computes their
    // descriptors, and stores one CvSurfFeature (x, y, size, descriptor)
    // per keypoint in the `features` list.
    MatOfKeyPoint keypoints = new MatOfKeyPoint();
    Mat descriptors = new Mat();

    // Wrap the image's raw bytes in a 3-channel Mat, then convert to gray.
    // NOTE(review): assumes the BufferedImage raster is backed by a byte
    // buffer in 3-byte BGR layout (e.g. TYPE_3BYTE_BGR) — confirm at callers.
    byte[] data = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
    Mat matRGB = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
    matRGB.put(0, 0, data);
    Mat matGray = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC1);
    Imgproc.cvtColor(matRGB, matGray, Imgproc.COLOR_BGR2GRAY); //TODO: RGB or BGR?
    // (Removed: a dataGray byte[] was allocated and filled here but never read.)

    detector.detect(matGray, keypoints);
    extractor.compute(matGray, keypoints, descriptors);
    List<KeyPoint> myKeys = keypoints.toList();

    features = new LinkedList<CvSurfFeature>();
    // Every descriptor row has the same width, so read it once instead of
    // materialising a row header Mat on each iteration.
    int cols = descriptors.cols();
    int rows = myKeys.size();
    for (int i = 0; i < rows; i++) {
        double[] desc = new double[cols];
        for (int j = 0; j < cols; j++) {
            desc[j] = descriptors.get(i, j)[0];
        }
        KeyPoint key = myKeys.get(i);
        features.add(new CvSurfFeature(key.pt.x, key.pt.y, key.size, desc));
    }
}

From source file:net.semanticmetadata.lire.imageanalysis.features.local.opencvfeatures.CvSurfExtractor.java

License: Open Source License

public LinkedList<CvSurfFeature> computeSurfKeypoints(BufferedImage img) {
    // Detects SURF keypoints (positions only, no descriptors) and returns
    // them as CvSurfFeature objects with a null descriptor.
    MatOfKeyPoint keypoints = new MatOfKeyPoint();

    // Wrap the image's raw bytes in a 3-channel Mat, then convert to gray.
    // NOTE(review): assumes the BufferedImage raster is backed by a byte
    // buffer in 3-byte BGR layout (e.g. TYPE_3BYTE_BGR) — confirm at callers.
    byte[] data = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
    Mat matRGB = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
    matRGB.put(0, 0, data);
    Mat matGray = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC1);
    Imgproc.cvtColor(matRGB, matGray, Imgproc.COLOR_BGR2GRAY); //TODO: RGB or BGR?
    // (Removed: a dataGray byte[] was allocated and filled here but never read.)

    detector.detect(matGray, keypoints);

    LinkedList<CvSurfFeature> myKeypoints = new LinkedList<CvSurfFeature>();
    for (KeyPoint key : keypoints.toList()) {
        myKeypoints.add(new CvSurfFeature(key.pt.x, key.pt.y, key.size, null));
    }

    return myKeypoints;
}

From source file:nz.ac.auckland.lablet.vision.CamShiftTracker.java

License: Open Source License

/**
 * Finds the dominant colour in an image, and returns two values in HSV colour space to represent similar colours,
 * e.g. so you can keep all colours similar to the dominant colour.
 *
 * How the algorithm works:
 *
 * 1. Scale the frame down so that algorithm doesn't take too long.
 * 2. Segment the frame into different colours (number of colours determined by k)
 * 3. Find dominant cluster (largest area) and get its central colour point.
 * 4. Get range (min max) to represent similar colours.
 *
 * @param bgr The input frame, in BGR colour space.
 * @param k The number of segments to use (2 works well).
 * @return The min and max HSV colour values, which represent the colours similar to the dominant colour.
 */
private Pair<Scalar, Scalar> getMinMaxHsv(Mat bgr, int k) {
    // NOTE(review): the original comment here said "Convert to HSV", but this call
    // converts BGR -> BGRA (with dstCn = 3). The only HSV conversion happens much
    // further down, on the single cluster-centre colour. Confirm this is intended.
    Mat input = new Mat();
    Imgproc.cvtColor(bgr, input, Imgproc.COLOR_BGR2BGRA, 3);

    //Scale image down (preserving aspect ratio) so k-means stays fast.
    Size bgrSize = bgr.size();
    Size newSize = new Size();

    if (bgrSize.width > CamShiftTracker.KMEANS_IMG_SIZE || bgrSize.height > CamShiftTracker.KMEANS_IMG_SIZE) {

        if (bgrSize.width > bgrSize.height) {
            newSize.width = CamShiftTracker.KMEANS_IMG_SIZE;
            // NOTE(review): relies on this expression being evaluated in floating
            // point (Size fields are double); verify KMEANS_IMG_SIZE's declared type,
            // since an all-int division here would truncate to 0.
            newSize.height = CamShiftTracker.KMEANS_IMG_SIZE / bgrSize.width * bgrSize.height;
        } else {
            newSize.width = CamShiftTracker.KMEANS_IMG_SIZE / bgrSize.height * bgrSize.width;
            newSize.height = CamShiftTracker.KMEANS_IMG_SIZE;
        }

        Imgproc.resize(input, input, newSize);
    }

    //Image quantization using k-means, see here for details of k-means algorithm: http://bit.ly/1JIvrlB
    Mat clusterData = new Mat();

    // Reshape to one row per pixel (channels become columns), scaled to [0, 1] floats.
    Mat reshaped = input.reshape(1, input.rows() * input.cols());
    reshaped.convertTo(clusterData, CvType.CV_32F, 1.0 / 255.0);
    Mat labels = new Mat();
    Mat centres = new Mat();
    TermCriteria criteria = new TermCriteria(TermCriteria.COUNT, 50, 1);
    Core.kmeans(clusterData, k, labels, criteria, 1, Core.KMEANS_PP_CENTERS, centres);

    //Count how many pixels fell into each cluster.
    int[] counts = new int[k];

    for (int i = 0; i < labels.rows(); i++) {
        int label = (int) labels.get(i, 0)[0];
        counts[label] += 1;
    }

    //Find the cluster index with the maximum number of members (the dominant colour).
    int maxCluster = 0;
    int index = -1;

    for (int i = 0; i < counts.length; i++) {
        int value = counts[i];

        if (value > maxCluster) {
            maxCluster = value;
            index = i;
        }
    }

    //Recover the dominant cluster's centre colour, rescaled back to 0-255.
    //Column indexing follows the code's own r/g/b naming (BGR channel order: 0 = blue, 2 = red).
    int r = (int) (centres.get(index, 2)[0] * 255.0);
    int g = (int) (centres.get(index, 1)[0] * 255.0);
    int b = (int) (centres.get(index, 0)[0] * 255.0);
    int sum = (r + g + b) / 3; // mean brightness of the dominant colour

    //Get colour range
    Scalar min;
    Scalar max;

    //Channel spread: small spread means a near-grey colour; combined with brightness
    //it distinguishes white-ish and black-ish dominants from chromatic ones.
    int rg = Math.abs(r - g);
    int gb = Math.abs(g - b);
    int rb = Math.abs(r - b);
    int maxDiff = Math.max(Math.max(rg, gb), rb);

    if (maxDiff < 35 && sum > 120) { //white: any hue, low saturation, high value
        min = new Scalar(0, 0, 0);
        max = new Scalar(180, 40, 255);
    } else if (sum < 50 && maxDiff < 35) { //black: any hue, any saturation, low value
        min = new Scalar(0, 0, 0);
        max = new Scalar(180, 255, 40);
    } else {
        //Chromatic colour: convert the single dominant RGB value to HSV and
        //build a hue band of +/- colourRange around it.
        Mat bgrColour = new Mat(1, 1, CvType.CV_8UC3, new Scalar(r, g, b));
        Mat hsvColour = new Mat();

        Imgproc.cvtColor(bgrColour, hsvColour, Imgproc.COLOR_BGR2HSV, 3);
        double[] hsv = hsvColour.get(0, 0);

        //If the lower hue bound underflows 0, widen the upper bound by the same
        //amount instead (OpenCV hue range is [0, 180]).
        int addition = 0;
        int minHue = (int) hsv[0] - colourRange;
        if (minHue < 0) {
            addition = Math.abs(minHue);
        }

        int maxHue = (int) hsv[0] + colourRange;

        min = new Scalar(Math.max(minHue, 0), 60, Math.max(35, hsv[2] - 30));
        max = new Scalar(Math.min(maxHue + addition, 180), 255, 255);
    }

    return new Pair<>(min, max);
}

From source file:objectdetection.Mat2Image.java

BufferedImage getImage(Mat mat) {
    // Copies the Mat's pixel bytes into the cached BufferedImage and returns it.
    // getSpace(mat) is expected to (re)size the `img`/`dat` fields to fit `mat`
    // — defined elsewhere in this class.
    getSpace(mat);
    final int width = mat.cols();
    final int height = mat.rows();
    mat.get(0, 0, dat);
    img.getRaster().setDataElements(0, 0, width, height, dat);
    return img;
}

From source file:opencv.CaptchaDetection.java

public static String goDetect(String filename) throws CvException {
    // Captcha pipeline: load -> threshold -> strip k-means noise and line
    // artefacts -> despeckle -> upscale -> segment digits -> recognise.
    final Mat src = Imgcodecs.imread(filename);

    Mat gray = thres_rgb(src);
    Mat noiseMask = k_means_spilter(src);
    Mat lineMask = check_is_line(gray);

    // Remove the detected noise and line pixels from the thresholded image.
    delete_target(gray, noiseMask, lineMask);
    delete_point(gray);

    // Double the resolution before digit segmentation.
    Mat enlarged = new Mat();
    Imgproc.resize(gray, enlarged, new Size(gray.cols() * 2, gray.rows() * 2));

    List<Mat> digitRegions = find_number(enlarged);

    return dect_number(digitRegions);
}

From source file:opencv.CaptchaDetection.java

private static Mat thres_rgb(Mat src) {
    // Binarises `src` into a single-channel mask: pixels whose mean channel
    // value exceeds `thres` are treated as background (forced to white in
    // `src`, 0 in the mask); darker pixels are gamma-boosted in `src` and
    // marked as foreground (255 in the mask).
    // Side effect: `src` is modified in place.
    Mat gray = Mat.zeros(src.size(), CvType.CV_8UC1);

    int thres = 150;    // mean-brightness cutoff between background and ink
    double gamma = 2.5; // gamma > 1 darkens the kept (foreground) pixels

    // Hoisted out of the pixel loop: Mat.put copies the buffer contents,
    // so reusing these avoids two heap allocations per pixel.
    byte[] s_buff = new byte[3];
    byte[] g_buff = new byte[1];

    for (int row = 0; row < src.rows(); row++) {
        for (int col = 0; col < src.cols(); col++) {
            double[] s_data = src.get(row, col);

            double color_sum = s_data[0] + s_data[1] + s_data[2];

            if (color_sum / 3 > thres) {
                // Bright pixel: white in src, background (0) in the mask.
                for (int channel = 0; channel < 3; channel++)
                    s_buff[channel] = (byte) 255;

                g_buff[0] = 0;
            } else {
                // Dark pixel: gamma-correct each channel, clamped to [0, 255].
                for (int channel = 0; channel < 3; channel++) {
                    double tmp = s_data[channel];
                    tmp = Math.pow(tmp / 255, gamma) * 255;

                    if (tmp < 0)
                        s_buff[channel] = 0;
                    else if (tmp > 255)
                        s_buff[channel] = (byte) 255;
                    else
                        s_buff[channel] = (byte) tmp;
                }

                g_buff[0] = (byte) 255;
            }
            src.put(row, col, s_buff);
            gray.put(row, col, g_buff);
        }
    }
    return gray;
}

From source file:opencv.CaptchaDetection.java

private static Mat k_means_spilter(Mat src) {
    // Segments `src` into `clusterCount` colour clusters via k-means and returns
    // a binary mask (255/0) selecting the pixels of the middle cluster when the
    // clusters are ordered by the sum of their centre's channel values.
    Mat dst = Mat.zeros(src.size(), CvType.CV_8UC1);

    int width = src.cols();
    int height = src.rows();
    int dims = src.channels();

    // Number of colour clusters to split the image into.
    int clusterCount = 3;

    // One sample row per pixel, one float column per channel.
    Mat points = new Mat(width * height, dims, CvType.CV_32F, new Scalar(0));
    Mat centers = new Mat(clusterCount, dims, CvType.CV_32F);
    Mat labels = new Mat(width * height, 1, CvType.CV_32S);

    // Copy every pixel's channel values into the sample matrix.
    // NOTE(review): the inner loop iterates exactly 3 channels — assumes
    // dims == 3 (a 3-channel src). Confirm for other inputs.
    for (int row = 0; row < height; row++) {
        for (int col = 0; col < width; col++) {
            int index = row * width + col;
            double[] s_data = src.get(row, col);

            for (int channel = 0; channel < 3; channel++) {
                float[] f_buff = new float[1];
                f_buff[0] = (float) s_data[channel];

                points.put(index, channel, f_buff);
            }
        }
    }

    // Run k-means: 3 attempts, k-means++ seeding, stop after 10 iters or eps 0.1.
    TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 10, 0.1);
    Core.kmeans(points, clusterCount, labels, criteria, 3, Core.KMEANS_PP_CENTERS, centers);

    // Order cluster labels by brightness (sum of the centre's channel values),
    // using a TreeMap keyed by that sum. The `sum++` loop only dodges key
    // collisions so that no cluster is dropped when two centres tie.
    Map<Integer, Integer> tmp = new TreeMap<>();
    for (int i = 0; i < clusterCount; i++) {
        int sum = 0;
        for (int j = 0; j < dims; j++) {
            sum += centers.get(i, j)[0];
        }
        while (tmp.containsKey(sum))
            sum++;
        tmp.put(sum, i);
    }

    // Flatten the brightness-sorted map into label_order (dimmest first).
    int count = 0;
    int[] label_order = new int[clusterCount];
    for (Map.Entry<Integer, Integer> iter : tmp.entrySet()) {
        label_order[count++] = iter.getValue();
    }

    // Emit 255 for pixels belonging to the middle-brightness cluster
    // (label_order[1]); everything else stays 0.
    for (int row = 0; row < height; row++) {
        for (int col = 0; col < width; col++) {
            int index = row * width + col;
            int label = (int) labels.get(index, 0)[0];

            if (label == label_order[1]) {
                byte[] d_buff = new byte[1];
                d_buff[0] = (byte) 255;
                dst.put(row, col, d_buff);
            }
        }
    }

    return dst;
}

From source file:opencv.CaptchaDetection.java

private static Mat check_is_line(Mat src) {
    // Builds a mask of pixels that look like thin straight strokes. A foreground
    // pixel (non-zero in src) is marked 255 in dst when both of its horizontal
    // neighbours are background and its column run (col_length) exceeds
    // min_length — and symmetrically for the vertical pass with row_length.
    Mat dst = Mat.zeros(src.size(), CvType.CV_8UC1);

    final int min_length = 3;
    // Mat.put copies the buffer, so one shared write buffer is safe.
    final byte[] white = new byte[] { (byte) 255 };

    // Pass 1: pixels with background on both horizontal sides.
    for (int r = 0; r < src.rows(); r++) {
        for (int c = 0; c < src.cols(); c++) {
            if (src.get(r, c)[0] == 0) {
                continue; // background pixel
            }

            boolean leftIsBlack = (c == 0) || (src.get(r, c - 1)[0] == 0);
            boolean rightIsBlack = (c == src.cols() - 1) || (src.get(r, c + 1)[0] == 0);

            if (leftIsBlack && rightIsBlack && col_length(src, r, c) > min_length) {
                dst.put(r, c, white);
            }
        }
    }

    // Pass 2: pixels with background directly above and below.
    for (int r = 0; r < src.rows(); r++) {
        for (int c = 0; c < src.cols(); c++) {
            if (src.get(r, c)[0] == 0) {
                continue; // background pixel
            }

            boolean upIsBlack = (r == 0) || (src.get(r - 1, c)[0] == 0);
            boolean downIsBlack = (r == src.rows() - 1) || (src.get(r + 1, c)[0] == 0);

            if (upIsBlack && downIsBlack && row_length(src, r, c) > min_length) {
                dst.put(r, c, white);
            }
        }
    }

    return dst;
}