Example usage for org.opencv.core Mat size

Introduction

On this page you can find example usages of the org.opencv.core Mat.size method.

Prototype

public Size size() 
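A minimal sketch of the call, assuming a desktop Java setup with the OpenCV native library available (the matrix contents are illustrative):

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Size;

public class MatSizeDemo {
    public static void main(String[] args) {
        // Load the native OpenCV library (desktop Java; Android apps use OpenCVLoader instead).
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // A 480-row by 640-column, 3-channel matrix of zeros.
        Mat mat = Mat.zeros(480, 640, CvType.CV_8UC3);

        // size() returns a Size whose width is the column count and whose height is the row count.
        Size size = mat.size();
        System.out.println(size.width);  // 640.0
        System.out.println(size.height); // 480.0
    }
}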

Usage

From source file:nz.ac.auckland.lablet.vision.CamShiftTracker.java

License:Open Source License

/**
 * Finds the dominant colour in an image, and returns two values in HSV colour space to represent similar colours,
 * e.g. so you can keep all colours similar to the dominant colour.
 *
 * How the algorithm works:
 *
 * 1. Scale the frame down so that algorithm doesn't take too long.
 * 2. Segment the frame into different colours (number of colours determined by k)
 * 3. Find dominant cluster (largest area) and get its central colour point.
 * 4. Get range (min max) to represent similar colours.
 *
 * @param bgr The input frame, in BGR colour space.
 * @param k The number of segments to use (2 works well).
 * @return The min and max HSV colour values, which represent the colours similar to the dominant colour.
 */
private Pair<Scalar, Scalar> getMinMaxHsv(Mat bgr, int k) {
    //Copy the frame into a BGRA working image; the HSV conversion happens later,
    //on the dominant colour only
    Mat input = new Mat();
    Imgproc.cvtColor(bgr, input, Imgproc.COLOR_BGR2BGRA, 3);

    //Scale image
    Size bgrSize = bgr.size();
    Size newSize = new Size();

    if (bgrSize.width > CamShiftTracker.KMEANS_IMG_SIZE || bgrSize.height > CamShiftTracker.KMEANS_IMG_SIZE) {

        if (bgrSize.width > bgrSize.height) {
            newSize.width = CamShiftTracker.KMEANS_IMG_SIZE;
            newSize.height = CamShiftTracker.KMEANS_IMG_SIZE / bgrSize.width * bgrSize.height;
        } else {
            newSize.width = CamShiftTracker.KMEANS_IMG_SIZE / bgrSize.height * bgrSize.width;
            newSize.height = CamShiftTracker.KMEANS_IMG_SIZE;
        }

        Imgproc.resize(input, input, newSize);
    }

    //Image quantization using k-means, see here for details of k-means algorithm: http://bit.ly/1JIvrlB
    Mat clusterData = new Mat();

    Mat reshaped = input.reshape(1, input.rows() * input.cols());
    reshaped.convertTo(clusterData, CvType.CV_32F, 1.0 / 255.0);
    Mat labels = new Mat();
    Mat centres = new Mat();
    TermCriteria criteria = new TermCriteria(TermCriteria.COUNT, 50, 1);
    Core.kmeans(clusterData, k, labels, criteria, 1, Core.KMEANS_PP_CENTERS, centres);

    //Get num hits for each category
    int[] counts = new int[k];

    for (int i = 0; i < labels.rows(); i++) {
        int label = (int) labels.get(i, 0)[0];
        counts[label] += 1;
    }

    //Get cluster index with maximum number of members
    int maxCluster = 0;
    int index = -1;

    for (int i = 0; i < counts.length; i++) {
        int value = counts[i];

        if (value > maxCluster) {
            maxCluster = value;
            index = i;
        }
    }

    //Get cluster centre point hsv
    int r = (int) (centres.get(index, 2)[0] * 255.0);
    int g = (int) (centres.get(index, 1)[0] * 255.0);
    int b = (int) (centres.get(index, 0)[0] * 255.0);
    int sum = (r + g + b) / 3;

    //Get colour range
    Scalar min;
    Scalar max;

    int rg = Math.abs(r - g);
    int gb = Math.abs(g - b);
    int rb = Math.abs(r - b);
    int maxDiff = Math.max(Math.max(rg, gb), rb);

    if (maxDiff < 35 && sum > 120) { //white
        min = new Scalar(0, 0, 0);
        max = new Scalar(180, 40, 255);
    } else if (sum < 50 && maxDiff < 35) { //black
        min = new Scalar(0, 0, 0);
        max = new Scalar(180, 255, 40);
    } else {
        Mat bgrColour = new Mat(1, 1, CvType.CV_8UC3, new Scalar(r, g, b));
        Mat hsvColour = new Mat();

        Imgproc.cvtColor(bgrColour, hsvColour, Imgproc.COLOR_BGR2HSV, 3);
        double[] hsv = hsvColour.get(0, 0);

        int addition = 0;
        int minHue = (int) hsv[0] - colourRange;
        if (minHue < 0) {
            addition = Math.abs(minHue);
        }

        int maxHue = (int) hsv[0] + colourRange;

        min = new Scalar(Math.max(minHue, 0), 60, Math.max(35, hsv[2] - 30));
        max = new Scalar(Math.min(maxHue + addition, 180), 255, 255);
    }

    return new Pair<>(min, max);
}
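For context, the returned pair is typically passed to Core.inRange to mask out everything except the dominant colour before tracking. A hedged sketch follows; the bgrFrame variable and the first/second accessors (as on android.util.Pair) are assumptions, not taken from the source above:

    Mat hsvFrame = new Mat();
    Imgproc.cvtColor(bgrFrame, hsvFrame, Imgproc.COLOR_BGR2HSV);

    Pair<Scalar, Scalar> hsvRange = getMinMaxHsv(bgrFrame, 2); // k = 2 works well, per the javadoc
    Mat mask = new Mat();
    Core.inRange(hsvFrame, hsvRange.first, hsvRange.second, mask); // mask is 255 inside the range, 0 outside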

From source file:opencv.CaptchaDetection.java

private static Mat thres_rgb(Mat src) {
    Mat gray = Mat.zeros(src.size(), CvType.CV_8UC1);

    // Brightness threshold and gamma-correction parameters
    int thres = 150;
    double gamma = 2.5;

    for (int row = 0; row < src.rows(); row++) {
        for (int col = 0; col < src.cols(); col++) {
            double[] s_data = src.get(row, col);

            byte[] s_buff = new byte[3];
            byte[] g_buff = new byte[1];

            double color_sum = s_data[0] + s_data[1] + s_data[2];

            if (color_sum / 3 > thres) {
                for (int channel = 0; channel < 3; channel++)
                    s_buff[channel] = (byte) 255;

                g_buff[0] = 0;
            } else {
                // Apply gamma correction to the darker pixels
                for (int channel = 0; channel < 3; channel++) {
                    double tmp = s_data[channel];
                    tmp = Math.pow(tmp / 255, gamma) * 255;

                    if (tmp < 0)
                        s_buff[channel] = 0;
                    else if (tmp > 255)
                        s_buff[channel] = (byte) 255;
                    else
                        s_buff[channel] = (byte) tmp;
                }

                g_buff[0] = (byte) 255;
            }
            src.put(row, col, s_buff);
            gray.put(row, col, g_buff);
        }
    }
    return gray;
}

From source file:opencv.CaptchaDetection.java

private static Mat k_means_spilter(Mat src) {
    Mat dst = Mat.zeros(src.size(), CvType.CV_8UC1);

    int width = src.cols();
    int height = src.rows();
    int dims = src.channels();

    // Number of k-means clusters
    int clusterCount = 3;

    Mat points = new Mat(width * height, dims, CvType.CV_32F, new Scalar(0));
    Mat centers = new Mat(clusterCount, dims, CvType.CV_32F);
    Mat labels = new Mat(width * height, 1, CvType.CV_32S);

    // Copy each pixel's channel values into the points matrix
    for (int row = 0; row < height; row++) {
        for (int col = 0; col < width; col++) {
            int index = row * width + col;
            double[] s_data = src.get(row, col);

            for (int channel = 0; channel < 3; channel++) {
                float[] f_buff = new float[1];
                f_buff[0] = (float) s_data[channel];

                points.put(index, channel, f_buff);
            }
        }
    }

    // Run k-means clustering
    TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 10, 0.1);
    Core.kmeans(points, clusterCount, labels, criteria, 3, Core.KMEANS_PP_CENTERS, centers);

    // Order the cluster labels by the sum of their centre values (brightness)
    Map<Integer, Integer> tmp = new TreeMap<>();
    for (int i = 0; i < clusterCount; i++) {
        int sum = 0;
        for (int j = 0; j < dims; j++) {
            sum += centers.get(i, j)[0];
        }
        while (tmp.containsKey(sum))
            sum++;
        tmp.put(sum, i);
    }

    int count = 0;
    int[] label_order = new int[clusterCount];
    for (Map.Entry<Integer, Integer> iter : tmp.entrySet()) {
        label_order[count++] = iter.getValue();
    }

    for (int row = 0; row < height; row++) {
        for (int col = 0; col < width; col++) {
            int index = row * width + col;
            int label = (int) labels.get(index, 0)[0];

            if (label == label_order[1]) {
                byte[] d_buff = new byte[1];
                d_buff[0] = (byte) 255;
                dst.put(row, col, d_buff);
            }
        }
    }

    return dst;
}

From source file:opencv.CaptchaDetection.java

private static Mat check_is_line(Mat src) {
    Mat dst = Mat.zeros(src.size(), CvType.CV_8UC1);

    int min_length = 3;

    // Scan for pixels whose left and right neighbours are black (thin vertical strokes)
    for (int row = 0; row < src.rows(); row++) {
        for (int col = 0; col < src.cols(); col++) {
            if (src.get(row, col)[0] == 0)
                continue;

            // Check whether the left and right neighbours are black
            boolean left_black = false, right_black = false;

            if (col == 0 || src.get(row, col - 1)[0] == 0)
                left_black = true;

            if (col == src.cols() - 1 || src.get(row, col + 1)[0] == 0)
                right_black = true;

            if (!left_black || !right_black)
                continue;

            // Measure the vertical run length
            int length = col_length(src, row, col);
            if (length > min_length) {
                byte[] d_buff = new byte[1];
                d_buff[0] = (byte) 255;
                dst.put(row, col, d_buff);
            }

        }
    }

    // Scan for pixels whose upper and lower neighbours are black (thin horizontal strokes)
    for (int row = 0; row < src.rows(); row++) {
        for (int col = 0; col < src.cols(); col++) {
            if (src.get(row, col)[0] == 0)
                continue;

            // Check whether the pixels above and below are black
            boolean up_black = false, down_black = false;

            if (row == 0 || src.get(row - 1, col)[0] == 0)
                up_black = true;

            if (row == src.rows() - 1 || src.get(row + 1, col)[0] == 0)
                down_black = true;

            if (!up_black || !down_black)
                continue;

            // Measure the horizontal run length
            int length = row_length(src, row, col);
            if (length > min_length) {
                byte[] d_buff = new byte[1];
                d_buff[0] = (byte) 255;
                dst.put(row, col, d_buff);
            }
        }
    }

    return dst;
}

From source file:opencv.CaptchaDetection.java

/***
 * Rotates an image about its centre.
 * @param src the source image
 * @param dst the destination image
 * @param degree the rotation angle in degrees (positive is counter-clockwise)
 */
private static void rotateImage(Mat src, Mat dst, int degree) {
    Point center = new Point(src.cols() / 2.0 + 0.5, src.rows() / 2.0 + 0.5);
    Mat M = Imgproc.getRotationMatrix2D(center, degree, 1.0);
    Imgproc.warpAffine(src, dst, M, src.size());
}
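A brief usage sketch (the captcha Mat is an assumption). Because warpAffine is called with src.size(), the rotated output keeps the input's dimensions:

    Mat rotated = new Mat();
    rotateImage(captcha, rotated, 15); // a positive degree rotates counter-clockwise about the centre
    // rotated.size() now equals captcha.size(), since warpAffine used src.size()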

From source file:opencvdemos.BallGame.java

License:Apache License

private Mat findAndDrawObjects(Mat maskedImage, Mat frame) {
    // Init
    List<MatOfPoint> contours = new ArrayList<>();
    Mat hierarchy = new Mat();

    // Find contours
    Imgproc.findContours(maskedImage, contours, hierarchy, Imgproc.RETR_CCOMP, Imgproc.CHAIN_APPROX_SIMPLE);

    // If any contours exist...
    if (hierarchy.size().height > 0 && hierarchy.size().width > 0) {
        // for each contour, display it in blue
        for (int idx = 0; idx >= 0; idx = (int) hierarchy.get(0, idx)[0]) {
            Imgproc.drawContours(frame, contours, idx, new Scalar(250, 0, 0));
        }
    }

    return frame;
}

From source file:org.akvo.caddisfly.sensor.colorimetry.strip.util.OpenCVUtil.java

License:Open Source License

public static Mat perspectiveTransform(double[] topLeft, double[] topRight, double[] bottomLeft,
        double[] bottomRight, Mat bgr) {

    // determine the size of the destination Mat: use the positions of the finder patterns to determine the width and height.
    // look out: the horizontal direction now refers again to the actual calibration card
    int verSize = (int) Math.round(
            Math.sqrt(Math.pow((topLeft[0] - topRight[0]), 2) + Math.pow((topLeft[1] - topRight[1]), 2)));
    int horSize = (int) Math.round(
            Math.sqrt(Math.pow((topLeft[0] - bottomLeft[0]), 2) + Math.pow((topLeft[1] - bottomLeft[1]), 2)));

    // we rotate the resulting image, so we go from a portrait view to the regular calibration card in landscape
    // so the mapping is:
    // top left source => top right destination
    // top right source => bottom right destination
    // bottom right source => bottom left destination
    // bottom left source => top left destination

    double[] trDest = new double[] { horSize - 1, 0 };
    double[] brDest = new double[] { horSize - 1, verSize - 1 };
    double[] blDest = new double[] { 0, verSize - 1 };
    double[] tlDest = new double[] { 0, 0 };

    Mat transformMatrix = transformMatrix(topLeft, topRight, bottomRight, bottomLeft, trDest, brDest, blDest,
            tlDest);

    //make a destination mat for a warp
    Mat warpMat = Mat.zeros(verSize, horSize, bgr.type());

    //do the warp
    Imgproc.warpPerspective(bgr, warpMat, transformMatrix, warpMat.size());
    return warpMat;
}

From source file:org.akvo.caddisfly.sensor.colorimetry.strip.util.OpenCVUtil.java

License:Open Source License

@SuppressWarnings("UnusedParameters")
public static Mat detectStrip(Mat stripArea, StripTest.Brand brand, double ratioW, double ratioH) {
    List<Mat> channels = new ArrayList<>();
    Mat sArea = stripArea.clone();

    // Median blur to suppress noise
    Imgproc.medianBlur(sArea, sArea, 3);
    Core.split(sArea, channels);

    // create binary image
    Mat binary = new Mat();

    // threshold the first channel with a fixed value (min/max determination is not used)
    Imgproc.threshold(channels.get(0), binary, 128, MAX_RGB_INT_VALUE, Imgproc.THRESH_BINARY);

    // compute first approximation of line through length of the strip
    final WeightedObservedPoints points = new WeightedObservedPoints();
    final WeightedObservedPoints corrPoints = new WeightedObservedPoints();

    double tot, yTot;
    for (int i = 0; i < binary.cols(); i++) { // iterate over cols
        tot = 0;
        yTot = 0;
        for (int j = 0; j < binary.rows(); j++) { // iterate over rows
            if (binary.get(j, i)[0] > 128) {
                yTot += j;
                tot++;
            }
        }
        if (tot > 0) {
            points.add((double) i, yTot / tot);
        }
    }

    // order of coefficients is (b + ax), so [b, a]
    final PolynomialCurveFitter fitter = PolynomialCurveFitter.create(1);
    List<WeightedObservedPoint> pointsList = points.toList();
    final double[] coefficient = fitter.fit(pointsList);

    // second pass, remove outliers
    double estimate, actual;

    for (int i = 0; i < pointsList.size(); i++) {
        estimate = coefficient[1] * pointsList.get(i).getX() + coefficient[0];
        actual = pointsList.get(i).getY();
        if (actual > LOWER_PERCENTAGE_BOUND * estimate && actual < UPPER_PERCENTAGE_BOUND * estimate) {
            //if the point differs less than +/- 10%, keep the point
            corrPoints.add(pointsList.get(i).getX(), pointsList.get(i).getY());
        }
    }

    final double[] coefficientCorr = fitter.fit(corrPoints.toList());
    double slope = coefficientCorr[1];
    double offset = coefficientCorr[0];

    // compute rotation angle
    double rotAngleDeg = Math.atan(slope) * 180 / Math.PI;

    //determine a point on the line, in the middle of strip, in the horizontal middle of the whole image
    int midPointX = binary.cols() / 2;
    int midPointY = (int) Math.round(midPointX * slope + offset);

    // rotate around the midpoint, to straighten the binary strip
    Mat dstBinary = new Mat(binary.rows(), binary.cols(), binary.type());
    Point center = new Point(midPointX, midPointY);
    Mat rotMat = Imgproc.getRotationMatrix2D(center, rotAngleDeg, 1.0);
    Imgproc.warpAffine(binary, dstBinary, rotMat, binary.size(),
            Imgproc.INTER_CUBIC + Imgproc.WARP_FILL_OUTLIERS);

    // also apply rotation to colored strip
    Mat dstStrip = new Mat(stripArea.rows(), stripArea.cols(), stripArea.type());
    Imgproc.warpAffine(stripArea, dstStrip, rotMat, binary.size(),
            Imgproc.INTER_CUBIC + Imgproc.WARP_FILL_OUTLIERS);

    // Compute white points in each row
    double[] rowCount = new double[dstBinary.rows()];
    int rowTot;
    for (int i = 0; i < dstBinary.rows(); i++) { // iterate over rows
        rowTot = 0;
        for (int j = 0; j < dstBinary.cols(); j++) { // iterate over cols
            if (dstBinary.get(i, j)[0] > 128) {
                rowTot++;
            }
        }
        rowCount[i] = rowTot;
    }

    // find width by finding rising and dropping edges
    // rising edge  = largest positive difference
    // falling edge = largest negative difference
    int risePos = 0;
    int fallPos = 0;
    double riseVal = 0;
    double fallVal = 0;
    for (int i = 0; i < dstBinary.rows() - 1; i++) {
        if (rowCount[i + 1] - rowCount[i] > riseVal) {
            riseVal = rowCount[i + 1] - rowCount[i];
            risePos = i + 1;
        }
        if (rowCount[i + 1] - rowCount[i] < fallVal) {
            fallVal = rowCount[i + 1] - rowCount[i];
            fallPos = i;
        }
    }

    // cut out binary strip
    Point stripTopLeft = new Point(0, risePos);
    Point stripBottomRight = new Point(dstBinary.cols(), fallPos);

    org.opencv.core.Rect stripAreaRect = new org.opencv.core.Rect(stripTopLeft, stripBottomRight);
    Mat binaryStrip = dstBinary.submat(stripAreaRect);

    // also cut out colored strip
    Mat colorStrip = dstStrip.submat(stripAreaRect);

    // now right end of strip
    // method: first rising edge

    double[] colCount = new double[binaryStrip.cols()];
    int colTotal;
    for (int i = 0; i < binaryStrip.cols(); i++) { // iterate over cols
        colTotal = 0;
        for (int j = 0; j < binaryStrip.rows(); j++) { // iterate over rows
            if (binaryStrip.get(j, i)[0] > 128) {
                colTotal++;
            }
        }

        //Log.d("Caddisfly", String.valueOf(colTotal));
        colCount[i] = colTotal;
    }

    stripAreaRect = getStripRectangle(binaryStrip, colCount, brand.getStripLength(), ratioW);

    Mat resultStrip = colorStrip.submat(stripAreaRect).clone();

    // release Mat objects
    stripArea.release();
    sArea.release();
    binary.release();
    dstBinary.release();
    dstStrip.release();
    binaryStrip.release();
    colorStrip.release();

    return resultStrip;
}

From source file:org.ar.rubik.CubePoseEstimator.java

License:Open Source License

/**
 * Pose Estimation
 * 
 * Deduce real world cube coordinates and rotation
 * 
 * @param rubikFace
 * @param image 
 * @param stateModel 
 * @return 
 */
public static CubePose poseEstimation(RubikFace rubikFace, Mat image, StateModel stateModel) {

    if (rubikFace == null)
        return null;

    if (rubikFace.faceRecognitionStatus != FaceRecognitionStatusEnum.SOLVED)
        return null;

    LeastMeansSquare lmsResult = rubikFace.lmsResult;

    if (lmsResult == null)
        return null;

    // OpenCV pose estimation requires at least four points; this code demands more than four.
    if (rubikFace.rhombusList.size() <= 4)
        return null;

    if (cameraMatrix == null) {
        cameraMatrix = stateModel.cameraCalibration.getOpenCVCameraMatrix((int) (image.size().width),
                (int) (image.size().height));
        distCoeffs = new MatOfDouble(stateModel.cameraCalibration.getDistortionCoefficients());
    }

    /*
     * For the purposes of external camera calibration: i.e., where the cube is 
     * located in camera coordinates, we define the geometry of the face of a
     * cube composed of nine 3D locations each representing the center of each tile.
     * Correspondences between these points and nine 2D points from the actual
     * camera image, along with camera calibration data, are used to calculate
     * the Pose of the Cube (i.e. "Cube Pose").
     * 
     * The geometry of the cube here is defined as having center at {0,0,0},
     * and edge size of 2 units (i.e., +/- 1.0).
     */

    // List of real world point and screen points that correspond.
    List<Point3> objectPointsList = new ArrayList<Point3>(9);
    List<Point> imagePointsList = new ArrayList<Point>(9);

    // Create list of image (in 2D) and object (in 3D) points.
    // Loop over Rubik Face Tiles
    for (int n = 0; n < 3; n++) {
        for (int m = 0; m < 3; m++) {

            Rhombus rhombus = rubikFace.faceRhombusArray[n][m];

            // Only use if Rhombus was non null.
            if (rhombus != null) {

                // Obtain center of Rhombus in screen image coordinates
                // Convention:
                //  o X is zero on the left, and increases to the right.
                //  o Y is zero on the top and increases downward.
                Point imagePoint = new Point(rhombus.center.x, rhombus.center.y);
                imagePointsList.add(imagePoint);

                // N and M are actual not conceptual (as in design doc).
                int mm = 2 - n;
                int nn = 2 - m;
                // above now matches design doc.
                // that is:
                //  o the nn vector is to the right and upwards.
                //  o the mm vector is to the left and upwards.

                // Calculate center of Tile in OpenCV World Space Coordinates
                // Convention:
                //  o X is zero in the center, and increases to the left.
                //  o Y is zero in the center and increases downward.
                //  o Z is zero (at the world coordinate origin) and increases away from the camera.
                float x = (1 - mm) * 0.66666f;
                float y = -1.0f;
                float z = -1.0f * (1 - nn) * 0.666666f;
                Point3 objectPoint = new Point3(x, y, z);
                objectPointsList.add(objectPoint);
            }
        }
    }

    // Cast image point list into OpenCV Matrix.
    MatOfPoint2f imagePoints = new MatOfPoint2f();
    imagePoints.fromList(imagePointsList);

    // Cast object point list into OpenCV Matrix.
    MatOfPoint3f objectPoints = new MatOfPoint3f();
    objectPoints.fromList(objectPointsList);

    Mat rvec = new Mat();
    Mat tvec = new Mat();

    //      Log.e(Constants.TAG, "Image Points: " + imagePoints.dump());
    //      Log.e(Constants.TAG, "Object Points: " + objectPoints.dump());

    //      =+= sometimes a "count >= 4" exception 
    Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

    Log.v(Constants.TAG, String.format("Open CV Rotation Vector x=%4.2f y=%4.2f z=%4.2f", rvec.get(0, 0)[0],
            rvec.get(1, 0)[0], rvec.get(2, 0)[0]));

    // Convert from OpenCV to OpenGL World Coordinates
    float x = +1.0f * (float) tvec.get(0, 0)[0];
    float y = -1.0f * (float) tvec.get(1, 0)[0];
    float z = -1.0f * (float) tvec.get(2, 0)[0];

    //        // =+= Add manual offset correction to translation  
    //        x += MenuAndParams.xTranslationOffsetParam.value;
    //        y += MenuAndParams.yTranslationOffsetParam.value;
    //        z += MenuAndParams.zTranslationOffsetParam.value;      

    // Convert Rotation Vector from the OpenCV axis polarity definition to the OpenGL definition
    // Note, polarity of x-axis is OK, no need to invert.
    rvec.put(1, 0, -1.0f * rvec.get(1, 0)[0]); // y-axis
    rvec.put(2, 0, -1.0f * rvec.get(2, 0)[0]); // z-axis

    //        // =+= Add manual offset correction to Rotation
    //        rvec.put(0, 0, rvec.get(0, 0)[0] + MenuAndParams.xRotationOffsetParam.value * Math.PI / 180.0);  // X rotation
    //        rvec.put(1, 0, rvec.get(1, 0)[0] + MenuAndParams.yRotationOffsetParam.value * Math.PI / 180.0);  // Y rotation
    //        rvec.put(2, 0, rvec.get(2, 0)[0] + MenuAndParams.zRotationOffsetParam.value * Math.PI / 180.0);  // Z rotation

    // Package up as CubePose object
    CubePose cubePose = new CubePose();
    cubePose.x = x;
    cubePose.y = y;
    cubePose.z = z;
    cubePose.xRotation = rvec.get(0, 0)[0];
    cubePose.yRotation = rvec.get(1, 0)[0];
    cubePose.zRotation = rvec.get(2, 0)[0];

    //      Log.e(Constants.TAG, "Result: " + result);
    //      Log.e(Constants.TAG, "Camera: " + cameraMatrix.dump());
    //      Log.e(Constants.TAG, "Rotation: " + rvec.dump());
    //      Log.e(Constants.TAG, "Translation: " + tvec.dump());

    //      // Reporting in OpenGL World Coordinates
    //      Core.rectangle(image, new Point(0, 50), new Point(1270, 150), Constants.ColorBlack, -1);
    //      Core.putText(image, String.format("Translation  x=%4.2f y=%4.2f z=%4.2f", x, y, z), new Point(50, 100), Constants.FontFace, 3, Constants.ColorWhite, 3);
    //      Core.putText(image, String.format("Rotation     x=%4.0f y=%4.0f z=%4.0f", cubeXrotation, cubeYrotation, cubeZrotation), new Point(50, 150), Constants.FontFace, 3, Constants.ColorWhite, 3);

    Log.v(Constants.TAG, "Cube Pose: " + cubePose);

    return cubePose;
}

From source file:org.ar.rubik.ImageRecognizer.java

License:Open Source License

/**
 * On Camera Frame
 * 
 * Process frame image through Rubik Face recognition possibly resulting in a state change.
 * 
 *  (non-Javadoc)
 * @see org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2#onCameraFrame(org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame)
 */
@Override
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {

    //      Log.e(Constants.TAG, "CV Thread ID = " + Thread.currentThread().getId());

    // Just display error message if it is non-null.
    if (errorImage != null)
        return errorImage;

    Mat image = inputFrame.rgba();
    Size imageSize = image.size();
    Log.v(Constants.TAG_CAL, "Input Frame width=" + imageSize.width + " height=" + imageSize.height);
    if (imageSize.width != stateModel.openCVSize.width || imageSize.height != stateModel.openCVSize.height)
        Log.e(Constants.TAG_CAL, "State Model openCVSize does not agree with input frame!");

    // Save or Recall image as requested
    switch (MenuAndParams.imageSourceMode) {
    case NORMAL:
        break;
    case SAVE_NEXT:
        Util.saveImage(image);
        MenuAndParams.imageSourceMode = ImageSourceModeEnum.NORMAL;
        break;
    case PLAYBACK:
        image = Util.recallImage();
    default:
        break;
    }

    // Calculate and display Frames Per Second
    long newTimeStamp = System.currentTimeMillis();
    if (framesPerSecondTimeStamp > 0) {
        long frameTime = newTimeStamp - framesPerSecondTimeStamp;
        double framesPerSecond = 1000.0 / frameTime;
        String string = String.format("%4.1f FPS", framesPerSecond);
        Core.putText(image, string, new Point(50, 700), Constants.FontFace, 2, ColorTileEnum.WHITE.cvColor, 2);
    }
    framesPerSecondTimeStamp = newTimeStamp;

    try {

        // Initialize
        RubikFace rubikFace = new RubikFace();
        rubikFace.profiler.markTime(Profiler.Event.START);
        Log.i(Constants.TAG, "============================================================================");

        /* **********************************************************************
         * **********************************************************************
         * Return Original Image
         */
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.DIRECT) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            return annotation.drawAnnotation(image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Process to Grey Scale
         * 
         * This algorithm finds and highlights areas that are all of nearly
         * the same hue.  In particular, cube faces should be highlighted.
         */
        Mat greyscale_image = new Mat();
        Imgproc.cvtColor(image, greyscale_image, Imgproc.COLOR_BGR2GRAY);
        rubikFace.profiler.markTime(Profiler.Event.GREYSCALE);
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.GREYSCALE) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(greyscale_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Gaussian Filter Blur prevents getting a lot of false hits 
         */
        Mat blur_image = new Mat();

        int kernelSize = (int) MenuAndParams.gaussianBlurKernelSizeParam.value;
        kernelSize = kernelSize % 2 == 0 ? kernelSize + 1 : kernelSize; // make odd
        Imgproc.GaussianBlur(greyscale_image, blur_image, new Size(kernelSize, kernelSize), -1, -1);
        rubikFace.profiler.markTime(Profiler.Event.GAUSSIAN);
        greyscale_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.GAUSSIAN) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(blur_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Canny Edge Detection
         */
        Mat canny_image = new Mat();
        Imgproc.Canny(blur_image, canny_image, MenuAndParams.cannyLowerThresholdParam.value,
                MenuAndParams.cannyUpperThresholdParam.value, 3, // Sobel Aperture size.  This seems to be typically value used in the literature: i.e., a 3x3 Sobel Matrix.
                false); // use cheap gradient calculation: norm =|dI/dx|+|dI/dy|
        rubikFace.profiler.markTime(Profiler.Event.EDGE);
        blur_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.CANNY) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(canny_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Dilation Image Process
         */
        Mat dilate_image = new Mat();
        Imgproc.dilate(canny_image, dilate_image, Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(
                MenuAndParams.dilationKernelSizeParam.value, MenuAndParams.dilationKernelSizeParam.value)));
        rubikFace.profiler.markTime(Profiler.Event.DILATION);
        canny_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.DILATION) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(dilate_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Contour Generation 
         */
        List<MatOfPoint> contours = new LinkedList<MatOfPoint>();
        Mat hierarchy = new Mat();
        Imgproc.findContours(dilate_image, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE); // Note: tried other TC89 options, but no significant change or improvement on cpu time.
        rubikFace.profiler.markTime(Profiler.Event.CONTOUR);
        dilate_image.release();

        // Create gray scale image but in RGB format, and then add yellow colored contours on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.CONTOUR) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4);
            Imgproc.drawContours(rgba_gray_image, contours, -1, ColorTileEnum.YELLOW.cvColor, 3);
            Core.putText(rgba_gray_image, "Num Contours: " + contours.size(), new Point(500, 50),
                    Constants.FontFace, 4, ColorTileEnum.RED.cvColor, 4);
            gray_image.release();
            image.release();
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Polygon Detection
         */
        List<Rhombus> polygonList = new LinkedList<Rhombus>();
        for (MatOfPoint contour : contours) {

            // Keep only counter clockwise contours.  A clockwise contour is reported as a negative number.
            double contourArea = Imgproc.contourArea(contour, true);
            if (contourArea < 0.0)
                continue;

            // Keep only reasonable area contours
            if (contourArea < MenuAndParams.minimumContourAreaParam.value)
                continue;

            // Floating point (CV_32F), rather than double, is required by the approximate polygon detection algorithm.
            MatOfPoint2f contour2f = new MatOfPoint2f();
            MatOfPoint2f polygon2f = new MatOfPoint2f();
            MatOfPoint polygon = new MatOfPoint();

            // Make a polygon out of a contour with the provided epsilon accuracy parameter.
            // It uses the Douglas-Peucker algorithm http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
            contour.convertTo(contour2f, CvType.CV_32FC2);
            Imgproc.approxPolyDP(contour2f, polygon2f, MenuAndParams.polygonEpsilonParam.value, // The maximum distance between the original curve and its approximation.
                    true); // Resulting polygon representation is "closed:" its first and last vertices are connected.
            polygon2f.convertTo(polygon, CvType.CV_32S);

            polygonList.add(new Rhombus(polygon));
        }

        rubikFace.profiler.markTime(Profiler.Event.POLYGON);

        // Create gray scale image but in RGB format, and then add yellow colored polygons on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.POLYGON) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4);
            for (Rhombus polygon : polygonList)
                polygon.draw(rgba_gray_image, ColorTileEnum.YELLOW.cvColor);
            Core.putText(rgba_gray_image, "Num Polygons: " + polygonList.size(), new Point(500, 50),
                    Constants.FontFace, 3, ColorTileEnum.RED.cvColor, 4);
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Rhombus Tile Recognition
         * 
         * From polygon list, produces a list of suitable Parallelograms (Rhombi).
         */
        Log.i(Constants.TAG, String.format("Rhombus:   X    Y   Area   a-a  b-a a-l b-l gamma"));
        List<Rhombus> rhombusList = new LinkedList<Rhombus>();
        // Get only valid Rhombus(es) : actually parallelograms.
        for (Rhombus rhombus : polygonList) {
            rhombus.qualify();
            if (rhombus.status == Rhombus.StatusEnum.VALID)
                rhombusList.add(rhombus);
        }

        // Filtering w.r.t. Rhombus set characteristics
        Rhombus.removedOutlierRhombi(rhombusList);

        rubikFace.profiler.markTime(Profiler.Event.RHOMBUS);

        // Create gray scale image but in RGB format, and then add yellow colored Rhombi (parallelograms) on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.RHOMBUS) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4);
            for (Rhombus rhombus : rhombusList)
                rhombus.draw(rgba_gray_image, ColorTileEnum.YELLOW.cvColor);
            Core.putText(rgba_gray_image, "Num Rhombus: " + rhombusList.size(), new Point(500, 50),
                    Constants.FontFace, 4, ColorTileEnum.RED.cvColor, 4);
            gray_image.release();
            image.release();
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Face Recognition
         * 
         * Takes a collection of Rhombus objects and determines if a valid
         * Rubik Face can be determined from them, and then also determines 
         * initial color for all nine tiles. 
         */
        rubikFace.processRhombuses(rhombusList, image);
        Log.i(Constants.TAG, "Face Solution = " + rubikFace.faceRecognitionStatus);
        rubikFace.profiler.markTime(Profiler.Event.FACE);
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.FACE_DETECT) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            return annotation.drawAnnotation(image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Cube Pose Estimation
         * 
         * Reconstruct the Rubik Cube 3D location and orientation in GL space coordinates.
         */
        if (rubikFace.faceRecognitionStatus == FaceRecognitionStatusEnum.SOLVED) {

            // Obtain Cube Pose from Face Grid information.
            stateModel.cubePose = CubePoseEstimator.poseEstimation(rubikFace, image, stateModel);

            // Process measurement update on Kalman Filter (if it exists).
            KalmanFilter kalmanFilter = stateModel.kalmanFilter;
            if (kalmanFilter != null)
                kalmanFilter.measurementUpdate(stateModel.cubePose, System.currentTimeMillis());

            // Process measurement update on Kalman Filter ALSM (if it exists).
            KalmanFilterALSM kalmanFilterALSM = stateModel.kalmanFilterALSM;
            if (kalmanFilterALSM != null)
                kalmanFilterALSM.measurementUpdate(stateModel.cubePose, System.currentTimeMillis());
        } else {
            stateModel.cubePose = null;
        }
        rubikFace.profiler.markTime(Profiler.Event.POSE);

        /* **********************************************************************
         * **********************************************************************
         * Application State Machine
         * 
         * Will provide user instructions.
         * Will determine when we are on-face and off-face
         * Will determine when we are on-new-face
         * Will change state 
         */
        appStateMachine.onFaceEvent(rubikFace);
        rubikFace.profiler.markTime(Profiler.Event.CONTROLLER);
        rubikFace.profiler.markTime(Profiler.Event.TOTAL);

        // Normal return point.
        stateModel.activeRubikFace = rubikFace;
        return annotation.drawAnnotation(image);

        // =+= Issue: how to get stdio to print as error and not warning in logcat?
    } catch (CvException e) {
        Log.e(Constants.TAG, "CvException: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "CvException: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    } catch (Exception e) {
        Log.e(Constants.TAG, "Exception: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "Exception: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    } catch (Error e) {
        Log.e(Constants.TAG, "Error: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "Error: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    }

    return annotation.drawAnnotation(image);
}