Example usage for org.opencv.core Mat put

List of usage examples for org.opencv.core Mat put

Introduction

On this page you can find example usage for org.opencv.core.Mat.put.

Prototype

public int put(int row, int col, byte[] data) 
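
Before the project examples, a minimal sketch of the prototype above (illustrative only; the sizes and values are made up): put copies the byte array into the matrix starting at the given element, and returns the number of values written.

// Sketch: write one 3-channel pixel and read it back (assumes the OpenCV
// native library is loaded and org.opencv.core.* is imported).
Mat img = new Mat(10, 10, CvType.CV_8UC3, new Scalar(0, 0, 0));
byte[] pixel = new byte[] { (byte) 255, (byte) 0, (byte) 0 };
int written = img.put(5, 5, pixel); // copies 3 bytes starting at element (5,5)
byte[] check = new byte[3];
img.get(5, 5, check);               // check now holds the same three values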

Usage

From source file: org.akvo.caddisfly.sensor.colorimetry.strip.calibration.CalibrationCard.java

License: Open Source License

@NonNull
private static Mat doIlluminationCorrection(@NonNull Mat imgLab, @NonNull CalibrationData calData) {
    // create HLS image for homogeneous illumination calibration
    int pHeight = imgLab.rows();
    int pWidth = imgLab.cols();

    RealMatrix points = createWhitePointMatrix(imgLab, calData);

    // create coefficient matrix for all three variables L,A,B
    // the model for all three is v = ax + bx^2 + cy + dy^2 + exy + f
    // the 6th column is the constant 1
    RealMatrix coefficient = new Array2DRowRealMatrix(points.getRowDimension(), 6);
    coefficient.setColumnMatrix(0, points.getColumnMatrix(0));
    coefficient.setColumnMatrix(2, points.getColumnMatrix(1));

    //create constant, x^2, y^2 and xy terms
    for (int i = 0; i < points.getRowDimension(); i++) {
        coefficient.setEntry(i, 1, Math.pow(coefficient.getEntry(i, 0), 2)); // x^2
        coefficient.setEntry(i, 3, Math.pow(coefficient.getEntry(i, 2), 2)); // y^2
        coefficient.setEntry(i, 4, coefficient.getEntry(i, 0) * coefficient.getEntry(i, 2)); // xy
        coefficient.setEntry(i, 5, 1d); // constant = 1
    }

    // create vectors
    RealVector L = points.getColumnVector(2);
    RealVector A = points.getColumnVector(3);
    RealVector B = points.getColumnVector(4);

    // solve the least squares problem for all three variables
    DecompositionSolver solver = new SingularValueDecomposition(coefficient).getSolver();
    RealVector solutionL = solver.solve(L);
    RealVector solutionA = solver.solve(A);
    RealVector solutionB = solver.solve(B);

    // get individual coefficients
    float La = (float) solutionL.getEntry(0);
    float Lb = (float) solutionL.getEntry(1);
    float Lc = (float) solutionL.getEntry(2);
    float Ld = (float) solutionL.getEntry(3);
    float Le = (float) solutionL.getEntry(4);
    float Lf = (float) solutionL.getEntry(5);

    float Aa = (float) solutionA.getEntry(0);
    float Ab = (float) solutionA.getEntry(1);
    float Ac = (float) solutionA.getEntry(2);
    float Ad = (float) solutionA.getEntry(3);
    float Ae = (float) solutionA.getEntry(4);
    float Af = (float) solutionA.getEntry(5);

    float Ba = (float) solutionB.getEntry(0);
    float Bb = (float) solutionB.getEntry(1);
    float Bc = (float) solutionB.getEntry(2);
    float Bd = (float) solutionB.getEntry(3);
    float Be = (float) solutionB.getEntry(4);
    float Bf = (float) solutionB.getEntry(5);

    // compute mean (the luminosity value of the plane in the middle of the image)
    float L_mean = (float) (0.5 * La * pWidth + 0.5 * Lc * pHeight + Lb * pWidth * pWidth / 3.0
            + Ld * pHeight * pHeight / 3.0 + Le * 0.25 * pHeight * pWidth + Lf);
    float A_mean = (float) (0.5 * Aa * pWidth + 0.5 * Ac * pHeight + Ab * pWidth * pWidth / 3.0
            + Ad * pHeight * pHeight / 3.0 + Ae * 0.25 * pHeight * pWidth + Af);
    float B_mean = (float) (0.5 * Ba * pWidth + 0.5 * Bc * pHeight + Bb * pWidth * pWidth / 3.0
            + Bd * pHeight * pHeight / 3.0 + Be * 0.25 * pHeight * pWidth + Bf);

    // Correct image
    // we do this per row. We tried to do it in one block, but there is no speed difference.
    byte[] temp = new byte[imgLab.cols() * imgLab.channels()];
    int valL, valA, valB;
    int ii, ii3;
    float iiSq, iSq;
    int imgCols = imgLab.cols();
    int imgRows = imgLab.rows();

    // use lookup tables to speed up computation
    // create lookup tables
    float[] L_aii = new float[imgCols];
    float[] L_biiSq = new float[imgCols];
    float[] A_aii = new float[imgCols];
    float[] A_biiSq = new float[imgCols];
    float[] B_aii = new float[imgCols];
    float[] B_biiSq = new float[imgCols];

    float[] Lci = new float[imgRows];
    float[] LdiSq = new float[imgRows];
    float[] Aci = new float[imgRows];
    float[] AdiSq = new float[imgRows];
    float[] Bci = new float[imgRows];
    float[] BdiSq = new float[imgRows];

    for (ii = 0; ii < imgCols; ii++) {
        iiSq = ii * ii;
        L_aii[ii] = La * ii;
        L_biiSq[ii] = Lb * iiSq;
        A_aii[ii] = Aa * ii;
        A_biiSq[ii] = Ab * iiSq;
        B_aii[ii] = Ba * ii;
        B_biiSq[ii] = Bb * iiSq;
    }

    for (int i = 0; i < imgRows; i++) {
        iSq = i * i;
        Lci[i] = Lc * i;
        LdiSq[i] = Ld * iSq;
        Aci[i] = Ac * i;
        AdiSq[i] = Ad * iSq;
        Bci[i] = Bc * i;
        BdiSq[i] = Bd * iSq;
    }

    // We can also improve the performance of the i,ii term, if we want, but it won't make much difference.
    for (int i = 0; i < imgRows; i++) { // y
        imgLab.get(i, 0, temp);
        ii3 = 0;
        for (ii = 0; ii < imgCols; ii++) { //x
            valL = capValue(
                    Math.round((temp[ii3] & 0xFF)
                            - (L_aii[ii] + L_biiSq[ii] + Lci[i] + LdiSq[i] + Le * i * ii + Lf) + L_mean),
                    0, 255);
            valA = capValue(
                    Math.round((temp[ii3 + 1] & 0xFF)
                            - (A_aii[ii] + A_biiSq[ii] + Aci[i] + AdiSq[i] + Ae * i * ii + Af) + A_mean),
                    0, 255);
            valB = capValue(
                    Math.round((temp[ii3 + 2] & 0xFF)
                            - (B_aii[ii] + B_biiSq[ii] + Bci[i] + BdiSq[i] + Be * i * ii + Bf) + B_mean),
                    0, 255);

            temp[ii3] = (byte) valL;
            temp[ii3 + 1] = (byte) valA;
            temp[ii3 + 2] = (byte) valB;
            ii3 += 3;
        }
        imgLab.put(i, 0, temp);
    }

    return imgLab;
}
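
The row-wise bulk get/put pattern above is what keeps this method fast in Java. A minimal sketch of the same pattern in isolation, assuming a CV_8UC3 Mat named img, applying a constant shift to every channel:

// Sketch: per-row bulk read, modify, write (assumes CV_8UC3 input).
byte[] row = new byte[img.cols() * img.channels()];
for (int i = 0; i < img.rows(); i++) {
    img.get(i, 0, row);                   // copy one full row out
    for (int j = 0; j < row.length; j++) {
        int v = (row[j] & 0xFF) + 10;     // unsigned byte arithmetic
        row[j] = (byte) Math.min(255, v);
    }
    img.put(i, 0, row);                   // copy the row back in
}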

From source file: org.akvo.caddisfly.sensor.colorimetry.strip.calibration.CalibrationCard.java

License: Open Source License

@NonNull
private static Mat do1D_3DCorrection(@NonNull Mat imgMat, @Nullable CalibrationData calData)
        throws CalibrationException {

    if (calData == null) {
        throw new CalibrationException("no calibration data.");
    }

    final WeightedObservedPoints obsL = new WeightedObservedPoints();
    final WeightedObservedPoints obsA = new WeightedObservedPoints();
    final WeightedObservedPoints obsB = new WeightedObservedPoints();

    Map<String, double[]> calResultIllumination = new HashMap<>();
    // iterate over all patches
    try {
        for (String label : calData.getCalValues().keySet()) {
            CalibrationData.CalValue cal = calData.getCalValues().get(label);
            CalibrationData.Location loc = calData.getLocations().get(label);
            float[] LAB_color = measurePatch(imgMat, loc.x, loc.y, calData); // measure patch color
            obsL.add(LAB_color[0], cal.getL());
            obsA.add(LAB_color[1], cal.getA());
            obsB.add(LAB_color[2], cal.getB());
            calResultIllumination.put(label, new double[] { LAB_color[0], LAB_color[1], LAB_color[2] });
        }
    } catch (Exception e) {
        throw new CalibrationException("1D calibration: error iterating over all patches.", e);
    }

    // Instantiate a second-degree polynomial fitter.
    final PolynomialCurveFitter fitter = PolynomialCurveFitter.create(2);

    // Retrieve fitted parameters (coefficients of the polynomial function).
    // order of coefficients is (c + bx + ax^2), so [c,b,a]
    try {
        final double[] coefficientL = fitter.fit(obsL.toList());
        final double[] coefficientA = fitter.fit(obsA.toList());
        final double[] coefficientB = fitter.fit(obsB.toList());

        double[] valIllumination;
        double L_orig, A_orig, B_orig, L_new, A_new, B_new;

        // transform patch values using the 1d calibration results
        Map<String, double[]> calResult1D = new HashMap<>();
        for (String label : calData.getCalValues().keySet()) {
            valIllumination = calResultIllumination.get(label);

            L_orig = valIllumination[0];
            A_orig = valIllumination[1];
            B_orig = valIllumination[2];

            L_new = coefficientL[2] * L_orig * L_orig + coefficientL[1] * L_orig + coefficientL[0];
            A_new = coefficientA[2] * A_orig * A_orig + coefficientA[1] * A_orig + coefficientA[0];
            B_new = coefficientB[2] * B_orig * B_orig + coefficientB[1] * B_orig + coefficientB[0];

            calResult1D.put(label, new double[] { L_new, A_new, B_new });
        }

        // use the 1D calibration result for the second calibration step
        // Following http://docs.scipy.org/doc/scipy/reference/tutorial/linalg.html#solving-linear-least-squares-problems-and-pseudo-inverses
        // we will solve P = M x
        int total = calData.getLocations().keySet().size();
        RealMatrix coefficient = new Array2DRowRealMatrix(total, 3);
        RealMatrix cal = new Array2DRowRealMatrix(total, 3);
        int index = 0;

        // create coefficient and calibration vectors
        for (String label : calData.getCalValues().keySet()) {
            CalibrationData.CalValue calv = calData.getCalValues().get(label);
            double[] cal1dResult = calResult1D.get(label);
            coefficient.setEntry(index, 0, cal1dResult[0]);
            coefficient.setEntry(index, 1, cal1dResult[1]);
            coefficient.setEntry(index, 2, cal1dResult[2]);

            cal.setEntry(index, 0, calv.getL());
            cal.setEntry(index, 1, calv.getA());
            cal.setEntry(index, 2, calv.getB());
            index++;
        }

        DecompositionSolver solver = new SingularValueDecomposition(coefficient).getSolver();
        RealMatrix sol = solver.solve(cal);

        float a_L, b_L, c_L, a_A, b_A, c_A, a_B, b_B, c_B;
        a_L = (float) sol.getEntry(0, 0);
        b_L = (float) sol.getEntry(1, 0);
        c_L = (float) sol.getEntry(2, 0);
        a_A = (float) sol.getEntry(0, 1);
        b_A = (float) sol.getEntry(1, 1);
        c_A = (float) sol.getEntry(2, 1);
        a_B = (float) sol.getEntry(0, 2);
        b_B = (float) sol.getEntry(1, 2);
        c_B = (float) sol.getEntry(2, 2);

        //use the solution to correct the image
        double L_temp, A_temp, B_temp, L_mid, A_mid, B_mid;
        int L_fin, A_fin, B_fin;
        int ii3;
        byte[] temp = new byte[imgMat.cols() * imgMat.channels()];
        for (int i = 0; i < imgMat.rows(); i++) { // y
            imgMat.get(i, 0, temp);
            ii3 = 0;
            for (int ii = 0; ii < imgMat.cols(); ii++) { //x
                L_temp = temp[ii3] & 0xFF;
                A_temp = temp[ii3 + 1] & 0xFF;
                B_temp = temp[ii3 + 2] & 0xFF;

                L_mid = coefficientL[2] * L_temp * L_temp + coefficientL[1] * L_temp + coefficientL[0];
                A_mid = coefficientA[2] * A_temp * A_temp + coefficientA[1] * A_temp + coefficientA[0];
                B_mid = coefficientB[2] * B_temp * B_temp + coefficientB[1] * B_temp + coefficientB[0];

                L_fin = (int) Math.round(a_L * L_mid + b_L * A_mid + c_L * B_mid);
                A_fin = (int) Math.round(a_A * L_mid + b_A * A_mid + c_A * B_mid);
                B_fin = (int) Math.round(a_B * L_mid + b_B * A_mid + c_B * B_mid);

                // cap values
                L_fin = capValue(L_fin, 0, 255);
                A_fin = capValue(A_fin, 0, 255);
                B_fin = capValue(B_fin, 0, 255);

                temp[ii3] = (byte) L_fin;
                temp[ii3 + 1] = (byte) A_fin;
                temp[ii3 + 2] = (byte) B_fin;

                ii3 += 3;
            }
            imgMat.put(i, 0, temp);
        }

        return imgMat;
    } catch (Exception e) {
        throw new CalibrationException("error while performing calibration: ", e);
    }
}
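
The 1D step above leans on Apache Commons Math. As a reference point, a self-contained sketch of fitting and evaluating a second-degree polynomial (the fitter returns coefficients in ascending order, [c, b, a], matching the comment in the code; the data points here are made up):

// Sketch: quadratic fit with org.apache.commons.math3.fitting.
WeightedObservedPoints obs = new WeightedObservedPoints();
obs.add(10.0, 12.0);   // (measured, reference) pairs
obs.add(50.0, 49.0);
obs.add(200.0, 205.0);
double[] c = PolynomialCurveFitter.create(2).fit(obs.toList());
double measured = 128.0;
double corrected = c[2] * measured * measured + c[1] * measured + c[0];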

From source file: org.akvo.caddisfly.sensor.colorimetry.strip.calibration.CalibrationCard.java

License: Open Source License

private static void addPatch(@NonNull Mat imgMat, Double x, Double y, @NonNull CalibrationData calData,
        String label) {

    CalibrationData.CalValue calValue = calData.getCalValues().get(label);
    calData.hSizePixel = imgMat.cols();
    double hPixels = calData.hSizePixel / calData.hSize; // pixel per mm
    calData.vSizePixel = imgMat.rows();
    double vPixels = calData.vSizePixel / calData.vSize; // pixel per mm

    int xp = (int) Math.round(x * hPixels);
    int yp = (int) Math.round(y * vPixels);
    int dp = (int) Math.round(calData.getPatchSize() * hPixels * 0.150);
    for (int i = -dp; i <= dp; i++) {
        for (int ii = -dp; ii <= dp; ii++) {
            byte[] col = new byte[3];
            col[0] = (byte) Math.round(calValue.getL());
            col[1] = (byte) Math.round(calValue.getA());
            col[2] = (byte) Math.round(calValue.getB());
            imgMat.put(yp + i, xp + ii, col);
        }
    }
}
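
Writing one pixel per put call, as in the nested loop above, is easy to read but slow on large patches. A sketch of an equivalent single-call fill using a submatrix view (assuming the patch lies fully inside the image, which the loop above does not check either):

// Sketch: fill the same square region in one call via a submat view.
Mat patch = imgMat.submat(yp - dp, yp + dp + 1, xp - dp, xp + dp + 1);
patch.setTo(new Scalar(calValue.getL(), calValue.getA(), calValue.getB()));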

From source file: org.akvo.caddisfly.sensor.colorimetry.strip.detect.DetectStripTask.java

License: Open Source License

private Mat makeLab(byte[] data) {
    if (format == ImageFormat.NV21) {
        //convert preview data to Mat object in CIELab format
        Mat rgb = new Mat(height, width, CvType.CV_8UC3);
        Mat labImg = new Mat(height, width, CvType.CV_8UC3);
        Mat previewMat = new Mat(height + height / 2, width, CvType.CV_8UC1);
        previewMat.put(0, 0, data);
        Imgproc.cvtColor(previewMat, rgb, Imgproc.COLOR_YUV2RGB_NV21, rgb.channels());
        Imgproc.cvtColor(rgb, labImg, Imgproc.COLOR_RGB2Lab, rgb.channels());

        return labImg;
    }
    return null;
}
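
One thing worth guarding in the conversion above: an NV21 buffer holds 12 bits per pixel, so data.length must equal width * height * 3 / 2, which is exactly what the (height + height / 2) x width single-channel Mat expects. A defensive sketch (not in the original):

// Sketch: validate the NV21 buffer before the bulk put.
if (data.length != width * height * 3 / 2) {
    throw new IllegalArgumentException("unexpected NV21 buffer size: " + data.length);
}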

From source file: org.akvo.caddisfly.sensor.colorimetry.strip.util.ResultUtil.java

License: Open Source License

public static Mat getMatFromFile(Context context, int patchId) {

    String fileName = null;

    try {
        // Get the correct file from the patch id
        String json = FileUtil.readFromInternalStorage(context, Constant.IMAGE_PATCH);
        JSONArray imagePatchArray = new JSONArray(json);
        for (int i = 0; i < imagePatchArray.length(); i++) {
            JSONArray array = imagePatchArray.getJSONArray(i);
            if (array.getInt(1) == patchId) {
                fileName = Constant.STRIP + array.getInt(0);
                break;
            }
        }
    } catch (Exception e) {
        Timber.e(e);
    }

    // if no strip was found in DetectStripTask, an image was saved with the suffix Constant.ERROR
    if (FileUtil.fileExists(context, fileName + Constant.ERROR)) {
        fileName += Constant.ERROR;
    }

    // read the Mat object from internal storage
    byte[] data;
    try {
        data = FileUtil.readByteArray(context, fileName);

        if (data != null) {
            // determine cols and rows dimensions
            byte[] rows = new byte[4];
            byte[] cols = new byte[4];

            int length = data.length;
            System.arraycopy(data, length - 8, rows, 0, 4);
            System.arraycopy(data, length - 4, cols, 0, 4);

            int rowsNum = FileUtil.byteArrayToLeInt(rows);
            int colsNum = FileUtil.byteArrayToLeInt(cols);

            // remove last part
            byte[] imgData = Arrays.copyOfRange(data, 0, data.length - 8);

            // reserve Mat of proper size:
            Mat result = new Mat(rowsNum, colsNum, CvType.CV_8UC3);

            // put image data back in Mat:
            result.put(0, 0, imgData);
            return result;
        }
    } catch (IOException e) {
        Timber.e(e);
    }
    return null;
}
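
The reader above implies a matching write format: raw pixel bytes followed by the row and column counts as little-endian ints. A sketch of that write path using java.nio, assuming a CV_8UC3 Mat named mat (the project's own FileUtil helpers may differ):

// Sketch: serialize a Mat in the layout the reader above expects.
byte[] pixels = new byte[(int) (mat.total() * mat.channels())];
mat.get(0, 0, pixels);
ByteBuffer out = ByteBuffer.allocate(pixels.length + 8).order(ByteOrder.LITTLE_ENDIAN);
out.put(pixels);
out.putInt(mat.rows()); // lands at length - 8, where the reader looks for it
out.putInt(mat.cols()); // lands at length - 4
byte[] fileBytes = out.array(); // persist with the app's file utilities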

From source file: org.ar.rubik.CameraCalibration.java

License: Open Source License

/**
 * Get OpenCV Camera Matrix
 * 
 * This matrix represents the intrinsic properties of the camera.
 * Matrix is basically:
 * 
 *    |   Fx    0    Cx  |
 *    |    0   Fy    Cy  |
 *    |    0    0     1  |
 *    
 *    Fx := X Focal Length
 *    Fy := Y Focal Length
 *    Cx := X Optical Center
 *    Cy := Y Optical Center
 * 
 * 
 * @return the 3x3 camera intrinsic matrix
 */
public Mat getOpenCVCameraMatrix(int width, int height) {

    Log.v(Constants.TAG_CAL, "CameraCalibration.getOpenCVMatrix(): width=" + width + " height=" + height);

    double focalLengthXPixels = width / (2.0 * Math.tan(0.5 * fovX));
    double focalLengthYPixels = height / (2.0 * Math.tan(0.5 * fovY));

    Mat cameraMatrix = new Mat(3, 3, CvType.CV_64FC1);
    cameraMatrix.put(0, 0, focalLengthXPixels); // should be X focal length in pixels.
    cameraMatrix.put(0, 1, 0.0);
    cameraMatrix.put(0, 2, width / 2.0);
    cameraMatrix.put(1, 0, 0.0);
    cameraMatrix.put(1, 1, focalLengthYPixels); // should be Y focal length in pixels.
    cameraMatrix.put(1, 2, height / 2.0);
    cameraMatrix.put(2, 0, 0.0);
    cameraMatrix.put(2, 1, 0.0);
    cameraMatrix.put(2, 2, 1.0);

    Log.v(Constants.TAG_CAL, "Android Camera Calibration Matrix: ");
    Log.v(Constants.TAG_CAL, cameraMatrix.dump());

    //       =+= From Android Camera Calibration App at resolution 1920 x 1080
    //       cameraMatrix.put(0, 0, 1686.1);
    //       cameraMatrix.put(0, 1, 0.0);
    //       cameraMatrix.put(0, 2, 959.5);
    //       cameraMatrix.put(1, 0, 0.0);
    //       cameraMatrix.put(1, 1, 1686.1);
    //       cameraMatrix.put(1, 2, 539.5);
    //       cameraMatrix.put(2, 0, 0.0);
    //       cameraMatrix.put(2, 1, 0.0);
    //       cameraMatrix.put(2, 2, 1.0);
    //
    //       Log.v(Constants.TAG_CAL, "Camera Calibration App Matrix: ");
    //       Log.v(Constants.TAG_CAL, cameraMatrix.dump());

    return cameraMatrix;
}
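
The nine scalar put calls above can also be collapsed into one varargs call, since the Java binding's Mat.put(int, int, double...) writes consecutive elements of a CV_64F matrix. A sketch, with fx, fy, cx, cy standing in for the focal lengths and optical center computed above:

// Sketch: fill the whole 3x3 intrinsic matrix with a single put call.
Mat k = new Mat(3, 3, CvType.CV_64FC1);
k.put(0, 0,
        fx, 0.0, cx,
        0.0, fy, cy,
        0.0, 0.0, 1.0);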

From source file: org.ar.rubik.CubePoseEstimator.java

License: Open Source License

/**
 * Pose Estimation
 * 
 * Deduce real world cube coordinates and rotation
 * 
 * @param rubikFace
 * @param image 
 * @param stateModel 
 * @return 
 */
public static CubePose poseEstimation(RubikFace rubikFace, Mat image, StateModel stateModel) {

    if (rubikFace == null)
        return null;

    if (rubikFace.faceRecognitionStatus != FaceRecognitionStatusEnum.SOLVED)
        return null;

    LeastMeansSquare lmsResult = rubikFace.lmsResult;

    if (lmsResult == null)
        return null;

    // OpenCV pose estimation requires at least four points (this check demands more than four).
    if (rubikFace.rhombusList.size() <= 4)
        return null;

    if (cameraMatrix == null) {
        cameraMatrix = stateModel.cameraCalibration.getOpenCVCameraMatrix((int) (image.size().width),
                (int) (image.size().height));
        distCoeffs = new MatOfDouble(stateModel.cameraCalibration.getDistortionCoefficients());
    }

    /*
     * For the purposes of external camera calibration: i.e., where the cube is 
     * located in camera coordinates, we define the geometry of the face of a
     * cube composed of nine 3D locations each representing the center of each tile.
     * Correspondence between these points and nine 2D points from the actual
     * camera image, along with camera calibration data, are used to calculate
     * the Pose of the Cube (i.e. "Cube Pose").
     * 
     * The geometry of the cube here is defined as having center at {0,0,0},
     * and edge size of 2 units (i.e., +/- 1.0).
     */

    // Lists of corresponding real-world and screen points.
    List<Point3> objectPointsList = new ArrayList<Point3>(9);
    List<Point> imagePointsList = new ArrayList<Point>(9);

    // Create list of image (in 2D) and object (in 3D) points.
    // Loop over Rubik Face Tiles
    for (int n = 0; n < 3; n++) {
        for (int m = 0; m < 3; m++) {

            Rhombus rhombus = rubikFace.faceRhombusArray[n][m];

            // Only use if Rhombus was non null.
            if (rhombus != null) {

                // Obtain center of Rhombus in screen image coordinates
                // Convention:
                //  o X is zero on the left, and increases to the right.
                //  o Y is zero on the top and increases downward.
                Point imagePoint = new Point(rhombus.center.x, rhombus.center.y);
                imagePointsList.add(imagePoint);

                // N and M are actual indices, not conceptual ones (as in the design doc).
                int mm = 2 - n;
                int nn = 2 - m;
                // above now matches design doc.
                // that is:
                //  o the nn vector is to the right and upwards.
                //  o the mm vector is to the left and upwards.

                // Calculate center of Tile in OpenCV World Space Coordinates
                // Convention:
                //  o X is zero in the center, and increases to the left.
                //  o Y is zero in the center and increases downward.
                //  o Z is zero (at the world coordinate origin) and increases away from the camera.
                float x = (1 - mm) * 0.66666f;
                float y = -1.0f;
                float z = -1.0f * (1 - nn) * 0.666666f;
                Point3 objectPoint = new Point3(x, y, z);
                objectPointsList.add(objectPoint);
            }
        }
    }

    // Cast image point list into OpenCV Matrix.
    MatOfPoint2f imagePoints = new MatOfPoint2f();
    imagePoints.fromList(imagePointsList);

    // Cast object point list into OpenCV Matrix.
    MatOfPoint3f objectPoints = new MatOfPoint3f();
    objectPoints.fromList(objectPointsList);

    Mat rvec = new Mat();
    Mat tvec = new Mat();

    //      Log.e(Constants.TAG, "Image Points: " + imagePoints.dump());
    //      Log.e(Constants.TAG, "Object Points: " + objectPoints.dump());

    //      =+= sometimes a "count >= 4" exception 
    Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

    Log.v(Constants.TAG, String.format("Open CV Rotation Vector x=%4.2f y=%4.2f z=%4.2f", rvec.get(0, 0)[0],
            rvec.get(1, 0)[0], rvec.get(2, 0)[0]));

    // Convert from OpenCV to OpenGL World Coordinates
    float x = +1.0f * (float) tvec.get(0, 0)[0];
    float y = -1.0f * (float) tvec.get(1, 0)[0];
    float z = -1.0f * (float) tvec.get(2, 0)[0];

    //        // =+= Add manual offset correction to translation  
    //        x += MenuAndParams.xTranslationOffsetParam.value;
    //        y += MenuAndParams.yTranslationOffsetParam.value;
    //        z += MenuAndParams.zTranslationOffsetParam.value;      

    // Convert Rotation Vector from OpenCV polarity axes definition to OpenGL definition
    // Note, polarity of x-axis is OK, no need to invert.
    rvec.put(1, 0, -1.0f * rvec.get(1, 0)[0]); // y-axis
    rvec.put(2, 0, -1.0f * rvec.get(2, 0)[0]); // z-axis

    //        // =+= Add manual offset correction to Rotation
    //        rvec.put(0, 0, rvec.get(0, 0)[0] + MenuAndParams.xRotationOffsetParam.value * Math.PI / 180.0);  // X rotation
    //        rvec.put(1, 0, rvec.get(1, 0)[0] + MenuAndParams.yRotationOffsetParam.value * Math.PI / 180.0);  // Y rotation
    //        rvec.put(2, 0, rvec.get(2, 0)[0] + MenuAndParams.zRotationOffsetParam.value * Math.PI / 180.0);  // Z rotation

    // Package up as CubePose object
    CubePose cubePose = new CubePose();
    cubePose.x = x;
    cubePose.y = y;
    cubePose.z = z;
    cubePose.xRotation = rvec.get(0, 0)[0];
    cubePose.yRotation = rvec.get(1, 0)[0];
    cubePose.zRotation = rvec.get(2, 0)[0];

    //      Log.e(Constants.TAG, "Result: " + result);
    //      Log.e(Constants.TAG, "Camera: " + cameraMatrix.dump());
    //      Log.e(Constants.TAG, "Rotation: " + rvec.dump());
    //      Log.e(Constants.TAG, "Translation: " + tvec.dump());

    //      // Reporting in OpenGL World Coordinates
    //      Core.rectangle(image, new Point(0, 50), new Point(1270, 150), Constants.ColorBlack, -1);
    //      Core.putText(image, String.format("Translation  x=%4.2f y=%4.2f z=%4.2f", x, y, z), new Point(50, 100), Constants.FontFace, 3, Constants.ColorWhite, 3);
    //      Core.putText(image, String.format("Rotation     x=%4.0f y=%4.0f z=%4.0f", cubeXrotation, cubeYrotation, cubeZrotation), new Point(50, 150), Constants.FontFace, 3, Constants.ColorWhite, 3);

    Log.v(Constants.TAG, "Cube Pose: " + cubePose);

    return cubePose;
}
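
Given the "count >= 4" assertion noted in the comments above, a defensive sketch around the solvePnP call (a hypothetical guard, not in the original):

// Sketch: guard solvePnP; it asserts at least four point correspondences.
if (objectPoints.rows() >= 4 && imagePoints.rows() >= 4) {
    Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
} else {
    return null; // not enough tiles were recognized in this frame
}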

From source file: org.ar.rubik.CubeReconstructor.java

License: Open Source License

/**
 * Pose Estimation
 * 
 * Deduce real world cube coordinates and rotation
 * 
 * @param rubikFace
 * @param image 
 * @param stateModel 
 */
public void poseEstimation(RubikFace rubikFace, Mat image, StateModel stateModel) {

    if (rubikFace == null)
        return;

    if (rubikFace.faceRecognitionStatus != FaceRecognitionStatusEnum.SOLVED)
        return;

    LeastMeansSquare lmsResult = rubikFace.lmsResult;

    if (lmsResult == null)
        return;

    // OpenCV pose estimation requires at least four points (this check demands more than four).
    if (rubikFace.rhombusList.size() <= 4)
        return;

    // Lists of corresponding real-world and screen points.
    List<Point3> objectPointsList = new ArrayList<Point3>(9);
    List<Point> imagePointsList = new ArrayList<Point>(9);

    // Create list of image (in 2D) and object (in 3D) points.
    // Loop over Rubik Face Tiles/Rhombi
    for (int n = 0; n < 3; n++) {
        for (int m = 0; m < 3; m++) {

            Rhombus rhombus = rubikFace.faceRhombusArray[n][m];

            // Only use if Rhombus was non null.
            if (rhombus != null) {

                // Obtain center of Rhombus in screen image coordinates
                // Convention:
                //  o X is zero on the left, and increases to the right.
                //  o Y is zero on the top and increases downward.
                Point imagePoint = new Point(rhombus.center.x, rhombus.center.y);
                imagePointsList.add(imagePoint);

                // N and M are actual indices, not conceptual ones (as in the design doc).
                int mm = 2 - n;
                int nn = 2 - m;
                // above now matches design doc.
                // that is:
                //  o the nn vector is to the right and upwards.
                //  o the mm vector is to the left and upwards.

                // Calculate center of Tile in OpenCV World Space Coordinates
                // Convention:
                //  o X is zero in the center, and increases to the left.
                //  o Y is zero in the center and increases downward.
                //  o Z is zero (at the world coordinate origin) and increases away from the camera.
                float x = (1 - mm) * 0.66666f;
                float y = -1.0f;
                float z = -1.0f * (1 - nn) * 0.666666f;
                Point3 objectPoint = new Point3(x, y, z);
                objectPointsList.add(objectPoint);
            }
        }
    }

    // Cast image point list into OpenCV Matrix.
    MatOfPoint2f imagePoints = new MatOfPoint2f();
    imagePoints.fromList(imagePointsList);

    // Cast object point list into OpenCV Matrix.
    MatOfPoint3f objectPoints = new MatOfPoint3f();
    objectPoints.fromList(objectPointsList);

    Mat cameraMatrix = stateModel.cameraParameters.getOpenCVCameraMatrix();
    MatOfDouble distCoeffs = new MatOfDouble();
    Mat rvec = new Mat();
    Mat tvec = new Mat();

    //      Log.e(Constants.TAG, "Image Points: " + imagePoints.dump());
    //      Log.e(Constants.TAG, "Object Points: " + objectPoints.dump());

    //      boolean result = 
    Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

    Log.v(Constants.TAG, String.format("Open CV Rotation Vector x=%4.2f y=%4.2f z=%4.2f", rvec.get(0, 0)[0],
            rvec.get(1, 0)[0], rvec.get(2, 0)[0]));

    // Convert from OpenCV to OpenGL World Coordinates
    // (x, y, z here appear to be instance fields of CubeReconstructor)
    x = +1.0f * (float) tvec.get(0, 0)[0];
    y = -1.0f * (float) tvec.get(1, 0)[0];
    z = -1.0f * (float) tvec.get(2, 0)[0];

    // =+= Add manual offset correction to translation  
    x += MenuAndParams.xTranslationOffsetParam.value;
    y += MenuAndParams.yTranslationOffsetParam.value;
    z += MenuAndParams.zTranslationOffsetParam.value;

    // Convert Rotation Vector from OpenCV polarity axes definition to OpenGL definition
    rvec.put(1, 0, -1.0f * rvec.get(1, 0)[0]);
    rvec.put(2, 0, -1.0f * rvec.get(2, 0)[0]);

    // =+= Add manual offset correction to Rotation
    rvec.put(0, 0, rvec.get(0, 0)[0] + MenuAndParams.xRotationOffsetParam.value * Math.PI / 180.0); // X rotation
    rvec.put(1, 0, rvec.get(1, 0)[0] + MenuAndParams.yRotationOffsetParam.value * Math.PI / 180.0); // Y rotation
    rvec.put(2, 0, rvec.get(2, 0)[0] + MenuAndParams.zRotationOffsetParam.value * Math.PI / 180.0); // Z rotation

    // Create an OpenCV Rotation Matrix from a Rotation Vector
    // (Rodrigues reallocates its output, so the odd initial size and type below are never used)
    Mat rMatrix = new Mat(4, 4, CvType.CV_32FC2);
    Calib3d.Rodrigues(rvec, rMatrix);
    Log.v(Constants.TAG, "Rodrigues Matrix: " + rMatrix.dump());

    /*
     * Create an OpenGL Rotation Matrix
     * Notes:
     *   o  OpenGL matrices are stored in column-major order.
     *   o  OpenCV Rodrigues Rotation Matrix is 3x3 where OpenGL Rotation Matrix is 4x4.
     */

    // Initialize all Rotational Matrix elements to zero.
    for (int i = 0; i < 16; i++)
        rotationMatrix[i] = 0.0f; // Initialize to zero

    // Initialize element [3,3] to 1.0: i.e., "w" component in homogeneous coordinates
    rotationMatrix[3 * 4 + 3] = 1.0f;

    // Copy OpenCV matrix to OpenGL matrix element by element.
    for (int r = 0; r < 3; r++)
        for (int c = 0; c < 3; c++)
            rotationMatrix[r + c * 4] = (float) (rMatrix.get(r, c)[0]);

    // Diagnostics
    for (int r = 0; r < 4; r++)
        Log.v(Constants.TAG, String.format("Rotation Matrix  r=%d  [%5.2f  %5.2f  %5.2f  %5.2f]", r,
                rotationMatrix[r + 0], rotationMatrix[r + 4], rotationMatrix[r + 8], rotationMatrix[r + 12]));

    //      Log.e(Constants.TAG, "Result: " + result);
    //      Log.e(Constants.TAG, "Camera: " + cameraMatrix.dump());
    //      Log.e(Constants.TAG, "Rotation: " + rvec.dump());
    //      Log.e(Constants.TAG, "Translation: " + tvec.dump());

    //      // Reporting in OpenGL World Coordinates
    //      Core.rectangle(image, new Point(0, 50), new Point(1270, 150), Constants.ColorBlack, -1);
    //      Core.putText(image, String.format("Translation  x=%4.2f y=%4.2f z=%4.2f", x, y, z), new Point(50, 100), Constants.FontFace, 3, Constants.ColorWhite, 3);
    //      Core.putText(image, String.format("Rotation     x=%4.0f y=%4.0f z=%4.0f", cubeXrotation, cubeYrotation, cubeZrotation), new Point(50, 150), Constants.FontFace, 3, Constants.ColorWhite, 3);
}

From source file: org.ar.rubik.gl.GLRenderer.java

License: Open Source License

/**
 * Compute Pose Rotation Matrix and return it.
 *
 * @param cubePose
 * @return Pose Rotation Matrix
 */
private float[] computePoseRotationMatrix(CubePose cubePose) {

    // Rotational matrix suitable for consumption by OpenGL
    float[] poseRotationMatrix = new float[16];

    // Recreate OpenCV matrix for processing by the Rodrigues algorithm.
    Mat rvec = new Mat(3, 1, CvType.CV_64FC1);
    rvec.put(0, 0, new double[] { cubePose.xRotation });
    rvec.put(1, 0, new double[] { cubePose.yRotation });
    rvec.put(2, 0, new double[] { cubePose.zRotation });
    //      Log.v(Constants.TAG, "Rotation Vector: " + rvec.dump());

    // Create an OpenCV Rotation Matrix from a Rotation Vector
    Mat rMatrix = new Mat(4, 4, CvType.CV_64FC1);
    Calib3d.Rodrigues(rvec, rMatrix);
    //      Log.v(Constants.TAG, "Rodrigues Matrix: " + rMatrix.dump());

    /*
     * Create an OpenGL Rotation Matrix
     * Notes:
     *   o  OpenGL matrices are stored in column-major order.
     *   o  OpenCV Rodrigues Rotation Matrix is 3x3 where OpenGL Rotation Matrix is 4x4.
     *   o  OpenGL Rotation Matrix is simple float array variable
     */

    // Initialize all Rotational Matrix elements to zero.
    for (int i = 0; i < 16; i++)
        poseRotationMatrix[i] = 0.0f; // Initialize to zero

    // Initialize element [3,3] to 1.0: i.e., "w" component in homogeneous coordinates
    poseRotationMatrix[3 * 4 + 3] = 1.0f;

    // Copy OpenCV matrix to OpenGL matrix element by element.
    for (int r = 0; r < 3; r++)
        for (int c = 0; c < 3; c++)
            poseRotationMatrix[r + c * 4] = (float) (rMatrix.get(r, c)[0]);

    // Diagnostics
    //        for(int r=0; r<4; r++)
    //            Log.v(Constants.TAG, String.format("Rotation Matrix  r=%d  [%5.2f  %5.2f  %5.2f  %5.2f]", r, poseRotationMatrix[r + 0], poseRotationMatrix[r+4], poseRotationMatrix[r+8], poseRotationMatrix[r+12]));

    return poseRotationMatrix;
}
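
The element-by-element copy above can also be done with one bulk get, since Rodrigues reallocates its output to a 3x3 CV_64FC1 matrix. A sketch of the same conversion:

// Sketch: bulk-read the 3x3 rotation, then transpose into column-major order.
double[] r = new double[9];
rMatrix.get(0, 0, r); // row-major 3x3
for (int i = 0; i < 3; i++)
    for (int j = 0; j < 3; j++)
        poseRotationMatrix[i + j * 4] = (float) r[i * 3 + j];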

From source file: org.ar.rubik.MonoChromatic.java

License: Open Source License

/**
 * Create a submatrix using a byte array, then Core.minMaxLoc().
 * This solution consumes about 10 seconds per frame.
 *
 * @param original_image
 * @return
 */
private static Mat monochromaticMedianImageFilterUtilizingOpenCv3(Mat original_image) {
    final Size imageSize = original_image.size();

    Mat monochromatic_image = new Mat(imageSize, CvType.CV_8UC1);
    Mat hsv_image = new Mat(imageSize, CvType.CV_8UC3);

    Imgproc.cvtColor(original_image, hsv_image, Imgproc.COLOR_RGB2HLS);
    //      Log.i(Constants.TAG, "HSV Image: " + hsv_image); // CV_8UC3

    // Try RGB below
    //      hsv_image = result;

    // Get hue channel into simple byte array for speed efficiency.
    final int numColumns = (int) original_image.size().width;
    final int numRows = (int) original_image.size().height;
    final int span = 7;
    final int accuracy = 5;
    List<Mat> channels = new LinkedList<Mat>();
    Core.split(hsv_image, channels);
    Mat hueMat = channels.get(0);
    Mat lumMat = channels.get(1);
    Mat satMat = channels.get(2);
    final int bufferSize = numColumns * numRows;
    byte[] hueByteArray = new byte[bufferSize];
    byte[] lumByteArray = new byte[bufferSize];
    byte[] satByteArray = new byte[bufferSize];
    hueMat.get(0, 0, hueByteArray); // get all the pixels
    lumMat.get(0, 0, lumByteArray); // get all the pixels
    satMat.get(0, 0, satByteArray); // get all the pixels

    // Output byte array for speed efficiency
    byte[] monochromaticByteArray = new byte[bufferSize];

    Mat subimageMat = new Mat(span, span, CvType.CV_8UC1);
    byte[] subimageByteArray = new byte[span * span];

    for (int row = 0; row < numRows; row++) {

        byte result_pixel = 0;

        for (int col = 0; col < numColumns; col++) {

            if (col < span || (col >= numColumns - span))
                result_pixel = 0; // Just put in black

            else if (row < span || (row >= numRows - span))
                result_pixel = 0; // Just put in black

            else {

                // Copy a span x span subwindow of the hue channel, one row at a time
                for (int i = 0; i < span; i++) {

                    // copy span bytes from (row + i) * numCol + col
                    int srcPos = (row + i) * numColumns + col;
                    int dstPos = i * span;
                    System.arraycopy(hueByteArray, srcPos, subimageByteArray, dstPos, span);
                }

                subimageMat.put(0, 0, subimageByteArray);
                Core.MinMaxLocResult minMaxResult = Core.minMaxLoc(subimageMat);

                if ((minMaxResult.maxVal - minMaxResult.minVal) < accuracy) //&& (lum_max - lum_min < accuracy) && (sat_max - sat_min < accuracy) )
                    result_pixel = (byte) 128;
                else
                    result_pixel = (byte) 0;

                //               Log.i(Constants.TAG, String.format("Lum %d %d", lum_min, lum_max));

            } // End of else

            if ((col >= span / 2) && (row >= span / 2))
                monochromaticByteArray[(row - span / 2) * numColumns + (col - span / 2)] = result_pixel;

            //            int test = (int)(satByteArray[row * numColumns + col]) & 0xFF;
            //            monochromaticByteArray[row * numColumns + (col - span/2)] = (byte) test;

        } // End of column sweep

    } // End of row sweep
    Log.i(Constants.TAG, "Completed MonoChromatic CV");
    monochromatic_image.put(0, 0, monochromaticByteArray);
    return monochromatic_image;
}
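
As the final put above shows, a whole single-channel image can be written in one bulk call; the only requirement is that the array length equals rows * cols * channels. A minimal sketch with made-up dimensions:

// Sketch: one bulk put fills an entire CV_8UC1 Mat from a byte array.
int rows = 480, cols = 640;
byte[] pixels = new byte[rows * cols]; // one byte per pixel for CV_8UC1
Mat m = new Mat(rows, cols, CvType.CV_8UC1);
m.put(0, 0, pixels);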