Example usage for org.opencv.core MatOfPoint2f fromList

List of usage examples for org.opencv.core MatOfPoint2f fromList

Introduction

On this page you can find example usage for org.opencv.core MatOfPoint2f fromList.

Prototype

public void fromList(List<Point> lp) 
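
As a quick orientation before the project examples, here is a minimal, self-contained sketch (FromListExample is an illustrative name, not taken from the sources below; it assumes the OpenCV native library has already been loaded). It shows the typical pattern: collect Java points in a List, convert with fromList, then hand the resulting CV_32FC2 matrix to an Imgproc function.

import java.util.Arrays;
import java.util.List;

import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.RotatedRect;
import org.opencv.imgproc.Imgproc;

public class FromListExample {
    public static void main(String[] args) {
        // Assumes the OpenCV native library was loaded beforehand, e.g.:
        // System.loadLibrary(org.opencv.core.Core.NATIVE_LIBRARY_NAME);
        List<Point> corners = Arrays.asList(
                new Point(0, 0), new Point(10, 0),
                new Point(10, 5), new Point(0, 5));

        // fromList copies the points into a CV_32FC2 matrix, one point per row.
        MatOfPoint2f pts = new MatOfPoint2f();
        pts.fromList(corners);

        // The matrix can now be passed to geometry functions such as minAreaRect.
        RotatedRect rect = Imgproc.minAreaRect(pts);
        System.out.println("center=" + rect.center + " size=" + rect.size);

        pts.release();
    }
}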

Usage

From source file: OCV_MinAreaRect.java

License: Open Source License

@Override
public void run(ImageProcessor ip) {
    byte[] byteArray = (byte[]) ip.getPixels();
    int w = ip.getWidth();
    int h = ip.getHeight();
    int num_slice = ip.getSliceNumber();

    ArrayList<Point> lstPt = new ArrayList<Point>();
    MatOfPoint2f pts = new MatOfPoint2f();

    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++) {
            if (byteArray[x + w * y] != 0) {
                lstPt.add(new Point((double) x, (double) y));
            }
        }
    }

    if (lstPt.isEmpty()) {
        return;
    }

    pts.fromList(lstPt);
    RotatedRect rect = Imgproc.minAreaRect(pts);
    showData(rect, num_slice);
}
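
Note that the isEmpty guard matters here: calling Imgproc.minAreaRect with an empty point set would fail with an OpenCV error, so the plugin returns early instead of calling fromList with no points.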

From source file: OCV_MinEnclosingCircle.java

License: Open Source License

@Override
public void run(ImageProcessor ip) {
    byte[] byteArray = (byte[]) ip.getPixels();
    int w = ip.getWidth();
    int h = ip.getHeight();
    int num_slice = ip.getSliceNumber();

    ArrayList<Point> lstPt = new ArrayList<Point>();
    MatOfPoint2f pts = new MatOfPoint2f();

    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++) {
            if (byteArray[x + w * y] != 0) {
                lstPt.add(new Point((double) x, (double) y));
            }
        }
    }

    if (lstPt.isEmpty()) {
        return;
    }

    pts.fromList(lstPt);
    float[] radius = new float[1];
    Point center = new Point();
    Imgproc.minEnclosingCircle(pts, center, radius);
    showData(center.x, center.y, (double) radius[0], num_slice);
}

From source file: OCV_GetPerspectiveTransform.java

License: Open Source License

@Override
public void run(ImageProcessor ip) {
    MatOfPoint2f matPt_src = new MatOfPoint2f();
    MatOfPoint2f matPt_dst = new MatOfPoint2f();
    matPt_src.fromList(lstPt_src);
    matPt_dst.fromList(lstPt_dst);

    Mat mat = Imgproc.getPerspectiveTransform(matPt_src, matPt_dst);

    if (mat == null || mat.rows() <= 0 || mat.cols() <= 0) {
        IJ.showMessage("Output is null or error");
        return;
    }

    ResultsTable rt = OCV__LoadLibrary.GetResultsTable(true);

    for (int i = 0; i < 3; i++) {
        rt.incrementCounter();
        rt.addValue("Column01", String.valueOf(mat.get(i, 0)[0]));
        rt.addValue("Column02", String.valueOf(mat.get(i, 1)[0]));
        rt.addValue("Column03", String.valueOf(mat.get(i, 2)[0]));
    }

    rt.show("Results");
}
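
Here lstPt_src and lstPt_dst are presumably fields populated elsewhere in the plugin. Imgproc.getPerspectiveTransform requires exactly four source/destination point pairs and returns a 3x3 matrix, which is why the result table loops over three rows.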

From source file: OCV_FitEllipse.java

License: Open Source License

@Override
public void run(ImageProcessor ip) {
    byte[] byteArray = (byte[]) ip.getPixels();
    int w = ip.getWidth();
    int h = ip.getHeight();
    int num_slice = ip.getSliceNumber();

    ArrayList<Point> lstPt = new ArrayList<Point>();
    MatOfPoint2f pts = new MatOfPoint2f();

    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++) {
            if (byteArray[x + w * y] != 0) {
                lstPt.add(new Point((double) x, (double) y));
            }
        }
    }

    if (lstPt.isEmpty()) {
        return;
    }

    pts.fromList(lstPt);
    RotatedRect rect = Imgproc.fitEllipse(pts);
    showData(rect, num_slice);
}
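
One caveat: Imgproc.fitEllipse needs at least five points, so the isEmpty check alone does not guarantee a valid call; with only one to four foreground pixels this would still raise an OpenCV error.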

From source file: OCV_GetAffineTransform.java

License: Open Source License

@Override
public void run(ImageProcessor ip) {
    MatOfPoint2f matPt_src = new MatOfPoint2f();
    MatOfPoint2f matPt_dst = new MatOfPoint2f();
    matPt_src.fromList(lstPt_src);
    matPt_dst.fromList(lstPt_dst);

    Mat mat = Imgproc.getAffineTransform(matPt_src, matPt_dst);

    if (mat == null || mat.rows() <= 0 || mat.cols() <= 0) {
        IJ.showMessage("Output is null or error");
        return;
    }

    ResultsTable rt = OCV__LoadLibrary.GetResultsTable(true);

    for (int i = 0; i < 2; i++) {
        rt.incrementCounter();
        rt.addValue("Column01", String.valueOf(mat.get(i, 0)[0]));
        rt.addValue("Column02", String.valueOf(mat.get(i, 1)[0]));
        rt.addValue("Column03", String.valueOf(mat.get(i, 2)[0]));
    }

    rt.show("Results");
}
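
This mirrors the perspective example above: Imgproc.getAffineTransform takes exactly three point pairs and returns a 2x3 matrix, hence the loop over two rows.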

From source file: arlocros.ComputePose.java

License: Apache License

public boolean computePose(Mat rvec, Mat tvec, Mat image2) throws NyARException, FileNotFoundException {
    // convert image to NyAR style for processing
    final INyARRgbRaster imageRaster = NyARImageHelper.createFromMat(image2);

    // create new marker system configuration
    i_config = new NyARMarkerSystemConfig(i_param);
    markerSystemState = new NyARMarkerSystem(i_config);
    // Create wrapper that passes cam pictures to marker system
    cameraSensorWrapper = new NyARSensor(i_screen_size);
    ids = new int[markerPatterns.size()];
    patternmap = new HashMap<>();
    for (int i = 0; i < markerPatterns.size(); i++) {
        // create marker description from pattern file and add to marker
        // system
        ids[i] = markerSystemState.addARMarker(arCodes.get(i), 25, markerConfig.getMarkerSize());
        patternmap.put(ids[i], markerPatterns.get(i));
    }

    cameraSensorWrapper.update(imageRaster);
    markerSystemState.update(cameraSensorWrapper);

    // init 3D point list
    final List<Point3> points3dlist = new ArrayList<>();
    final List<Point> points2dlist = new ArrayList<>();

    for (final int id : ids) {
        // process only if this marker has been detected
        if (markerSystemState.isExistMarker(id) && markerSystemState.getConfidence(id) > 0.7) {
            // read and add 2D points
            final NyARIntPoint2d[] vertex2d = markerSystemState.getMarkerVertex2D(id);
            Point p = new Point(vertex2d[0].x, vertex2d[0].y);
            points2dlist.add(p);
            p = new Point(vertex2d[1].x, vertex2d[1].y);
            points2dlist.add(p);
            p = new Point(vertex2d[2].x, vertex2d[2].y);
            points2dlist.add(p);
            p = new Point(vertex2d[3].x, vertex2d[3].y);
            points2dlist.add(p);

            //            final MatOfPoint mop = new MatOfPoint();
            //            mop.fromList(points2dlist);
            //            final List<MatOfPoint> pts = new ArrayList<>();
            //            pts.add(mop);

            // read and add corresponding 3D points
            points3dlist.addAll(markerConfig.create3dpointlist(patternmap.get(id)));

            if (visualization) {
                // draw red rectangle around detected marker
                Core.rectangle(image2, new Point(vertex2d[0].x, vertex2d[0].y),
                        new Point(vertex2d[2].x, vertex2d[2].y), new Scalar(0, 0, 255));
                final String markerFile = patternmap.get(id).replaceAll(".*4x4_", "").replace(".patt", "");
                Core.putText(image2, markerFile,
                        new Point((vertex2d[2].x + vertex2d[0].x) / 2.0, vertex2d[0].y - 5), 4, 1,
                        new Scalar(250, 0, 0));
            }
        }

    }
    // load 2D and 3D points into Mats for solvePnP
    final MatOfPoint3f objectPoints = new MatOfPoint3f();
    objectPoints.fromList(points3dlist);
    final MatOfPoint2f imagePoints = new MatOfPoint2f();
    imagePoints.fromList(points2dlist);

    if (visualization) {
        // show image with markers detected
        Imshow.show(image2);
    }

    // do not call solvePnP with empty input data (no markers detected)
    if (points2dlist.size() == 0) {
        objectPoints.release();
        imagePoints.release();
        return false;
    }

    // RANSAC-based pose estimation (can produce more jitter than plain solvePnP)
    Mat inliers = new Mat();

    Calib3d.solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, 300, 5, 16,
            inliers, Calib3d.CV_P3P);
    ArMarkerPoseEstimator.getLog()
            .info("Points detected: " + points2dlist.size() + " inliers: " + inliers.size());

    objectPoints.release();
    imagePoints.release();

    // avoid publishing a zero pose if localization failed
    if (inliers.rows() == 0) {
        inliers.release();
        return false;
    }

    inliers.release();
    return true;
}

From source file: com.github.rosjava_catkin_package_a.ARLocROS.ComputePose.java

License: Apache License

public boolean computePose(Mat rvec, Mat tvec, Mat image2) throws NyARException, FileNotFoundException {
    // convert image to NyAR style for processing
    final INyARRgbRaster imageRaster = NyARImageHelper.createFromMat(image2);

    // create new marker system configuration
    i_config = new NyARMarkerSystemConfig(i_param);
    markerSystemState = new NyARMarkerSystem(i_config);
    // Create wrapper that passes cam pictures to marker system
    cameraSensorWrapper = new NyARSensor(i_screen_size);
    ids = new int[markerPatterns.size()];
    patternmap = new HashMap<>();
    for (int i = 0; i < markerPatterns.size(); i++) {
        // create marker description from pattern file and add to marker
        // system
        ids[i] = markerSystemState.addARMarker(arCodes.get(i), 25, markerConfig.getMarkerSize());
        patternmap.put(ids[i], markerPatterns.get(i));
    }

    cameraSensorWrapper.update(imageRaster);
    markerSystemState.update(cameraSensorWrapper);

    // init 3D point list
    final List<Point3> points3dlist = new ArrayList<>();
    final List<Point> points2dlist = new ArrayList<>();

    for (final int id : ids) {
        // process only if this marker has been detected
        if (markerSystemState.isExistMarker(id) && markerSystemState.getConfidence(id) > 0.7) {
            // read and add 2D points
            final NyARIntPoint2d[] vertex2d = markerSystemState.getMarkerVertex2D(id);
            Point p = new Point(vertex2d[0].x, vertex2d[0].y);
            points2dlist.add(p);
            p = new Point(vertex2d[1].x, vertex2d[1].y);
            points2dlist.add(p);
            p = new Point(vertex2d[2].x, vertex2d[2].y);
            points2dlist.add(p);
            p = new Point(vertex2d[3].x, vertex2d[3].y);
            points2dlist.add(p);

            final MatOfPoint mop = new MatOfPoint();
            mop.fromList(points2dlist);
            final List<MatOfPoint> pts = new ArrayList<>();
            pts.add(mop);
            // read and add corresponding 3D points
            points3dlist.addAll(markerConfig.create3dpointlist(patternmap.get(id)));
            // draw red rectangle around detected marker
            Core.rectangle(image2, new Point(vertex2d[0].x, vertex2d[0].y),
                    new Point(vertex2d[2].x, vertex2d[2].y), new Scalar(0, 0, 255));
        }

    }
    // load 2D and 3D points into Mats for solvePnP
    final MatOfPoint3f objectPoints = new MatOfPoint3f();
    objectPoints.fromList(points3dlist);
    final MatOfPoint2f imagePoints = new MatOfPoint2f();
    imagePoints.fromList(points2dlist);

    if (visualization) {
        // show image with markers detected
        Imshow.show(image2);
    }

    // do not call solvePnP with empty input data (no markers detected)
    if (points2dlist.size() == 0) {
        return false;
    }

    // RANSAC-based pose estimation (can produce more jitter than plain solvePnP)
    Mat inliers = new Mat();

    Calib3d.solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, 300, 5, 16,
            inliers, Calib3d.CV_P3P);
    ARLoc.getLog().info("Points detected: " + points2dlist.size() + " inliers: " + inliers.size());
    // avoid publishing a zero pose if localization failed
    if (inliers.rows() == 0) {
        return false;
    }

    return true;
}

From source file: com.seleniumtests.util.imaging.ImageDetector.java

License: Apache License

/**
 * Compute the rectangle where the searched picture is located and the rotation angle between both images.
 * Throws {@link ImageSearchException} if the picture is not found.
 * @deprecated Kept here for information, but OpenCV 3 no longer includes SURF in its Java build.
 */
public void detectCorrespondingZone() {
    Mat objectImageMat = Imgcodecs.imread(objectImage.getAbsolutePath(), Imgcodecs.CV_LOAD_IMAGE_COLOR);
    Mat sceneImageMat = Imgcodecs.imread(sceneImage.getAbsolutePath(), Imgcodecs.CV_LOAD_IMAGE_COLOR);
    FeatureDetector surf = FeatureDetector.create(FeatureDetector.SURF);

    MatOfKeyPoint objectKeyPoints = new MatOfKeyPoint();
    MatOfKeyPoint sceneKeyPoints = new MatOfKeyPoint();

    surf.detect(objectImageMat, objectKeyPoints);
    surf.detect(sceneImageMat, sceneKeyPoints);

    DescriptorExtractor surfExtractor = DescriptorExtractor.create(DescriptorExtractor.SURF);
    Mat objectDescriptor = new Mat();
    Mat sceneDescriptor = new Mat();
    surfExtractor.compute(objectImageMat, objectKeyPoints, objectDescriptor);
    surfExtractor.compute(sceneImageMat, sceneKeyPoints, sceneDescriptor);

    try {
        Mat outImage = new Mat();
        Features2d.drawKeypoints(objectImageMat, objectKeyPoints, outImage);
        String tempFile = File.createTempFile("img", ".png").getAbsolutePath();
        writeComparisonPictureToFile(tempFile, outImage);
    } catch (IOException e) {
        // debug output only; ignore failures writing the temporary comparison image
    }

    // http://stackoverflow.com/questions/29828849/flann-for-opencv-java
    DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
    MatOfDMatch matches = new MatOfDMatch();

    if (objectKeyPoints.toList().isEmpty()) {
        throw new ImageSearchException("No keypoints in object to search, check it's not uniformly coloured: "
                + objectImage.getAbsolutePath());
    }
    if (sceneKeyPoints.toList().isEmpty()) {
        throw new ImageSearchException(
                "No keypoints in scene, check it's not uniformly coloured: " + sceneImage.getAbsolutePath());
    }
    if (objectDescriptor.type() != CvType.CV_32F) {
        objectDescriptor.convertTo(objectDescriptor, CvType.CV_32F);
    }
    if (sceneDescriptor.type() != CvType.CV_32F) {
        sceneDescriptor.convertTo(sceneDescriptor, CvType.CV_32F);
    }

    matcher.match(objectDescriptor, sceneDescriptor, matches);

    double maxDist = 0;
    double minDist = 10000;

    for (int i = 0; i < objectDescriptor.rows(); i++) {
        double dist = matches.toList().get(i).distance;
        if (dist < minDist) {
            minDist = dist;
        }
        if (dist > maxDist) {
            maxDist = dist;
        }
    }

    logger.debug("-- Max dist : " + maxDist);
    logger.debug("-- Min dist : " + minDist);

    LinkedList<DMatch> goodMatches = new LinkedList<>();
    MatOfDMatch gm = new MatOfDMatch();

    for (int i = 0; i < objectDescriptor.rows(); i++) {
        if (matches.toList().get(i).distance < detectionThreshold) {
            goodMatches.addLast(matches.toList().get(i));
        }
    }
    gm.fromList(goodMatches);

    Features2d.drawMatches(objectImageMat, objectKeyPoints, sceneImageMat, sceneKeyPoints, gm, imgMatch,
            Scalar.all(-1), Scalar.all(-1), new MatOfByte(), Features2d.NOT_DRAW_SINGLE_POINTS);

    if (goodMatches.isEmpty()) {
        throw new ImageSearchException("Cannot find matching zone");
    }

    LinkedList<Point> objList = new LinkedList<>();
    LinkedList<Point> sceneList = new LinkedList<>();

    List<KeyPoint> objectKeyPointsList = objectKeyPoints.toList();
    List<KeyPoint> sceneKeyPointsList = sceneKeyPoints.toList();

    for (int i = 0; i < goodMatches.size(); i++) {
        objList.addLast(objectKeyPointsList.get(goodMatches.get(i).queryIdx).pt);
        sceneList.addLast(sceneKeyPointsList.get(goodMatches.get(i).trainIdx).pt);
    }

    MatOfPoint2f obj = new MatOfPoint2f();
    obj.fromList(objList);

    MatOfPoint2f scene = new MatOfPoint2f();
    scene.fromList(sceneList);

    // Calib3d.RANSAC could be used instead of 0
    Mat hg = Calib3d.findHomography(obj, scene, 0, 5);

    Mat objectCorners = new Mat(4, 1, CvType.CV_32FC2);
    Mat sceneCorners = new Mat(4, 1, CvType.CV_32FC2);

    objectCorners.put(0, 0, new double[] { 0, 0 });
    objectCorners.put(1, 0, new double[] { objectImageMat.cols(), 0 });
    objectCorners.put(2, 0, new double[] { objectImageMat.cols(), objectImageMat.rows() });
    objectCorners.put(3, 0, new double[] { 0, objectImageMat.rows() });

    Core.perspectiveTransform(objectCorners, sceneCorners, hg);

    // points of object
    Point po1 = new Point(objectCorners.get(0, 0));
    Point po2 = new Point(objectCorners.get(1, 0));
    Point po3 = new Point(objectCorners.get(2, 0));
    Point po4 = new Point(objectCorners.get(3, 0));

    // point of object in scene
    Point p1 = new Point(sceneCorners.get(0, 0)); // top left
    Point p2 = new Point(sceneCorners.get(1, 0)); // top right
    Point p3 = new Point(sceneCorners.get(2, 0)); // bottom right
    Point p4 = new Point(sceneCorners.get(3, 0)); // bottom left

    logger.debug(po1);
    logger.debug(po2);
    logger.debug(po3);
    logger.debug(po4);
    logger.debug(p1); // top left
    logger.debug(p2); // top right
    logger.debug(p3); // bottom right
    logger.debug(p4); // bottom left

    if (debug) {
        try {
            // translate corners
            p1.set(new double[] { p1.x + objectImageMat.cols(), p1.y });
            p2.set(new double[] { p2.x + objectImageMat.cols(), p2.y });
            p3.set(new double[] { p3.x + objectImageMat.cols(), p3.y });
            p4.set(new double[] { p4.x + objectImageMat.cols(), p4.y });

            Imgproc.line(imgMatch, p1, p2, new Scalar(0, 255, 0), 1);
            Imgproc.line(imgMatch, p2, p3, new Scalar(0, 255, 0), 1);
            Imgproc.line(imgMatch, p3, p4, new Scalar(0, 255, 0), 1);
            Imgproc.line(imgMatch, p4, p1, new Scalar(0, 255, 0), 1);

            showResultingPicture(imgMatch);
        } catch (IOException e) {
            // debug visualization only; ignore write failures
        }
    }

    // check rotation angles
    checkRotationAngle(p1, p2, p3, p4, po1, po2, po3, po4);

    // rework the scene points now that we know the object rotation is 0, 90, 180 or 270 degrees
    reworkOnScenePoints(p1, p2, p3, p4);

    // check that aspect ratio of the detected height and width are the same
    checkDetectionZoneAspectRatio(p1, p2, p4, po1, po2, po4);

    recordDetectedRectangle(p1, p2, p3, p4);
}

From source file: org.ar.rubik.CubePoseEstimator.java

License: Open Source License

/**
 * Pose Estimation
 * 
 * Deduce real world cube coordinates and rotation
 * 
 * @param rubikFace
 * @param image 
 * @param stateModel 
 * @return 
 */
public static CubePose poseEstimation(RubikFace rubikFace, Mat image, StateModel stateModel) {

    if (rubikFace == null)
        return null;

    if (rubikFace.faceRecognitionStatus != FaceRecognitionStatusEnum.SOLVED)
        return null;

    LeastMeansSquare lmsResult = rubikFace.lmsResult;

    if (lmsResult == null)
        return null;

    // OpenCV Pose Estimate requires at least four points.
    if (rubikFace.rhombusList.size() <= 4)
        return null;

    if (cameraMatrix == null) {
        cameraMatrix = stateModel.cameraCalibration.getOpenCVCameraMatrix((int) (image.size().width),
                (int) (image.size().height));
        distCoeffs = new MatOfDouble(stateModel.cameraCalibration.getDistortionCoefficients());
    }

    /*
     * For the purposes of external camera calibration: i.e., where the cube is 
     * located in camera coordinates, we define the geometry of the face of a
     * cube composed of nine 3D locations each representing the center of each tile.
     * Correspondence between these points and nine 2D points from the actual
     * camera image, along with camera calibration data, is used to calculate
     * the Pose of the Cube (i.e., "Cube Pose").
     * 
     * The geometry of the cube here is defined as having center at {0,0,0},
     * and edge size of 2 units (i.e., +/- 1.0).
     */

    // List of real world point and screen points that correspond.
    List<Point3> objectPointsList = new ArrayList<Point3>(9);
    List<Point> imagePointsList = new ArrayList<Point>(9);

    // Create list of image (in 2D) and object (in 3D) points.
    // Loop over Rubik Face Tiles
    for (int n = 0; n < 3; n++) {
        for (int m = 0; m < 3; m++) {

            Rhombus rhombus = rubikFace.faceRhombusArray[n][m];

            // Only use if Rhombus was non null.
            if (rhombus != null) {

                // Obtain center of Rhombus in screen image coordinates
                // Convention:
                //  o X is zero on the left, and increases to the right.
                //  o Y is zero on the top and increases downward.
                Point imagePoint = new Point(rhombus.center.x, rhombus.center.y);
                imagePointsList.add(imagePoint);

                // N and M here are actual indices, not the conceptual ones used in the design doc.
                int mm = 2 - n;
                int nn = 2 - m;
                // above now matches design doc.
                // that is:
                //  o the nn vector is to the right and upwards.
                //  o the mm vector is to the left and upwards.

                // Calculate center of Tile in OpenCV World Space Coordinates
                // Convention:
                //  o X is zero in the center, and increases to the left.
                //  o Y is zero in the center and increases downward.
                //  o Z is zero (at the world coordinate origin) and increases away from the camera.
                float x = (1 - mm) * 0.66666f;
                float y = -1.0f;
                float z = -1.0f * (1 - nn) * 0.666666f;
                Point3 objectPoint = new Point3(x, y, z);
                objectPointsList.add(objectPoint);
            }
        }
    }

    // Cast image point list into OpenCV Matrix.
    MatOfPoint2f imagePoints = new MatOfPoint2f();
    imagePoints.fromList(imagePointsList);

    // Cast object point list into OpenCV Matrix.
    MatOfPoint3f objectPoints = new MatOfPoint3f();
    objectPoints.fromList(objectPointsList);

    Mat rvec = new Mat();
    Mat tvec = new Mat();

    //      Log.e(Constants.TAG, "Image Points: " + imagePoints.dump());
    //      Log.e(Constants.TAG, "Object Points: " + objectPoints.dump());

    //      =+= sometimes a "count >= 4" exception 
    Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

    Log.v(Constants.TAG, String.format("Open CV Rotation Vector x=%4.2f y=%4.2f z=%4.2f", rvec.get(0, 0)[0],
            rvec.get(1, 0)[0], rvec.get(2, 0)[0]));

    // Convert from OpenCV to OpenGL World Coordinates
    float x = +1.0f * (float) tvec.get(0, 0)[0];
    float y = -1.0f * (float) tvec.get(1, 0)[0];
    float z = -1.0f * (float) tvec.get(2, 0)[0];

    //        // =+= Add manual offset correction to translation  
    //        x += MenuAndParams.xTranslationOffsetParam.value;
    //        y += MenuAndParams.yTranslationOffsetParam.value;
    //        z += MenuAndParams.zTranslationOffsetParam.value;      

    // Convert Rotation Vector from OpenCV axis polarity definition to OpenGL definition
    // Note, polarity of x-axis is OK, no need to invert.
    rvec.put(1, 0, -1.0f * rvec.get(1, 0)[0]); // y-axis
    rvec.put(2, 0, -1.0f * rvec.get(2, 0)[0]); // z-axis

    //        // =+= Add manual offset correction to Rotation
    //        rvec.put(0, 0, rvec.get(0, 0)[0] + MenuAndParams.xRotationOffsetParam.value * Math.PI / 180.0);  // X rotation
    //        rvec.put(1, 0, rvec.get(1, 0)[0] + MenuAndParams.yRotationOffsetParam.value * Math.PI / 180.0);  // Y rotation
    //        rvec.put(2, 0, rvec.get(2, 0)[0] + MenuAndParams.zRotationOffsetParam.value * Math.PI / 180.0);  // Z rotation

    // Package up as CubePose object
    CubePose cubePose = new CubePose();
    cubePose.x = x;
    cubePose.y = y;
    cubePose.z = z;
    cubePose.xRotation = rvec.get(0, 0)[0];
    cubePose.yRotation = rvec.get(1, 0)[0];
    cubePose.zRotation = rvec.get(2, 0)[0];

    //      Log.e(Constants.TAG, "Result: " + result);
    //      Log.e(Constants.TAG, "Camera: " + cameraMatrix.dump());
    //      Log.e(Constants.TAG, "Rotation: " + rvec.dump());
    //      Log.e(Constants.TAG, "Translation: " + tvec.dump());

    //      // Reporting in OpenGL World Coordinates
    //      Core.rectangle(image, new Point(0, 50), new Point(1270, 150), Constants.ColorBlack, -1);
    //      Core.putText(image, String.format("Translation  x=%4.2f y=%4.2f z=%4.2f", x, y, z), new Point(50, 100), Constants.FontFace, 3, Constants.ColorWhite, 3);
    //      Core.putText(image, String.format("Rotation     x=%4.0f y=%4.0f z=%4.0f", cubeXrotation, cubeYrotation, cubeZrotation), new Point(50, 150), Constants.FontFace, 3, Constants.ColorWhite, 3);

    Log.v(Constants.TAG, "Cube Pose: " + cubePose);

    return cubePose;
}

From source file: org.ar.rubik.CubeReconstructor.java

License: Open Source License

/**
 * Pose Estimation
 * 
 * Deduce real world cube coordinates and rotation
 * 
 * @param rubikFace
 * @param image 
 * @param stateModel 
 */
public void poseEstimation(RubikFace rubikFace, Mat image, StateModel stateModel) {

    if (rubikFace == null)
        return;

    if (rubikFace.faceRecognitionStatus != FaceRecognitionStatusEnum.SOLVED)
        return;

    LeastMeansSquare lmsResult = rubikFace.lmsResult;

    if (lmsResult == null)
        return;

    // OpenCV Pose Estimate requires at least four points.
    if (rubikFace.rhombusList.size() <= 4)
        return;

    // List of real world point and screen points that correspond.
    List<Point3> objectPointsList = new ArrayList<Point3>(9);
    List<Point> imagePointsList = new ArrayList<Point>(9);

    // Create list of image (in 2D) and object (in 3D) points.
    // Loop over Rubik Face Tiles/Rhombi
    for (int n = 0; n < 3; n++) {
        for (int m = 0; m < 3; m++) {

            Rhombus rhombus = rubikFace.faceRhombusArray[n][m];

            // Only use if Rhombus was non null.
            if (rhombus != null) {

                // Obtain center of Rhombus in screen image coordinates
                // Convention:
                //  o X is zero on the left, and increases to the right.
                //  o Y is zero on the top and increases downward.
                Point imagePoint = new Point(rhombus.center.x, rhombus.center.y);
                imagePointsList.add(imagePoint);

                // N and M here are actual indices, not the conceptual ones used in the design doc.
                int mm = 2 - n;
                int nn = 2 - m;
                // above now matches design doc.
                // that is:
                //  o the nn vector is to the right and upwards.
                //  o the mm vector is to the left and upwards.

                // Calculate center of Tile in OpenCV World Space Coordinates
                // Convention:
                //  o X is zero in the center, and increases to the left.
                //  o Y is zero in the center and increases downward.
                //  o Z is zero (at the world coordinate origin) and increases away from the camera.
                float x = (1 - mm) * 0.66666f;
                float y = -1.0f;
                float z = -1.0f * (1 - nn) * 0.666666f;
                Point3 objectPoint = new Point3(x, y, z);
                objectPointsList.add(objectPoint);
            }
        }
    }

    // Cast image point list into OpenCV Matrix.
    MatOfPoint2f imagePoints = new MatOfPoint2f();
    imagePoints.fromList(imagePointsList);

    // Cast object point list into OpenCV Matrix.
    MatOfPoint3f objectPoints = new MatOfPoint3f();
    objectPoints.fromList(objectPointsList);

    Mat cameraMatrix = stateModel.cameraParameters.getOpenCVCameraMatrix();
    MatOfDouble distCoeffs = new MatOfDouble();
    Mat rvec = new Mat();
    Mat tvec = new Mat();

    //      Log.e(Constants.TAG, "Image Points: " + imagePoints.dump());
    //      Log.e(Constants.TAG, "Object Points: " + objectPoints.dump());

    //      boolean result = 
    Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

    Log.v(Constants.TAG, String.format("Open CV Rotation Vector x=%4.2f y=%4.2f z=%4.2f", rvec.get(0, 0)[0],
            rvec.get(1, 0)[0], rvec.get(2, 0)[0]));

    // Convert from OpenCV to OpenGL World Coordinates
    x = +1.0f * (float) tvec.get(0, 0)[0];
    y = -1.0f * (float) tvec.get(1, 0)[0];
    z = -1.0f * (float) tvec.get(2, 0)[0];

    // =+= Add manual offset correction to translation  
    x += MenuAndParams.xTranslationOffsetParam.value;
    y += MenuAndParams.yTranslationOffsetParam.value;
    z += MenuAndParams.zTranslationOffsetParam.value;

    // Convert Rotation Vector from OpenCV axis polarity definition to OpenGL definition
    rvec.put(1, 0, -1.0f * rvec.get(1, 0)[0]);
    rvec.put(2, 0, -1.0f * rvec.get(2, 0)[0]);

    // =+= Add manual offset correction to Rotation
    rvec.put(0, 0, rvec.get(0, 0)[0] + MenuAndParams.xRotationOffsetParam.value * Math.PI / 180.0); // X rotation
    rvec.put(1, 0, rvec.get(1, 0)[0] + MenuAndParams.yRotationOffsetParam.value * Math.PI / 180.0); // Y rotation
    rvec.put(2, 0, rvec.get(2, 0)[0] + MenuAndParams.zRotationOffsetParam.value * Math.PI / 180.0); // Z rotation

    // Create an OpenCV Rotation Matrix from a Rotation Vector
    Mat rMatrix = new Mat(4, 4, CvType.CV_32FC2);
    Calib3d.Rodrigues(rvec, rMatrix);
    Log.v(Constants.TAG, "Rodrigues Matrix: " + rMatrix.dump());

    /*
     * Create an OpenGL Rotation Matrix
     * Notes:
     *   o  OpenGL matrices are column-major.
     *   o  OpenCV Rodrigues Rotation Matrix is 3x3 where OpenGL Rotation Matrix is 4x4.
     */

    // Initialize all Rotational Matrix elements to zero.
    for (int i = 0; i < 16; i++)
        rotationMatrix[i] = 0.0f; // Initialize to zero

    // Initialize element [3,3] to 1.0: i.e., the "w" component in homogeneous coordinates
    rotationMatrix[3 * 4 + 3] = 1.0f;

    // Copy OpenCV matrix to OpenGL matrix element by element.
    for (int r = 0; r < 3; r++)
        for (int c = 0; c < 3; c++)
            rotationMatrix[r + c * 4] = (float) (rMatrix.get(r, c)[0]);

    // Diagnostics
    for (int r = 0; r < 4; r++)
        Log.v(Constants.TAG, String.format("Rotation Matrix  r=%d  [%5.2f  %5.2f  %5.2f  %5.2f]", r,
                rotationMatrix[r + 0], rotationMatrix[r + 4], rotationMatrix[r + 8], rotationMatrix[r + 12]));

    //      Log.e(Constants.TAG, "Result: " + result);
    //      Log.e(Constants.TAG, "Camera: " + cameraMatrix.dump());
    //      Log.e(Constants.TAG, "Rotation: " + rvec.dump());
    //      Log.e(Constants.TAG, "Translation: " + tvec.dump());

    //      // Reporting in OpenGL World Coordinates
    //      Core.rectangle(image, new Point(0, 50), new Point(1270, 150), Constants.ColorBlack, -1);
    //      Core.putText(image, String.format("Translation  x=%4.2f y=%4.2f z=%4.2f", x, y, z), new Point(50, 100), Constants.FontFace, 3, Constants.ColorWhite, 3);
    //      Core.putText(image, String.format("Rotation     x=%4.0f y=%4.0f z=%4.0f", cubeXrotation, cubeYrotation, cubeZrotation), new Point(50, 150), Constants.FontFace, 3, Constants.ColorWhite, 3);
}