Example usage for org.opencv.core MatOfPoint2f convertTo

Introduction

On this page you can find example usage of org.opencv.core.MatOfPoint2f.convertTo.

Prototype

public void convertTo(Mat m, int rtype) 
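
convertTo copies the calling matrix into m, converting each element to the given rtype. Every example below follows the same round trip: an integer MatOfPoint contour is converted to a floating-point MatOfPoint2f (CV_32FC2) so that Imgproc.approxPolyDP can process it, and the simplified result is converted back to CV_32S. The following is a minimal, self-contained sketch of that pattern; the point data and the class name ConvertToExample are made up for illustration, and it assumes the OpenCV Java bindings and native library are available.

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.MatOfPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.imgproc.Imgproc;

public class ConvertToExample {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // An integer contour such as Imgproc.findContours would produce
        // (arbitrary sample points).
        MatOfPoint contour = new MatOfPoint(new Point(0, 0), new Point(10, 1),
                new Point(20, 0), new Point(20, 20), new Point(0, 20));

        // approxPolyDP requires floating-point input, so convert CV_32S -> CV_32FC2.
        MatOfPoint2f contour2f = new MatOfPoint2f();
        contour.convertTo(contour2f, CvType.CV_32FC2);

        // Simplify the polygon with the Douglas-Peucker algorithm.
        MatOfPoint2f approx2f = new MatOfPoint2f();
        Imgproc.approxPolyDP(contour2f, approx2f, 3, true);

        // Convert the simplified polygon back to integer points.
        MatOfPoint approx = new MatOfPoint();
        approx2f.convertTo(approx, CvType.CV_32S);

        System.out.println("points before: " + contour.rows() + ", after: " + approx.rows());
    }
}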

Usage

From source file:ImageReade.java

public static void detectLetter(Mat img) {
    ArrayList<Rect> boundRect = new ArrayList<>();
    Mat img_gray = new Mat();
    Mat img_sobel = new Mat();
    Mat img_threshold = new Mat();
    Imgproc.cvtColor(img, img_gray, Imgproc.COLOR_BGRA2GRAY);
    imshow("Rec img_gray", img_gray);
    Imgproc.Sobel(img_gray, img_sobel, CvType.CV_8U, 1, 0, 3, 1, 0, Imgproc.BORDER_DEFAULT);
    imshow("Rec img_sobel", img_sobel);
    Imgproc.threshold(img_sobel, img_threshold, 0, 255, Imgproc.THRESH_OTSU + Imgproc.THRESH_BINARY);
    imshow("Rec img_threshold", img_threshold);

    Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(16, 6));

    Imgproc.morphologyEx(img_threshold, img_threshold, Imgproc.MORPH_CLOSE, element);
    imshow("Rec img_threshold second", img_threshold);

    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();

    //Imgproc.findContours(img_threshold, contours, new Mat(), Imgproc.RETR_LIST,Imgproc.CHAIN_APPROX_SIMPLE);
    Imgproc.findContours(img_threshold, contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);

    for (int i = 0; i < contours.size(); i++) {
        System.out.println(Imgproc.contourArea(contours.get(i)));
        if (Imgproc.contourArea(contours.get(i)) > 100) {
            MatOfPoint2f mMOP2f1 = new MatOfPoint2f();
            MatOfPoint2f mMOP2f2 = new MatOfPoint2f();
            contours.get(i).convertTo(mMOP2f1, CvType.CV_32FC2);
            Imgproc.approxPolyDP(mMOP2f1, mMOP2f2, 3, true);
            mMOP2f2.convertTo(contours.get(i), CvType.CV_32S);
            Rect rect = Imgproc.boundingRect(contours.get(i));
            if (rect.width > rect.height) {
                Core.rectangle(img, new Point(rect.x, rect.y),
                        new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(0, 0, 255));
            }
        }
    }
    imshow("Rec Detected", img);
}

From source file:ThirdTry.java

public static void detectLetter(Mat img, Mat m2) {
    ArrayList<Rect> boundRect = new ArrayList<>();
    Mat img_gray = new Mat();
    Mat img_sobel = new Mat();
    Mat img_threshold = new Mat();
    Imgproc.cvtColor(img, img_gray, Imgproc.COLOR_BGRA2GRAY);
    //imshow("Rec img_gray", img_gray);
    Imgproc.Sobel(img_gray, img_sobel, CvType.CV_8UC1, 1, 0, 3, 1, 0, Imgproc.BORDER_DEFAULT);
    //imshow("Rec img_sobel", img_sobel);
    Imgproc.threshold(m2, img_threshold, 0, 255, Imgproc.THRESH_OTSU + Imgproc.THRESH_BINARY);
    //imshow("Rec img_threshold", img_threshold);

    Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 2));

    Imgproc.morphologyEx(m2, img_threshold, Imgproc.MORPH_CLOSE, element);
    imshow("Rec img_threshold second", img_threshold);

    element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(12, 12));
    Imgproc.morphologyEx(img_threshold, img_threshold, Imgproc.MORPH_CLOSE, element);
    //imshow("Rec img_threshold second", img_threshold);

    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();

    //Imgproc.findContours(img_threshold, contours, new Mat(), Imgproc.RETR_LIST,Imgproc.CHAIN_APPROX_SIMPLE);
    Imgproc.findContours(img_threshold, contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);

    for (int i = 0; i < contours.size(); i++) {
        System.out.println(Imgproc.contourArea(contours.get(i)));
        if (Imgproc.contourArea(contours.get(i)) > 100) {
            MatOfPoint2f mMOP2f1 = new MatOfPoint2f();
            MatOfPoint2f mMOP2f2 = new MatOfPoint2f();
            contours.get(i).convertTo(mMOP2f1, CvType.CV_32FC2);
            Imgproc.approxPolyDP(mMOP2f1, mMOP2f2, 3, true);
            mMOP2f2.convertTo(contours.get(i), CvType.CV_32S);
            Rect rect = Imgproc.boundingRect(contours.get(i));
            if (rect.width > rect.height) {
                Core.rectangle(img, new Point(rect.x, rect.y),
                        new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(0, 0, 255));
            }
        }
    }
    //imshow("Rec Detected", img);
}

From source file:opencv.CaptchaDetection.java

/**
 * Finds the digit regions in the source image and returns an ROI for each one.
 * @param src the source image
 * @return a list of per-digit ROI sub-images
 */
private static List<Mat> find_number(Mat src) {
    Mat src_tmp = src.clone();

    //  Dilate to thicken the strokes
    Imgproc.dilate(src_tmp, src_tmp, new Mat());

    //  Blur, then detect edges with Canny
    Mat canny_edge = new Mat();
    Imgproc.blur(src_tmp, src_tmp, new Size(3, 3));
    Imgproc.Canny(src_tmp, canny_edge, 50, 150, 3, false);

    //  Find contours of the edges
    List<MatOfPoint> contours = new ArrayList<>();
    Imgproc.findContours(canny_edge, contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);

    List<Rect> boundRect = new ArrayList<>();

    //  Approximate each contour and keep bounding boxes that look like digits
    for (int i = 0; i < contours.size(); i++) {
        MatOfPoint2f tmp_mp2f_1 = new MatOfPoint2f();
        MatOfPoint2f tmp_mp2f_2 = new MatOfPoint2f();

        contours.get(i).convertTo(tmp_mp2f_1, CvType.CV_32FC2);

        Imgproc.approxPolyDP(tmp_mp2f_1, tmp_mp2f_2, 3, true);

        tmp_mp2f_2.convertTo(contours.get(i), CvType.CV_32S);

        Rect rect = Imgproc.boundingRect(contours.get(i));

        //if (rect.area() > 300)
        //out.println("h : " + rect.height + ", w : " + rect.width + ", aera :  " + rect.area());

        if (rect.height >= 21 && rect.width >= 21 && rect.area() >= 700)
            boundRect.add(rect);
    }

    //  Draw the candidate rectangles (debug output)
    for (Rect rect : boundRect) {
        Scalar color = new Scalar(128);
        Imgproc.rectangle(src_tmp, rect.tl(), rect.br(), color, 2, 8, 0);
    }

    //  Sort the bounding boxes
    Collections.sort(boundRect, rectSort);

    List<Mat> numRoi = new ArrayList<>();
    for (Rect rect : boundRect)
        numRoi.add(src.submat(rect));

    //for (Mat roi : numRoi) 
    //showResult(roi, "roi");

    return numRoi;
}

From source file:org.ar.rubik.ImageRecognizer.java

License:Open Source License

/**
 * On Camera Frame
 * 
 * Process the frame image through Rubik Face recognition, possibly resulting in a state change.
 * 
 *  (non-Javadoc)
 * @see org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2#onCameraFrame(org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame)
 */
@Override
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {

    //      Log.e(Constants.TAG, "CV Thread ID = " + Thread.currentThread().getId());

    // Just display error message if it is non-null.
    if (errorImage != null)
        return errorImage;

    Mat image = inputFrame.rgba();
    Size imageSize = image.size();
    Log.v(Constants.TAG_CAL, "Input Frame width=" + imageSize.width + " height=" + imageSize.height);
    if (imageSize.width != stateModel.openCVSize.width || imageSize.height != stateModel.openCVSize.height)
        Log.e(Constants.TAG_CAL, "State Model openCVSize does not agree with input frame!");

    // Save or Recall image as requested
    switch (MenuAndParams.imageSourceMode) {
    case NORMAL:
        break;
    case SAVE_NEXT:
        Util.saveImage(image);
        MenuAndParams.imageSourceMode = ImageSourceModeEnum.NORMAL;
        break;
    case PLAYBACK:
        image = Util.recallImage();
    default:
        break;
    }

    // Calculate and display Frames Per Second
    long newTimeStamp = System.currentTimeMillis();
    if (framesPerSecondTimeStamp > 0) {
        long frameTime = newTimeStamp - framesPerSecondTimeStamp;
        double framesPerSecond = 1000.0 / frameTime;
        String string = String.format("%4.1f FPS", framesPerSecond);
        Core.putText(image, string, new Point(50, 700), Constants.FontFace, 2, ColorTileEnum.WHITE.cvColor, 2);
    }
    framesPerSecondTimeStamp = newTimeStamp;

    try {

        // Initialize
        RubikFace rubikFace = new RubikFace();
        rubikFace.profiler.markTime(Profiler.Event.START);
        Log.i(Constants.TAG, "============================================================================");

        /* **********************************************************************
         * **********************************************************************
         * Return Original Image
         */
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.DIRECT) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            return annotation.drawAnnotation(image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Process to Grey Scale
         * 
         * This algorithm highlights areas that are all of nearly
         * the same hue. In particular, cube faces should be highlighted.
         */
        Mat greyscale_image = new Mat();
        Imgproc.cvtColor(image, greyscale_image, Imgproc.COLOR_BGR2GRAY);
        rubikFace.profiler.markTime(Profiler.Event.GREYSCALE);
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.GREYSCALE) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(greyscale_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Gaussian Filter Blur prevents getting a lot of false hits 
         */
        Mat blur_image = new Mat();

        int kernelSize = (int) MenuAndParams.gaussianBlurKernelSizeParam.value;
        kernelSize = kernelSize % 2 == 0 ? kernelSize + 1 : kernelSize; // make odd
        Imgproc.GaussianBlur(greyscale_image, blur_image, new Size(kernelSize, kernelSize), -1, -1);
        rubikFace.profiler.markTime(Profiler.Event.GAUSSIAN);
        greyscale_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.GAUSSIAN) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(blur_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Canny Edge Detection
         */
        Mat canny_image = new Mat();
        Imgproc.Canny(blur_image, canny_image, MenuAndParams.cannyLowerThresholdParam.value,
                MenuAndParams.cannyUpperThresholdParam.value, 3, // Sobel aperture size. This seems to be the value typically used in the literature: i.e., a 3x3 Sobel kernel.
                false); // use cheap gradient calculation: norm =|dI/dx|+|dI/dy|
        rubikFace.profiler.markTime(Profiler.Event.EDGE);
        blur_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.CANNY) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(canny_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Dilation Image Process
         */
        Mat dilate_image = new Mat();
        Imgproc.dilate(canny_image, dilate_image, Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(
                MenuAndParams.dilationKernelSizeParam.value, MenuAndParams.dilationKernelSizeParam.value)));
        rubikFace.profiler.markTime(Profiler.Event.DILATION);
        canny_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.DILATION) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(dilate_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Contour Generation 
         */
        List<MatOfPoint> contours = new LinkedList<MatOfPoint>();
        Mat hierarchy = new Mat();
        Imgproc.findContours(dilate_image, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE); // Note: tried other TC89 options, but no significant change or improvement on cpu time.
        rubikFace.profiler.markTime(Profiler.Event.CONTOUR);
        dilate_image.release();

        // Create a gray scale image in RGBA format, and then add yellow colored contours on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.CONTOUR) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4);
            Imgproc.drawContours(rgba_gray_image, contours, -1, ColorTileEnum.YELLOW.cvColor, 3);
            Core.putText(rgba_gray_image, "Num Contours: " + contours.size(), new Point(500, 50),
                    Constants.FontFace, 4, ColorTileEnum.RED.cvColor, 4);
            gray_image.release();
            image.release();
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Polygon Detection
         */
        List<Rhombus> polygonList = new LinkedList<Rhombus>();
        for (MatOfPoint contour : contours) {

            // Keep only counter clockwise contours.  A clockwise contour is reported as a negative number.
            double contourArea = Imgproc.contourArea(contour, true);
            if (contourArea < 0.0)
                continue;

            // Keep only reasonable area contours
            if (contourArea < MenuAndParams.minimumContourAreaParam.value)
                continue;

            // Float, rather than double, precision is required by the approximate polygon detection algorithm.
            MatOfPoint2f contour2f = new MatOfPoint2f();
            MatOfPoint2f polygon2f = new MatOfPoint2f();
            MatOfPoint polygon = new MatOfPoint();

            // Make a polygon out of the contour with the provided epsilon accuracy parameter.
            // This uses the Douglas-Peucker algorithm: http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
            contour.convertTo(contour2f, CvType.CV_32FC2);
            Imgproc.approxPolyDP(contour2f, polygon2f, MenuAndParams.polygonEpsilonParam.value, // The maximum distance between the original curve and its approximation.
                    true); // The resulting polygon representation is "closed": its first and last vertices are connected.
            polygon2f.convertTo(polygon, CvType.CV_32S);

            polygonList.add(new Rhombus(polygon));
        }

        rubikFace.profiler.markTime(Profiler.Event.POLYGON);

        // Create gray scale image but in RGB format, and then add yellow colored polygons on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.POLYGON) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4);
            for (Rhombus polygon : polygonList)
                polygon.draw(rgba_gray_image, ColorTileEnum.YELLOW.cvColor);
            Core.putText(rgba_gray_image, "Num Polygons: " + polygonList.size(), new Point(500, 50),
                    Constants.FontFace, 3, ColorTileEnum.RED.cvColor, 4);
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Rhombus Tile Recognition
         * 
         * From polygon list, produces a list of suitable Parallelograms (Rhombi).
         */
        Log.i(Constants.TAG, String.format("Rhombus:   X    Y   Area   a-a  b-a a-l b-l gamma"));
        List<Rhombus> rhombusList = new LinkedList<Rhombus>();
        // Get only valid Rhombus(es): actually parallelograms.
        for (Rhombus rhombus : polygonList) {
            rhombus.qualify();
            if (rhombus.status == Rhombus.StatusEnum.VALID)
                rhombusList.add(rhombus);
        }

        // Filtering w.r.t. Rhombus set characteristics
        Rhombus.removedOutlierRhombi(rhombusList);

        rubikFace.profiler.markTime(Profiler.Event.RHOMBUS);

        // Create gray scale image but in RGB format, and then add yellow colored Rhombi (parallelograms) on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.RHOMBUS) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4);
            for (Rhombus rhombus : rhombusList)
                rhombus.draw(rgba_gray_image, ColorTileEnum.YELLOW.cvColor);
            Core.putText(rgba_gray_image, "Num Rhombus: " + rhombusList.size(), new Point(500, 50),
                    Constants.FontFace, 4, ColorTileEnum.RED.cvColor, 4);
            gray_image.release();
            image.release();
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Face Recognition
         * 
         * Takes a collection of Rhombus objects and determines if a valid
         * Rubik Face can be determined from them, and then also determines 
         * initial color for all nine tiles. 
         */
        rubikFace.processRhombuses(rhombusList, image);
        Log.i(Constants.TAG, "Face Solution = " + rubikFace.faceRecognitionStatus);
        rubikFace.profiler.markTime(Profiler.Event.FACE);
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.FACE_DETECT) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            return annotation.drawAnnotation(image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Cube Pose Estimation
         * 
         * Reconstruct the Rubik Cube 3D location and orientation in GL space coordinates.
         */
        if (rubikFace.faceRecognitionStatus == FaceRecognitionStatusEnum.SOLVED) {

            // Obtain Cube Pose from Face Grid information.
            stateModel.cubePose = CubePoseEstimator.poseEstimation(rubikFace, image, stateModel);

            // Process measurement update on Kalman Filter (if it exists).
            KalmanFilter kalmanFilter = stateModel.kalmanFilter;
            if (kalmanFilter != null)
                kalmanFilter.measurementUpdate(stateModel.cubePose, System.currentTimeMillis());

            // Process measurement update on Kalman Filter ALSM (if it exists).
            KalmanFilterALSM kalmanFilterALSM = stateModel.kalmanFilterALSM;
            if (kalmanFilterALSM != null)
                kalmanFilterALSM.measurementUpdate(stateModel.cubePose, System.currentTimeMillis());
        } else {
            stateModel.cubePose = null;
        }
        rubikFace.profiler.markTime(Profiler.Event.POSE);

        /* **********************************************************************
         * **********************************************************************
         * Application State Machine
         * 
         * Will provide user instructions.
         * Will determine when we are on-face and off-face
         * Will determine when we are on-new-face
         * Will change state 
         */
        appStateMachine.onFaceEvent(rubikFace);
        rubikFace.profiler.markTime(Profiler.Event.CONTROLLER);
        rubikFace.profiler.markTime(Profiler.Event.TOTAL);

        // Normal return point.
        stateModel.activeRubikFace = rubikFace;
        return annotation.drawAnnotation(image);

        // =+= Issue: how to get stdio to print as error and not warning in logcat?
    } catch (CvException e) {
        Log.e(Constants.TAG, "CvException: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "CvException: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    } catch (Exception e) {
        Log.e(Constants.TAG, "Exception: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "Exception: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    } catch (Error e) {
        Log.e(Constants.TAG, "Error: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "Error: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    }

    return annotation.drawAnnotation(image);
}

From source file:org.usfirst.frc.team2084.CMonster2016.vision.Target.java

License:Open Source License

/**
 * Creates a new possible target based on the specified contour and calculates
 * its score.
 *
 * @param contour the shape of the possible target
 * @param grayImage the grayscale image used to refine the corner locations
 */
public Target(MatOfPoint contour, Mat grayImage) {
    // Simplify contour to make the corner finding algorithm work better
    MatOfPoint2f fContour = new MatOfPoint2f();
    contour.convertTo(fContour, CvType.CV_32F);
    Imgproc.approxPolyDP(fContour, fContour, VisionParameters.getGoalApproxPolyEpsilon(), true);
    fContour.convertTo(contour, CvType.CV_32S);

    this.contour = contour;

    // Check the area, and don't do any calculations if it is not valid.
    // (The assignment inside the condition is intentional.)
    if (validArea = validateArea()) {

        // Find a bounding rectangle
        RotatedRect rect = Imgproc.minAreaRect(fContour);

        Point[] rectPoints = new Point[4];
        rect.points(rectPoints);

        for (int j = 0; j < rectPoints.length; j++) {
            Point rectPoint = rectPoints[j];

            double minDistance = Double.MAX_VALUE;
            Point point = null;

            for (int i = 0; i < contour.rows(); i++) {
                Point contourPoint = new Point(contour.get(i, 0));
                double dist = distance(rectPoint, contourPoint);
                if (dist < minDistance) {
                    minDistance = dist;
                    point = contourPoint;
                }
            }

            rectPoints[j] = point;
        }
        MatOfPoint2f rectMat = new MatOfPoint2f(rectPoints);
        // Refine the corners to improve accuracy
        Imgproc.cornerSubPix(grayImage, rectMat, new Size(4, 10), new Size(-1, -1),
                new TermCriteria(TermCriteria.EPS + TermCriteria.COUNT, 30, 0.1));
        rectPoints = rectMat.toArray();

        // Identify each corner
        SortedMap<Double, List<Point>> x = new TreeMap<>();
        Arrays.stream(rectPoints).forEach((p) -> {
            List<Point> points;
            if ((points = x.get(p.x)) == null) {
                x.put(p.x, points = new LinkedList<>());
            }
            points.add(p);
        });

        int i = 0;
        for (Iterator<List<Point>> it = x.values().iterator(); it.hasNext();) {
            List<Point> s = it.next();

            for (Point p : s) {
                switch (i) {
                case 0:
                    topLeft = p;
                    break;
                case 1:
                    bottomLeft = p;
                    break;
                case 2:
                    topRight = p;
                    break;
                case 3:
                    bottomRight = p;
                }
                i++;
            }
        }

        // Organize corners
        if (topLeft.y > bottomLeft.y) {
            Point p = bottomLeft;
            bottomLeft = topLeft;
            topLeft = p;
        }

        if (topRight.y > bottomRight.y) {
            Point p = bottomRight;
            bottomRight = topRight;
            topRight = p;
        }

        // Create corners for centroid calculation
        corners = new MatOfPoint2f(rectPoints);

        // Calculate center
        Moments moments = Imgproc.moments(corners);
        center = new Point(moments.m10 / moments.m00, moments.m01 / moments.m00);

        // Put the points in the correct order for solvePnP
        rectPoints[0] = topLeft;
        rectPoints[1] = topRight;
        rectPoints[2] = bottomLeft;
        rectPoints[3] = bottomRight;
        // Recreate corners in the new order
        corners = new MatOfPoint2f(rectPoints);

        widthTop = distance(topLeft, topRight);
        widthBottom = distance(bottomLeft, bottomRight);
        width = (widthTop + widthBottom) / 2.0;
        heightLeft = distance(topLeft, bottomLeft);
        heightRight = distance(topRight, bottomRight);
        height = (heightLeft + heightRight) / 2.0;

        Mat tvec = new Mat();

        // Calculate target's location
        Calib3d.solvePnP(OBJECT_POINTS, corners, CAMERA_MAT, DISTORTION_MAT, rotation, tvec, false,
                Calib3d.CV_P3P);

        // =======================================
        // Position and Orientation Transformation
        // =======================================

        double armAngle = VisionResults.getArmAngle();

        // Flip y axis to point upward
        Core.multiply(tvec, SIGN_NORMALIZATION_MATRIX, tvec);

        // Shift origin to arm pivot point, on the robot's centerline
        CoordinateMath.translate(tvec, CAMERA_X_OFFSET, CAMERA_Y_OFFSET, ARM_LENGTH);

        // Align axes with ground
        CoordinateMath.rotateX(tvec, -armAngle);
        Core.add(rotation, new MatOfDouble(armAngle, 0, 0), rotation);

        // Shift origin to robot center of rotation
        CoordinateMath.translate(tvec, 0, ARM_PIVOT_Y_OFFSET, -ARM_PIVOT_Z_OFFSET);

        double xPosFeet = tvec.get(0, 0)[0];
        double yPosFeet = tvec.get(1, 0)[0];
        double zPosFeet = tvec.get(2, 0)[0];

        // Old less effective aiming heading and distance calculation
        // double pixelsToFeet = TARGET_WIDTH / width;

        // distance = (TARGET_WIDTH * HighGoalProcessor.IMAGE_SIZE.width
        // / (2 * width * Math.tan(VisionParameters.getFOVAngle() / 2)));
        // double xPosFeet = (center.x - (HighGoalProcessor.IMAGE_SIZE.width
        // / 2)) * pixelsToFeet;
        // double yPosFeet = -(center.y -
        // (HighGoalProcessor.IMAGE_SIZE.height / 2)) * pixelsToFeet;

        distance = Math.sqrt(xPosFeet * xPosFeet + zPosFeet * zPosFeet);

        position = new Point3(xPosFeet, yPosFeet, zPosFeet);

        xGoalAngle = Math.atan(xPosFeet / zPosFeet);
        yGoalAngle = Math.atan(yPosFeet / zPosFeet);

        validate();
        score = calculateScore();
    } else {
        valid = false;
    }
}