Example usage for org.opencv.core Mat Mat

List of usage examples for org.opencv.core Mat Mat

Introduction

On this page you can find example usages of the org.opencv.core Mat Mat constructor.

Prototype

public Mat() 

Source Link

Usage

From source file:M.java

/**
 * Call the real-time camera and resize the image to the size of
 * WIDTH*HEIGHT. The resized image is stored in the folder "img_resized".
 *
 * @throws Exception/*from   w w  w  . j  a v  a 2s. c  o  m*/
 */
public static String realtimeCamera() throws Exception {
    System.out.println("Camera is called!");
    String destPath = "";
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    //or ...     System.loadLibrary("opencv_java244");       
    //make the JFrame
    JFrame frame = new JFrame("WebCam Capture - Face detection");
    frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);

    FaceDetector fd = new FaceDetector();
    FacePanel facePanel = new FacePanel();

    frame.setSize(400, 400);
    frame.setBackground(Color.BLUE);
    frame.add(facePanel, BorderLayout.CENTER);
    //        
    frame.setVisible(true);
    facePanel.setVisible(true);
    facePanel.validate();

    //        Thread t = new Thread();
    //Open and Read from the video stream  
    Mat webcam_image = new Mat();
    VideoCapture webCam = new VideoCapture(0);
    if (webCam.isOpened()) {
        //            Thread.sleep(500); /// This one-time delay allows the Webcam to initialize itself  
        while (M.flag) {
            webCam.read(webcam_image);
            if (!webcam_image.empty()) {
                //                    Thread.sleep(200); /// This delay eases the computational load .. with little performance leakage
                System.out.println("CAMERA: " + Thread.currentThread());
                frame.setSize(webcam_image.width() + 40, webcam_image.height() + 60);
                //Apply the classifier to the captured image  
                Mat temp = webcam_image;
                temp = fd.detect(webcam_image);
                //Display the image --------BUG
                facePanel.matToBufferedImage(temp);
                System.out.println("Image buffered!");
                facePanel.repaint();
                System.out.println("Panel repainted!");
                System.out.println(facePanel.isVisible());
                //                    System.out.println("visibility:"+facePanel.isVisible());//true
                //                    System.out.println("enabled?"+facePanel.isEnabled());//true
                //                    System.out.println("validity?"+facePanel.isValid());//true
                MatOfByte mb = new MatOfByte();
                Highgui.imencode(".jpg", webcam_image, mb);
                BufferedImage image = ImageIO.read(new ByteArrayInputStream(mb.toArray()));
                destPath = "build\\classes\\cam_img\\capture.jpg";
                File file = new File(destPath);
                ImageIO.write(image, "JPEG", file);

            } else {
                System.out.println(" --(!) No captured frame from webcam !");
                break;
            }
        }
    }
    webCam.release(); //release the webcam
    String imgPath = resize(destPath);
    flag = true;
    frame.dispose();
    return imgPath;
}

From source file:abc.RomanCharacterPicture.java

/**
 * Runs OCR on the region between the two marker rectangles and interprets
 * the recognised text as a Roman numeral composed of 'I' and 'V' strokes.
 *
 * @return the numeric value read from the picture (I-strokes + 5 * V-strokes),
 *         or 0 if any step of the pipeline fails
 */
public int evaluatePicture() {
    try {
        ITesseract instance = new Tesseract();

        MatToBufImg webcamImageBuff = new MatToBufImg();
        webcamImageBuff.setMatrix(webcam_image, ".jpg");

        // Scale factors between the buffered image and the raw Mat.
        double heightRatio = (double) webcamImageBuff.getBufferedImage().getHeight()
                / (double) webcam_image.height();
        double widthRatio = (double) webcamImageBuff.getBufferedImage().getWidth()
                / (double) webcam_image.width();
        int x1 = this.leftRectangle.getxPos();
        int y1 = this.leftRectangle.getyPos();
        int x2 = this.rightRectangle.getxPos();
        int y2 = this.rightRectangle.getyPos();

        // Region of interest spanning from the left to the right rectangle.
        Rect rect = new Rect(leftRectangle.getxPos(), leftRectangle.getyPos(),
                (rightRectangle.getxPos() - leftRectangle.getxPos()),
                (rightRectangle.getyPos() - leftRectangle.getyPos()));
        Mat subImageMat = webcam_image.submat(rect);

        // NOTE(review): romanCharacter is never used afterwards; kept because
        // getSubimage validates the scaled ROI bounds — confirm before removing.
        BufferedImage romanCharacter = webcamImageBuff.getBufferedImage().getSubimage((int) (x1 * widthRatio),
                (int) (y1 * heightRatio), (int) (widthRatio * (x2 - x1)), (int) (heightRatio * (y2 - y1)));

        // Isolate dark (near-black) strokes in two HSV value bands and merge them.
        Mat hsv_image = new Mat();
        Imgproc.cvtColor(subImageMat, hsv_image, Imgproc.COLOR_BGR2HSV);

        Mat lower_black_hue_range = new Mat();
        Mat upper_black_hue_range = new Mat();
        Core.inRange(hsv_image, new Scalar(0, 0, 0), new Scalar(180, 255, 30), lower_black_hue_range);
        Core.inRange(hsv_image, new Scalar(0, 0, 20), new Scalar(180, 255, 40), upper_black_hue_range);

        Mat black_hue_image = new Mat();
        Core.addWeighted(lower_black_hue_range, 1.0, upper_black_hue_range, 1.0, 0.0, black_hue_image);

        Imgproc.GaussianBlur(black_hue_image, black_hue_image, new Size(9, 9), 2, 2);

        MatToBufImg blackImageBuff = new MatToBufImg();
        blackImageBuff.setMatrix(black_hue_image, ".jpg");
        BufferedImage test = blackImageBuff.getBufferedImage();

        String result = instance.doOCR(test);

        // Characters that OCR commonly produces for an 'I' stroke.
        int counterI = 0;
        for (int i = 0; i < result.length(); i++) {
            char c = result.charAt(i);
            if (c == 'I' || c == 'l' || c == '1' || c == 'i' || c == 'L' || c == 'j' || c == 'J') {
                counterI++;
            }
        }

        // Characters that OCR commonly produces for a 'V' stroke.
        int counterV = 0;
        for (int i = 0; i < result.length(); i++) {
            char c = result.charAt(i);
            if (c == 'V' || c == 'v' || c == 'W' || c == 'w') {
                counterV++;
            }
        }
        // BUG FIX: the "\\//" check used to sit inside the per-character loop,
        // adding one 'V' for EVERY character whenever the pattern appeared
        // anywhere in the text. Count each occurrence exactly once instead.
        for (int idx = result.indexOf("\\//"); idx != -1; idx = result.indexOf("\\//", idx + 1)) {
            counterV++;
        }

        return (counterI + (counterV * 5));
    } catch (Exception ex) {
        ex.printStackTrace();
        return 0;
    }
}

From source file:ac.robinson.ticqr.TickBoxImageParserTask.java

License:Apache License

/**
 * Scans the task's bitmap for un-ticked tick boxes (a square contour nested
 * inside another square contour of similar size) and returns the centre
 * point of each match.
 *
 * @param unused ignored; required by the AsyncTask signature
 * @return the centre points of all detected un-ticked boxes
 */
@Override
protected ArrayList<PointF> doInBackground(Void... unused) {
    Log.d(TAG, "Searching for tick boxes of " + mBoxSize + " size");

    // we look for *un-ticked* boxes, rather than ticked, as they are uniform in appearance (and hence easier to
    // detect) - they show up as a box within a box
    ArrayList<PointF> centrePoints = new ArrayList<>();
    int minimumOuterBoxArea = (int) Math.round(Math.pow(mBoxSize, 2));
    int maximumOuterBoxArea = (int) Math.round(Math.pow(mBoxSize * 1.35f, 2));
    int minimumInnerBoxArea = (int) Math.round(Math.pow(mBoxSize * 0.5f, 2));

    // image adjustment - blurSize, blurSTDev and adaptiveThresholdSize must not be even numbers
    int blurSize = 9;
    int blurSTDev = 3;
    int adaptiveThresholdSize = Math.round(mBoxSize * 3); // (oddness ensured below)
    int adaptiveThresholdC = 4; // value to add to the mean (can be negative or zero)
    adaptiveThresholdSize = adaptiveThresholdSize % 2 == 0 ? adaptiveThresholdSize + 1 : adaptiveThresholdSize;

    // how similar the recognised polygon must be to its actual contour - lower is more similar
    float outerPolygonSimilarity = 0.045f;
    float innerPolygonSimilarity = 0.075f; // don't require as much accuracy for the inner part of the tick box

    // how large the maximum internal angle can be (e.g., for checking square shape)
    float maxOuterAngleCos = 0.3f;
    float maxInnerAngleCos = 0.4f;

    // use OpenCV to recognise boxes that have a box inside them - i.e. an un-ticked tick box
    // see: http://stackoverflow.com/a/11427501
    Mat bitMat = new Mat();
    Utils.bitmapToMat(mBitmap, bitMat);

    // blur and convert to grey
    // alternative (less flexible): Imgproc.medianBlur(bitMat, bitMat, blurSize);
    Imgproc.GaussianBlur(bitMat, bitMat, new Size(blurSize, blurSize), blurSTDev, blurSTDev);
    Imgproc.cvtColor(bitMat, bitMat, Imgproc.COLOR_RGB2GRAY); // need 8uC1 (1 channel, unsigned char) image type

    // perform adaptive thresholding to detect edges
    // alternative (slower): Imgproc.Canny(bitMat, bitMat, 10, 20, 3, false);
    Imgproc.adaptiveThreshold(bitMat, bitMat, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY,
            adaptiveThresholdSize, adaptiveThresholdC);

    // get the contours in the image, and their hierarchy
    Mat hierarchyMat = new Mat();
    List<MatOfPoint> contours = new ArrayList<>();
    Imgproc.findContours(bitMat, contours, hierarchyMat, Imgproc.RETR_TREE, Imgproc.CHAIN_APPROX_SIMPLE);
    if (DEBUG) {
        Imgproc.drawContours(bitMat, contours, -1, new Scalar(30, 255, 255), 1);
    }

    // parse the contours and look for a box containing another box, with similar enough sizes
    int numContours = contours.size();
    ArrayList<Integer> searchedContours = new ArrayList<>();
    Log.d(TAG, "Found " + numContours + " possible tick box areas");
    if (numContours > 0 && !hierarchyMat.empty()) {
        for (int i = 0; i < numContours; i++) {

            // the original detected contour
            MatOfPoint boxPoints = contours.get(i);

            // hierarchy key: 0 = next sibling num, 1 = previous sibling num, 2 = first child num, 3 = parent num
            int childBox = (int) hierarchyMat.get(0, i)[2]; // usually the largest child (as we're doing RETR_TREE)
            if (childBox == -1) { // we only want elements that have children
                continue;
            } else {
                if (searchedContours.contains(childBox)) {
                    if (DEBUG) {
                        Log.d(TAG, "Ignoring duplicate box at first stage: " + childBox);
                    }
                    continue;
                } else {
                    searchedContours.add(childBox);
                }
            }

            // discard smaller (i.e. noise) outer box areas as soon as possible for speed
            // used to do Imgproc.isContourConvex(outerPoints) later, but the angle check covers this, so no need
            double originalArea = Math.abs(Imgproc.contourArea(boxPoints));
            if (originalArea < minimumOuterBoxArea) {
                continue;
            }
            if (originalArea > maximumOuterBoxArea) {
                continue;
            }

            // simplify the contours of the outer box - we want to detect four-sided shapes only
            MatOfPoint2f boxPoints2f = new MatOfPoint2f(boxPoints.toArray()); // Point2f for approxPolyDP
            Imgproc.approxPolyDP(boxPoints2f, boxPoints2f,
                    outerPolygonSimilarity * Imgproc.arcLength(boxPoints2f, true), true); // simplify the contour
            if (boxPoints2f.height() != 4) { // height is number of points
                if (DEBUG) {
                    Log.d(TAG, "Outer box not 4 points");
                }
                continue;
            }

            // check that the simplified outer box is approximately a square, angle-wise
            org.opencv.core.Point[] boxPointsArray = boxPoints2f.toArray();
            double maxCosine = 0;
            for (int j = 0; j < 4; j++) {
                org.opencv.core.Point pL = boxPointsArray[j];
                org.opencv.core.Point pIntersect = boxPointsArray[(j + 1) % 4];
                org.opencv.core.Point pR = boxPointsArray[(j + 2) % 4];
                // BUG FIX: a redundant discarded call to getLineAngle(...) was removed here.
                maxCosine = Math.max(maxCosine, getLineAngle(pL, pIntersect, pR));
            }
            if (maxCosine > maxOuterAngleCos) {
                if (DEBUG) {
                    Log.d(TAG, "Outer angles not square enough");
                }
                continue;
            }

            // check that the simplified outer box is approximately a square, line length-wise
            double minLine = Double.MAX_VALUE;
            double maxLine = 0;
            for (int p = 1; p < 4; p++) {
                org.opencv.core.Point p1 = boxPointsArray[p - 1];
                org.opencv.core.Point p2 = boxPointsArray[p];
                double xd = p1.x - p2.x;
                double yd = p1.y - p2.y;
                double lineLength = Math.sqrt((xd * xd) + (yd * yd));
                minLine = Math.min(minLine, lineLength);
                maxLine = Math.max(maxLine, lineLength);
            }
            if (maxLine - minLine > minLine) {
                if (DEBUG) {
                    Log.d(TAG, "Outer lines not square enough");
                }
                continue;
            }

            // draw the outer box if debugging
            if (DEBUG) {
                MatOfPoint debugBoxPoints = new MatOfPoint(boxPointsArray);
                Log.d(TAG,
                        "Potential tick box: " + boxPoints2f.size() + ", " + "area: "
                                + Math.abs(Imgproc.contourArea(debugBoxPoints)) + " (min:" + minimumOuterBoxArea
                                + ", max:" + maximumOuterBoxArea + ")");
                drawPoints(bitMat, debugBoxPoints, new Scalar(50, 255, 255), 2);
            }

            // loop through the children - they should be in descending size order, but sometimes this is wrong
            boolean wrongBox = false;
            while (true) {
                if (DEBUG) {
                    Log.d(TAG, "Looping with box: " + childBox);
                }

                // we've previously tried a child - try the next one
                // key: 0 = next sibling num, 1 = previous sibling num, 2 = first child num, 3 = parent num
                if (wrongBox) {
                    childBox = (int) hierarchyMat.get(0, childBox)[0];
                    if (childBox == -1) {
                        break;
                    }
                    if (searchedContours.contains(childBox)) {
                        if (DEBUG) {
                            Log.d(TAG, "Ignoring duplicate box at loop stage: " + childBox);
                        }
                        break;
                    } else {
                        searchedContours.add(childBox);
                    }
                    //noinspection UnusedAssignment
                    wrongBox = false;
                }

                // perhaps this is the outer box - check its child has no children itself
                // (removed so tiny children (i.e. noise) don't mean we mis-detect an un-ticked box as ticked)

                // check the size of the child box is large enough
                boxPoints = contours.get(childBox);
                originalArea = Math.abs(Imgproc.contourArea(boxPoints));
                if (originalArea < minimumInnerBoxArea) {
                    if (DEBUG) {
                        Log.d(TAG, "Inner box too small");
                    }
                    wrongBox = true;
                    continue;
                }

                // simplify the contours of the inner box - again, we want four-sided shapes only
                boxPoints2f = new MatOfPoint2f(boxPoints.toArray());
                Imgproc.approxPolyDP(boxPoints2f, boxPoints2f,
                        innerPolygonSimilarity * Imgproc.arcLength(boxPoints2f, true), true);
                if (boxPoints2f.height() != 4) { // height is number of points
                    Log.d(TAG, "Inner box fewer than 4 points"); // TODO: allow > 4 for low quality images?
                    wrongBox = true;
                    continue;
                }

                // check that the simplified inner box is approximately a square, angle-wise
                // higher tolerance because noise means if we get several inners, the box may not be quite square
                boxPointsArray = boxPoints2f.toArray();
                maxCosine = 0;
                for (int j = 0; j < 4; j++) {
                    org.opencv.core.Point pL = boxPointsArray[j];
                    org.opencv.core.Point pIntersect = boxPointsArray[(j + 1) % 4];
                    org.opencv.core.Point pR = boxPointsArray[(j + 2) % 4];
                    // BUG FIX: a redundant discarded call to getLineAngle(...) was removed here.
                    maxCosine = Math.max(maxCosine, getLineAngle(pL, pIntersect, pR));
                }
                if (maxCosine > maxInnerAngleCos) {
                    Log.d(TAG, "Inner angles not square enough");
                    wrongBox = true;
                    continue;
                }

                // this is probably an inner box - log if debugging
                if (DEBUG) {
                    Log.d(TAG,
                            "Un-ticked inner box: " + boxPoints2f.size() + ", " + "area: "
                                    + Math.abs(Imgproc.contourArea(new MatOfPoint2f(boxPointsArray)))
                                    + " (min: " + minimumInnerBoxArea + ")");
                }

                // find the inner box centre
                double centreX = (boxPointsArray[0].x + boxPointsArray[1].x + boxPointsArray[2].x
                        + boxPointsArray[3].x) / 4f;
                double centreY = (boxPointsArray[0].y + boxPointsArray[1].y + boxPointsArray[2].y
                        + boxPointsArray[3].y) / 4f;

                // draw the inner box if debugging
                if (DEBUG) {
                    drawPoints(bitMat, new MatOfPoint(boxPointsArray), new Scalar(255, 255, 255), 1);
                    Core.circle(bitMat, new org.opencv.core.Point(centreX, centreY), 3,
                            new Scalar(255, 255, 255));
                }

                // add to the list of boxes to check
                centrePoints.add(new PointF((float) centreX, (float) centreY));
                break;
            }
        }
    }

    Log.d(TAG, "Found " + centrePoints.size() + " un-ticked boxes");
    return centrePoints;
}

From source file:acseg.reconocimiento.matriculas.FXMLReconocimientoMatriculasController.java

/**
 * Get a frame from the opened video stream (if any)
 *
 * @return the {@link Mat} to show/*w w w.  j  a v a  2 s .  c  o  m*/
 */
private Mat grabFrame() {
    //Iniciamos el frme
    Mat frame = new Mat();

    //Verificamos si la camara esta activa para grabar
    if (this.capture.isOpened()) {
        try {
            //leemos el frame actual
            this.capture.read(frame);

            //si el frame no esta vacio lo procesa
            if (!frame.empty()) {
                fm = frame;
            }

        } catch (Exception e) {
            System.err.println("Exception during the image elaboration: " + e);
        }
    }
    return frame;
}

From source file:android.google.com.basiccamera.imageprocessing.CannyEdgeDetector.java

License:BSD License

/**
 * Continuously requests camera preview frames and runs Canny edge detection
 * on each one, handing the resulting bitmap to the task manager for display.
 * Loops until the {@code running} flag is cleared.
 */
protected void runTask() {
    Log.i(TAG, "Starting heavy image processing task");
    while (running) {
        // Requests a picture and blocks until it receives one.
        mTaskManager.requestPreviewFrame();
        byte[] image = getImage();
        if (image == null) {
            Log.w(TAG, "Received null as picture");
            // BUG FIX: previously fell through and dereferenced image.length,
            // throwing a NullPointerException; skip this iteration instead.
            continue;
        }

        // Canny edge detection pipeline: decode -> greyscale -> blur -> edges.
        Mat img = new Mat();
        Bitmap bmp = BitmapFactory.decodeByteArray(image, 0, image.length);
        Utils.bitmapToMat(bmp, img);
        Imgproc.cvtColor(img, img, Imgproc.COLOR_RGB2GRAY);
        Imgproc.blur(img, img, new Size(3, 3));
        Imgproc.Canny(img, img, 20, 100);
        Utils.matToBitmap(img, bmp);

        mTaskManager.drawResult(bmp);
    }
}

From source file:app.device.manager.Source.java

/**
 * Creates a capture source backed by the default camera (device index 0),
 * with a frame Mat and an encoding buffer ready for use.
 */
public Source() {
    // Native OpenCV bindings must be loaded before any Mat/VideoCapture use.
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    this.frame = new Mat();
    this.buffer = new MatOfByte();
    this.device = new VideoCapture(0);
}

From source file:app.device.manager.Source.java

/**
 * Creates a capture source backed by a video file, with a frame Mat and an
 * encoding buffer ready for use.
 *
 * @param filename path of the video file to open
 */
public Source(String filename) {
    // Native OpenCV bindings must be loaded before any Mat/VideoCapture use.
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    this.frame = new Mat();
    this.buffer = new MatOfByte();
    this.device = new VideoCapture(filename);
}

From source file:arlocros.ComputePose.java

License:Apache License

/**
 * Detects the configured AR markers in the given image and estimates the
 * camera pose from the 2D/3D point correspondences via RANSAC-based solvePnP.
 *
 * @param rvec   output rotation vector of the estimated pose
 * @param tvec   output translation vector of the estimated pose
 * @param image2 input camera image; also used as the drawing target when
 *               visualization is enabled
 * @return true if a pose was computed; false when no markers were detected
 *         or the estimation produced no inliers
 * @throws NyARException         if the NyAR marker system fails
 * @throws FileNotFoundException if a marker pattern file is missing
 */
public boolean computePose(Mat rvec, Mat tvec, Mat image2) throws NyARException, FileNotFoundException {
    // convert image to NyAR style for processing
    final INyARRgbRaster imageRaster = NyARImageHelper.createFromMat(image2);

    // create new marker system configuration
    i_config = new NyARMarkerSystemConfig(i_param);
    markerSystemState = new NyARMarkerSystem(i_config);
    // Create wrapper that passes cam pictures to marker system
    cameraSensorWrapper = new NyARSensor(i_screen_size);
    ids = new int[markerPatterns.size()];
    patternmap = new HashMap<>();
    for (int i = 0; i < markerPatterns.size(); i++) {
        // create marker description from pattern file and add to marker system
        ids[i] = markerSystemState.addARMarker(arCodes.get(i), 25, markerConfig.getMarkerSize());
        patternmap.put(ids[i], markerPatterns.get(i));
    }

    cameraSensorWrapper.update(imageRaster);
    markerSystemState.update(cameraSensorWrapper);

    // init 3D point list
    final List<Point3> points3dlist = new ArrayList<>();
    final List<Point> points2dlist = new ArrayList<>();

    for (final int id : ids) {
        // process only if this marker has been detected with sufficient confidence
        if (markerSystemState.isExistMarker(id) && markerSystemState.getConfidence(id) > 0.7) {
            // read and add the four 2D corner points
            final NyARIntPoint2d[] vertex2d = markerSystemState.getMarkerVertex2D(id);
            Point p = new Point(vertex2d[0].x, vertex2d[0].y);
            points2dlist.add(p);
            // BUG FIX: was `vertex2d[1].x, vertex2d[2].y` — mixing corner 2's y
            // into corner 1 corrupted the 2D/3D correspondences fed to solvePnP.
            p = new Point(vertex2d[1].x, vertex2d[1].y);
            points2dlist.add(p);
            p = new Point(vertex2d[2].x, vertex2d[2].y);
            points2dlist.add(p);
            p = new Point(vertex2d[3].x, vertex2d[3].y);
            points2dlist.add(p);

            // read and add corresponding 3D points
            points3dlist.addAll(markerConfig.create3dpointlist(patternmap.get(id)));

            if (visualization) {
                // draw red rectangle around detected marker
                Core.rectangle(image2, new Point(vertex2d[0].x, vertex2d[0].y),
                        new Point(vertex2d[2].x, vertex2d[2].y), new Scalar(0, 0, 255));
                final String markerFile = patternmap.get(id).replaceAll(".*4x4_", "").replace(".patt", "");
                Core.putText(image2, markerFile,
                        new Point((vertex2d[2].x + vertex2d[0].x) / 2.0, vertex2d[0].y - 5), 4, 1,
                        new Scalar(250, 0, 0));
            }
        }

    }
    // load 2D and 3D points to Mats for solvePNP
    final MatOfPoint3f objectPoints = new MatOfPoint3f();
    objectPoints.fromList(points3dlist);
    final MatOfPoint2f imagePoints = new MatOfPoint2f();
    imagePoints.fromList(points2dlist);

    if (visualization) {
        // show image with markers detected
        Imshow.show(image2);
    }

    // do not call solvePNP with empty input data (no markers detected)
    if (points2dlist.size() == 0) {
        objectPoints.release();
        imagePoints.release();
        return false;
    }

    // RANSAC-based pose estimation (may shake more than iterative solvePnP)
    Mat inliers = new Mat();

    Calib3d.solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, 300, 5, 16,
            inliers, Calib3d.CV_P3P);
    ArMarkerPoseEstimator.getLog()
            .info("Points detected: " + points2dlist.size() + " inliers: " + inliers.size());

    objectPoints.release();
    imagePoints.release();

    // avoid publishing a zero pose if localization failed
    if (inliers.rows() == 0) {
        inliers.release();
        return false;
    }

    inliers.release();
    return true;
}

From source file:at.uniklu.itec.videosummary.Summarize.java

License:GNU General Public License

/**
 * Estimates the sharpness of a frame image as the sum of its gradient
 * magnitudes (Sobel derivatives in x and y). Higher values indicate a
 * sharper image.
 *
 * @param frame image file to analyse
 * @return the sum of per-pixel gradient magnitudes of the greyscale image
 */
private double getImageSharpness(File frame) {
    // Load as greyscale (imread flag 0).
    Mat img = Highgui.imread(frame.getAbsolutePath(), 0);
    Mat dx = new Mat();
    Mat dy = new Mat();
    Imgproc.Sobel(img, dx, CvType.CV_32F, 1, 0);
    Imgproc.Sobel(img, dy, CvType.CV_32F, 0, 1);
    Core.magnitude(dx, dy, dx); // dx now holds the per-pixel gradient magnitude
    Scalar sum = Core.sumElems(dx);
    // Release native buffers promptly. The triple System.gc() calls were
    // removed: Mat memory is native and freed by release(), and explicit GC
    // requests are advisory and discouraged.
    img.release();
    dx.release();
    dy.release();
    return (sum.val[0]);
}

From source file:attendance_system_adder.cv.image.java

/**
 * Convert an RGB image to greyscale.
 *
 * @param mat source image in RGB channel order
 * @return a newly allocated single-channel greyscale image
 */
public Mat RGBtoGRAY(Mat mat) {
    Mat result = new Mat();
    Imgproc.cvtColor(mat, result, COLOR_RGB2GRAY);
    return result;
}