Example usage for org.opencv.core Mat cols

List of usage examples for org.opencv.core Mat cols

Introduction

On this page you can find example usages for org.opencv.core Mat cols.

Prototype

public int cols() 
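
Before the full examples, here is a minimal, self-contained sketch of the call. It assumes the OpenCV 3.x Imgcodecs API; the class name and the input path are placeholders, not part of any example below. cols() returns the Mat's width in pixels and is typically paired with rows() and elemSize() to size a pixel buffer.

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;

public class MatColsExample {
    public static void main(String[] args) {
        // load the OpenCV native library before creating any Mat
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // "input.png" is a placeholder path
        Mat image = Imgcodecs.imread("input.png");

        // cols() is the image width in pixels; rows() is the height
        System.out.println("width:  " + image.cols());
        System.out.println("height: " + image.rows());

        // a recurring pattern in the examples below: allocate a byte buffer
        // big enough for every pixel, then copy the Mat's data into it
        byte[] data = new byte[image.rows() * image.cols() * (int) image.elemSize()];
        image.get(0, 0, data);
    }
}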

Usage

From source file:org.sleuthkit.autopsy.coreutils.VideoUtils.java

License:Open Source License

@NbBundle.Messages({ "# {0} - file name",
        "VideoUtils.genVideoThumb.progress.text=extracting temporary file {0}" })
static BufferedImage generateVideoThumbnail(AbstractFile file, int iconSize) {
    java.io.File tempFile = getTempVideoFile(file);
    if (tempFile.exists() == false || tempFile.length() < file.getSize()) {
        ProgressHandle progress = ProgressHandle
                .createHandle(Bundle.VideoUtils_genVideoThumb_progress_text(file.getName()));
        progress.start(100);
        try {
            Files.createParentDirs(tempFile);
            ContentUtils.writeToFile(file, tempFile, progress, null, true);
        } catch (IOException ex) {
            LOGGER.log(Level.WARNING,
                    "Error extracting temporary file for " + ImageUtils.getContentPathSafe(file), ex); //NON-NLS
        } finally {
            progress.finish();
        }
    }

    VideoCapture videoFile = new VideoCapture(); // will contain the video

    if (!videoFile.open(tempFile.toString())) {
        LOGGER.log(Level.WARNING, "Error opening {0} for preview generation.",
                ImageUtils.getContentPathSafe(file)); //NON-NLS
        return null;
    }
    double fps = videoFile.get(CV_CAP_PROP_FPS); // frames per second
    double totalFrames = videoFile.get(CV_CAP_PROP_FRAME_COUNT); // total frame count
    if (fps <= 0 || totalFrames <= 0) {
        LOGGER.log(Level.WARNING, "Error getting fps or total frames for {0}",
                ImageUtils.getContentPathSafe(file)); //NON-NLS
        return null;
    }
    double milliseconds = 1000 * (totalFrames / fps); // total duration in milliseconds

    double timestamp = Math.min(milliseconds, 500); // default seek position is 500ms, unless the video is shorter

    int frameSkip = (int) Math.floor((milliseconds - timestamp) / (THUMB_COLUMNS * THUMB_ROWS));

    Mat imageMatrix = new Mat();
    BufferedImage bufferedImage = null;

    for (int x = 0; x < THUMB_COLUMNS; x++) {
        for (int y = 0; y < THUMB_ROWS; y++) {
            if (!videoFile.set(CV_CAP_PROP_POS_MSEC,
                    timestamp + x * frameSkip + y * frameSkip * THUMB_COLUMNS)) {
                LOGGER.log(Level.WARNING, "Error seeking to " + timestamp + "ms in {0}",
                        ImageUtils.getContentPathSafe(file)); //NON-NLS
                break; // if we can't set the time, return black for that frame
            }
            //read the frame into the image/matrix
            if (!videoFile.read(imageMatrix)) {
                LOGGER.log(Level.WARNING, "Error reading frames at " + timestamp + "ms from {0}",
                        ImageUtils.getContentPathSafe(file)); //NON-NLS
                break; //if the image for some reason is bad, return black for that frame
            }

            if (bufferedImage == null) {
                bufferedImage = new BufferedImage(imageMatrix.cols() * THUMB_COLUMNS,
                        imageMatrix.rows() * THUMB_ROWS, BufferedImage.TYPE_3BYTE_BGR);
            }

            byte[] data = new byte[imageMatrix.rows() * imageMatrix.cols() * (int) (imageMatrix.elemSize())];
            imageMatrix.get(0, 0, data); //copy the image to data

            // TODO: this swaps the first and third channels; since the image was created as BufferedImage.TYPE_3BYTE_BGR, the swap may be unnecessary
            if (imageMatrix.channels() == 3) {
                for (int k = 0; k < data.length; k += 3) {
                    byte temp = data[k];
                    data[k] = data[k + 2];
                    data[k + 2] = temp;
                }
            }

            bufferedImage.getRaster().setDataElements(imageMatrix.cols() * x, imageMatrix.rows() * y,
                    imageMatrix.cols(), imageMatrix.rows(), data);
        }
    }

    videoFile.release(); // close the file

    return bufferedImage == null ? null : ScalrWrapper.resizeFast(bufferedImage, iconSize);
}
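
In this example, cols() and rows() give the dimensions of a single decoded frame; they size the mosaic BufferedImage (frame width times THUMB_COLUMNS by frame height times THUMB_ROWS), size the per-frame byte buffer, and position each tile via setDataElements.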

From source file:org.surmon.pattern.detector.houghcircle.HoughCircleDetector.java

/**
 * {@inheritDoc}
 * 
 * Runs circle detection on the given image. Hough detector parameters
 * are taken from the declared fields.
 */
@Override
public List<CircleParticle> detectIn(PatternImage image) {
    List<CircleParticle> circles = new ArrayList<>();
    Mat mat = image.getPixels();
    Mat rawCircles = new Mat();

    Imgproc.GaussianBlur(mat, mat, new Size(ksize, ksize), sigma);
    Imgproc.HoughCircles(mat, rawCircles, Imgproc.CV_HOUGH_GRADIENT, dp, minDist, param1, param2, minRadius,
            maxRadius);

    // create a CircleParticle for each detected circle
    for (int i = 0; i < rawCircles.cols(); i++) {
        double[] var = rawCircles.get(0, i);
        CircleParticle circle = new CircleParticle(i, var[0], var[1], var[2]);
        circles.add(circle);
    }

    return circles;
}
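
Note that Imgproc.HoughCircles reports its detections as a 1-row by N-column Mat, one column per circle, where each element holds (center x, center y, radius); that is why the loop above iterates over rawCircles.cols() rather than rows().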

From source file:overwatchteampicker.OverwatchTeamPicker.java

public static ReturnValues findImage(String template, String source, int flag) {
    File lib = null;
    BufferedImage image = null;
    try {
        image = ImageIO.read(new File(source));
    } catch (Exception e) {
        e.printStackTrace();
    }

    String os = System.getProperty("os.name");
    String bitness = System.getProperty("sun.arch.data.model");

    if (os.toUpperCase().contains("WINDOWS")) {
        if (bitness.endsWith("64")) {
            lib = new File("C:\\Users\\POWERUSER\\Downloads\\opencv\\build\\java\\x64\\"
                    + System.mapLibraryName("opencv_java2413"));
        } else {
            lib = new File("libs//x86//" + System.mapLibraryName("opencv_java2413"));
        }
    }
    if (lib == null) {
        throw new IllegalStateException("OpenCV native library path is only configured for Windows here");
    }
    System.load(lib.getAbsolutePath());
    String tempObject = "images\\hero_templates\\" + template + ".png";
    String source_pic = source;
    Mat objectImage = Highgui.imread(tempObject, Highgui.CV_LOAD_IMAGE_GRAYSCALE);
    Mat sceneImage = Highgui.imread(source_pic, Highgui.CV_LOAD_IMAGE_GRAYSCALE);

    MatOfKeyPoint objectKeyPoints = new MatOfKeyPoint();
    FeatureDetector featureDetector = FeatureDetector.create(FeatureDetector.SURF);
    featureDetector.detect(objectImage, objectKeyPoints);
    KeyPoint[] keypoints = objectKeyPoints.toArray();
    MatOfKeyPoint objectDescriptors = new MatOfKeyPoint();
    DescriptorExtractor descriptorExtractor = DescriptorExtractor.create(DescriptorExtractor.SURF);
    descriptorExtractor.compute(objectImage, objectKeyPoints, objectDescriptors);

    // Create the matrix for output image.
    Mat outputImage = new Mat(objectImage.rows(), objectImage.cols(), Highgui.CV_LOAD_IMAGE_COLOR);
    Scalar newKeypointColor = new Scalar(255, 0, 0);
    Features2d.drawKeypoints(objectImage, objectKeyPoints, outputImage, newKeypointColor, 0);

    // Match object image with the scene image
    MatOfKeyPoint sceneKeyPoints = new MatOfKeyPoint();
    MatOfKeyPoint sceneDescriptors = new MatOfKeyPoint();
    featureDetector.detect(sceneImage, sceneKeyPoints);
    descriptorExtractor.compute(sceneImage, sceneKeyPoints, sceneDescriptors);

    Mat matchoutput = new Mat(sceneImage.rows() * 2, sceneImage.cols() * 2, Highgui.CV_LOAD_IMAGE_COLOR);
    Scalar matchesColor = new Scalar(0, 255, 25);

    List<MatOfDMatch> matches = new LinkedList<MatOfDMatch>();
    DescriptorMatcher descriptorMatcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
    descriptorMatcher.knnMatch(objectDescriptors, sceneDescriptors, matches, 2);

    LinkedList<DMatch> goodMatchesList = new LinkedList<DMatch>();

    float nndrRatio = .78f;

    for (int i = 0; i < matches.size(); i++) {
        MatOfDMatch matofDMatch = matches.get(i);
        DMatch[] dmatcharray = matofDMatch.toArray();
        DMatch m1 = dmatcharray[0];
        DMatch m2 = dmatcharray[1];

        if (m1.distance <= m2.distance * nndrRatio) {
            goodMatchesList.addLast(m1);

        }
    }

    if (goodMatchesList.size() >= 4) {

        List<KeyPoint> objKeypointlist = objectKeyPoints.toList();
        List<KeyPoint> scnKeypointlist = sceneKeyPoints.toList();

        LinkedList<Point> objectPoints = new LinkedList<>();
        LinkedList<Point> scenePoints = new LinkedList<>();

        for (int i = 0; i < goodMatchesList.size(); i++) {
            objectPoints.addLast(objKeypointlist.get(goodMatchesList.get(i).queryIdx).pt);
            scenePoints.addLast(scnKeypointlist.get(goodMatchesList.get(i).trainIdx).pt);
        }

        MatOfPoint2f objMatOfPoint2f = new MatOfPoint2f();
        objMatOfPoint2f.fromList(objectPoints);
        MatOfPoint2f scnMatOfPoint2f = new MatOfPoint2f();
        scnMatOfPoint2f.fromList(scenePoints);

        Mat homography = Calib3d.findHomography(objMatOfPoint2f, scnMatOfPoint2f, Calib3d.RANSAC, 3);

        Mat obj_corners = new Mat(4, 1, CvType.CV_32FC2);
        Mat scene_corners = new Mat(4, 1, CvType.CV_32FC2);

        obj_corners.put(0, 0, new double[] { 0, 0 });
        obj_corners.put(1, 0, new double[] { objectImage.cols(), 0 });
        obj_corners.put(2, 0, new double[] { objectImage.cols(), objectImage.rows() });
        obj_corners.put(3, 0, new double[] { 0, objectImage.rows() });

        Core.perspectiveTransform(obj_corners, scene_corners, homography);

        Mat img = Highgui.imread(source_pic, Highgui.CV_LOAD_IMAGE_COLOR);

        Core.line(img, new Point(scene_corners.get(0, 0)), new Point(scene_corners.get(1, 0)),
                new Scalar(0, 255, 255), 4);
        Core.line(img, new Point(scene_corners.get(1, 0)), new Point(scene_corners.get(2, 0)),
                new Scalar(255, 255, 0), 4);
        Core.line(img, new Point(scene_corners.get(2, 0)), new Point(scene_corners.get(3, 0)),
                new Scalar(0, 255, 0), 4);
        Core.line(img, new Point(scene_corners.get(3, 0)), new Point(scene_corners.get(0, 0)),
                new Scalar(0, 255, 0), 4);

        MatOfDMatch goodMatches = new MatOfDMatch();
        goodMatches.fromList(goodMatchesList);

        Features2d.drawMatches(objectImage, objectKeyPoints, sceneImage, sceneKeyPoints, goodMatches,
                matchoutput, matchesColor, newKeypointColor, new MatOfByte(), 2);
        if (new Point(scene_corners.get(0, 0)).x < new Point(scene_corners.get(1, 0)).x
                && new Point(scene_corners.get(0, 0)).y < new Point(scene_corners.get(2, 0)).y) {
            System.out.println("found " + template);
            Highgui.imwrite("points.jpg", outputImage);
            Highgui.imwrite("matches.jpg", matchoutput);
            Highgui.imwrite("final.jpg", img);

            if (flag == 0) {
                ReturnValues retVal = null;
                int y = (int) new Point(scene_corners.get(3, 0)).y;
                int yHeight = (int) new Point(scene_corners.get(3, 0)).y
                        - (int) new Point(scene_corners.get(2, 0)).y;
                if (y < image.getHeight() * .6) { // if the found hero is in the upper half of the image, use point (3,0)
                    retVal = new ReturnValues(y + (int) (image.getHeight() * .01), yHeight);
                } else { // if the found hero is in the lower half of the image, use point (2,0)
                    y = (int) new Point(scene_corners.get(2, 0)).y;
                    retVal = new ReturnValues(y + (int) (image.getHeight() * .3), yHeight);
                }
                return retVal;
            } else if (flag == 1) {
                int[] xPoints = new int[4];
                int[] yPoints = new int[4];

                xPoints[0] = (int) (new Point(scene_corners.get(0, 0)).x);
                xPoints[1] = (int) (new Point(scene_corners.get(1, 0)).x);
                xPoints[2] = (int) (new Point(scene_corners.get(2, 0)).x);
                xPoints[3] = (int) (new Point(scene_corners.get(3, 0)).x);

                yPoints[0] = (int) (new Point(scene_corners.get(0, 0)).y);
                yPoints[1] = (int) (new Point(scene_corners.get(1, 0)).y);
                yPoints[2] = (int) (new Point(scene_corners.get(2, 0)).y);
                yPoints[3] = (int) (new Point(scene_corners.get(3, 0)).y);

                ReturnValues retVal = new ReturnValues(xPoints, yPoints);
                return retVal;

            }
        }
    }
    return null;

}

From source file:pfe.Segmentation.java

@Override
public void dilate_erose_ocr() {
    // for the OCR step
    Mat rgbImage = Imgcodecs.imread(ocrReadFrom);
    Mat destination2 = new Mat(rgbImage.rows(), rgbImage.cols(), rgbImage.type());
    // the goal is to correct errors left by the black-and-white conversion
    int dilation_size1 = 2;

    // structuring element for the dilation; note the kernel is elliptical (Imgproc.MORPH_ELLIPSE), not rectangular
    Mat element11 = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE,
            new Size(dilation_size1 + 1, dilation_size1 + 1));
    // dilate the image
    Imgproc.dilate(rgbImage, destination2, element11);
    Imgcodecs.imwrite(ocrReadFrom, destination2);

}

From source file:pfe.Segmentation.java

/**
 * Dilation and erosion let us repair triangular shapes so they are
 * easier to detect; they also fill in the text so that it is easily
 * detectable by OCR engines.
 */
@Override
public void dilate_erose(JLabel jLabelErosion, JLabel jLabelDilated) {

    // read the image to process, for the segmentation step
    Mat source = Imgcodecs.imread(blackAndWhite, Imgcodecs.CV_LOAD_IMAGE_COLOR);
    // prepare the output image
    Mat destination = new Mat(source.rows(), source.cols(), source.type());

    // erosion size of 1: kept small, just to clean up the contours
    int erosion_size = 1;
    // dilation size kept below average to avoid destroying details and shapes;
    // the goal is to correct errors left by the black-and-white conversion
    int dilation_size = 3;

    destination = source;

    // structuring element for the dilation; a rectangular kernel (Imgproc.MORPH_RECT) is used
    Mat element1 = Imgproc.getStructuringElement(Imgproc.MORPH_RECT,
            new Size(dilation_size + 1, dilation_size + 1));
    // dilate the image
    Imgproc.dilate(source, destination, element1);
    // save the result
    Imgcodecs.imwrite(dilation, destination);

    // now erode the dilated image (to repair the finish)
    source = Imgcodecs.imread(dilation, Imgcodecs.CV_LOAD_IMAGE_COLOR);

    destination = source;

    // use an elliptical kernel to repair the corners of the lettering
    Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE,
            new Size(erosion_size + 1, erosion_size + 1));
    Imgproc.erode(source, destination, element);
    Imgcodecs.imwrite(erosion, destination);

    jLabelErosion.setIcon(new ImageIcon(erosion));
    jLabelDilated.setIcon(new ImageIcon(dilation));
}
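
As a design note, a dilation followed by an erosion is a morphological closing; if both steps shared one kernel, Imgproc.morphologyEx(source, destination, Imgproc.MORPH_CLOSE, element) would express the same intent in a single call. Here the author deliberately uses a larger rectangular kernel for the dilation and a smaller elliptical one for the erosion.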

From source file:pfe.Segmentation.java

@Override
public void toB_A_W(JLabel jLabel) {
    Mat rgbImage = Imgcodecs.imread(original);
    Mat destination = new Mat(rgbImage.rows(), rgbImage.cols(), rgbImage.type());
    // the goal is to correct errors left by the black-and-white conversion
    int dilation_size = 2;

    // structuring element for the dilation; the original passed Imgproc.MORPH_ERODE as the shape,
    // which only works because its value happens to equal Imgproc.MORPH_RECT
    Mat element1 = Imgproc.getStructuringElement(Imgproc.MORPH_RECT,
            new Size(dilation_size + 1, dilation_size + 1));
    // dilate the image
    Imgproc.dilate(rgbImage, destination, element1);

    // prepare a new matrix for the black-and-white image
    Mat labImage = new Mat();
    // convert the colors to grayscale (COLOR_BGR2GRAY) and store the result in labImage
    cvtColor(destination, labImage, Imgproc.COLOR_BGR2GRAY);
    Imgcodecs.imwrite(ocrReadFrom, labImage);
    jLabel.setIcon(new ImageIcon(ocrReadFrom));
    // JOptionPane.showConfirmDialog(null, "");
}

From source file:pipeline.TextRegion.java

public static String SplitFiles(File fileIn) {
    String result = "";
    try {
        String nomeFile = fileIn.getName();
        //System.out.println("il nome del file  "+nomeFile);
        FileInputStream in = new FileInputStream("src/pipeline/receivedImg/" + nomeFile);
        JPEGImageDecoder decoder = JPEGCodec.createJPEGDecoder(in);
        BufferedImage image = decoder.decodeAsBufferedImage();
        in.close();

        TextRecognition myget = new TextRecognition(image);
        LinkedList boxes = myget.getTextBoxes();

        String nomeFileOut = "src/pipeline/outputImg/" + Global.getJPGNameFile() + " out.jpg";
        FileOutputStream out = new FileOutputStream(nomeFileOut);
        JPEGImageEncoder encoder = JPEGCodec.createJPEGEncoder(out);
        encoder.encode(myget.isolateText(boxes));
        out.close();

        // OpenCV part

        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        File f = new File("src/pipeline/receivedImg/" + nomeFile);
        BufferedImage imageFile = ImageIO.read(f);

        byte[] data = ((DataBufferByte) imageFile.getRaster().getDataBuffer()).getData();
        Mat mat = new Mat(imageFile.getHeight(), imageFile.getWidth(), CvType.CV_8UC3);
        mat.put(0, 0, data);
        int tolleranza = 15;

        for (int i = 0; i < boxes.size(); i++) {
            TextRegion app = (TextRegion) boxes.get(i);
            //             System.out.println("ROW: "+i+"  -> "+app.x1 +" "+app.x2 +" "+app.y1 +" "+app.y2 +" ");
            Rect roi1 = new Rect(app.x1 - tolleranza, app.y1 - tolleranza, app.x2 - app.x1 + tolleranza,
                    app.y2 - app.y1 + 2 * tolleranza);
            Mat mat1 = new Mat(mat, roi1);

            byte[] data1 = new byte[mat1.rows() * mat1.cols() * (int) (mat1.elemSize())];
            mat1.get(0, 0, data1);
            BufferedImage image1 = new BufferedImage(mat1.cols(), mat1.rows(), BufferedImage.TYPE_3BYTE_BGR);
            image1.getRaster().setDataElements(0, 0, mat1.cols(), mat1.rows(), data1);

            String nomeFileUscrita = "src/pipeline/outputImg/" + i + Global.getJPGNameFile() + " uscita.jpg";
            File tmp = new File(nomeFileUscrita);
            File output = new File(nomeFileUscrita);
            ImageIO.write(image1, "jpg", output);
            result += (i + 1) + ")" + OCR_Processing.performOCR_String2Text(output);
            tmp.delete();

        }
        f.delete();
        File foo = new File(nomeFileOut);
        foo.delete();

    } catch (Exception e) {
        System.out.println("Exception: " + e);
    }

    return result;

}

From source file:processdata.ExperimentalDataProcessingUI.java

public static BufferedImage Mat2BufferedImage(Mat m) {
    // source: http://answers.opencv.org/question/10344/opencv-java-load-image-to-gui/
    // Fastest code
    // The output can be assigned either to a BufferedImage or to an Image

    int type = BufferedImage.TYPE_BYTE_GRAY;
    if (m.channels() > 1) {
        type = BufferedImage.TYPE_3BYTE_BGR;
    }
    int bufferSize = m.channels() * m.cols() * m.rows();
    byte[] b = new byte[bufferSize];
    m.get(0, 0, b); // get all the pixels
    BufferedImage image = new BufferedImage(m.cols(), m.rows(), type);
    final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(b, 0, targetPixels, 0, b.length);
    return image;

}
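
A hypothetical caller, sketched under the assumption that it runs inside this class alongside the OpenCV 2.4 Highgui API used elsewhere in it; the file path and the Swing display are illustrative only:

// hypothetical usage of Mat2BufferedImage; "frame.jpg" is a placeholder path
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
Mat frame = Highgui.imread("frame.jpg");
BufferedImage img = Mat2BufferedImage(frame);
javax.swing.JOptionPane.showMessageDialog(null,
        new javax.swing.JLabel(new javax.swing.ImageIcon(img)));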

From source file:processdata.ExperimentalDataProcessingUI.java

private void jButtonProcessImageActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButtonProcessImageActionPerformed
    try {
        //load the OpenCV native library
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        folderName = textField2.getText();
        int currentFrameIndex = Integer.parseInt(initialFrameIndexBox.getText()) - 1;
        datasetIndex = Integer.parseInt(textField1.getText());
        String videoImageFileName = "./videoFrames//" + folderName + "//" + "frame_outVideo_"
                + currentFrameIndex + ".jpg";

        String depthFrameFileName = initialImagePath + datasetIndex + "//" + folderName + "//" + "depthData//"
                + "outDepthByte_" + currentFrameIndex;

        rgbFrame = Highgui.imread(videoImageFileName, Highgui.CV_LOAD_IMAGE_GRAYSCALE);

        depthFrame = depthDataProcessingUtilities.processDepthDataFile(depthFrameFileName, jSlider2.getValue(),
                jSlider1.getValue());

        Mat[] backgroundFrames = readBackground();
        rgbBackgroundFrame = backgroundFrames[0];
        depthBackgroundFrame = backgroundFrames[1];

        //subtract depth background
        Mat depthFrameBackgroundSubtracted = new Mat();
        Core.subtract(depthBackgroundFrame, depthFrame, depthFrameBackgroundSubtracted);
        Imgproc.threshold(depthFrameBackgroundSubtracted, depthFrameBackgroundSubtracted, 0, 255,
                Imgproc.THRESH_BINARY);
        displayImage(Mat2BufferedImage(
                videoProcessingUtilities.resizeImage(depthFrameBackgroundSubtracted, new Size(448, 234))),
                depthBckgSubtractedFrames);

        //remove the red-colored elements from depth image and leave only blue ones
        Mat depthImageCleaned = new Mat();
        Core.inRange(depthFrameBackgroundSubtracted, new Scalar(253, 0, 0), new Scalar(255, 0, 0),
                depthImageCleaned);

        //apply morphologic opening to remove noise
        Imgproc.morphologyEx(depthImageCleaned, depthImageCleaned, 2,
                Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3)));
        displayImage(
                Mat2BufferedImage(videoProcessingUtilities.resizeImage(depthImageCleaned, new Size(448, 234))),
                depthCleanedFramesPanel);

        //apply the homographic transform to cleaned depth image
        Mat hDepthImageCleaned = videoProcessingUtilities.performHomographyTransformation(depthImageCleaned,
                new Size(1920, 1080));

        //extract all contours
        //sort all extracted contours and choose top 2
        //overlay top 2 contours on the image and fill them with white color
        //mask the rgb frame
        // do all necessary rotation operations
        //offer user to save the image

        //extract all suitable contours between MIN and MAX areas:
        MatOfPoint[] contours = videoProcessingUtilities.extractLargestContours(hDepthImageCleaned, 100000,
                160000);
        System.out.println("Number of contorus extracted " + contours.length);

        //draw contours
        List<MatOfPoint> tempContours = new ArrayList<MatOfPoint>();
        Mat hDepthImageCleanedContours = hDepthImageCleaned.clone();
        for (MatOfPoint cnt : contours) {
            System.out.println("Extracted Contour Area is " + Imgproc.contourArea(cnt));
            tempContours.add(cnt);
        }
        Imgproc.cvtColor(hDepthImageCleanedContours, hDepthImageCleanedContours, Imgproc.COLOR_GRAY2BGR);
        Imgproc.drawContours(hDepthImageCleanedContours, tempContours, -1, new Scalar(0, 0, 255), 5);
        displayImage(
                Mat2BufferedImage(
                        videoProcessingUtilities.resizeImage(hDepthImageCleanedContours, new Size(448, 234))),
                extractedContoursPanel);

        //prepare final mask
        Mat hDepthImageFilledContours = new Mat(hDepthImageCleaned.rows(), hDepthImageCleaned.cols(),
                hDepthImageCleaned.type());
        Imgproc.drawContours(hDepthImageFilledContours, tempContours, -1, new Scalar(255, 255, 255), -1);
        displayImage(
                Mat2BufferedImage(
                        videoProcessingUtilities.resizeImage(hDepthImageFilledContours, new Size(448, 234))),
                maskedContoursPanel);

        //subtract video background
        //            Mat rgbFrameBackgroundSubtracted = new Mat();
        //            Core.subtract(rgbBackgroundFrame,rgbFrame, rgbFrameBackgroundSubtracted, hDepthImageCleaned);
        //            displayImage(Mat2BufferedImage(videoProcessingUtilities.resizeImage(rgbFrameBackgroundSubtracted, new Size(448,234))),videoBckgSubtractedFrames);
        //            
        //mask
        Mat preMaskedRGBFrame = new Mat();
        rgbFrame.copyTo(preMaskedRGBFrame, hDepthImageCleaned);
        displayImage(
                Mat2BufferedImage(videoProcessingUtilities.resizeImage(preMaskedRGBFrame, new Size(448, 234))),
                videoBckgSubtractedFrames);

        //postmask
        Mat betterMaskedRGBFrame = new Mat();
        rgbFrame.copyTo(betterMaskedRGBFrame, hDepthImageFilledContours);
        displayImage(
                Mat2BufferedImage(
                        videoProcessingUtilities.resizeImage(betterMaskedRGBFrame, new Size(448, 234))),
                videoMaskedPanel);

        //clear the ArrayList containing all processed images
        finalImages.clear();
        javax.swing.JLabel[] jLabArray = { extractedShoePanel1, extractedShoePanel2 };
        //segment all images
        int panelIdx = 0;
        for (MatOfPoint contour : tempContours) {
            MatOfPoint2f newMatOfPoint2fContour = new MatOfPoint2f(contour.toArray());
            RotatedRect finalROI = Imgproc.minAreaRect(newMatOfPoint2fContour);
            Mat newMask = videoProcessingUtilities.getContourMasked(hDepthImageFilledContours.clone(), contour);
            Mat imageROIRegistred = new Mat();
            betterMaskedRGBFrame.copyTo(imageROIRegistred, newMask);
            Mat maskedRGBFrameROI = videoProcessingUtilities.rotateExtractedShoeprint(imageROIRegistred,
                    finalROI, new Size(500, 750), 2);
            finalImages.add(maskedRGBFrameROI);
            displayImage(
                    Mat2BufferedImage(
                            videoProcessingUtilities.resizeImage(maskedRGBFrameROI, new Size(203, 250))),
                    jLabArray[panelIdx]);
            panelIdx++;
        }

        //MatOfInt parameters = new MatOfInt();
        //parameters.fromArray(Highgui.CV_IMWRITE_JPEG_QUALITY, 100);
        //Highgui.imwrite(".//backgrounds//"+"test.jpg", depthFrameBackgroundSubtracted, parameters);

    } catch (FileNotFoundException ex) {
        Logger.getLogger(ExperimentalDataProcessingUI.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file:Recognizer.Recognizer.java

public Image TemplateMatching(Image imQuery, Image imDB, int match_method) {
    System.out.println("Running Template Matching ...");

    //Mat img = Highgui.imread(inFile); // Image in which area has to be searched
    //Mat template_img = Highgui.imread(templateFile); // Search Image

    Mat matQuery = imQuery.Image3CtoMat_CV();
    Mat matDB = imDB.Image3CtoMat_CV();

    Mat hsvQ = new Mat(), hsvDB = new Mat();

    Imgproc.cvtColor(matQuery, hsvQ, COLOR_RGB2HSV);
    Imgproc.cvtColor(matDB, hsvDB, COLOR_RGB2HSV);

    // Create result image matrix
    int resultImg_cols = matDB.cols() - matQuery.cols() + 1;
    int resultImg_rows = matDB.rows() - matQuery.rows() + 1;

    Mat matRes = new Mat(resultImg_rows, resultImg_cols, CvType.CV_32FC1);

    // Template Matching with Normalization
    Imgproc.matchTemplate(hsvDB, hsvQ, matRes, match_method);
    Core.normalize(matRes, matRes, 0, 1, Core.NORM_MINMAX, -1, new Mat());

    // Localize the best match with minMaxLoc
    Core.MinMaxLocResult Location_Result = Core.minMaxLoc(matRes);
    Point matchLocation;

    if (match_method == Imgproc.TM_SQDIFF || match_method == Imgproc.TM_SQDIFF_NORMED) {
        matchLocation = Location_Result.minLoc;
    } else {
        matchLocation = Location_Result.maxLoc;
    }

    // Mark the matched area with a rectangle
    Core.rectangle(matDB, matchLocation,
            new Point(matchLocation.x + matQuery.cols(), matchLocation.y + matQuery.rows()),
            new Scalar(0, 255, 0));

    Image imOut = new Image(matDB.width(), matDB.height());
    //Image imOut = new Image(matQuery.cols(), matQuery.rows());

    //Mat m = new Mat(matDB);

    //m =//matDB.submat((int)matchLocation.y, (int)matchLocation.y + matQuery.rows(),(int)matchLocation.x, (int)matchLocation.x + matQuery.cols());

    imOut.Mat_CVtoImage3C(matDB);

    System.out.println("Location: " + Location_Result.minLoc.x + " " + Location_Result.minLoc.y + "   "
            + Location_Result.maxLoc.x + " " + Location_Result.maxLoc.y);

    return imOut;
}