Example usage for org.opencv.core Mat elemSize

List of usage examples for org.opencv.core Mat elemSize

Introduction

In this page you can find the example usage for org.opencv.core Mat elemSize.

Prototype

public long elemSize() 

Source Link

Usage

From source file:io.smartspaces.service.image.vision.opencv.MatUtils.java

License: Apache License

/**
 * Converts a {@link Mat} into a {@link BufferedImage}.
 *
 * @param matrix
 *          Mat of type CV_8UC3 or CV_8UC1
 *
 * @return BufferedImage of type TYPE_3BYTE_BGR or TYPE_BYTE_GRAY
 *
 * @throws SimpleSmartSpacesException
 *           the OpenCV Mat type is not supported
 */
public static BufferedImage matToBufferedImage(Mat matrix) throws SimpleSmartSpacesException {
    int width = matrix.cols();
    int height = matrix.rows();
    byte[] pixels = new byte[width * height * (int) matrix.elemSize()];
    matrix.get(0, 0, pixels);

    int imageType;
    switch (matrix.channels()) {
    case 1:
        imageType = BufferedImage.TYPE_BYTE_GRAY;
        break;
    case 3:
        imageType = BufferedImage.TYPE_3BYTE_BGR;
        // Swap the first and third byte of every pixel triple.
        for (int idx = 0; idx < pixels.length; idx += 3) {
            byte tmp = pixels[idx];
            pixels[idx] = pixels[idx + 2];
            pixels[idx + 2] = tmp;
        }
        break;
    default:
        throw new SimpleSmartSpacesException("The OpenCV Mat type is not supported");
    }

    BufferedImage image = new BufferedImage(width, height, imageType);
    image.getRaster().setDataElements(0, 0, width, height, pixels);

    return image;
}

From source file:javacv.JavaCV.java

/**
 * Converts/writes a Mat into a BufferedImage.
 *
 * @param matrix Mat of type CV_8UC3 or CV_8UC1
 * @return BufferedImage of type TYPE_3BYTE_BGR or TYPE_BYTE_GRAY,
 *         or {@code null} for an unsupported channel count
 */
public static BufferedImage matToBufferedImage(Mat matrix) {
    int width = matrix.cols();
    int height = matrix.rows();
    byte[] pixels = new byte[width * height * (int) matrix.elemSize()];
    matrix.get(0, 0, pixels);

    int imageType;
    int channels = matrix.channels();
    if (channels == 1) {
        imageType = BufferedImage.TYPE_BYTE_GRAY;
    } else if (channels == 3) {
        imageType = BufferedImage.TYPE_3BYTE_BGR;
        // bgr to rgb: swap the first and third byte of each pixel
        for (int idx = 0; idx < pixels.length; idx += 3) {
            byte tmp = pixels[idx];
            pixels[idx] = pixels[idx + 2];
            pixels[idx + 2] = tmp;
        }
    } else {
        // unsupported Mat type
        return null;
    }

    BufferedImage result = new BufferedImage(width, height, imageType);
    result.getRaster().setDataElements(0, 0, width, height, pixels);
    return result;
}

From source file:javafx1.JavaFX1.java

/**
 * Loads a fixed image from disk, converts it to grayscale with OpenCV and
 * returns it as a JavaFX {@link Image}.
 *
 * <p>Fix: removed the dead local {@code File ouptut} (only used by a
 * commented-out write) and the surrounding commented-out code.
 *
 * @return the grayscale image, or {@code null} if the file could not be read
 */
public Image bildLaden() {
        Image zwischenBild = null;

        try {
            File input = new File("D:/_piCam/bild.jpg");
            BufferedImage bi = ImageIO.read(input);

            // Copy the BufferedImage pixels into a 3-channel OpenCV matrix.
            byte[] data = ((DataBufferByte) bi.getRaster().getDataBuffer()).getData();
            Mat mat = new Mat(bi.getHeight(), bi.getWidth(), CvType.CV_8UC3);
            mat.put(0, 0, data);

            // Convert to a single-channel grayscale matrix.
            Mat bild = new Mat(bi.getHeight(), bi.getWidth(), CvType.CV_8UC1);
            Imgproc.cvtColor(mat, bild, Imgproc.COLOR_BGR2GRAY);

            // Copy the grayscale pixels back into a BufferedImage.
            byte[] data1 = new byte[bild.rows() * bild.cols() * (int) (bild.elemSize())];
            bild.get(0, 0, data1);
            BufferedImage image1 = new BufferedImage(bild.cols(), bild.rows(), BufferedImage.TYPE_BYTE_GRAY);
            image1.getRaster().setDataElements(0, 0, bild.cols(), bild.rows(), data1);

            // NOTE(review): getTileWidth() equals the full width for an
            // untiled BufferedImage, so this copies the whole image.
            BufferedImage gray = image1.getSubimage(0, 0, image1.getTileWidth(), image1.getHeight());
            zwischenBild = SwingFXUtils.toFXImage(gray, null);

        } catch (IOException ex) {
            System.out.println("Fehler beim Bild laden...");
        }
        return zwischenBild;
    }

From source file:Main.Camera.CameraController.java

/**
 * Converts a Mat into a BufferedImage, reusing the caller-supplied image
 * when its dimensions and type already match.
 *
 * @param matrix source Mat of type CV_8UC3 or CV_8UC1; may be {@code null}
 * @param bimg   candidate BufferedImage to reuse; may be {@code null}
 * @return the converted image, or {@code null} when the matrix is
 *         {@code null} or has an unsupported channel count
 */
public static BufferedImage matToBufferedImage(Mat matrix, BufferedImage bimg) {
    if (matrix == null) {
        // no source matrix -> no image
        return null;
    }

    int width = matrix.cols();
    int height = matrix.rows();
    byte[] pixels = new byte[width * height * (int) matrix.elemSize()];
    matrix.get(0, 0, pixels);

    int imageType;
    switch (matrix.channels()) {
    case 1:
        imageType = BufferedImage.TYPE_BYTE_GRAY;
        break;
    case 3:
        imageType = BufferedImage.TYPE_3BYTE_BGR;
        // bgr to rgb: swap the first and third byte of each pixel
        for (int idx = 0; idx < pixels.length; idx += 3) {
            byte tmp = pixels[idx];
            pixels[idx] = pixels[idx + 2];
            pixels[idx + 2] = tmp;
        }
        break;
    default:
        return null;
    }

    // Reuse the existing BufferedImage only when it is fully compatible.
    boolean reusable = bimg != null && bimg.getWidth() == width && bimg.getHeight() == height
            && bimg.getType() == imageType;
    if (!reusable) {
        bimg = new BufferedImage(width, height, imageType);
    }
    bimg.getRaster().setDataElements(0, 0, width, height, pixels);
    return bimg;
}

From source file:model.grayscaleClass.java

/**
 * Converts an uploaded image to grayscale with OpenCV and writes the
 * result as a JPEG into the upload folder.
 *
 * <p>Fixes: the {@code FileInputStream} was never closed (resource leak) —
 * it is now managed by try-with-resources; removed the dead local
 * {@code extn} and the commented-out extension switch.
 *
 * @param image file name of the uploaded image, relative to the upload folder
 * @param ex    unused; retained for interface compatibility with callers
 * @return the written grayscale file, or {@code null} when conversion failed
 */
public File imagePreprocessing(String image, String ex) {
    BufferedImage bImge = null;
    BufferedImage bImage2 = null;
    File grayscle = null;

    try {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // try-with-resources closes the stream even on failure (was leaked)
        try (InputStream input = new FileInputStream(
                "C:\\bimla\\Dev\\java\\OCRSystem\\WebContent\\uploadedImage\\" + image)) {
            bImge = ImageIO.read(input);
        }

        // Copy the image bytes into a 3-channel OpenCV matrix.
        byte[] imgeByte = ((DataBufferByte) bImge.getRaster().getDataBuffer()).getData();
        Mat mat1 = new Mat(bImge.getHeight(), bImge.getWidth(), CvType.CV_8UC3);
        mat1.put(0, 0, imgeByte);

        // Convert to a single-channel grayscale matrix.
        Mat mat2 = new Mat(bImge.getHeight(), bImge.getWidth(), CvType.CV_8UC1);
        Imgproc.cvtColor(mat1, mat2, Imgproc.COLOR_RGB2GRAY);

        // Copy the grayscale pixels back into a BufferedImage.
        byte[] imageData = new byte[mat2.rows() * mat2.cols() * (int) (mat2.elemSize())];
        mat2.get(0, 0, imageData);
        bImage2 = new BufferedImage(mat2.cols(), mat2.rows(), BufferedImage.TYPE_BYTE_GRAY);
        bImage2.getRaster().setDataElements(0, 0, mat2.cols(), mat2.rows(), imageData);

        // writing the grayscale image to the folder
        grayscle = new File(
                "C:\\bimla\\Dev\\java\\OCRSystem\\WebContent\\uploadedImage\\grayscale" + "." + "jpg");
        ImageIO.write(bImage2, "jpg", grayscle);
    } catch (IOException ex1) {
        System.out.println("" + ex1.getMessage());
    } catch (Exception ex1) {
        Logger.getLogger(grayscaleClass.class.getName()).log(Level.SEVERE, null, ex1);
    }
    return grayscle;

}

From source file:net.semanticmetadata.lire.imageanalysis.features.local.opencvfeatures.CvSiftExtractor.java

License: Open Source License

/**
 * Detects SIFT keypoints in the given image, computes their descriptors
 * and stores one {@link CvSiftFeature} (x, y, size, descriptor) per
 * keypoint in {@code features}.
 *
 * <p>Fix: removed an unused byte[] copy of the grayscale pixels — the
 * detector and extractor operate directly on the Mat.
 *
 * @param img image to analyse; its raster must be backed by a byte buffer
 */
@Override
public void extract(BufferedImage img) {
    MatOfKeyPoint keypoints = new MatOfKeyPoint();
    Mat descriptors = new Mat();
    // Wrap the BufferedImage bytes in a 3-channel Mat, then convert to gray.
    byte[] data = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
    Mat matRGB = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
    matRGB.put(0, 0, data);
    Mat matGray = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC1);
    Imgproc.cvtColor(matRGB, matGray, Imgproc.COLOR_BGR2GRAY); //TODO: RGB or BGR?

    detector.detect(matGray, keypoints);
    extractor.compute(matGray, keypoints, descriptors);
    List<KeyPoint> myKeys = keypoints.toList();

    features = new LinkedList<CvSiftFeature>();
    // Pair each descriptor row with its keypoint.
    for (int i = 0; i < myKeys.size(); i++) {
        int cols = descriptors.row(i).cols();
        double[] desc = new double[cols];
        for (int j = 0; j < cols; j++) {
            desc[j] = descriptors.get(i, j)[0];
        }
        KeyPoint key = myKeys.get(i);
        features.add(new CvSiftFeature(key.pt.x, key.pt.y, key.size, desc));
    }
}

From source file:net.semanticmetadata.lire.imageanalysis.features.local.opencvfeatures.CvSiftExtractor.java

License:Open Source License

/**
 * Detects SIFT keypoints in the given image and returns them as
 * {@link CvSiftFeature}s without descriptors.
 *
 * <p>Fix: removed an unused byte[] copy of the grayscale pixels — the
 * detector operates directly on the Mat.
 *
 * @param img image to analyse; its raster must be backed by a byte buffer
 * @return one feature (x, y, size, {@code null} descriptor) per keypoint
 */
public LinkedList<CvSiftFeature> computeSiftKeypoints(BufferedImage img) {
    MatOfKeyPoint keypoints = new MatOfKeyPoint();
    // Wrap the BufferedImage bytes in a 3-channel Mat, then convert to gray.
    byte[] data = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
    Mat matRGB = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
    matRGB.put(0, 0, data);
    Mat matGray = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC1);
    Imgproc.cvtColor(matRGB, matGray, Imgproc.COLOR_BGR2GRAY); //TODO: RGB or BGR?

    detector.detect(matGray, keypoints);

    LinkedList<CvSiftFeature> myKeypoints = new LinkedList<CvSiftFeature>();
    for (KeyPoint key : keypoints.toList()) {
        myKeypoints.add(new CvSiftFeature(key.pt.x, key.pt.y, key.size, null));
    }
    return myKeypoints;
}

From source file:net.semanticmetadata.lire.imageanalysis.features.local.opencvfeatures.CvSurfExtractor.java

License:Open Source License

/**
 * Detects SURF keypoints in the given image, computes their descriptors
 * and stores one {@link CvSurfFeature} (x, y, size, descriptor) per
 * keypoint in {@code features}.
 *
 * <p>Fix: removed an unused byte[] copy of the grayscale pixels — the
 * detector and extractor operate directly on the Mat.
 *
 * @param img image to analyse; its raster must be backed by a byte buffer
 */
@Override
public void extract(BufferedImage img) {
    MatOfKeyPoint keypoints = new MatOfKeyPoint();
    Mat descriptors = new Mat();
    // Wrap the BufferedImage bytes in a 3-channel Mat, then convert to gray.
    byte[] data = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
    Mat matRGB = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
    matRGB.put(0, 0, data);
    Mat matGray = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC1);
    Imgproc.cvtColor(matRGB, matGray, Imgproc.COLOR_BGR2GRAY); //TODO: RGB or BGR?

    detector.detect(matGray, keypoints);
    extractor.compute(matGray, keypoints, descriptors);
    List<KeyPoint> myKeys = keypoints.toList();

    features = new LinkedList<CvSurfFeature>();
    // Pair each descriptor row with its keypoint.
    for (int i = 0; i < myKeys.size(); i++) {
        int cols = descriptors.row(i).cols();
        double[] desc = new double[cols];
        for (int j = 0; j < cols; j++) {
            desc[j] = descriptors.get(i, j)[0];
        }
        KeyPoint key = myKeys.get(i);
        features.add(new CvSurfFeature(key.pt.x, key.pt.y, key.size, desc));
    }
}

From source file:net.semanticmetadata.lire.imageanalysis.features.local.opencvfeatures.CvSurfExtractor.java

License:Open Source License

/**
 * Detects SURF keypoints in the given image and returns them as
 * {@link CvSurfFeature}s without descriptors.
 *
 * <p>Fix: removed an unused byte[] copy of the grayscale pixels — the
 * detector operates directly on the Mat.
 *
 * @param img image to analyse; its raster must be backed by a byte buffer
 * @return one feature (x, y, size, {@code null} descriptor) per keypoint
 */
public LinkedList<CvSurfFeature> computeSurfKeypoints(BufferedImage img) {
    MatOfKeyPoint keypoints = new MatOfKeyPoint();
    // Wrap the BufferedImage bytes in a 3-channel Mat, then convert to gray.
    byte[] data = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
    Mat matRGB = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
    matRGB.put(0, 0, data);
    Mat matGray = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC1);
    Imgproc.cvtColor(matRGB, matGray, Imgproc.COLOR_BGR2GRAY); //TODO: RGB or BGR?

    detector.detect(matGray, keypoints);

    LinkedList<CvSurfFeature> myKeypoints = new LinkedList<CvSurfFeature>();
    for (KeyPoint key : keypoints.toList()) {
        myKeypoints.add(new CvSurfFeature(key.pt.x, key.pt.y, key.size, null));
    }
    return myKeypoints;
}

From source file:org.sleuthkit.autopsy.coreutils.VideoUtils.java

License:Open Source License

/**
 * Builds a tiled thumbnail for a video: extracts the content to a local
 * temporary file, seeks to THUMB_COLUMNS x THUMB_ROWS evenly spaced
 * timestamps, reads one frame at each, tiles the frames into a single
 * image and scales it to {@code iconSize}.
 *
 * @param file     the video file to thumbnail
 * @param iconSize target size passed to the scaler
 * @return the scaled thumbnail, or {@code null} when the video could not
 *         be opened or yielded no frames
 */
@NbBundle.Messages({ "# {0} - file name",
        "VideoUtils.genVideoThumb.progress.text=extracting temporary file {0}" })
static BufferedImage generateVideoThumbnail(AbstractFile file, int iconSize) {
    java.io.File tempFile = getTempVideoFile(file);
    // (Re)extract the video when the temp copy is missing or truncated.
    if (tempFile.exists() == false || tempFile.length() < file.getSize()) {
        ProgressHandle progress = ProgressHandle
                .createHandle(Bundle.VideoUtils_genVideoThumb_progress_text(file.getName()));
        progress.start(100);
        try {
            Files.createParentDirs(tempFile);
            ContentUtils.writeToFile(file, tempFile, progress, null, true);
        } catch (IOException ex) {
            // Extraction failure is non-fatal here; the open() below will fail.
            LOGGER.log(Level.WARNING,
                    "Error extracting temporary file for " + ImageUtils.getContentPathSafe(file), ex); //NON-NLS
        } finally {
            progress.finish();
        }
    }

    VideoCapture videoFile = new VideoCapture(); // will contain the video

    if (!videoFile.open(tempFile.toString())) {
        LOGGER.log(Level.WARNING, "Error opening {0} for preview generation.",
                ImageUtils.getContentPathSafe(file)); //NON-NLS
        return null;
    }
    double fps = videoFile.get(CV_CAP_PROP_FPS); // gets frame per second
    double totalFrames = videoFile.get(CV_CAP_PROP_FRAME_COUNT); // gets total frames
    if (fps <= 0 || totalFrames <= 0) {
        LOGGER.log(Level.WARNING, "Error getting fps or total frames for {0}",
                ImageUtils.getContentPathSafe(file)); //NON-NLS
        return null;
    }
    double milliseconds = 1000 * (totalFrames / fps); //total milliseconds

    double timestamp = Math.min(milliseconds, 500); //default time to check for is 500ms, unless the files is extremely small

    // Interval (ms) between sampled frames, spreading the remaining runtime
    // across the THUMB_COLUMNS * THUMB_ROWS grid cells. (sic: "framkeskip")
    int framkeskip = Double.valueOf(Math.floor((milliseconds - timestamp) / (THUMB_COLUMNS * THUMB_ROWS)))
            .intValue();

    Mat imageMatrix = new Mat();
    BufferedImage bufferedImage = null;

    // Sample one frame per grid cell, column-major.
    for (int x = 0; x < THUMB_COLUMNS; x++) {
        for (int y = 0; y < THUMB_ROWS; y++) {
            if (!videoFile.set(CV_CAP_PROP_POS_MSEC,
                    timestamp + x * framkeskip + y * framkeskip * THUMB_COLUMNS)) {
                LOGGER.log(Level.WARNING, "Error seeking to " + timestamp + "ms in {0}",
                        ImageUtils.getContentPathSafe(file)); //NON-NLS
                break; // if we can't set the time, return black for that frame
            }
            //read the frame into the image/matrix
            if (!videoFile.read(imageMatrix)) {
                LOGGER.log(Level.WARNING, "Error reading frames at " + timestamp + "ms from {0}",
                        ImageUtils.getContentPathSafe(file)); //NON-NLS
                break; //if the image for some reason is bad, return black for that frame
            }

            // Lazily size the canvas from the first frame's dimensions.
            if (bufferedImage == null) {
                bufferedImage = new BufferedImage(imageMatrix.cols() * THUMB_COLUMNS,
                        imageMatrix.rows() * THUMB_ROWS, BufferedImage.TYPE_3BYTE_BGR);
            }

            byte[] data = new byte[imageMatrix.rows() * imageMatrix.cols() * (int) (imageMatrix.elemSize())];
            imageMatrix.get(0, 0, data); //copy the image to data

            //todo: this looks like we are swapping the first and third channels.  so we can use  BufferedImage.TYPE_3BYTE_BGR
            if (imageMatrix.channels() == 3) {
                for (int k = 0; k < data.length; k += 3) {
                    byte temp = data[k];
                    data[k] = data[k + 2];
                    data[k + 2] = temp;
                }
            }

            // Paste the frame into its grid cell.
            bufferedImage.getRaster().setDataElements(imageMatrix.cols() * x, imageMatrix.rows() * y,
                    imageMatrix.cols(), imageMatrix.rows(), data);
        }
    }

    videoFile.release(); // close the file

    return bufferedImage == null ? null : ScalrWrapper.resizeFast(bufferedImage, iconSize);
}