Example usage for org.opencv.core Mat get

List of usage examples for org.opencv.core Mat get

Introduction

On this page you can find example usage for org.opencv.core Mat get.

Prototype

public int get(int row, int col, double[] data) 
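
A minimal usage sketch (assuming OpenCV 3.x has been loaded; the file name and variable names are illustrative and not taken from the examples below):

    // org.opencv.core.Mat, org.opencv.imgcodecs.Imgcodecs
    // Read an 8-bit BGR image and copy all of its pixels into a Java array,
    // starting at row 0, column 0. For CV_8U data the byte[] overload is the
    // usual choice; the double[] overload shown above works for any depth.
    Mat frame = Imgcodecs.imread("input.png");
    byte[] pixels = new byte[(int) (frame.total() * frame.channels())];
    frame.get(0, 0, pixels);

    // Reading a single pixel instead returns its channel values directly.
    double[] px = frame.get(0, 0);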

Usage

From source file:com.example.thibautg.libreaudioview.Sonifier.java

License:Open Source License

/**
 *
 * @param processedFrame
 */
public void sonifyFrame(Mat processedFrame) {

    Arrays.fill(generatedFloatSnd, 0);
    processedFrame.get(0, 0, mBuff);

    int nbRows = Globals.outputFrameHeight;
    int nbCols = Globals.outputFrameWidth;
    int IDPix = 0;
    int cptSonifiedPix = 0;
    nbHotPix = 0;

    //Record IDs of active pixels
    for (int idLine = 0; idLine < nbRows; ++idLine) {
        for (int idColumn = 0; idColumn < nbCols; ++idColumn) {
            IDPix = idLine * nbCols + idColumn;
            if (mBuff[IDPix] != 0) {
                listIDHotPix[nbHotPix] = IDPix;
                nbHotPix += 1;
            }
            IDPix += 1;
        }
    }
    // below: keep this block when compression is enabled

    int totalNbSimplification = 10000000;
    int maxNbUsedSimplification = 10000000;

    //float factorCompression = 500;
    float factorCompression = 800;
    float nbKeptHotPixel = factorCompression * (1 - factorCompression / ((float) nbHotPix + factorCompression));
    float ratio = (float) nbKeptHotPixel / (float) nbHotPix;

    totalNbSimplification = 100;
    maxNbUsedSimplification = (int) (ratio * 100);

    int cptSimplification = 0;
    for (int idHotPix = 0; idHotPix < nbHotPix; ++idHotPix) {
        IDPix = listIDHotPix[idHotPix];

        if (cptSimplification == totalNbSimplification - 1) {
            cptSimplification = 0;
        }

        if ((cptSimplification < maxNbUsedSimplification)) {
            for (int idValue = 0; idValue < generatedFloatSnd.length; ++idValue) {
                generatedFloatSnd[idValue] += listSound[IDPix][idValue];
            }
        }
        cptSimplification += 1;
    }
    // above: keep this block when compression is enabled

    // below: keep this block when compression is disabled
    /*for (int idHotPix = 0; idHotPix<nbHotPix; ++idHotPix) {
    IDPix = listIDHotPix[idHotPix];
    for (int idValue=0; idValue<generatedFloatSnd.length; ++idValue) {
        generatedFloatSnd[idValue] += listSound[IDPix][idValue];
    }
    }*/
    // above: keep this block when compression is disabled

    loops: for (int idLine = 0; idLine < nbRows; ++idLine) {
        for (int idColumn = 0; idColumn < nbCols; ++idColumn) {
            IDPix = idLine * nbCols + idColumn;
            if (mBuff[IDPix] != 0) {
                for (int idValue = 0; idValue < generatedFloatSnd.length; ++idValue) {
                    generatedFloatSnd[idValue] += listSound[IDPix][idValue];
                }
                cptSonifiedPix += 1;
                if (cptSonifiedPix == 100) {
                    break loops;
                }
            }
        }
    }
    for (int idValue = 0; idValue < generatedShortSnd.length; ++idValue) {
        generatedShortSnd[idValue] = (short) (mAmplitude * generatedFloatSnd[idValue]);
    }
    mAudioOutput.pushSound(generatedShortSnd);
}

From source file:com.example.yannic.remotefacedetection.agent.FaceDetectionAgent.java

License:Open Source License

public static BufferedImage matToBufferedImage(Mat matrix, BufferedImage bimg) {
    if (matrix != null) {
        int cols = matrix.cols();
        int rows = matrix.rows();
        int elemSize = (int) matrix.elemSize();
        byte[] data = new byte[cols * rows * elemSize];
        int type;
        matrix.get(0, 0, data);
        switch (matrix.channels()) {
        case 1:
            type = BufferedImage.TYPE_BYTE_GRAY;
            break;
        case 3:
            type = BufferedImage.TYPE_3BYTE_BGR;
            // bgr to rgb
            byte b;
            for (int i = 0; i < data.length; i = i + 3) {
                b = data[i];
                data[i] = data[i + 2];
                data[i + 2] = b;
            }
            break;
        default:
            return null;
        }

        if (bimg == null || bimg.getWidth() != cols || bimg.getHeight() != rows || bimg.getType() != type) {
            bimg = new BufferedImage(cols, rows, type);
        }
        bimg.getRaster().setDataElements(0, 0, cols, rows, data);
    } else {
        bimg = null;
    }
    return bimg;
}
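
A typical call site, sketched under the assumption that a Mat named frame comes from a capture or detection step and that a Swing label displays the result (the surrounding names are illustrative, not from this source):

    // Reuse the previous BufferedImage while its size and type still match,
    // so the conversion does not allocate a new image on every frame.
    BufferedImage display = null;
    display = matToBufferedImage(frame, display);
    if (display != null) {
        imageLabel.setIcon(new ImageIcon(display)); // imageLabel is an assumed javax.swing.JLabel
    }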

From source file:com.github.rosjava_catkin_package_a.ARLocROS.Imshow.java

License:Apache License

/**
 * @param opencvImage
 */
public static void show(Mat opencvImage) {

    Dimension frameSize = new Dimension(opencvImage.rows(), opencvImage.cols());
    if (frame == null) {
        frame = new Imshow("", frameSize.height, frameSize.width);
        frame.Window.setVisible(true);

        frame.Window.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
        if (frame.SizeCustom) {
            Imgproc.resize(opencvImage, opencvImage, new Size(frame.Height, frame.Width));
        }
    }
    BufferedImage bufImage = null;
    try {

        int type = BufferedImage.TYPE_BYTE_GRAY;
        if (opencvImage.channels() > 1) {
            type = BufferedImage.TYPE_3BYTE_BGR;
        }
        int bufferSize = opencvImage.channels() * opencvImage.cols() * opencvImage.rows();
        byte[] b = new byte[bufferSize];
        opencvImage.get(0, 0, b);
        BufferedImage bufferedImage = new BufferedImage(opencvImage.cols(), opencvImage.rows(), type);
        final byte[] targetPixels = ((DataBufferByte) bufferedImage.getRaster().getDataBuffer()).getData();
        System.arraycopy(b, 0, targetPixels, 0, b.length);
        bufImage = bufferedImage;
        frame.image.setImage(bufImage);
        frame.Window.pack();
        frame.label.updateUI();
        //frame.Window.setVisible(true);
    } catch (Exception e) {
        e.printStackTrace();
    }
}

From source file:com.ibm.streamsx.edgevideo.device.edgent.JsonMat.java

License:Open Source License

private static String base64MimeEncodeMat(Mat mat) {
    int width = mat.width(), height = mat.height(), channels = mat.channels();

    // resize if needed
    // With initial resize factor of 4 and being within 2' of the MBP camera,
    // a face image seems to be on the order of 15Kb.
    if (width * height * channels > 50 * 1024) {
        Mat smallerFace = new Mat();
        int resizeFactor = 2;
        Imgproc.resize(mat, smallerFace, new Size(mat.width() / resizeFactor, mat.height() / resizeFactor));
        mat = smallerFace;
        width = mat.width();
        height = mat.height();
        channels = mat.channels();
    }

    byte[] sourcePixels = new byte[width * height * channels];
    mat.get(0, 0, sourcePixels);

    // Base64 encode the image to be able to package in JsonObject
    // java.util.Base64 since 1.8, otherwise use Apache Commons
    Encoder encoder = Base64.getMimeEncoder();
    String base64 = encoder.encodeToString(sourcePixels);

    //System.out.println("pub face bytes size: " + sourcePixels.length + " base64 size:" + base64.length());

    return base64;
}
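
The reverse direction is not part of this source. A hedged sketch of decoding such a Base64 string back into a Mat, assuming the original rows, cols and 8-bit type travel alongside the pixel data in the JSON payload (the method and parameter names are hypothetical):

    // Hypothetical counterpart: rebuild a Mat from the Base64 text produced above.
    private static Mat base64MimeDecodeMat(String base64, int rows, int cols, int type) {
        // java.util.Base64 MIME decoder, matching the encoder used in base64MimeEncodeMat
        byte[] sourcePixels = Base64.getMimeDecoder().decode(base64);
        Mat mat = new Mat(rows, cols, type);
        mat.put(0, 0, sourcePixels); // bulk write, the inverse of mat.get(0, 0, sourcePixels)
        return mat;
    }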

From source file:com.ibm.streamsx.edgevideo.device.MyPanel.java

License:Open Source License

public boolean matToBufferedImage(Mat mat, int bufferedImageType) {
    int width = mat.width(), height = mat.height(), channels = mat.channels();
    byte[] sourcePixels = new byte[width * height * channels];
    mat.get(0, 0, sourcePixels);
    // create new image and get reference to backing data  
    image = new BufferedImage(width, height, bufferedImageType);
    final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(sourcePixels, 0, targetPixels, 0, sourcePixels.length);
    return true;
}

From source file:com.lauszus.facerecognitionapp.TinyDB.java

License:Apache License

public void putListMat(String key, ArrayList<Mat> objArray) {
    checkForNullKey(key);
    ArrayList<String> objStrings = new ArrayList<String>();

    for (Mat mat : objArray) {
        int size = (int) (mat.total() * mat.channels());
        byte[] data = new byte[size];
        mat.get(0, 0, data);
        String dataString = new String(Base64.encode(data, Base64.DEFAULT));
        objStrings.add(dataString);
    }
    putListString(key, objStrings);
}
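
Note that only the raw pixel bytes are persisted, so reading the list back requires the original Mat dimensions and type from elsewhere. A hedged counterpart sketch (the method name and the rows/cols/type parameters are assumptions, not part of this source):

    public ArrayList<Mat> getListMat(String key, int rows, int cols, int type) {
        ArrayList<String> objStrings = getListString(key); // assumed existing TinyDB helper
        ArrayList<Mat> objArray = new ArrayList<Mat>();
        for (String dataString : objStrings) {
            // android.util.Base64, matching the encoder used above
            byte[] data = Base64.decode(dataString, Base64.DEFAULT);
            Mat mat = new Mat(rows, cols, type);
            mat.put(0, 0, data);
            objArray.add(mat);
        }
        return objArray;
    }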

From source file:com.Linguist.model.grayscaleClass.java

@Override
public File imagePreprocessing(String image, String extnsn) {

    BufferedImage bImge = null;
    BufferedImage bImage2 = null;
    File grayscle = null;

    try {

        // loadOpenCV_Lib();
        //String path = "opencv\\build\\java\\x64\\opencv_java300.dll";
        FileInputStream fileName = new FileInputStream(
                "C:\\Users\\User\\Documents\\GitHub\\Linguist\\web\\uploadedImage\\" + image);
        InputStream input = fileName;
        bImge = ImageIO.read(input);
        byte[] imgeByte = ((DataBufferByte) bImge.getRaster().getDataBuffer()).getData();
        Mat mat1 = new Mat(bImge.getHeight(), bImge.getWidth(), CvType.CV_8UC3);
        mat1.put(0, 0, imgeByte);
        Mat mat2 = new Mat(bImge.getHeight(), bImge.getWidth(), CvType.CV_8UC1);
        Imgproc.cvtColor(mat1, mat2, Imgproc.COLOR_RGB2GRAY);
        byte[] imageData = new byte[mat2.rows() * mat2.cols() * (int) (mat2.elemSize())];
        mat2.get(0, 0, imageData);
        bImage2 = new BufferedImage(mat2.cols(), mat2.rows(), BufferedImage.TYPE_BYTE_GRAY);
        bImage2.getRaster().setDataElements(0, 0, mat2.cols(), mat2.rows(), imageData);

        String extn = null;
        switch (extnsn) {
        case ".jpg":
            extn = "jpg";
            break;
        case ".png":
            extn = "png";
            break;
        case ".pdf":
            extn = "pdf";
            break;
        case ".tiff":
            extn = "tif";
            break;

        }
        //writing the grayscale image to the folder
        grayscle = new File(
                "C:\\Users\\User\\Documents\\GitHub\\Linguist\\web\\uploadedImage\\grayscale" + "." + extn);
        ImageIO.write(bImage2, "jpg", grayscle);
    } catch (IOException ex) {
        System.out.println("" + ex.getMessage());
    } catch (Exception ex) {
        Logger.getLogger(grayscaleClass.class.getName()).log(Level.SEVERE, null, ex);
    }
    return grayscle;

}

From source file:com.minio.io.alice.MatVideoWriter.java

License:Open Source License

private byte[] captureRawBytes(Mat mat) {
    int length = (int) (mat.total() * mat.elemSize());
    matByteArray = new byte[length];
    mat.get(0, 0, matByteArray);
    return matByteArray;
}

From source file:com.mycompany.analyzer.Analyzer.java

public BufferedImage mat2BufferedImage(Mat m) {
    int type = BufferedImage.TYPE_BYTE_GRAY;
    if (m.channels() > 1) {
        type = BufferedImage.TYPE_3BYTE_BGR;
    }
    int bufferSize = m.channels() * m.cols() * m.rows();
    byte[] b = new byte[bufferSize];
    m.get(0, 0, b); // get all the pixels
    BufferedImage image = new BufferedImage(m.cols(), m.rows(), type);
    final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(b, 0, targetPixels, 0, b.length);
    return image;
}

From source file:com.oetermann.imageclassifier.Util.java

License:Open Source License

public static void saveMat(String path, Mat mat) {
    File file = new File(path).getAbsoluteFile();
    file.getParentFile().mkdirs();
    try {
        int rows = mat.rows();
        int cols = mat.cols();
        int type = mat.type();
        Object data;
        switch (mat.type()) {
        case CvType.CV_8S:
        case CvType.CV_8U:
            data = new byte[(int) mat.total() * mat.channels()];
            mat.get(0, 0, (byte[]) data);
            break;
        case CvType.CV_16S:
        case CvType.CV_16U:
            data = new short[(int) mat.total() * mat.channels()];
            mat.get(0, 0, (short[]) data);
            break;
        case CvType.CV_32S:
            data = new int[(int) mat.total() * mat.channels()];
            mat.get(0, 0, (int[]) data);
            break;
        case CvType.CV_32F:
            data = new float[(int) mat.total() * mat.channels()];
            mat.get(0, 0, (float[]) data);
            break;
        case CvType.CV_64F:
            data = new double[(int) mat.total() * mat.channels()];
            mat.get(0, 0, (double[]) data);
            break;
        default:
            data = null;
        }
        try (ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(path))) {
            oos.writeObject(rows);
            oos.writeObject(cols);
            oos.writeObject(type);
            oos.writeObject(data);
            oos.close();
        }
    } catch (IOException | ClassCastException ex) {
        System.err.println("ERROR: Could not save mat to file: " + path);
        //            Logger.getLogger(ImageClassifier.class.getName()).log(Level.SEVERE, null, ex);
    }
}
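
A hedged sketch of the matching load path, reading back the four objects in the order saveMat wrote them (the method name is an assumption; like saveMat's switch, it only fills the data for single-channel types):

    public static Mat loadMat(String path) {
        try (ObjectInputStream ois = new ObjectInputStream(new FileInputStream(path))) {
            // Read rows, cols, type and the data array in the order saveMat wrote them
            int rows = (int) ois.readObject();
            int cols = (int) ois.readObject();
            int type = (int) ois.readObject();
            Object data = ois.readObject();
            Mat mat = new Mat(rows, cols, type);
            switch (type) {
            case CvType.CV_8S:
            case CvType.CV_8U:
                mat.put(0, 0, (byte[]) data);
                break;
            case CvType.CV_16S:
            case CvType.CV_16U:
                mat.put(0, 0, (short[]) data);
                break;
            case CvType.CV_32S:
                mat.put(0, 0, (int[]) data);
                break;
            case CvType.CV_32F:
                mat.put(0, 0, (float[]) data);
                break;
            case CvType.CV_64F:
                mat.put(0, 0, (double[]) data);
                break;
            default:
                break; // unmatched types are left as an uninitialized Mat, mirroring saveMat
            }
            return mat;
        } catch (IOException | ClassNotFoundException | ClassCastException ex) {
            System.err.println("ERROR: Could not load mat from file: " + path);
            return null;
        }
    }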