List of usage examples for org.opencv.core.Mat.rows()
public int rows()
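rows() returns the number of rows in the matrix, i.e. the image height in pixels; the examples below typically pair it with cols() when sizing a destination Bitmap or BufferedImage. A minimal sketch of that contract follows (the class name MatRowsExample and the 480x640 dimensions are illustrative assumptions, not taken from the projects listed):

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;

public class MatRowsExample {
    public static void main(String[] args) {
        // Requires the OpenCV native library on java.library.path.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // A 480-row by 640-column, 3-channel 8-bit matrix (a 640x480 BGR image).
        Mat mat = new Mat(480, 640, CvType.CV_8UC3);
        System.out.println(mat.rows()); // 480 -- same value as mat.height()
        System.out.println(mat.cols()); // 640 -- same value as mat.width()
    }
}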
From source file:at.entenbaer.utils.TPAUtils.java
License:Open Source License
/**
 * Saves an OpenCV Mat to a path inside the TexturePoemApp folder in the pictures directory.
 *
 * @param mat  image that should be saved
 * @param path path where the image should be saved inside the TexturePoemApp folder
 */
public static void saveMatToBitmap(Mat mat, String path) {
    if (Environment.getExternalStorageState().equals(Environment.MEDIA_MOUNTED)) {
        String galleryPath = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_PICTURES)
                .toString();
        Log.d("galleryPath", galleryPath);
        Bitmap b = Bitmap.createBitmap(mat.cols(), mat.rows(), Bitmap.Config.ARGB_8888);
        Utils.matToBitmap(mat, b);
        File album = new File(galleryPath + "/TexturePoemApp");
        if (!album.isDirectory()) {
            album.mkdirs();
        }
        File f = new File(galleryPath + "/TexturePoemApp/" + path);
        try {
            FileOutputStream fo = new FileOutputStream(f);
            b.compress(Bitmap.CompressFormat.JPEG, 100, fo);
            fo.flush();
            fo.close();
        } catch (IOException e) {
            Log.e("IOException", "not saved");
            e.printStackTrace();
        }
    } else {
        Log.d("Env", "not mounted");
    }
}
From source file:attendance_system_adder.cv.image.java
public BufferedImage Mat2BufferedImage(Mat m) {
    int type = BufferedImage.TYPE_BYTE_GRAY;
    if (m.channels() > 1) {
        type = BufferedImage.TYPE_3BYTE_BGR;
    }
    int bufferSize = m.channels() * m.cols() * m.rows();
    byte[] b = new byte[bufferSize];
    m.get(0, 0, b); // get all the pixels
    BufferedImage image = new BufferedImage(m.cols(), m.rows(), type);
    final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(b, 0, targetPixels, 0, b.length);
    return image;
}
From source file:balldetection.Webcam.java
public static Image toBufferedImage(Mat m) {
    int type = BufferedImage.TYPE_BYTE_GRAY;
    if (m.channels() > 1) {
        Mat m2 = new Mat();
        Imgproc.cvtColor(m, m2, Imgproc.COLOR_BGR2RGB);
        type = BufferedImage.TYPE_3BYTE_BGR;
        m = m2;
    }
    byte[] b = new byte[m.channels() * m.cols() * m.rows()];
    m.get(0, 0, b); // get all the pixels
    BufferedImage image = new BufferedImage(m.cols(), m.rows(), type);
    image.getRaster().setDataElements(0, 0, m.cols(), m.rows(), b);
    return image;
}
From source file:bgslibrary.Utils.java
License:Open Source License
public static final BufferedImage toBufferedImage(Mat m) {
    int type = BufferedImage.TYPE_BYTE_GRAY;
    if (m.channels() > 1)
        type = BufferedImage.TYPE_3BYTE_BGR;
    int bufferSize = m.channels() * m.cols() * m.rows();
    byte[] b = new byte[bufferSize];
    m.get(0, 0, b); // get all the pixels
    BufferedImage image = new BufferedImage(m.cols(), m.rows(), type);
    final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(b, 0, targetPixels, 0, b.length);
    return image;
}
From source file:bollettini.BullettinCompiler.java
public void show() {
    // resize to show
    Size size = new Size(1100, 335);
    Mat resize = new Mat();
    Imgproc.resize(bullettin, resize, size);
    // create image
    int type = BufferedImage.TYPE_BYTE_GRAY;
    int bufferSize = resize.channels() * resize.cols() * resize.rows();
    byte[] b = new byte[bufferSize];
    resize.get(0, 0, b); // get all the pixels
    BufferedImage image = new BufferedImage(resize.cols(), resize.rows(), type);
    final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(b, 0, targetPixels, 0, b.length);
    ImageIcon icon = new ImageIcon(image);
    // create the view and show the image
    View view = new View();
    view.init(this);
    view.setIcon(icon);
    view.setVisible(true);
    view.setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE);
}
From source file:by.zuyeu.deyestracker.core.util.CVCoreUtils.java
public static Mat selectSubmatByRect(Rect rect, Mat image) {
    double colScale = 1.0 * image.cols() / image.width();
    int colStart = (int) (1.0 * rect.x * colScale);
    int colEnd = (int) (1.0 * (rect.x + rect.width) * colScale);
    double rowScale = 1.0 * image.rows() / image.height();
    int rowStart = (int) (1.0 * rect.y * rowScale);
    int rowEnd = (int) (1.0 * (rect.y + rect.height) * rowScale);
    return image.submat(rowStart, rowEnd, colStart, colEnd);
}
From source file:by.zuyeu.deyestracker.core.util.CVCoreUtils.java
public static void insertSubmatByRect(Mat subImage, Rect rect, Mat origImage) {
    double colScale = 1.0 * origImage.cols() / origImage.width();
    int colStart = (int) (1.0 * rect.x * colScale);
    double rowScale = 1.0 * origImage.rows() / origImage.height();
    int rowStart = (int) (1.0 * rect.y * rowScale);
    for (int x1 = 0, x2 = colStart; x1 < subImage.cols(); x1++, x2++) {
        for (int y1 = 0, y2 = rowStart; y1 < subImage.rows(); y1++, y2++) {
            final double[] subImgData = subImage.get(y1, x1);
            origImage.put(y2, x2, subImgData);
        }
    }
}
From source file:by.zuyeu.deyestracker.core.video.sampler.FaceInfoSampler.java
private Mat selectEyesRegionFromFace(final Mat faceImage) {
    return faceImage.submat(0, faceImage.rows() / 2, 0, faceImage.cols());
}
From source file:ch.hslu.pren.t37.camera.BildAuswertungKorb.java
public int bildAuswerten() {
    // image to search in
    String inFile = "../camera.jpg";
    // the template image that is searched for in the input image
    String templateFile = "../Bilder/korb.jpg";
    // the solution is rendered into this image
    String outFile = "../LoesungsBild.jpg";
    // set the matching method
    int match_method = Imgproc.TM_CCOEFF_NORMED;
    // load the original image and the template
    Mat img = Highgui.imread(inFile, Highgui.CV_LOAD_IMAGE_COLOR);
    Mat templ = Highgui.imread(templateFile, Highgui.CV_LOAD_IMAGE_COLOR);
    // generate the result matrix
    int result_cols = img.cols() - templ.cols() + 1;
    int result_rows = img.rows() - templ.rows() + 1;
    Mat result = new Mat(result_rows, result_cols, CvType.CV_32FC1);
    // match and normalize
    Imgproc.matchTemplate(img, templ, result, match_method);
    Core.normalize(result, result, 0, 1, Core.NORM_MINMAX, -1, new Mat());
    // find the best match using the min/max locations
    Core.MinMaxLocResult mmr = Core.minMaxLoc(result);
    Point matchLoc;
    if (match_method == Imgproc.TM_SQDIFF || match_method == Imgproc.TM_SQDIFF_NORMED) {
        matchLoc = mmr.minLoc;
    } else {
        matchLoc = mmr.maxLoc;
    }
    // draw the match
    Core.rectangle(img, matchLoc, new Point(matchLoc.x + templ.cols(), matchLoc.y + templ.rows()),
            new Scalar(0, 255, 0), 10);
    // store all four corner points
    Point topLeft = new Point(matchLoc.x, matchLoc.y);
    Point topRight = new Point(matchLoc.x + templ.cols(), matchLoc.y);
    Point downLeft = new Point(matchLoc.x, matchLoc.y + templ.rows());
    Point downRight = new Point(matchLoc.x + templ.cols(), matchLoc.y + templ.rows());
    // save the result image
    Highgui.imwrite(outFile, img);
    // compute the center points
    double mittePicture;
    double mitteKorb;
    double differnez;
    Mat sol = Highgui.imread(outFile, Highgui.CV_LOAD_IMAGE_COLOR);
    mittePicture = sol.width() / 2;
    mitteKorb = (topRight.x - topLeft.x) / 2;
    mitteKorb = topLeft.x + mitteKorb;
    differnez = mitteKorb - mittePicture;
    logger.log(PrenLogger.LogLevel.DEBUG, "Basket center: " + mitteKorb);
    logger.log(PrenLogger.LogLevel.DEBUG, "Image center: " + mittePicture);
    logger.log(PrenLogger.LogLevel.DEBUG,
            "Difference: " + differnez + "\nIf the difference is negative, turn right");
    return (int) differnez;
}
From source file:ch.zhaw.facerecognitionlibrary.Helpers.FaceDetection.java
License:Open Source License
public Rect[] getFaces(Mat img) {
    MatOfRect faces = new MatOfRect();
    List<Rect> facesList = null;
    float mRelativeFaceSize = 0.2f;
    int mAbsoluteFaceSize = 0;
    if (faceDetector != null) {
        // If no face is detected, rotate the picture by 90 degrees and try again
        angle = 0;
        for (int i = 1; i <= 4; i++) {
            int height = img.rows();
            if (Math.round(height * mRelativeFaceSize) > 0) {
                mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
            }
            faceDetector.detectMultiScale(img, faces, 1.1, 2, 2,
                    new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
            // Rotate by 90 degrees
            if (faces.empty()) {
                angle = 90 * i;
                MatOperation.rotate_90n(img, 90);
            } else {
                facesList = faces.toList();
                // Check that each found face rectangle fits in the image; if not, remove it
                // (use an iterator so removal does not throw ConcurrentModificationException)
                Iterator<Rect> it = facesList.iterator();
                while (it.hasNext()) {
                    Rect face = it.next();
                    if (!(0 <= face.x && 0 <= face.width && face.x + face.width <= img.cols()
                            && 0 <= face.y && 0 <= face.height && face.y + face.height <= img.rows())) {
                        it.remove();
                    }
                }
                if (!(facesList.size() > 0)) {
                    return null;
                }
                // Faces found with the current image rotation
                this.img = img;
                break;
            }
        }
    } else {
        Log.e(TAG, "Detection method is not selected!");
    }
    if (facesList != null) {
        return facesList.toArray(new Rect[0]);
    } else {
        return null;
    }
}