List of usage examples for org.opencv.core Mat put
public int put(int row, int col, byte[] data)
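Before the project examples below, a minimal standalone sketch (not taken from any of the listed projects) of what put(row, col, byte[]) does: it copies a Java byte array into the Mat's native buffer starting at the given element, so the array is typically sized rows * cols * channels and written from (0, 0).

// Minimal sketch: fill a 2x3 single-channel Mat from a byte[] and read it back.
Mat m = new Mat(2, 3, CvType.CV_8UC1);
byte[] data = {1, 2, 3, 4, 5, 6};   // 2 * 3 * 1 elements
m.put(0, 0, data);                  // copy the whole array starting at element (0, 0)

byte[] back = new byte[6];
m.get(0, 0, back);                  // copy the Mat contents back into a Java array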
From source file:OCV_MedianBlur.java
License:Open Source License
@Override
public void run(ImageProcessor ip) {
    // srcdst
    int imw = ip.getWidth();
    int imh = ip.getHeight();
    byte[] srcdst_bytes = (byte[]) ip.getPixels();

    // mat
    Mat src_mat = new Mat(imh, imw, CvType.CV_8UC1);
    Mat dst_mat = new Mat(imh, imw, CvType.CV_8UC1);

    // run
    src_mat.put(0, 0, srcdst_bytes);
    Imgproc.medianBlur(src_mat, dst_mat, ksize);
    dst_mat.get(0, 0, srcdst_bytes);
}
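The same byte[] round trip, sketched without the ImageJ ImageProcessor wrapper; the buffer contents, dimensions, and ksize value here are illustrative assumptions (Imgproc.medianBlur requires an odd ksize greater than 1).

// Sketch of the round trip without ImageJ: array in, filter, array out.
int width = 640, height = 480;                      // assumed image dimensions
byte[] pixels = new byte[width * height];           // 8-bit grayscale buffer
Mat src = new Mat(height, width, CvType.CV_8UC1);
src.put(0, 0, pixels);                              // Java array -> Mat
Mat dst = new Mat(height, width, CvType.CV_8UC1);
Imgproc.medianBlur(src, dst, 3);                    // ksize must be odd and > 1
dst.get(0, 0, pixels);                              // Mat -> Java array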
From source file:airhockeyjava.detection.PS3EyeFrameGrabber.java
License:Open Source License
/**
 * Grabs one frame; the caller has to make a copy of the returned image before
 * processing it.
 *
 * Throws a NullPointerException if the grabber was not started before grabbing.
 *
 * @return "read-only" RGB, 3-channel
 */
public Mat grabMat() {
    Mat matImg = new Mat(this.imageHeight, this.imageWidth, CvType.CV_8UC4);
    int[] img = grab_RGB4();

    ByteBuffer byteBuffer = ByteBuffer.allocate(img.length * 4);
    IntBuffer intBuffer = byteBuffer.asIntBuffer();
    intBuffer.put(img);
    byte[] array = byteBuffer.array();
    matImg.put(0, 0, array);

    List<Mat> mv = new ArrayList<Mat>();
    Core.split(matImg, mv);
    mv.remove(0);
    Core.merge(mv, matImg);
    return matImg;
}
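A hypothetical call site for grabMat(), assuming a started grabber instance named grabber; Imgcodecs.imwrite is used only to illustrate persisting the frame (on OpenCV 2.x the equivalent call lives in Highgui).

// Hypothetical usage: grab a frame, copy it (the returned Mat is documented as
// read-only), and write the copy to disk.
Mat frame = grabber.grabMat();
Mat copy = frame.clone();
Imgcodecs.imwrite("frame.png", copy);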
From source file:angryhexclient.OurVision.java
License:Open Source License
/**
 * Detects the ground in the image.
 *
 * @return A list of blocks representing the ground.
 */
public List<Block> detectGround() {
    Mat binaryImage = new Mat(new Size(_nWidth, _nHeight), CvType.CV_8U, new Scalar(1));

    // We only detect right of this margin. The slingshot has some ground
    // colors and would partly be detected as ground, which is not what we
    // want. Trajectories originate at the slingshot, and if ground were
    // detected there, the agent would think that none of its trajectories
    // are valid. Therefore we start detecting just right of the slingshot.
    int startAtX = findSlingshot().x + findSlingshot().width * 2;

    // Now we create a binary image of the ground areas: white where there
    // is ground, black otherwise.
    for (int y = 0; y < _nHeight; y++) {
        for (int x = 0; x < _nWidth; x++) {
            if (x > startAtX && isGround(x, y))
                binaryImage.put(y, x, 255);
            else
                binaryImage.put(y, x, 0);
        }
    }

    Mat smoothedImage = new Mat(new Size(_nWidth, _nHeight), CvType.CV_8U, new Scalar(1));
    // This median filter improves the detection tremendously. There are a
    // whole lot of single pixels carrying ground colors spread all over
    // the image; we remove them here.
    Imgproc.medianBlur(binaryImage, smoothedImage, 7);

    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    // We use OpenCV to find the contours. Contours are lines that
    // represent the boundaries of the objects in the binary image.
    Imgproc.findContours(smoothedImage, contours, new Mat(), Imgproc.RETR_EXTERNAL,
            Imgproc.CHAIN_APPROX_SIMPLE);

    ArrayList<Block> result = new ArrayList<Block>();
    // Now, for every contour, we convert it to blocks for communicating them to DLV.
    for (MatOfPoint mp : contours) {
        org.opencv.core.Point[] pts = mp.toArray();
        for (int i = 0; i < pts.length - 1; i++) {
            Block b = new Block((int) pts[i].x, (int) pts[i].y);
            b.add((int) pts[i + 1].x, (int) pts[i + 1].y);
            result.add(b);
        }
        // One block from the last vertex back to the first vertex.
        Block b = new Block((int) pts[pts.length - 1].x, (int) pts[pts.length - 1].y);
        b.add((int) pts[0].x, (int) pts[0].y);
        result.add(b);
    }
    return result;
}
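Writing one pixel per put() call means one native crossing per pixel. A possible variant, not taken from the project, batches each row into a byte[] and writes it with a single put per row; it assumes the same isGround(x, y) predicate and fields as above.

// Row-wise variant of the binary-image loop above (sketch, not project code).
byte[] row = new byte[_nWidth];
for (int y = 0; y < _nHeight; y++) {
    for (int x = 0; x < _nWidth; x++) {
        row[x] = (x > startAtX && isGround(x, y)) ? (byte) 255 : (byte) 0;
    }
    binaryImage.put(y, 0, row);   // one native call per row instead of per pixel
}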
From source file:arlocros.CameraParams.java
License:Apache License
public static Mat getCameraMatrix(CameraParams cameraParams) {
    final Mat cameraMatrix = new Mat(new Size(3, 3), CvType.CV_32FC1);
    cameraMatrix.put(0, 0, cameraParams.fx());
    cameraMatrix.put(0, 1, 0);
    cameraMatrix.put(0, 2, cameraParams.cx());
    cameraMatrix.put(1, 0, 0);
    cameraMatrix.put(1, 1, cameraParams.fy());
    cameraMatrix.put(1, 2, cameraParams.cy());
    cameraMatrix.put(2, 0, 0);
    cameraMatrix.put(2, 1, 0);
    cameraMatrix.put(2, 2, 1);
    return cameraMatrix;
}
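Mat.put also has a varargs double[] overload, so the same 3x3 intrinsic matrix can be written with a single call. A sketch assuming the same CameraParams accessors as above:

// Sketch: build the camera matrix with one put() call instead of nine.
Mat cameraMatrix = new Mat(3, 3, CvType.CV_32FC1);
cameraMatrix.put(0, 0,
        cameraParams.fx(), 0,                 cameraParams.cx(),
        0,                 cameraParams.fy(), cameraParams.cy(),
        0,                 0,                 1);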
From source file:arlocros.Utils.java
License:Apache License
static public Mat matFromImage(final Image source) throws Exception {
    byte[] imageInBytes = source.getData().array();
    imageInBytes = Arrays.copyOfRange(imageInBytes, source.getData().arrayOffset(), imageInBytes.length);
    Mat cvImage = new Mat(source.getHeight(), source.getWidth(), CvType.CV_8UC3);
    cvImage.put(0, 0, imageInBytes);
    return cvImage;
}
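This helper assumes the ROS image bytes are already laid out as 3-byte pixels in the order OpenCV expects. If the message were encoded as rgb8 rather than bgr8 (an assumption about the message, not stated in the snippet), a channel swap would be needed after the put; getEncoding() is assumed here to come from the rosjava sensor_msgs/Image type.

// Sketch: after matFromImage, swap channels if the ROS message was rgb8.
Mat cvImage = matFromImage(source);              // throws Exception, handled by the caller
if ("rgb8".equals(source.getEncoding())) {       // getEncoding() assumed from sensor_msgs/Image
    Imgproc.cvtColor(cvImage, cvImage, Imgproc.COLOR_RGB2BGR);
}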
From source file:at.ac.tuwien.caa.docscan.camera.CameraPreview.java
License:Open Source License
private void oldSingleThread(byte[] pixels) {
    long currentTime = System.currentTimeMillis();

    if (currentTime - mLastTime >= FRAME_TIME_DIFF) {
        synchronized (this) {
            // 1.5 rows per pixel row, since NV21 YUV stores chroma in an extra half-height plane
            Mat yuv = new Mat((int) (mFrameHeight * 1.5), mFrameWidth, CvType.CV_8UC1);
            yuv.put(0, 0, pixels);

            if (mFrameMat != null)
                mFrameMat.release();

            mFrameMat = new Mat(mFrameHeight, mFrameWidth, CvType.CV_8UC3);
            Imgproc.cvtColor(yuv, mFrameMat, Imgproc.COLOR_YUV2RGB_NV21);

            if (mStoreMat) {
                ChangeDetector.initNewFrameDetector(mFrameMat);
                mStoreMat = false;
            }

            yuv.release();
            mLastTime = currentTime;

            boolean processFrame = true;

            // This is done in series mode:
            if (mAwaitFrameChanges)
                processFrame = isFrameSteadyAndNew();

            // Check if there should be a short break between two successive shots in series mode:
            boolean paused = pauseBetweenShots(currentTime);
            processFrame &= !paused;

            // If in single mode - or the frame is steady and contains a change - do the document analysis:
            if (processFrame)
                this.notify();
        }
    }
}
From source file:at.ac.tuwien.caa.docscan.camera.CameraPreview.java
License:Open Source License
private Mat byte2Mat(byte[] pixels) {
    Mat yuv = new Mat((int) (mFrameHeight * 1.5), mFrameWidth, CvType.CV_8UC1);
    yuv.put(0, 0, pixels);

    Mat result = new Mat(mFrameHeight, mFrameWidth, CvType.CV_8UC3);
    Imgproc.cvtColor(yuv, result, Imgproc.COLOR_YUV2RGB_NV21);
    return result;
}
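A hypothetical call site for byte2Mat(), assuming it is fed from the Android android.hardware.Camera preview callback, which delivers NV21 buffers by default:

// Hypothetical usage from a Camera.PreviewCallback implementation.
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
    Mat rgb = byte2Mat(data);   // data is the NV21 buffer, sized width * height * 1.5
    // ... process rgb ...
    rgb.release();
}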
From source file:Beans.Imagen.java
public Mat getMatFotografia() {
    byte[] pixels = ((DataBufferByte) fotografia.getRaster().getDataBuffer()).getData();

    // Create a Matrix the same size as the image
    Mat image = new Mat(alto, ancho, CvType.CV_8UC3);
    // Fill the Matrix with the image values
    image.put(0, 0, pixels);

    Imgproc.resize(image, image, new Size(480, 640));
    return image;
}
From source file:br.com.prj.TelaPrincipal.java
/**
 * Converts an Image into a Mat.
 *
 * @param img
 * @return new Mat()
 */
public Mat convertImageToMat(Image img) {
    BufferedImage image = (BufferedImage) img;
    byte[] data = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    Mat mat = new Mat(image.getHeight(), image.getWidth(), CvType.CV_8UC3);
    mat.put(0, 0, data);
    return mat;
}
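Both of the last two examples assume a BufferedImage of type TYPE_3BYTE_BGR, so that the backing DataBufferByte matches CV_8UC3. A sketch of the reverse direction, Mat back to BufferedImage, under the same assumption:

// Sketch: copy a CV_8UC3 Mat back into a TYPE_3BYTE_BGR BufferedImage.
BufferedImage out = new BufferedImage(mat.cols(), mat.rows(), BufferedImage.TYPE_3BYTE_BGR);
byte[] target = ((DataBufferByte) out.getRaster().getDataBuffer()).getData();
mat.get(0, 0, target);   // Mat -> Java array backing the BufferedImage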
From source file:by.zuyeu.deyestracker.core.util.CVCoreUtils.java
public static void insertSubmatByRect(Mat subImage, Rect rect, Mat origImage) {
    double colScale = 1.0 * origImage.cols() / origImage.width();
    int colStart = (int) (1.0 * rect.x * colScale);
    double rowScale = 1.0 * origImage.rows() / origImage.height();
    int rowStart = (int) (1.0 * rect.y * rowScale);

    for (int x1 = 0, x2 = colStart; x1 < subImage.cols(); x1++, x2++) {
        for (int y1 = 0, y2 = rowStart; y1 < subImage.rows(); y1++, y2++) {
            final double[] subImgData = subImage.get(y1, x1);
            origImage.put(y2, x2, subImgData);
        }
    }
}
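The per-pixel loop above works, but OpenCV can also copy the whole block in one native call via submat and copyTo. A sketch of that alternative (assuming subImage fits inside origImage at the computed offset):

// Sketch: copy subImage into origImage in one call instead of per-pixel put().
Rect roi = new Rect(colStart, rowStart, subImage.cols(), subImage.rows());
subImage.copyTo(origImage.submat(roi));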