Usage examples for org.opencv.core.Mat.put

public int put(int row, int col, byte[] data)

Writes the contents of data into the matrix, starting at element (row, col) and proceeding in row-major order, and returns the number of bytes actually copied. The Mat must already be allocated, and its depth must be byte-sized (CV_8U or CV_8S); for other depths use the double[], float[], int[], or short[] overloads.
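Before the collected examples, here is a minimal standalone sketch (not taken from any of the sources below) of the bulk-write pattern they all rely on: allocate the Mat first, then copy a whole byte array in with put(0, 0, data).

// Minimal Mat.put demo. Assumes the OpenCV Java bindings are on the path.
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;

public class MatPutDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        int rows = 480, cols = 640;
        byte[] pixels = new byte[rows * cols];     // one byte per pixel for CV_8UC1
        java.util.Arrays.fill(pixels, (byte) 128); // mid-gray test pattern

        Mat gray = new Mat(rows, cols, CvType.CV_8UC1);
        int bytesWritten = gray.put(0, 0, pixels); // returns the number of bytes copied
        System.out.println("wrote " + bytesWritten + " bytes");
    }
}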
From source file:org.ar.rubik.MonoChromatic.java
License:Open Source License
/**
 * Use a mask operation and then min/max.
 * This solution consumes about 20 minutes per frame!
 *
 * @param original_image
 * @return
 */
@SuppressWarnings("unused")
private static Mat monochromaticMedianImageFilterUtilizingOpenCv2(Mat original_image) {

    final Size imageSize = original_image.size();
    final int numColumns = (int) original_image.size().width;
    final int numRows = (int) original_image.size().height;
    final int bufferSize = numColumns * numRows;
    final int span = 7;
    final int accuracy = 5;

    Mat hsv_image = new Mat(imageSize, CvType.CV_8UC3);
    Imgproc.cvtColor(original_image, hsv_image, Imgproc.COLOR_RGB2HLS);
    List<Mat> channels = new LinkedList<Mat>();
    Core.split(hsv_image, channels);
    Mat hueMat = channels.get(0);
    Mat lumMat = channels.get(1);
    Mat satMat = channels.get(2);

    // Output byte array for speed efficiency
    Mat monochromatic_image = new Mat(imageSize, CvType.CV_8UC1);
    byte[] monochromaticByteArray = new byte[bufferSize];

    Mat mask = Mat.zeros(numRows, numColumns, CvType.CV_8UC1);

    Log.i(Constants.TAG, "Begin MonoChromatic CV");
    for (int row = 0; row < numRows; row++) {
        byte result_pixel = 0;
        for (int col = 0; col < numColumns; col++) {
            if (col < span || (col >= numColumns - span))
                result_pixel = 0; // Just put in black
            else if (row < span || (row >= numRows - span))
                result_pixel = 0; // Just put in black
            else {
                // Log.i(Constants.TAG, "Creating Mask at " + row + "," + col);
                // Note: Point takes (x, y), i.e., (col, row); the original passed (row, col).
                Core.rectangle(mask, new Point(col, row), new Point(col + span, row + span),
                        new Scalar(1, 1, 1));

                // Core.MinMaxLocResult minMaxResult = Core.minMaxLoc(hueMat, mask);
                // Caveat: copyTo with a mask zero-fills the unmasked pixels of the freshly
                // allocated subset, which biases minVal toward 0; the masked minMaxLoc
                // overload used in the next example avoids this.
                Mat subset = new Mat();
                hueMat.copyTo(subset, mask);
                Core.MinMaxLocResult minMaxResult = Core.minMaxLoc(subset);

                // Note: the original tested (maxVal - maxVal), which is always zero;
                // the intended test is the hue spread over the window.
                if ((minMaxResult.maxVal - minMaxResult.minVal) < accuracy) //&& (lum_max - lum_min < accuracy) && (sat_max - sat_min < accuracy) )
                    result_pixel = (byte) 128;
                else
                    result_pixel = (byte) 0;
                // Log.i(Constants.TAG, "Completed Mask at " + row + "," + col);

                // Clear the mask rectangle for the next window.
                Core.rectangle(mask, new Point(col, row), new Point(col + span, row + span),
                        new Scalar(0, 0, 0));
            }

            if ((col >= span / 2) && (row >= span / 2))
                monochromaticByteArray[(row - span / 2) * numColumns + (col - span / 2)] = result_pixel;
        }
        Log.i(Constants.TAG, "Completed Row: " + row);
    }

    // Bulk-write the result buffer into the single-channel output Mat.
    monochromatic_image.put(0, 0, monochromaticByteArray);
    Log.i(Constants.TAG, "Completed MonoChromatic CV");

    return monochromatic_image;
}
From source file:org.ar.rubik.MonoChromatic.java
License:Open Source License
/**
 * Use OpenCV minMax.
 *
 * However, this is enormously slow, taking 10 minutes per frame! Why?
 * I think because it is effectively O(n^4) in computation.
 *
 * @param original_image
 * @return
 */
@SuppressWarnings("unused")
private static Mat monochromaticMedianImageFilterUtilizingOpenCv(Mat original_image) {

    final Size imageSize = original_image.size();
    final int numColumns = (int) original_image.size().width;
    final int numRows = (int) original_image.size().height;
    final int bufferSize = numColumns * numRows;
    final int span = 7;
    final int accuracy = 5;

    Mat hsv_image = new Mat(imageSize, CvType.CV_8UC3);
    Imgproc.cvtColor(original_image, hsv_image, Imgproc.COLOR_RGB2HLS);
    List<Mat> channels = new LinkedList<Mat>();
    Core.split(hsv_image, channels);
    Mat hueMat = channels.get(0);
    Mat lumMat = channels.get(1);
    Mat satMat = channels.get(2);

    // Output byte array for speed efficiency
    Mat monochromatic_image = new Mat(imageSize, CvType.CV_8UC1);
    byte[] monochromaticByteArray = new byte[bufferSize];

    Mat mask = Mat.zeros(numRows, numColumns, CvType.CV_8UC1);

    Log.i(Constants.TAG, "Begin MonoChromatic CV");
    for (int row = 0; row < numRows; row++) {
        byte result_pixel = 0;
        for (int col = 0; col < numColumns; col++) {
            if (col < span || (col >= numColumns - span))
                result_pixel = 0; // Just put in black
            else if (row < span || (row >= numRows - span))
                result_pixel = 0; // Just put in black
            else {
                // Log.i(Constants.TAG, "Creating Mask at " + row + "," + col);
                // Note: Point takes (x, y), i.e., (col, row); the original passed (row, col).
                Core.rectangle(mask, new Point(col, row), new Point(col + span, row + span),
                        new Scalar(1, 1, 1));

                Core.MinMaxLocResult minMaxResult = Core.minMaxLoc(hueMat, mask);

                // Note: the original tested (maxVal - maxVal), which is always zero;
                // the intended test is the hue spread over the masked window.
                if ((minMaxResult.maxVal - minMaxResult.minVal) < accuracy) //&& (lum_max - lum_min < accuracy) && (sat_max - sat_min < accuracy) )
                    result_pixel = (byte) 128;
                else
                    result_pixel = (byte) 0;
                // Log.i(Constants.TAG, "Completed Mask at " + row + "," + col);

                // Clear the mask rectangle for the next window.
                Core.rectangle(mask, new Point(col, row), new Point(col + span, row + span),
                        new Scalar(0, 0, 0));
            }

            if ((col >= span / 2) && (row >= span / 2))
                monochromaticByteArray[(row - span / 2) * numColumns + (col - span / 2)] = result_pixel;
        }
        Log.i(Constants.TAG, "Completed Row: " + row);
    }

    // Bulk-write the result buffer into the single-channel output Mat.
    monochromatic_image.put(0, 0, monochromaticByteArray);
    Log.i(Constants.TAG, "Completed MonoChromatic CV");

    return monochromatic_image;
}
From source file:org.ar.rubik.MonoChromatic.java
License:Open Source License
/**
 * Simple algorithm in Java. Byte arrays of the original image
 * are obtained and operated on to produce a resulting Java byte
 * array.
 *
 * @param original_image
 * @return
 */
private static Mat monochromaticMedianImageFilterBruteForceInJava(Mat original_image) {

    final Size imageSize = original_image.size();
    Mat monochromatic_image = new Mat(imageSize, CvType.CV_8UC1);
    Mat hsv_image = new Mat(imageSize, CvType.CV_8UC3);

    Imgproc.cvtColor(original_image, hsv_image, Imgproc.COLOR_RGB2HLS);
    // Log.i(Constants.TAG, "HSV Image: " + hsv_image); // CV_8UC3

    // Get the hue channel into a simple byte array for speed efficiency.
    final int numColumns = (int) original_image.size().width;
    final int numRows = (int) original_image.size().height;
    List<Mat> channels = new LinkedList<Mat>();
    Core.split(hsv_image, channels);
    Mat hueMat = channels.get(0);
    Mat lumMat = channels.get(1);
    Mat satMat = channels.get(2);
    final int bufferSize = numColumns * numRows;
    byte[] hueByteArray = new byte[bufferSize];
    byte[] lumByteArray = new byte[bufferSize];
    byte[] satByteArray = new byte[bufferSize];
    hueMat.get(0, 0, hueByteArray); // get all the pixels
    lumMat.get(0, 0, lumByteArray); // get all the pixels
    satMat.get(0, 0, satByteArray); // get all the pixels

    // Output byte array for speed efficiency
    byte[] monochromaticByteArray = new byte[bufferSize];

    for (int row = 0; row < numRows; row++) {

        final int span = 7;
        final int accuracy = 5;

        byte result_pixel = 0;

        for (int col = 0; col < numColumns; col++) {

            if (col < span)
                result_pixel = 0; // Just put in black
            else if (row < span)
                result_pixel = 0; // Just put in black
            else {
                int hue_min = 255;
                int hue_max = 0;
                int lum_min = 255;
                int lum_max = 0;
                // int sat_min = 255;
                // int sat_max = 0;

                for (int i = 0; i < span; i++) {
                    for (int j = 0; j < span; j++) {
                        int hue = (int) hueByteArray[(row - j) * numColumns + (col - i)] & 0xFF;
                        if (hue > hue_max)
                            hue_max = hue;
                        if (hue < hue_min)
                            hue_min = hue;

                        int lum = (int) lumByteArray[(row - j) * numColumns + (col - i)] & 0xFF;
                        if (lum > lum_max)
                            lum_max = lum;
                        if (lum < lum_min)
                            lum_min = lum;

                        // =+= Saturation does not look correct when viewed as a gray scale image. Not sure what is going on.
                        // int sat = (int) satByteArray[row * numColumns + (col - i)] & 0xFF;
                        // if (sat > sat_max)
                        //     sat_max = sat;
                        // if (sat < sat_min)
                        //     sat_min = sat;
                    } // End of row min/max sweep
                } // End of column min/max sweep

                if (hue_max - hue_min < accuracy) //&& (lum_max - lum_min < accuracy) && (sat_max - sat_min < accuracy) )
                    result_pixel = (byte) 128;
                else
                    result_pixel = (byte) 0;

                // Eliminate all black areas from consideration even if they are very flat.
                // For some reason, keying off minimum luminosity works best.
                if (lum_min < 30)
                    result_pixel = 0;

                // Log.i(Constants.TAG, String.format("Lum %d %d", lum_min, lum_max));
            } // End of else

            if ((col >= span / 2) && (row >= span / 2))
                monochromaticByteArray[(row - span / 2) * numColumns + (col - span / 2)] = result_pixel;

            // int test = (int) (satByteArray[row * numColumns + col]) & 0xFF;
            // monochromaticByteArray[row * numColumns + (col - span/2)] = (byte) test;
        } // End of column sweep
    } // End of row sweep

    // Bulk-write the result buffer into the single-channel output Mat.
    monochromatic_image.put(0, 0, monochromaticByteArray);

    return monochromatic_image;
}
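All three variants above compute the min and max hue over a span x span window at every pixel, which is what makes them so slow. The same per-window min/max can be obtained in milliseconds with morphological erode/dilate. The following is a minimal sketch of that alternative, not part of the original repository; the 7-pixel span and accuracy threshold of 5 are carried over from the code above.

// Hypothetical faster alternative (not from org.ar.rubik): erode gives the
// per-pixel minimum over the kernel window, dilate the maximum; thresholding
// their difference marks pixels whose hue spread stays under the accuracy.
Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(7, 7));
Mat localMin = new Mat(), localMax = new Mat(), spread = new Mat(), flat = new Mat();
Imgproc.erode(hueMat, localMin, kernel);   // windowed minimum of the hue channel
Imgproc.dilate(hueMat, localMax, kernel);  // windowed maximum of the hue channel
Core.subtract(localMax, localMin, spread); // hue spread within each window
// spread < 5  ->  128 (monochromatic), otherwise 0
Imgproc.threshold(spread, flat, 4, 128, Imgproc.THRESH_BINARY_INV);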
From source file:org.ar.rubik.RubikFace.java
License:Open Source License
/**
 * Calculate the optimum fit for the given layout of Rhombus in the Face.
 *
 * Set up a BIG linear equation: Y = AX
 * Where:
 *   Y is a 2k x 1 matrix of actual x and y locations from rhombus centers (known values)
 *   X is a 3 x 1 matrix of { x_origin, y_origin, alpha_lattice } (values we wish to find)
 *   A is a 2k x 3 matrix of coefficients derived from m, n, alpha, beta, and gamma.
 *
 * Notes:
 *   k := Twice the number of available rhombi.
 *   n := Integer axis of the face.
 *   m := Integer axis of the face.
 *   gamma := Ratio of beta to alpha lattice size.
 *
 * Also, calculate the sum of errors squared: E = Y - AX.
 *
 * @return
 */
private LeastMeansSquare findOptimumFaceFit() {

    // Count how many cells actually have a rhombus in them.
    int k = 0;
    for (int n = 0; n < 3; n++)
        for (int m = 0; m < 3; m++)
            if (faceRhombusArray[n][m] != null)
                k++;
    Log.i(Constants.TAG, "Big K: " + k);

    Mat bigAmatrix = new Mat(2 * k, 3, CvType.CV_64FC1);
    Mat bigYmatrix = new Mat(2 * k, 1, CvType.CV_64FC1);
    Mat bigXmatrix = new Mat(3, 1, CvType.CV_64FC1); // { origin_x, origin_y, latticeAlpha }

    // Load up matrices Y and A:
    //   X_k = X + n * L_alpha * cos(alpha) + m * L_beta * cos(beta)
    //   Y_k = Y + n * L_alpha * sin(alpha) + m * L_beta * sin(beta)
    int index = 0;
    for (int n = 0; n < 3; n++) {
        for (int m = 0; m < 3; m++) {
            Rhombus rhombus = faceRhombusArray[n][m];
            if (rhombus != null) {
                {
                    // Actual X axis value of Rhombus in this location
                    double bigY = rhombus.center.x;

                    // Expected X axis value: x = func(x_origin, n, m, alpha, beta, alphaLattice, gamma)
                    double bigA = n * Math.cos(alphaAngle) + gammaRatio * m * Math.cos(betaAngle);

                    bigYmatrix.put(index, 0, new double[] { bigY });
                    bigAmatrix.put(index, 0, new double[] { 1.0 });
                    bigAmatrix.put(index, 1, new double[] { 0.0 });
                    bigAmatrix.put(index, 2, new double[] { bigA });
                    index++;
                }
                {
                    // Actual Y axis value of Rhombus in this location
                    double bigY = rhombus.center.y;

                    // Expected Y axis value: y = func(y_origin, n, m, alpha, beta, alphaLattice, gamma)
                    double bigA = n * Math.sin(alphaAngle) + gammaRatio * m * Math.sin(betaAngle);

                    bigYmatrix.put(index, 0, new double[] { bigY });
                    bigAmatrix.put(index, 0, new double[] { 0.0 });
                    bigAmatrix.put(index, 1, new double[] { 1.0 });
                    bigAmatrix.put(index, 2, new double[] { bigA });
                    index++;
                }
            }
        }
    }
    // Log.v(Constants.TAG, "Big A Matrix: " + bigAmatrix.dump());
    // Log.v(Constants.TAG, "Big Y Matrix: " + bigYmatrix.dump());

    // Least-squares regression to find the best values of origin_x, origin_y, and alpha_lattice.
    // Problem:  Y = AX with known Y and A; find X.
    // Tactic:   minimize | AX - Y | (sum of squares of the elements).
    // OpenCV:   Core.solve(Mat src1, Mat src2, Mat dst, int flags)
    //           dst = arg min_X | src1 * X - src2 |
    // Thus:     src1 = A { 2k rows and 3 columns }
    //           src2 = Y { 2k rows and 1 column }
    //           dst  = X { 3 rows and 1 column }
    boolean solveFlag = Core.solve(bigAmatrix, bigYmatrix, bigXmatrix, Core.DECOMP_NORMAL);
    // Log.v(Constants.TAG, "Big X Matrix Result: " + bigXmatrix.dump());

    // Sum of error squared: given X from above, Y_estimate = AX and E = Y - AX.
    Mat bigEmatrix = new Mat(2 * k, 1, CvType.CV_64FC1);
    for (int r = 0; r < (2 * k); r++) {
        double y = bigYmatrix.get(r, 0)[0];
        double error = y;
        for (int c = 0; c < 3; c++) {
            double a = bigAmatrix.get(r, c)[0];
            double x = bigXmatrix.get(c, 0)[0];
            error -= a * x;
        }
        bigEmatrix.put(r, 0, error);
    }

    // sigma^2 = diagonal_sum(Et * E)
    double sigma = 0;
    for (int r = 0; r < (2 * k); r++) {
        double error = bigEmatrix.get(r, 0)[0];
        sigma += error * error;
    }
    sigma = Math.sqrt(sigma);
    // Log.v(Constants.TAG, "Big E Matrix Result: " + bigEmatrix.dump());

    // =+= Not currently in use; could be deleted.
    // Retrieve error terms and compose an array of error vectors: one for each occupied
    // cell, whose vector points from the tile center to the actual location of the rhombus.
    Point[][] errorVectorArray = new Point[3][3];
    index = 0;
    for (int n = 0; n < 3; n++) {
        for (int m = 0; m < 3; m++) {
            Rhombus rhombus = faceRhombusArray[n][m]; // We expect this array not to have changed from above.
            if (rhombus != null) {
                // Consumes two consecutive rows of E: the x error, then the y error.
                errorVectorArray[n][m] = new Point(bigEmatrix.get(index++, 0)[0],
                        bigEmatrix.get(index++, 0)[0]);
            }
        }
    }

    double x = bigXmatrix.get(0, 0)[0];
    double y = bigXmatrix.get(1, 0)[0];
    double alphaLatice = bigXmatrix.get(2, 0)[0];
    boolean valid = !Double.isNaN(x) && !Double.isNaN(y) && !Double.isNaN(alphaLatice) && !Double.isNaN(sigma);

    Log.i(Constants.TAG, String.format("Rubik Solution: x=%4.0f y=%4.0f alphaLattice=%4.0f sigma=%4.0f flag=%b",
            x, y, alphaLatice, sigma, solveFlag));

    return new LeastMeansSquare(x, y, alphaLatice, errorVectorArray, sigma, valid);
}
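For readers unfamiliar with Core.solve in least-squares mode, here is a minimal self-contained sketch, separate from the Rubik source, that fits a line y = a + b*t to four points using the same pattern; the sample data is made up for illustration.

// Standalone illustration: solve min_X |A*X - Y| for a line fit y = a + b*t.
Mat A = new Mat(4, 2, CvType.CV_64FC1); // coefficient matrix
Mat Y = new Mat(4, 1, CvType.CV_64FC1); // observed values
Mat X = new Mat(2, 1, CvType.CV_64FC1); // unknowns { a, b }
double[][] samples = { { 0, 1.1 }, { 1, 2.9 }, { 2, 5.2 }, { 3, 6.8 } }; // (t, y) pairs
for (int r = 0; r < samples.length; r++) {
    A.put(r, 0, 1.0);           // coefficient of a
    A.put(r, 1, samples[r][0]); // coefficient of b
    Y.put(r, 0, samples[r][1]);
}
boolean ok = Core.solve(A, Y, X, Core.DECOMP_NORMAL); // normal-equations least squares
double a = X.get(0, 0)[0];
double b = X.get(1, 0)[0];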
From source file:org.firstinspires.ftc.rmrobotics.histoy.opmodes.feRMilab.experimental.AutoExperimental.java
private Mat GetCameraImage() {
    Mat mat = null;
    while (mat == null) {
        try {
            // Take the frame at the head of the queue.
            VuforiaLocalizer.CloseableFrame frame = vuforia.getFrameQueue().take();
            Image rgb = null;
            long numImages = frame.getNumImages();
            // Find an image that is in color, not grayscale.
            for (int i = 0; i < numImages; i++) {
                if (frame.getImage(i).getFormat() == PIXEL_FORMAT.RGB888) {
                    rgb = frame.getImage(i);
                    break;
                }
            }
            if (rgb != null && rgb.getPixels() != null) {
                ByteBuffer bb = rgb.getPixels();
                byte[] b = new byte[bb.remaining()];
                bb.get(b);
                mat = new Mat(rgb.getBufferHeight(), rgb.getBufferWidth(), CvType.CV_8UC3);
                mat.put(0, 0, b); // bulk-copy the RGB888 bytes into the Mat
                frame.close();    // note: the frame is only closed when an RGB image was found
                Imgproc.resize(mat, mat, new Size(852, 480));
                Core.flip(mat, mat, 1); // mirror horizontally
            }
        } catch (InterruptedException ex) {
            sleep(10);
        }
    }
    return mat;
}
From source file:org.firstinspires.ftc.teamcode.libraries.VuforiaBallLib.java
protected Mat getFrame() throws InterruptedException {
    VuforiaLocalizer.CloseableFrame frame = ray.take();
    int img = 0;
    for (; img < frame.getNumImages(); img++) {
        // telemetry.addData("Image format " + img, frame.getImage(img).getFormat());
        if (frame.getImage(img).getFormat() == PIXEL_FORMAT.RGB888)
            break;
    }
    if (img == frame.getNumImages())
        throw new IllegalArgumentException("Incorrect format");
    Image mImage = frame.getImage(img);

    // Stick it in a Mat for "display purposes".
    Mat out = new Mat(mImage.getHeight(), mImage.getWidth(), CV_8UC3);
    java.nio.ByteBuffer color = mImage.getPixels();
    byte[] pixels = new byte[color.limit()]; // the original named this "ray", shadowing the frame-queue field above
    color.rewind();
    color.get(pixels);
    out.put(0, 0, pixels); // bulk-copy the RGB888 bytes into the Mat
    frame.close();
    return out;
}
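Vuforia hands back RGB888, while most OpenCV routines assume BGR channel order, so if the Mat is used for more than display a conversion step is usually wanted. A small hedged helper sketch, not part of VuforiaBallLib:

// Assumed follow-up step (not in the original source): swap the R and B
// channels so color-dependent OpenCV calls such as Core.inRange behave as expected.
private static Mat toBgr(Mat rgb) {
    Mat bgr = new Mat();
    Imgproc.cvtColor(rgb, bgr, Imgproc.COLOR_RGB2BGR);
    return bgr;
}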
From source file:org.firstinspires.ftc.teamcode.opmodes.demo.VuforiaNavigationTest1.java
License:Open Source License
@Override
public void loop() {
    VuforiaLocalizer.CloseableFrame frame;
    mVLib.loop(true); // update location info and do debug telemetry
    if (!mVLib.getFrames().isEmpty()) {
        frame = mVLib.getFrames().poll();
        Image img = frame.getImage(0);
        ByteBuffer buf = img.getPixels();
        // The original created an empty Mat and called put(0, 0, buf.array()),
        // which writes nothing (the Mat has no allocated data) and can throw
        // UnsupportedOperationException on a direct buffer. Copy through a
        // byte[] into a properly sized Mat instead (assumes an RGB888 frame).
        byte[] pixels = new byte[buf.remaining()];
        buf.get(pixels);
        Mat m = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
        m.put(0, 0, pixels);
        telemetry.addData("Mat Width:", m.width());
        telemetry.addData("Mat Height:", m.height());
    }
}
From source file:org.firstinspires.ftc.teamcode.opmodes.demo.VumarkHiJackVideo.java
@Override
public void init_loop() {
    RelicRecoveryVuMark vuMark = RelicRecoveryVuMark.from(relicTemplate);
    if (vuMark != RelicRecoveryVuMark.UNKNOWN) {

        /* Found an instance of the template. In the actual game, you will probably
         * loop until this condition occurs, then move on to act accordingly depending
         * on which VuMark was visible. */
        telemetry.addData("VuMark", "%s visible", vuMark);

        /* For fun, we also exhibit the navigational pose. In the Relic Recovery game,
         * it is perhaps unlikely that you will actually need to act on this pose information, but
         * we illustrate it nevertheless, for completeness. */
        // Qualcomm didn't let me see the real Vuforia matrix, so I shimmed it.
        // Here's where it gets good.
        VuforiaDefaultListenerShim writeGoodCode = (VuforiaDefaultListenerShim) relicTemplate.getListener();
        OpenGLMatrix pose = writeGoodCode.getRawPose();
        telemetry.addData("Pose", format(pose));

        /* We further illustrate how to decompose the pose into useful rotational and
         * translational components. */
        if (pose != null) {
            // Alternate projection: get Vuforia's real position matrix.
            Matrix34F goodCodeWritten = writeGoodCode.getRealPose();

            // Reset image points.
            final float[][] vufPoints = new float[point.length][2];

            // Use Vuforia's projectPoint method to project all the box points.
            for (int i = 0; i < point.length; i++) {
                vufPoints[i] = Tool.projectPoint(camCal, goodCodeWritten, point[i]).getData();
                // telemetry.addData("point", "num: %d, x: %f.2, y: %f.2", i, vufPoints[i][0], vufPoints[i][1]);
            }
            // telemetry.addData("Camera Size", "w: %f.2, h: %f.2", camCal.getSize().getData()[0], camCal.getSize().getData()[1]);

            // Get a frame from Vuforia.
            try {
                VuforiaLocalizer.CloseableFrame frame = ray.take();

                int img = 0;
                for (; img < frame.getNumImages(); img++) {
                    // telemetry.addData("Image format " + img, frame.getImage(img).getFormat());
                    if (frame.getImage(img).getFormat() == PIXEL_FORMAT.RGB888)
                        break;
                }
                Image mImage = frame.getImage(img);

                // Stick it in a Mat for "display purposes".
                Mat out = new Mat(mImage.getHeight(), mImage.getWidth(), CV_8UC3);
                java.nio.ByteBuffer color = mImage.getPixels();
                byte[] ray = new byte[color.limit()];
                color.rewind();
                color.get(ray);
                out.put(0, 0, ray); // bulk-copy the RGB888 bytes into the Mat
                frame.close();

                // Convert points to OpenCV form, halving distances because Vuforia does that internally.
                for (int i = 0; i < vufPoints.length; i++)
                    imagePoints[i] = new Point((int) vufPoints[i][0], (int) vufPoints[i][1]);

                Scalar green = new Scalar(0, 255, 0);
                for (int i = 0; i < 2; i++)
                    for (int o = 0; o < 4; o++)
                        Imgproc.line(out, imagePoints[o == 0 ? 3 + i * 4 : i * 4 + o - 1],
                                imagePoints[i * 4 + o], green);

                // Connect the rectangles.
                for (int i = 0; i < 4; i++)
                    Imgproc.line(out, imagePoints[i], imagePoints[i + 4], green);
                for (int i = 8; i < imagePoints.length; i++)
                    Imgproc.drawMarker(out, imagePoints[i], green);

                // Calculate points from the projection.
                // Find the midpoint between the two left points.
                int leftXPoint = (int) ((imagePoints[8].x + imagePoints[9].x) / 2.0);
                int leftYPoint = (int) ((imagePoints[8].y + imagePoints[9].y) / 2.0);
                // Find the y distance between the two.
                leftDist = (int) (Math.abs(imagePoints[8].y - imagePoints[9].y) / 2.0);
                leftBall = new int[] { leftXPoint - (leftDist / 2), leftYPoint - (leftDist / 2) };

                // Find the midpoint between the two right points.
                int rightXPoint = (int) ((imagePoints[10].x + imagePoints[11].x) / 2.0);
                int rightYPoint = (int) ((imagePoints[10].y + imagePoints[11].y) / 2.0);
                // Find the y distance between the two.
                rightDist = (int) (Math.abs(imagePoints[10].y - imagePoints[11].y) / 2.0);
                rightBall = new int[] { rightXPoint - (rightDist / 2), rightYPoint - (rightDist / 2) };

                // Operation: subsquare. Take a square Mat we are 100% sure will have
                // a ball in it, sum it up, and find the average color.
                leftColor = drawSquare(out, leftBall, leftDist);
                rightColor = drawSquare(out, rightBall, rightDist);

                // Flip it for display.
                Core.flip(out, out, -1);

                matQueue.add(out);

                // Display!
                mView.getHandler().post(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            Mat frame = matQueue.take();
                            // Convert to bitmap.
                            Utils.matToBitmap(frame, bm);
                            frame.release();
                            mView.invalidate();
                        } catch (InterruptedException e) {
                            // Ignore.
                        }
                    }
                });
            } catch (Exception e) {
                Log.e("OPENCV", e.getLocalizedMessage());
            }
        }
    } else {
        telemetry.addData("VuMark", "not visible");
    }
}
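The drawSquare helper is not shown in this listing. Given how its return value feeds leftColor and rightColor above, a plausible hedged reconstruction is: outline the sample square on the frame and return the average color inside it. The bounds clamp is an added safety assumption, not something the original is known to do.

// Hypothetical reconstruction of the unseen drawSquare helper: mark the
// sample square and return the per-channel mean color of its contents.
private static Scalar drawSquare(Mat frame, int[] topLeft, int size) {
    int x = Math.max(0, Math.min(topLeft[0], frame.cols() - size)); // clamp to image bounds
    int y = Math.max(0, Math.min(topLeft[1], frame.rows() - size));
    Rect square = new Rect(x, y, size, size);
    Imgproc.rectangle(frame, square.tl(), square.br(), new Scalar(255, 0, 0));
    return Core.mean(frame.submat(square)); // average color inside the square
}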
From source file:org.firstinspires.ftc.teamcode.opmodes.demo.VumarkHiJackVideo.java
private Mat getMatFromImage(Image img) {
    java.nio.ByteBuffer grey = img.getPixels();
    Mat thing = new Mat(img.getHeight(), img.getWidth(), CV_8U);
    byte[] ray = new byte[grey.limit()];
    grey.rewind();
    grey.get(ray);
    thing.put(0, 0, ray); // bulk-copy the grayscale bytes into the Mat

    // Rotate -90:
    // Core.transpose(thing, thing);
    // Core.flip(thing, thing, 0);

    // Expand to a three-channel color space.
    Imgproc.cvtColor(thing, thing, Imgproc.COLOR_GRAY2RGB);
    return thing;
}
From source file:org.firstinspires.ftc.teamcode.opmodes.old2017.OpenCVVuforia.java
License:Open Source License
@Override
public void loop() {
    VuforiaLocalizer.CloseableFrame frame;
    mVLib.loop(true); // update location info and do debug telemetry
    if (!mVLib.getFrames().isEmpty()) {
        frame = mVLib.getFrames().poll();
        Image img = frame.getImage(0);
        ByteBuffer buf = img.getPixels();
        // The original called buf.array(), which throws UnsupportedOperationException
        // on a direct buffer (as its own comment noted), and then put() into an
        // unallocated Mat, which writes nothing. Copy through a byte[] into a
        // properly sized Mat instead (assumes an RGB888 frame).
        byte[] ray = new byte[buf.remaining()];
        buf.get(ray);
        Mat m = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
        m.put(0, 0, ray);
        telemetry.addData("Mat Width:", m.width());
        telemetry.addData("Mat Height:", m.height());
    }
}