List of usage examples for the method org.opencv.core.Mat.zeros
public static Mat zeros(int rows, int cols, int type)
From source file:org.usfirst.frc.team2084.CMonster2016.vision.CameraCalibration.java
License:Open Source License
/** * Calibrate the camera. This goes through all the corners in the list and * calibrates based off them./*from w w w .ja v a 2s. com*/ * * @return the reprojection error */ public double calibrate() { cameraMatrix = Mat.eye(3, 3, CvType.CV_64F); distCoeffs = new MatOfDouble(Mat.zeros(8, 1, CvType.CV_64F)); List<Mat> rvecs = new LinkedList<>(); List<Mat> tvecs = new LinkedList<>(); // Set the fixed aspect ratio cameraMatrix.put(0, 0, aspectRatio); List<Mat> objectPoints = Collections.nCopies(calibrationCorners.size(), calcBoardCornerPositions()); System.out.println(cameraMatrix); return error = Calib3d.calibrateCamera(objectPoints, calibrationCorners, HighGoalProcessor.IMAGE_SIZE, cameraMatrix, distCoeffs, rvecs, tvecs, Calib3d.CALIB_FIX_PRINCIPAL_POINT); }
From source file:samples.LWF.java
private static void affine(Mat mat, double[][] from, double[][] to, double[][] coeficients, Mat lienzo, double escala, double gap) { // throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates. // http://stackoverflow.com/questions/10100715/opencv-warping-from-one-triangle-to-another // https://www.learnopencv.com/warp-one-triangle-to-another-using-opencv-c-python/ // http://docs.opencv.org/2.4/doc/tutorials/imgproc/imgtrans/warp_affine/warp_affine.html MatOfPoint2f src_pf = new MatOfPoint2f(new Point(from[0][0], from[0][1]), new Point(from[1][0], from[1][1]), new Point(from[2][0], from[2][1])); MatOfPoint2f dst_pf = new MatOfPoint2f(new Point(to[0][0], to[0][1]), new Point(to[1][0], to[1][1]), new Point(to[2][0], to[2][1])); // https://www.learnopencv.com/warp-one-triangle-to-another-using-opencv-c-python/#download //how do I set up the position numbers in MatOfPoint2f here? // Mat perspective_matrix = Imgproc.getAffineTransform(src_pf, dst_pf); Rect r1 = Imgproc.boundingRect(new MatOfPoint(new Point(from[0][0], from[0][1]), new Point(from[1][0], from[1][1]), new Point(from[2][0], from[2][1]))); Rect r2 = Imgproc.boundingRect(new MatOfPoint(new Point(to[0][0], to[0][1]), new Point(to[1][0], to[1][1]), new Point(to[2][0], to[2][1]))); MatOfPoint2f tri1Cropped = new MatOfPoint2f(new Point(from[0][0] - r1.x, from[0][1] - r1.y), new Point(from[1][0] - r1.x, from[1][1] - r1.y), new Point(from[2][0] - r1.x, from[2][1] - r1.y)); MatOfPoint tri2CroppedInt = new MatOfPoint(new Point(to[0][0] - r2.x, to[0][1] - r2.y), new Point(to[1][0] - r2.x, to[1][1] - r2.y), new Point(to[2][0] - r2.x, to[2][1] - r2.y)); MatOfPoint2f tri2Cropped = new MatOfPoint2f(new Point((to[0][0] - r2.x), (to[0][1] - r2.y)), new Point((to[1][0] - r2.x), (to[1][1] - r2.y)), new Point((to[2][0] - r2.x), (to[2][1] - r2.y))); // for (int i = 0; i < 3; i++) { // // tri1Cropped.push_back(new MatOfPoint(new Point(from[i][0] - r1.x, 
from[i][1] - r1.y))); // new Point( from[i][0] - r1.x, from[i][1]- r1.y) ); // //tri2Cropped.push_back(new MatOfPoint(new Point(to[i][0] - r2.x, to[i][1] - r2.y))); ////from w w w . ja va 2 s . co m // // fillConvexPoly needs a vector of Point and not Point2f // // tri2CroppedInt.push_back(new MatOfPoint2f(new Point((int) (to[i][0] - r2.x), (int) (to[i][1] - r2.y)))); // // } // Apply warpImage to small rectangular patches Mat img1Cropped = mat.submat(r1); //img1(r1).copyTo(img1Cropped); // Given a pair of triangles, find the affine transform. Mat warpMat = Imgproc.getAffineTransform(tri1Cropped, tri2Cropped); // Mat bbb = warpMat.mul(tri1Cropped); // // System.out.println( warpMat.dump() ); // System.out.println( tri2Cropped.dump() ); // System.out.println( bbb.dump() ); // Apply the Affine Transform just found to the src image Mat img2Cropped = Mat.zeros(r2.height, r2.width, img1Cropped.type()); Imgproc.warpAffine(img1Cropped, img2Cropped, warpMat, img2Cropped.size(), 0, Imgproc.INTER_LINEAR, new Scalar(Core.BORDER_TRANSPARENT)); //, 0, Imgproc.INTER_LINEAR, new Scalar(Core.BORDER_REFLECT_101)); // Get mask by filling triangle Mat mask = Mat.zeros(r2.height, r2.width, CvType.CV_8UC3); ///CV_8U CV_32FC3 Imgproc.fillConvexPoly(mask, tri2CroppedInt, new Scalar(1.0, 1.0, 1.0), 16, 0); // Copy triangular region of the rectangular patch to the output image // Core.multiply(img2Cropped,mask, img2Cropped); // // Core.multiply(mask, new Scalar(-1), mask); // Core.(mask,new Scalar(gap), mask); //Core.multiply(lienzo.submat(r2), (new Scalar(1.0,1.0,1.0)). 
- Core.multiply(mask,), lienzo.submat(r2)); // img2(r2) = img2(r2) + img2Cropped; // Core.subtract(Mat.ones(mask.height(), mask.width(), CvType.CV_8UC3), mask, mask); // Mat ff = ; // este Core.multiply(img2Cropped, mask, img2Cropped); //Core.multiply(lienzo.submat(r2), mask , lienzo.submat(r2)); Core.add(lienzo.submat(r2), img2Cropped, lienzo.submat(r2)); /* Mat bb = new Mat(mat, r2); bb.setTo(new Scalar(rnd.nextInt(),rnd.nextInt(),rnd.nextInt())); Core.multiply(bb,mask, bb); Core.multiply(lienzo.submat(r2), mask , lienzo.submat(r2)); Core.add(lienzo.submat(r2), bb, lienzo.submat(r2)); */ // lienzo.submat(r2).setTo(new Scalar(rnd.nextInt(),rnd.nextInt(),rnd.nextInt())); // // Imgproc.fillConvexPoly(lienzo, new MatOfPoint( // new Point(to[0][0] , to[0][1]), // new Point(to[1][0] , to[1][1]), // new Point(to[2][0] , to[2][1] )), new Scalar(1,1,1)); // img2Cropped.copyTo(lienzo); // return; // http://stackoverflow.com/questions/14111716/how-to-set-a-mask-image-for-grabcut-in-opencv // Imgproc.warpAffine(mat, lienzo, perspective_matrix, lienzo.size()); // Imgproc.getAffineTransform(null, null); /* // Find bounding rectangle for each triangle Rect r1 = boundingRect(tri1); Rect r2 = boundingRect(tri2); // Offset points by left top corner of the respective rectangles vector<Point2f> tri1Cropped, tri2Cropped; vector<Point> tri2CroppedInt; for(int i = 0; i < 3; i++) { tri1Cropped.push_back( Point2f( tri1[i].x - r1.x, tri1[i].y - r1.y) ); tri2Cropped.push_back( Point2f( tri2[i].x - r2.x, tri2[i].y - r2.y) ); // fillConvexPoly needs a vector of Point and not Point2f tri2CroppedInt.push_back( Point((int)(tri2[i].x - r2.x), (int)(tri2[i].y - r2.y)) ); } // Apply warpImage to small rectangular patches Mat img1Cropped; img1(r1).copyTo(img1Cropped); // Given a pair of triangles, find the affine transform. 
Mat warpMat = getAffineTransform( tri1Cropped, tri2Cropped ); // Apply the Affine Transform just found to the src image Mat img2Cropped = Mat::zeros(r2.height, r2.width, img1Cropped.type()); warpAffine( img1Cropped, img2Cropped, warpMat, img2Cropped.size(), INTER_LINEAR, BORDER_REFLECT_101); // Get mask by filling triangle Mat mask = Mat::zeros(r2.height, r2.width, CV_32FC3); fillConvexPoly(mask, tri2CroppedInt, Scalar(1.0, 1.0, 1.0), 16, 0); // Copy triangular region of the rectangular patch to the output image multiply(img2Cropped,mask, img2Cropped); multiply(img2(r2), Scalar(1.0,1.0,1.0) - mask, img2(r2)); img2(r2) = img2(r2) + img2Cropped;*/ }
From source file:se.hb.jcp.bindings.opencv.DenseDoubleMatrix1D.java
License:Open Source License
/**
 * Constructs a matrix with a given number of columns.
 * All entries are initially <tt>0</tt>.
 *
 * @param columns the number of columns the matrix shall have.
 * @throws IllegalArgumentException if
 *         <tt>columns &lt; 0 || columns &gt; Integer.MAX_VALUE</tt>.
 */
public DenseDoubleMatrix1D(int columns) {
    // Back the matrix with a 1 x columns zero-filled single-precision OpenCV Mat.
    MatOfFloat storage = new MatOfFloat();
    Mat.zeros(1, columns, CvType.CV_32F).assignTo(storage);
    _mat = storage;
    setUp(columns);
}
From source file:se.hb.jcp.bindings.opencv.DenseDoubleMatrix2D.java
License:Open Source License
/**
 * Constructs a matrix with a given number of rows and columns.
 * All entries are initially <tt>0</tt>.
 *
 * @param rows the number of rows the matrix shall have.
 * @param columns the number of columns the matrix shall have.
 * @throws IllegalArgumentException if
 *         <tt>rows &lt; 0 || columns &lt; 0 || (double)columns*rows &gt; Integer.MAX_VALUE</tt>.
 */
public DenseDoubleMatrix2D(int rows, int columns) {
    // Back the matrix with a rows x columns zero-filled single-precision OpenCV Mat.
    MatOfFloat storage = new MatOfFloat();
    Mat.zeros(rows, columns, CvType.CV_32F).assignTo(storage);
    _mat = storage;
    setUp(rows, columns);
}
From source file:servlets.FillAreaByScribble.java
/**
 * Processes requests for both HTTP <code>GET</code> and <code>POST</code>
 * methods.
 *
 * Flood-fills the image region(s) around user-supplied scribble points,
 * extracts the contour of each filled region, and writes the result back to
 * the client as a JSON array of {@code FindingResponse} objects (SVG-style
 * path, mean color, top-left "mass center", and contour area).
 * When {@code isSingleRegion} is true all points contribute to one shared
 * mask; otherwise each fill gets its own mask and points swallowed by an
 * already-filled (dilated) region are discarded.
 *
 * @param request servlet request
 * @param response servlet response
 * @throws ServletException if a servlet-specific error occurs
 * @throws IOException if an I/O error occurs
 */
protected void processRequest(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    response.setContentType("text/html;charset=UTF-8");
    try (PrintWriter out = response.getWriter()) {
        String imageForTextRecognition = request.getParameter("imageForTextRecognition") + ".png";
        String isSingleRegion = request.getParameter("isSingleRegion");
        boolean makeSingleRegion = isSingleRegion.toLowerCase().equals("true");

        Mat original = ImageUtils.loadImage(imageForTextRecognition, request);
        Mat image = original.clone();
        // floodFill masks must be 2 pixels larger than the image in each dimension.
        Mat mask = Mat.zeros(image.rows() + 2, image.cols() + 2, CvType.CV_8UC1);

        // The scribble points arrive as a JSON-encoded array of OpenCV Points.
        String samplingPoints = request.getParameter("samplingPoints");
        Gson gson = new Gson();
        Point[] tmpPoints = gson.fromJson(samplingPoints, Point[].class);
        ArrayList<Point> userPoints = new ArrayList<Point>(Arrays.asList(tmpPoints));

        Mat userPointsImage = image.clone();
        ArrayList<Mat> maskRegions = new ArrayList<>();

        // Random fill color, reused for every fill in this request.
        Random random = new Random();
        int b = random.nextInt(256);
        int g = random.nextInt(256);
        int r = random.nextInt(256);
        Scalar newVal = new Scalar(b, g, r);

        FloodFillFacade floodFillFacade = new FloodFillFacade();
        int k = 0; // running label for debug annotations
        for (int i = 0; i < userPoints.size(); i++) {
            Point point = userPoints.get(i);
            image = floodFillFacade.fill(image, mask, (int) point.x, (int) point.y, newVal);

            // Debug visualizations (seedImage is only used by the commented-out save).
            Mat seedImage = original.clone();
            Core.circle(seedImage, point, 9, new Scalar(0, 0, 255), -1);
            Core.putText(userPointsImage, "" + k, new Point(point.x + 5, point.y + 5), 3, 0.5,
                    new Scalar(0, 0, 0));
            // ImageUtils.saveImage(seedImage, "mask_" + k + "_seed" + imageForTextRecognition + ".png", request);

            if (!makeSingleRegion) {
                // Close small holes in this region's mask and bring it back to image size.
                Mat element = new Mat(3, 3, CvType.CV_8U, new Scalar(1));
                Imgproc.morphologyEx(mask, mask, Imgproc.MORPH_CLOSE, element, new Point(-1, -1), 3);
                Imgproc.resize(mask, mask, original.size());
            }
            // ImageUtils.saveImage(mask, "mask_" + k + "" + imageForTextRecognition + ".png", request);

            // Dilated copy of the mask, used to decide which remaining user
            // points are already covered by this fill.
            Mat dilatedMask = new Mat();
            int elementSide = 21;
            Mat element = new Mat(elementSide, elementSide, CvType.CV_8U, new Scalar(1));
            Imgproc.morphologyEx(mask, dilatedMask, Imgproc.MORPH_DILATE, element, new Point(-1, -1), 1);
            Imgproc.resize(dilatedMask, dilatedMask, original.size());
            // ImageUtils.saveImage(dilatedMask, "mask_" + k + "_dilated" + imageForTextRecognition + ".png", request);

            maskRegions.add(mask);
            if (!makeSingleRegion) {
                int totalRemovedPoints = filterPoints(userPoints, dilatedMask);
                if (totalRemovedPoints > 0) {
                    i = -1; // so that the algorithm starts again at the first element of the userPoints array
                }
            } else {
                filterPoints(userPoints, mask);
            }
            // System.out.println("Total points after filtering:");
            // System.out.println(userPoints.size());

            if (!makeSingleRegion) {
                // Fresh mask for the next region (single-region mode keeps accumulating).
                mask = Mat.zeros(original.rows() + 2, original.cols() + 2, CvType.CV_8UC1);
            }
            k++;
        }

        ArrayList<FindingResponse> findingResponses = new ArrayList<>();
        if (makeSingleRegion) {
            // One combined mask: close holes, resize, and trace its biggest contour.
            Mat element = new Mat(3, 3, CvType.CV_8U, new Scalar(1));
            Imgproc.morphologyEx(mask, mask, Imgproc.MORPH_CLOSE, element, new Point(-1, -1), 3);
            Imgproc.resize(mask, mask, image.size());

            List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
            Imgproc.findContours(mask.clone(), contours, new Mat(), Imgproc.RETR_EXTERNAL,
                    Imgproc.CHAIN_APPROX_NONE);
            MatOfPoint biggestContour = contours.get(0); // getting the biggest contour
            double contourArea = Imgproc.contourArea(biggestContour);
            if (contours.size() > 1) {
                biggestContour = Collections.max(contours, new ContourComparator()); // getting the biggest contour in case there are more than one
            }

            // Build an SVG-like path string ("M x y L x y ... Z") from the contour.
            Point[] biggestContourPoints = biggestContour.toArray();
            String path = "M " + (int) biggestContourPoints[0].x + " " + (int) biggestContourPoints[0].y + " ";
            for (int i = 1; i < biggestContourPoints.length; ++i) {
                Point v = biggestContourPoints[i];
                path += "L " + (int) v.x + " " + (int) v.y + " ";
            }
            path += "Z";
            // System.out.println("path:");
            // System.out.println(path);

            Rect computedSearchWindow = Imgproc.boundingRect(biggestContour);
            // "Mass center" here is actually the bounding box's top-left corner.
            Point massCenter = computedSearchWindow.tl();
            Scalar meanColor = Core.mean(original, mask);
            // ImageUtils.saveImage(mask, "single_mask_" + imageForTextRecognition + ".png", request);

            FindingResponse findingResponse = new FindingResponse(path, meanColor, massCenter, -1, contourArea);
            findingResponses.add(findingResponse);
        } else {
            // One response per region mask, skipping regions that cover >= 80% of the image.
            float imageArea = image.cols() * image.rows();
            for (int j = 0; j < maskRegions.size(); j++) {
                Mat region = maskRegions.get(j);
                List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
                Imgproc.findContours(region.clone(), contours, new Mat(), Imgproc.RETR_EXTERNAL,
                        Imgproc.CHAIN_APPROX_NONE);
                MatOfPoint biggestContour = contours.get(0); // getting the biggest contour
                if (contours.size() > 1) {
                    biggestContour = Collections.max(contours, new ContourComparator()); // getting the biggest contour in case there are more than one
                }
                double contourArea = Imgproc.contourArea(biggestContour);
                if (contourArea / imageArea < 0.8) { // only areas less than 80% of that of the image are accepted
                    Point[] biggestContourPoints = biggestContour.toArray();
                    String path = "M " + (int) biggestContourPoints[0].x + " "
                            + (int) biggestContourPoints[0].y + " ";
                    for (int i = 1; i < biggestContourPoints.length; ++i) {
                        Point v = biggestContourPoints[i];
                        path += "L " + (int) v.x + " " + (int) v.y + " ";
                    }
                    path += "Z";

                    Rect computedSearchWindow = Imgproc.boundingRect(biggestContour);
                    Point massCenter = computedSearchWindow.tl();
                    // System.out.println("Contour area: " + contourArea);

                    Mat contoursImage = userPointsImage.clone();
                    Imgproc.drawContours(contoursImage, contours, 0, newVal, 1);
                    Scalar meanColor = Core.mean(original, region);
                    FindingResponse findingResponse = new FindingResponse(path, meanColor, massCenter, -1,
                            contourArea);
                    findingResponses.add(findingResponse);
                    // ImageUtils.saveImage(contoursImage, "mask_" + j + "_contourned" + imageForTextRecognition + ".png", request);
                }
            }
        }

        String jsonResponse = gson.toJson(findingResponses, ArrayList.class);
        out.println(jsonResponse);
    }
}
From source file:servlets.processScribble.java
/**
 * Processes requests for both HTTP <code>GET</code> and <code>POST</code>
 * methods.
 *
 * Builds the convex hull of user-supplied scribble points, flood-fills the
 * image from each hull vertex, traces the biggest contour of the resulting
 * mask, and responds with a single JSON {@code FindingResponse} (SVG-style
 * path, mean color, top-left "mass center", and contour area). Intermediate
 * images are saved for debugging.
 *
 * @param request servlet request
 * @param response servlet response
 * @throws ServletException if a servlet-specific error occurs
 * @throws IOException if an I/O error occurs
 */
protected void processRequest(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    response.setContentType("text/html;charset=UTF-8");
    try (PrintWriter out = response.getWriter()) {
        String imageForTextRecognition = request.getParameter("imageForTextRecognition") + ".png";
        Mat original = ImageUtils.loadImage(imageForTextRecognition, request);
        Mat image = original.clone();
        // floodFill masks must be 2 pixels larger than the image in each dimension.
        Mat mask = Mat.zeros(image.rows() + 2, image.cols() + 2, CvType.CV_8UC1);

        // The scribble points arrive as a JSON-encoded array of OpenCV Points.
        String samplingPoints = request.getParameter("samplingPoints");
        Gson gson = new Gson();
        Point[] userPoints = gson.fromJson(samplingPoints, Point[].class);

        // Copy the points into a 1-column CV_32SC2 matrix for convexHull.
        MatOfPoint points = new MatOfPoint(new Mat(userPoints.length, 1, CvType.CV_32SC2));
        int cont = 0;
        for (Point point : userPoints) {
            int y = (int) point.y;
            int x = (int) point.x;
            int[] data = { x, y };
            points.put(cont++, 0, data);
        }

        // convexHull returns indices into `points`.
        MatOfInt hull = new MatOfInt();
        Imgproc.convexHull(points, hull);

        // mopOut is populated below but never read afterwards.
        MatOfPoint mopOut = new MatOfPoint();
        mopOut.create((int) hull.size().height, 1, CvType.CV_32SC2);

        // Resolve hull indices back to coordinates; each hull vertex doubles
        // as a flood-fill seed.
        int totalPoints = (int) hull.size().height;
        Point[] convexHullPoints = new Point[totalPoints];
        ArrayList<Point> seeds = new ArrayList<>();
        for (int i = 0; i < totalPoints; i++) {
            int index = (int) hull.get(i, 0)[0];
            double[] point = new double[] { points.get(index, 0)[0], points.get(index, 0)[1] };
            mopOut.put(i, 0, point);
            convexHullPoints[i] = new Point(point[0], point[1]);
            seeds.add(new Point(point[0], point[1]));
        }
        MatOfPoint mop = new MatOfPoint();
        mop.fromArray(convexHullPoints);
        ArrayList<MatOfPoint> arrayList = new ArrayList<MatOfPoint>();
        arrayList.add(mop);

        // Random fill color, reused for every hull-vertex fill.
        Random random = new Random();
        int b = random.nextInt(256);
        int g = random.nextInt(256);
        int r = random.nextInt(256);
        Scalar newVal = new Scalar(b, g, r);
        FloodFillFacade floodFillFacade = new FloodFillFacade();
        for (int i = 0; i < seeds.size(); i++) {
            Point seed = seeds.get(i);
            image = floodFillFacade.fill(image, mask, (int) seed.x, (int) seed.y, newVal);
        }

        // Paint the filled convex hull and bring the mask back to image size.
        Imgproc.drawContours(image, arrayList, 0, newVal, -1);
        Imgproc.resize(mask, mask, image.size());
        Scalar meanColor = Core.mean(original, mask);
        // Highgui.imwrite("C:\\Users\\Gonzalo\\Documents\\NetBeansProjects\\iVoLVER\\uploads\\the_convexHull.png", image);
        ImageUtils.saveImage(image, imageForTextRecognition + "_the_convexHull.png", request);

        // Extra unmasked fill from a hard-coded seed.
        // NOTE(review): (211, 194) looks like a leftover from manual testing —
        // confirm it is intentional for production input.
        newVal = new Scalar(255, 255, 0);
        floodFillFacade.setMasked(false);
        System.out.println("Last one:");
        floodFillFacade.fill(image, mask, 211, 194, newVal);
        Core.circle(image, new Point(211, 194), 5, new Scalar(0, 0, 0), -1);
        ImageUtils.saveImage(image, imageForTextRecognition + "_final.png", request);
        // Highgui.imwrite("C:\\Users\\Gonzalo\\Documents\\NetBeansProjects\\iVoLVER\\uploads\\final.png", image);

        // Close small holes in the mask before contour extraction.
        Mat element = new Mat(3, 3, CvType.CV_8U, new Scalar(1));
        Imgproc.morphologyEx(mask, mask, Imgproc.MORPH_CLOSE, element, new Point(-1, -1), 3);
        Imgproc.resize(mask, mask, image.size());
        // ImageUtils.saveImage(mask, "final_mask_dilated.png", request);
        // Highgui.imwrite("C:\\Users\\Gonzalo\\Documents\\NetBeansProjects\\iVoLVER\\uploads\\final_mask_dilated.png", mask);

        List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
        Imgproc.findContours(mask.clone(), contours, new Mat(), Imgproc.RETR_EXTERNAL,
                Imgproc.CHAIN_APPROX_NONE);
        double contourArea = 0;
        String path = "";
        MatOfPoint biggestContour = contours.get(0); // getting the biggest contour
        contourArea = Imgproc.contourArea(biggestContour);
        if (contours.size() > 1) {
            biggestContour = Collections.max(contours, new ContourComparator()); // getting the biggest contour in case there are more than one
        }

        // Build an SVG-like path string ("M x y L x y ... Z") from the contour.
        Point[] biggestContourPoints = biggestContour.toArray();
        path = "M " + (int) biggestContourPoints[0].x + " " + (int) biggestContourPoints[0].y + " ";
        for (int i = 1; i < biggestContourPoints.length; ++i) {
            Point v = biggestContourPoints[i];
            path += "L " + (int) v.x + " " + (int) v.y + " ";
        }
        path += "Z";
        System.out.println("path:");
        System.out.println(path);

        Rect computedSearchWindow = Imgproc.boundingRect(biggestContour);
        // "Mass center" here is actually the bounding box's top-left corner.
        Point massCenter = computedSearchWindow.tl();

        FindingResponse findingResponse = new FindingResponse(path, meanColor, massCenter, -1, contourArea);
        String jsonResponse = gson.toJson(findingResponse, FindingResponse.class);
        out.println(jsonResponse);
        // String jsonResponse = gson.toJson(path);
        // out.println(jsonResponse);
    }
}