List of usage examples for org.opencv.core Mat rows
public int rows()
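rows() returns the number of rows in the matrix, i.e. the image height in pixels; cols() is its counterpart for width. A minimal sketch, assuming the OpenCV native library is on the library path:

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;

public class MatRowsExample {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // A 480x640 single-channel matrix: 480 rows (height) by 640 columns (width).
        Mat mat = new Mat(480, 640, CvType.CV_8UC1);
        System.out.println(mat.rows()); // prints 480
        System.out.println(mat.cols()); // prints 640
    }
}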
From source file:org.openpnp.machine.reference.vision.OpenCvVisionProvider.java
License:Open Source License
private void locateTemplateMatchesDebug(Mat roiImage, Mat templateImage, org.opencv.core.Point matchLoc) {
    if (logger.isDebugEnabled()) {
        try {
            // Draw a green rectangle around the match, sized from the template's cols()/rows().
            Core.rectangle(roiImage, matchLoc,
                    new org.opencv.core.Point(matchLoc.x + templateImage.cols(),
                            matchLoc.y + templateImage.rows()),
                    new Scalar(0, 255, 0));
            BufferedImage debugImage = OpenCvUtils.toBufferedImage(roiImage);
            File file = Configuration.get().createResourceFile(OpenCvVisionProvider.class, "debug_", ".png");
            ImageIO.write(debugImage, "PNG", file);
            logger.debug("Debug image filename {}", file);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
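For context, the matchLoc argument typically comes from template matching; a hedged sketch of how a caller might compute it (variable names assumed):

Mat result = new Mat();
Imgproc.matchTemplate(roiImage, templateImage, result, Imgproc.TM_CCOEFF_NORMED);
// minMaxLoc's maxLoc is the top-left corner of the strongest match.
Core.MinMaxLocResult mmr = Core.minMaxLoc(result);
org.opencv.core.Point matchLoc = mmr.maxLoc;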
From source file:org.openpnp.machine.reference.vision.OpenCvVisionProvider.java
License:Open Source License
static List<Point> matMaxima(Mat mat, double rangeMin, double rangeMax) {
    List<Point> locations = new ArrayList<Point>();
    int rEnd = mat.rows() - 1;
    int cEnd = mat.cols() - 1;
    // Check each row maximum for being a local 2D maximum.
    for (int r = 0; r <= rEnd; r++) {
        MinMaxState state = MinMaxState.BEFORE_INFLECTION;
        double curVal = mat.get(r, 0)[0];
        for (int c = 1; c <= cEnd; c++) {
            double val = mat.get(r, c)[0];
            if (val == curVal) {
                continue;
            }
            else if (curVal < val) {
                if (state == MinMaxState.BEFORE_INFLECTION) {
                    // n/a
                }
                else {
                    state = MinMaxState.BEFORE_INFLECTION;
                }
            }
            else { // curVal > val
                if (state == MinMaxState.BEFORE_INFLECTION) {
                    if (rangeMin <= curVal && curVal <= rangeMax) {
                        // Row maximum; accept only if no neighbor is larger.
                        if (0 < r && (mat.get(r - 1, c - 1)[0] >= curVal || mat.get(r - 1, c)[0] >= curVal)) {
                            // Reject: a neighbor in the row above is at least as large.
                            // - x x
                            // - - -
                            // - - -
                        }
                        else if (r < rEnd
                                && (mat.get(r + 1, c - 1)[0] > curVal || mat.get(r + 1, c)[0] > curVal)) {
                            // Reject: a neighbor in the row below is larger.
                            // - - -
                            // - - -
                            // - x x
                        }
                        else if (1 < c && (0 < r && mat.get(r - 1, c - 2)[0] >= curVal
                                || mat.get(r, c - 2)[0] > curVal
                                || r < rEnd && mat.get(r + 1, c - 2)[0] > curVal)) {
                            // Reject: a neighbor in the column to the left is larger.
                            // x - -
                            // x - -
                            // x - -
                        }
                        else {
                            locations.add(new Point(c - 1, r));
                        }
                    }
                    state = MinMaxState.AFTER_INFLECTION;
                }
                else {
                    // n/a
                }
            }
            curVal = val;
        }
        // Process the end of the row.
        if (state == MinMaxState.BEFORE_INFLECTION) {
            if (rangeMin <= curVal && curVal <= rangeMax) {
                // Row maximum at the last column.
                if (0 < r && (mat.get(r - 1, cEnd - 1)[0] >= curVal || mat.get(r - 1, cEnd)[0] >= curVal)) {
                    // Reject: a neighbor in the row above is at least as large.
                    // - x x
                    // - - -
                    // - - -
                }
                else if (r < rEnd
                        && (mat.get(r + 1, cEnd - 1)[0] > curVal || mat.get(r + 1, cEnd)[0] > curVal)) {
                    // Reject: a neighbor in the row below is larger.
                    // - - -
                    // - - -
                    // - x x
                }
                // Guard on the column index before reading cEnd - 2, mirroring the inner-loop check.
                else if (1 < cEnd && (0 < r && mat.get(r - 1, cEnd - 2)[0] >= curVal
                        || mat.get(r, cEnd - 2)[0] > curVal
                        || r < rEnd && mat.get(r + 1, cEnd - 2)[0] > curVal)) {
                    // Reject: a neighbor in the column to the left is larger.
                    // x - -
                    // x - -
                    // x - -
                }
                else {
                    locations.add(new Point(cEnd, r));
                }
            }
        }
    }
    return locations;
}
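Unlike Core.minMaxLoc, which yields only the single best response, matMaxima collects every local maximum whose value falls in a score range. A hedged usage sketch against a normalized template-match result (the two Mats are assumed to be pre-loaded):

Mat result = new Mat();
Imgproc.matchTemplate(roiImage, templateImage, result, Imgproc.TM_CCOEFF_NORMED);
// Collect every local maximum scoring in [0.8, 1.0] as a candidate match location.
List<Point> candidates = matMaxima(result, 0.8, 1.0);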
From source file:org.openpnp.vision.FluentCv.java
License:Open Source License
/**
 * Draw the infinite line defined by the two points to the extents of the image instead of just
 * between the two points. From:
 * http://stackoverflow.com/questions/13160722/how-to-draw-line-not-line-segment-opencv-2-4-2
 *
 * @param img
 * @param p1
 * @param p2
 * @param color
 */
public static void infiniteLine(Mat img, Point p1, Point p2, Color color) {
    Point p = new Point(), q = new Point();
    // Check if the line is vertical, because vertical lines don't have a slope.
    if (p1.x != p2.x) {
        p.x = 0;
        q.x = img.cols();
        // Slope equation: (y1 - y2) / (x1 - x2)
        float m = (float) ((p1.y - p2.y) / (p1.x - p2.x));
        // Line equation: y = mx + b
        float b = (float) (p1.y - (m * p1.x));
        p.y = m * p.x + b;
        q.y = m * q.x + b;
    }
    else {
        p.x = q.x = p2.x;
        p.y = 0;
        q.y = img.rows();
    }
    Core.line(img, p, q, colorToScalar(color));
}
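A hedged call sketch (colorToScalar is the FluentCv helper referenced above; the canvas and endpoints are illustrative):

Mat canvas = new Mat(480, 640, CvType.CV_8UC3, new Scalar(0, 0, 0));
// The segment from (100, 100) to (200, 150) is extended to both image borders.
FluentCv.infiniteLine(canvas, new Point(100, 100), new Point(200, 150), Color.RED);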
From source file:org.openpnp.vision.FluentCv.java
License:Open Source License
/**
 * From FireSight: https://github.com/firepick1/FireSight/wiki/op-Sharpness
 *
 * @param image
 * @return
 */
public static double calculateSharpnessGRAS(Mat image) {
    long sum = 0; // long, so large images don't overflow the accumulator
    Mat matGray = new Mat();

    if (image.channels() == 1) {
        matGray = image;
    } else {
        Imgproc.cvtColor(image, matGray, Imgproc.COLOR_BGR2GRAY);
    }

    byte[] b1 = new byte[1];
    byte[] b2 = new byte[1];
    for (int r = 0; r < matGray.rows(); r++) {
        for (int c = 0; c < matGray.cols() - 1; c++) {
            matGray.get(r, c, b1);
            matGray.get(r, c + 1, b2);
            // Mask to treat the bytes as unsigned 0-255 pixel values before differencing.
            int df = (b1[0] & 0xff) - (b2[0] & 0xff);
            sum += df * df;
        }
    }

    return ((double) sum / matGray.rows() / (matGray.cols() - 1));
}
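In effect the routine computes the mean squared horizontal gradient of the grayscale image. Written out, with R rows, C columns, and I(r, c) the 8-bit intensity:

\mathrm{GRAS}(I) = \frac{1}{R\,(C-1)} \sum_{r=0}^{R-1} \sum_{c=0}^{C-2} \bigl( I(r,c) - I(r,c+1) \bigr)^2

Sharper images have stronger local gradients, so larger values indicate better focus.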
From source file:org.pattern.utils.MatUtils.java
/**
 * Compares whether two image matrices contain similar data.
 *
 * @param mat1
 * @param mat2
 * @return
 */
public static boolean similar(Mat mat1, Mat mat2) {
    if (mat1.cols() != mat2.cols() || mat1.rows() != mat2.rows()) {
        return false;
    }
    Mat mat = new Mat();
    // CMP_EQ marks every element where the two inputs are equal; the matrices
    // are considered "similar" if at least one element matches.
    Core.compare(mat1, mat2, mat, Core.CMP_EQ);
    return Core.countNonZero(mat) != 0;
}
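Note that countNonZero(mat) != 0 only requires a single matching element. A stricter, element-wise equality test would compare with CMP_NE and require zero differences; a minimal sketch of that variant for single-channel matrices (the name strictEquals is hypothetical):

public static boolean strictEquals(Mat mat1, Mat mat2) {
    if (mat1.cols() != mat2.cols() || mat1.rows() != mat2.rows()) {
        return false;
    }
    Mat diff = new Mat();
    // CMP_NE marks every element where the two inputs differ.
    Core.compare(mat1, mat2, diff, Core.CMP_NE);
    return Core.countNonZero(diff) == 0;
}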
From source file:org.pidome.client.userdetection.faces.FD_Controller.java
/**
 * Perform face detection and show a rectangle around the detected face.
 *
 * @param frame the current frame
 */
private void detectAndDisplay(Mat frame) {
    // init
    MatOfRect faces = new MatOfRect();
    Mat grayFrame = new Mat();
    // convert the frame to gray scale
    Imgproc.cvtColor(frame, grayFrame, Imgproc.COLOR_BGR2GRAY);
    // equalize the frame histogram to improve the result
    Imgproc.equalizeHist(grayFrame, grayFrame);
    // compute the minimum face size (20% of the frame height)
    if (this.absoluteFaceSize == 0) {
        int height = grayFrame.rows();
        if (Math.round(height * 0.2f) > 0) {
            this.absoluteFaceSize = Math.round(height * 0.2f);
        }
    }
    // detect faces
    this.faceCascade.detectMultiScale(grayFrame, faces, 1.1, 2,
            Objdetect.CASCADE_SCALE_IMAGE | Objdetect.CASCADE_DO_ROUGH_SEARCH
                    | Objdetect.CASCADE_FIND_BIGGEST_OBJECT,
            new Size(this.absoluteFaceSize, this.absoluteFaceSize), new Size());
    // each rectangle in faces is a face
    Rect[] facesArray = faces.toArray();
    if (facesArray.length == 1) {
        Point loc = facesArray[0].tl();
        Size size = facesArray[0].size();
        faceRect = new Rectangle();
        synchronized (faceRect) {
            faceRect.setRect(loc.x, loc.y, size.width, size.height);
        }
        Core.rectangle(frame, loc, facesArray[0].br(), new Scalar(0, 255, 0, 255), 2);
    }
}
From source file:org.pidome.client.video.capture.faces.recognition.FaceDetection.java
/**
 * Perform face detection.
 * When drawRectangle is set to true, a green rectangle is drawn on the image around a detected face.
 *
 * @param frame the current frame
 * @return The location and size of the detected face.
 */
public final FaceRect detectFace(Mat frame) {
    // init
    MatOfRect faces = new MatOfRect();
    Mat grayFrame = new Mat();
    // convert the frame to gray scale
    Imgproc.cvtColor(frame, grayFrame, Imgproc.COLOR_BGR2GRAY);
    // equalize the frame histogram to improve the result
    Imgproc.equalizeHist(grayFrame, grayFrame);
    // compute the minimum face size (20% of the frame height)
    if (absoluteFaceSize == 0) {
        int height = grayFrame.rows();
        if (Math.round(height * 0.2f) > 0) {
            absoluteFaceSize = Math.round(height * 0.2f);
        }
    }
    // detect faces
    this.faceCascade.detectMultiScale(grayFrame, faces, 1.1, 2,
            Objdetect.CASCADE_SCALE_IMAGE | Objdetect.CASCADE_DO_ROUGH_SEARCH
                    | Objdetect.CASCADE_FIND_BIGGEST_OBJECT,
            new Size(absoluteFaceSize, absoluteFaceSize), new Size());
    // each rectangle in faces is a face
    Rect[] facesArray = faces.toArray();
    FaceRect faceRect = new FaceRect();
    if (facesArray.length == 1) {
        Point loc = facesArray[0].tl();
        Size size = facesArray[0].size();
        faceRect.setRect(loc.x + rectThickness, loc.y + rectThickness,
                size.width - rectThickness, size.height - rectThickness);
        if (drawRectangle) {
            Core.rectangle(frame, loc, facesArray[0].br(), new Scalar(0, 255, 0, 255), rectThickness);
        }
    }
    return faceRect;
}
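A hedged usage sketch, assuming a FaceDetection instance named detector with its cascade already loaded, and the OpenCV 2.4-era VideoCapture API:

VideoCapture capture = new VideoCapture(0); // default camera
Mat frame = new Mat();
if (capture.read(frame)) {
    // detectFace finds at most one face and optionally draws the rectangle on the frame.
    FaceRect face = detector.detectFace(frame);
}
capture.release();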
From source file:org.sikuli.script.Finder.java
License:MIT License
private static void printMatI(Mat mat) {
    int[] data = new int[mat.channels()];
    for (int r = 0; r < mat.rows(); r++) {
        for (int c = 0; c < mat.cols(); c++) {
            mat.get(r, c, data);
            log(lvl, "(%d, %d) %s", r, c, Arrays.toString(data));
        }
    }
}
From source file:org.sleuthkit.autopsy.coreutils.VideoUtils.java
License:Open Source License
@NbBundle.Messages({
    "# {0} - file name",
    "VideoUtils.genVideoThumb.progress.text=extracting temporary file {0}"})
static BufferedImage generateVideoThumbnail(AbstractFile file, int iconSize) {
    java.io.File tempFile = getTempVideoFile(file);
    if (tempFile.exists() == false || tempFile.length() < file.getSize()) {
        ProgressHandle progress = ProgressHandle
                .createHandle(Bundle.VideoUtils_genVideoThumb_progress_text(file.getName()));
        progress.start(100);
        try {
            Files.createParentDirs(tempFile);
            ContentUtils.writeToFile(file, tempFile, progress, null, true);
        } catch (IOException ex) {
            LOGGER.log(Level.WARNING,
                    "Error extracting temporary file for " + ImageUtils.getContentPathSafe(file), ex); //NON-NLS
        } finally {
            progress.finish();
        }
    }

    VideoCapture videoFile = new VideoCapture(); // will contain the video
    if (!videoFile.open(tempFile.toString())) {
        LOGGER.log(Level.WARNING, "Error opening {0} for preview generation.",
                ImageUtils.getContentPathSafe(file)); //NON-NLS
        return null;
    }
    double fps = videoFile.get(CV_CAP_PROP_FPS); // frames per second
    double totalFrames = videoFile.get(CV_CAP_PROP_FRAME_COUNT); // total frames
    if (fps <= 0 || totalFrames <= 0) {
        LOGGER.log(Level.WARNING, "Error getting fps or total frames for {0}",
                ImageUtils.getContentPathSafe(file)); //NON-NLS
        return null;
    }
    double milliseconds = 1000 * (totalFrames / fps); // total milliseconds
    double timestamp = Math.min(milliseconds, 500); // default time to check is 500ms, unless the video is extremely short
    int frameSkip = Double.valueOf(Math.floor((milliseconds - timestamp) / (THUMB_COLUMNS * THUMB_ROWS)))
            .intValue();

    Mat imageMatrix = new Mat();
    BufferedImage bufferedImage = null;
    for (int x = 0; x < THUMB_COLUMNS; x++) {
        for (int y = 0; y < THUMB_ROWS; y++) {
            if (!videoFile.set(CV_CAP_PROP_POS_MSEC,
                    timestamp + x * frameSkip + y * frameSkip * THUMB_COLUMNS)) {
                LOGGER.log(Level.WARNING, "Error seeking to " + timestamp + "ms in {0}",
                        ImageUtils.getContentPathSafe(file)); //NON-NLS
                break; // if we can't set the time, return black for that frame
            }
            // read the frame into the image/matrix
            if (!videoFile.read(imageMatrix)) {
                LOGGER.log(Level.WARNING, "Error reading frames at " + timestamp + "ms from {0}",
                        ImageUtils.getContentPathSafe(file)); //NON-NLS
                break; // if the image for some reason is bad, return black for that frame
            }
            if (bufferedImage == null) {
                bufferedImage = new BufferedImage(imageMatrix.cols() * THUMB_COLUMNS,
                        imageMatrix.rows() * THUMB_ROWS, BufferedImage.TYPE_3BYTE_BGR);
            }
            byte[] data = new byte[imageMatrix.rows() * imageMatrix.cols() * (int) (imageMatrix.elemSize())];
            imageMatrix.get(0, 0, data); // copy the image to data
            // TODO: this looks like we are swapping the first and third channels,
            // so we could use BufferedImage.TYPE_3BYTE_BGR directly
            if (imageMatrix.channels() == 3) {
                for (int k = 0; k < data.length; k += 3) {
                    byte temp = data[k];
                    data[k] = data[k + 2];
                    data[k + 2] = temp;
                }
            }
            bufferedImage.getRaster().setDataElements(imageMatrix.cols() * x, imageMatrix.rows() * y,
                    imageMatrix.cols(), imageMatrix.rows(), data);
        }
    }
    videoFile.release(); // close the file
    return bufferedImage == null ? null : ScalrWrapper.resizeFast(bufferedImage, iconSize);
}
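As a hedged aside, a single Mat frame can also be converted to a BufferedImage without manual byte swapping by round-tripping through an encoded image (OpenCV 2.4-era Highgui API assumed). It is slower, but it sidesteps the channel-order bookkeeping:

MatOfByte encoded = new MatOfByte();
// Encode the frame as PNG, then let ImageIO decode it into a BufferedImage.
Highgui.imencode(".png", imageMatrix, encoded);
BufferedImage frameImage = ImageIO.read(new ByteArrayInputStream(encoded.toArray()));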
From source file:overwatchteampicker.OverwatchTeamPicker.java
public static ReturnValues findImage(String template, String source, int flag) {
    File lib = null;
    BufferedImage image = null;
    try {
        image = ImageIO.read(new File(source));
    } catch (Exception e) {
        e.printStackTrace();
    }
    String os = System.getProperty("os.name");
    String bitness = System.getProperty("sun.arch.data.model");
    if (os.toUpperCase().contains("WINDOWS")) {
        if (bitness.endsWith("64")) {
            lib = new File("C:\\Users\\POWERUSER\\Downloads\\opencv\\build\\java\\x64\\"
                    + System.mapLibraryName("opencv_java2413"));
        } else {
            lib = new File("libs//x86//" + System.mapLibraryName("opencv_java2413"));
        }
    }
    System.load(lib.getAbsolutePath());

    String tempObject = "images\\hero_templates\\" + template + ".png";
    String source_pic = source;
    Mat objectImage = Highgui.imread(tempObject, Highgui.CV_LOAD_IMAGE_GRAYSCALE);
    Mat sceneImage = Highgui.imread(source_pic, Highgui.CV_LOAD_IMAGE_GRAYSCALE);

    // Detect keypoints and compute SURF descriptors for the template image.
    MatOfKeyPoint objectKeyPoints = new MatOfKeyPoint();
    FeatureDetector featureDetector = FeatureDetector.create(FeatureDetector.SURF);
    featureDetector.detect(objectImage, objectKeyPoints);
    KeyPoint[] keypoints = objectKeyPoints.toArray();

    MatOfKeyPoint objectDescriptors = new MatOfKeyPoint();
    DescriptorExtractor descriptorExtractor = DescriptorExtractor.create(DescriptorExtractor.SURF);
    descriptorExtractor.compute(objectImage, objectKeyPoints, objectDescriptors);

    // Create the matrix for the output image.
    Mat outputImage = new Mat(objectImage.rows(), objectImage.cols(), Highgui.CV_LOAD_IMAGE_COLOR);
    Scalar newKeypointColor = new Scalar(255, 0, 0);
    Features2d.drawKeypoints(objectImage, objectKeyPoints, outputImage, newKeypointColor, 0);

    // Match the object image with the scene image.
    MatOfKeyPoint sceneKeyPoints = new MatOfKeyPoint();
    MatOfKeyPoint sceneDescriptors = new MatOfKeyPoint();
    featureDetector.detect(sceneImage, sceneKeyPoints);
    descriptorExtractor.compute(sceneImage, sceneKeyPoints, sceneDescriptors);

    Mat matchoutput = new Mat(sceneImage.rows() * 2, sceneImage.cols() * 2, Highgui.CV_LOAD_IMAGE_COLOR);
    Scalar matchestColor = new Scalar(0, 255, 25);

    List<MatOfDMatch> matches = new LinkedList<MatOfDMatch>();
    DescriptorMatcher descriptorMatcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
    descriptorMatcher.knnMatch(objectDescriptors, sceneDescriptors, matches, 2);

    // Keep matches that pass the nearest-neighbor distance ratio test.
    LinkedList<DMatch> goodMatchesList = new LinkedList<DMatch>();
    float nndrRatio = .78f;
    for (int i = 0; i < matches.size(); i++) {
        MatOfDMatch matofDMatch = matches.get(i);
        DMatch[] dmatcharray = matofDMatch.toArray();
        DMatch m1 = dmatcharray[0];
        DMatch m2 = dmatcharray[1];
        if (m1.distance <= m2.distance * nndrRatio) {
            goodMatchesList.addLast(m1);
        }
    }

    if (goodMatchesList.size() >= 4) {
        List<KeyPoint> objKeypointlist = objectKeyPoints.toList();
        List<KeyPoint> scnKeypointlist = sceneKeyPoints.toList();
        LinkedList<Point> objectPoints = new LinkedList<>();
        LinkedList<Point> scenePoints = new LinkedList<>();
        for (int i = 0; i < goodMatchesList.size(); i++) {
            objectPoints.addLast(objKeypointlist.get(goodMatchesList.get(i).queryIdx).pt);
            scenePoints.addLast(scnKeypointlist.get(goodMatchesList.get(i).trainIdx).pt);
        }
        MatOfPoint2f objMatOfPoint2f = new MatOfPoint2f();
        objMatOfPoint2f.fromList(objectPoints);
        MatOfPoint2f scnMatOfPoint2f = new MatOfPoint2f();
        scnMatOfPoint2f.fromList(scenePoints);

        // Project the template's corners into the scene with the homography.
        Mat homography = Calib3d.findHomography(objMatOfPoint2f, scnMatOfPoint2f, Calib3d.RANSAC, 3);
        Mat obj_corners = new Mat(4, 1, CvType.CV_32FC2);
        Mat scene_corners = new Mat(4, 1, CvType.CV_32FC2);
        obj_corners.put(0, 0, new double[] { 0, 0 });
        obj_corners.put(1, 0, new double[] { objectImage.cols(), 0 });
        obj_corners.put(2, 0, new double[] { objectImage.cols(), objectImage.rows() });
        obj_corners.put(3, 0, new double[] { 0, objectImage.rows() });
        Core.perspectiveTransform(obj_corners, scene_corners, homography);

        Mat img = Highgui.imread(source_pic, Highgui.CV_LOAD_IMAGE_COLOR);
        Core.line(img, new Point(scene_corners.get(0, 0)), new Point(scene_corners.get(1, 0)),
                new Scalar(0, 255, 255), 4);
        Core.line(img, new Point(scene_corners.get(1, 0)), new Point(scene_corners.get(2, 0)),
                new Scalar(255, 255, 0), 4);
        Core.line(img, new Point(scene_corners.get(2, 0)), new Point(scene_corners.get(3, 0)),
                new Scalar(0, 255, 0), 4);
        Core.line(img, new Point(scene_corners.get(3, 0)), new Point(scene_corners.get(0, 0)),
                new Scalar(0, 255, 0), 4);

        MatOfDMatch goodMatches = new MatOfDMatch();
        goodMatches.fromList(goodMatchesList);
        Features2d.drawMatches(objectImage, objectKeyPoints, sceneImage, sceneKeyPoints, goodMatches,
                matchoutput, matchestColor, newKeypointColor, new MatOfByte(), 2);

        if (new Point(scene_corners.get(0, 0)).x < new Point(scene_corners.get(1, 0)).x
                && new Point(scene_corners.get(0, 0)).y < new Point(scene_corners.get(2, 0)).y) {
            System.out.println("found " + template);
            Highgui.imwrite("points.jpg", outputImage);
            Highgui.imwrite("matches.jpg", matchoutput);
            Highgui.imwrite("final.jpg", img);
            if (flag == 0) {
                ReturnValues retVal = null;
                int y = (int) new Point(scene_corners.get(3, 0)).y;
                int yHeight = (int) new Point(scene_corners.get(3, 0)).y
                        - (int) new Point(scene_corners.get(2, 0)).y;
                if (y < image.getHeight() * .6) {
                    // if the found hero is in the upper half of the image, return point 3,0
                    retVal = new ReturnValues(y + (int) (image.getHeight() * .01), yHeight);
                } else {
                    // if the found hero is in the lower half of the image, return point 2,0
                    y = (int) new Point(scene_corners.get(2, 0)).y;
                    retVal = new ReturnValues(y + (int) (image.getHeight() * .3), yHeight);
                }
                return retVal;
            } else if (flag == 1) {
                int[] xPoints = new int[4];
                int[] yPoints = new int[4];
                xPoints[0] = (int) (new Point(scene_corners.get(0, 0)).x);
                xPoints[1] = (int) (new Point(scene_corners.get(1, 0)).x);
                xPoints[2] = (int) (new Point(scene_corners.get(2, 0)).x);
                xPoints[3] = (int) (new Point(scene_corners.get(3, 0)).x);
                yPoints[0] = (int) (new Point(scene_corners.get(0, 0)).y);
                yPoints[1] = (int) (new Point(scene_corners.get(1, 0)).y);
                yPoints[2] = (int) (new Point(scene_corners.get(2, 0)).y);
                yPoints[3] = (int) (new Point(scene_corners.get(3, 0)).y);
                ReturnValues retVal = new ReturnValues(xPoints, yPoints);
                return retVal;
            }
        }
    }
    return null;
}