List of usage examples for org.opencv.core Mat clone
public Mat clone()
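Mat.clone() makes a full deep copy: the returned Mat gets its own pixel buffer, whereas plain assignment copies only the header and shares data with the source. Every example below relies on this to protect an image from later in-place writes. A minimal sketch of the difference (CloneDemo is a made-up class name; it assumes the OpenCV native library is installed):

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Scalar;

public class CloneDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        Mat a = new Mat(2, 2, CvType.CV_8UC1, new Scalar(0));
        Mat shared = a;        // header copy: shares the pixel buffer
        Mat copy = a.clone();  // deep copy: independent pixel buffer

        a.setTo(new Scalar(255));
        System.out.println(shared.get(0, 0)[0]); // 255.0 -- sees the write
        System.out.println(copy.get(0, 0)[0]);   // 0.0   -- unaffected
    }
}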
From source file:org.openpnp.vision.FluentCv.java
License:Open Source License
private Mat get(String tag) {
    Mat mat = stored.get(tag);
    if (mat == null) {
        return null;
    }
    // Clone so that future writes to the pipeline Mat
    // don't overwrite our stored one.
    return mat.clone();
}
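The pattern above is a defensive copy: the cached Mat is never handed out directly, so in-place operations on the returned image cannot corrupt the version held in the stored map.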
From source file:org.surmon.pattern.editor2d.components.Mapping.java
public static List<MatOfPoint> process(Mat source, List<Particle> particles) {
    Mat partImage = new Mat(source.size(), CvType.CV_8UC1);
    // Draw particles as points
    Point p;
    for (Particle part : particles) {
        p = new Point(part.getPosition().toArray());
        Core.circle(partImage, p, 1, new Scalar(255));
    }
    // Blur with Gaussian kernel
    Mat blured = new Mat();
    Imgproc.GaussianBlur(partImage, blured, new Size(101, 101), -1, -1);
    // Equalize histogram
    List<Mat> eqChannels = new ArrayList<>();
    List<Mat> channels = new ArrayList<>();
    Core.split(blured, channels);
    for (Mat channel : channels) {
        Mat eqImage = new Mat();
        Imgproc.equalizeHist(channel, eqImage);
        eqChannels.add(eqImage);
    }
    Mat eqResult = new Mat();
    Core.merge(eqChannels, eqResult);
    // Binary threshold
    Mat bin = new Mat();
    Imgproc.threshold(eqResult, bin, 0, 255, Imgproc.THRESH_OTSU);
    // Imgproc.threshold(eqResult, bin, 10, 255, Imgproc.THRESH_BINARY);
    // Find contours
    Mat imMat = bin.clone();
    Mat canny_output = new Mat();
    Mat hierarchy = new Mat();
    int thresh = 100;
    List<MatOfPoint> borders = new ArrayList<>();
    Imgproc.Canny(imMat, canny_output, thresh, thresh * 2);
    Imgproc.findContours(canny_output, borders, hierarchy, Imgproc.RETR_EXTERNAL,
            Imgproc.CHAIN_APPROX_SIMPLE);
    return borders;

    // Mat result = source.clone();
    // Imgproc.drawContours(result, borders, -1, new Scalar(255, 0, 255));
    //
    // return result;
}
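A recurring reason for clone() in this and several later examples: in OpenCV releases before 3.2, Imgproc.findContours modifies the image it is given. Passing a clone keeps the caller's binary image intact. The idiom in isolation, as a hedged sketch (ContourSketch is a hypothetical wrapper):

import java.util.ArrayList;
import java.util.List;

import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.imgproc.Imgproc;

public class ContourSketch {
    // findContours writes into its source image in pre-3.2 OpenCV,
    // so hand it a clone when the binary image is still needed afterwards.
    public static List<MatOfPoint> contoursOf(Mat binary) {
        List<MatOfPoint> contours = new ArrayList<>();
        Imgproc.findContours(binary.clone(), contours, new Mat(),
                Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
        return contours;
    }
}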
From source file:processdata.ExperimentalDataProcessingUI.java
private void jButtonProcessImageActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButtonProcessImageActionPerformed
    try {
        // load the OpenCV native library
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        folderName = textField2.getText();
        int currentFrameIndex = Integer.parseInt(initialFrameIndexBox.getText()) - 1;
        datasetIndex = Integer.parseInt(textField1.getText());

        String videoImageFileName = "./videoFrames//" + folderName + "//" + "frame_outVideo_"
                + currentFrameIndex + ".jpg";
        String depthFrameFileName = initialImagePath + datasetIndex + "//" + folderName + "//"
                + "depthData//" + "outDepthByte_" + currentFrameIndex;

        rgbFrame = Highgui.imread(videoImageFileName, Highgui.CV_LOAD_IMAGE_GRAYSCALE);
        depthFrame = depthDataProcessingUtilities.processDepthDataFile(depthFrameFileName,
                jSlider2.getValue(), jSlider1.getValue());

        Mat[] backgroundFrames = readBackground();
        rgbBackgroundFrame = backgroundFrames[0];
        depthBackgroundFrame = backgroundFrames[1];

        // subtract depth background
        Mat depthFrameBackgroundSubtracted = new Mat();
        Core.subtract(depthBackgroundFrame, depthFrame, depthFrameBackgroundSubtracted);
        Imgproc.threshold(depthFrameBackgroundSubtracted, depthFrameBackgroundSubtracted, 0, 255,
                Imgproc.THRESH_BINARY);
        displayImage(Mat2BufferedImage(
                videoProcessingUtilities.resizeImage(depthFrameBackgroundSubtracted, new Size(448, 234))),
                depthBckgSubtractedFrames);

        // remove the red-colored elements from the depth image and leave only blue ones
        Mat depthImageCleaned = new Mat();
        Core.inRange(depthFrameBackgroundSubtracted, new Scalar(253, 0, 0), new Scalar(255, 0, 0),
                depthImageCleaned);

        // apply morphological opening to remove noise
        Imgproc.morphologyEx(depthImageCleaned, depthImageCleaned, 2,
                Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3)));
        displayImage(
                Mat2BufferedImage(videoProcessingUtilities.resizeImage(depthImageCleaned, new Size(448, 234))),
                depthCleanedFramesPanel);

        // apply the homographic transform to the cleaned depth image
        Mat hDepthImageCleaned = videoProcessingUtilities.performHomographyTransformation(depthImageCleaned,
                new Size(1920, 1080));

        // extract all contours
        // sort all extracted contours and choose top 2
        // overlay top 2 contours on the image and fill them with white color
        // mask the rgb frame
        // do all necessary rotation operations
        // offer user to save the image

        // extract all suitable contours between MIN and MAX areas:
        MatOfPoint[] contours = videoProcessingUtilities.extractLargestContours(hDepthImageCleaned, 100000,
                160000);
        System.out.println("Number of contours extracted " + contours.length);

        // draw contours
        List<MatOfPoint> tempContours = new ArrayList<MatOfPoint>();
        Mat hDepthImageCleanedContours = hDepthImageCleaned.clone();
        for (MatOfPoint cnt : contours) {
            System.out.println("Extracted Contour Area is " + Imgproc.contourArea(cnt));
            tempContours.add(cnt);
        }
        Imgproc.cvtColor(hDepthImageCleanedContours, hDepthImageCleanedContours, Imgproc.COLOR_GRAY2BGR);
        Imgproc.drawContours(hDepthImageCleanedContours, tempContours, -1, new Scalar(0, 0, 255), 5);
        displayImage(
                Mat2BufferedImage(
                        videoProcessingUtilities.resizeImage(hDepthImageCleanedContours, new Size(448, 234))),
                extractedContoursPanel);

        // prepare final mask
        Mat hDepthImageFilledContours = new Mat(hDepthImageCleaned.rows(), hDepthImageCleaned.cols(),
                hDepthImageCleaned.type());
        Imgproc.drawContours(hDepthImageFilledContours, tempContours, -1, new Scalar(255, 255, 255), -1);
        displayImage(
                Mat2BufferedImage(
                        videoProcessingUtilities.resizeImage(hDepthImageFilledContours, new Size(448, 234))),
                maskedContoursPanel);

        // subtract video background
        // Mat rgbFrameBackgroundSubtracted = new Mat();
        // Core.subtract(rgbBackgroundFrame, rgbFrame, rgbFrameBackgroundSubtracted, hDepthImageCleaned);
        // displayImage(Mat2BufferedImage(videoProcessingUtilities.resizeImage(rgbFrameBackgroundSubtracted, new Size(448, 234))), videoBckgSubtractedFrames);

        // mask
        Mat preMaskedRGBFrame = new Mat();
        rgbFrame.copyTo(preMaskedRGBFrame, hDepthImageCleaned);
        displayImage(
                Mat2BufferedImage(videoProcessingUtilities.resizeImage(preMaskedRGBFrame, new Size(448, 234))),
                videoBckgSubtractedFrames);

        // postmask
        Mat betterMaskedRGBFrame = new Mat();
        rgbFrame.copyTo(betterMaskedRGBFrame, hDepthImageFilledContours);
        displayImage(
                Mat2BufferedImage(
                        videoProcessingUtilities.resizeImage(betterMaskedRGBFrame, new Size(448, 234))),
                videoMaskedPanel);

        // clear ArrayList containing all processed images
        finalImages.clear();
        javax.swing.JLabel[] jLabArray = { extractedShoePanel1, extractedShoePanel2 };

        // segment all images
        int panelIdx = 0;
        for (MatOfPoint contour : tempContours) {
            MatOfPoint2f newMatOfPoint2fContour = new MatOfPoint2f(contour.toArray());
            RotatedRect finalROI = Imgproc.minAreaRect(newMatOfPoint2fContour);
            Mat newMask = videoProcessingUtilities.getContourMasked(hDepthImageFilledContours.clone(),
                    contour);
            Mat imageROIRegistred = new Mat();
            betterMaskedRGBFrame.copyTo(imageROIRegistred, newMask);
            Mat maskedRGBFrameROI = videoProcessingUtilities.rotateExtractedShoeprint(imageROIRegistred,
                    finalROI, new Size(500, 750), 2);
            finalImages.add(maskedRGBFrameROI);
            displayImage(
                    Mat2BufferedImage(
                            videoProcessingUtilities.resizeImage(maskedRGBFrameROI, new Size(203, 250))),
                    jLabArray[panelIdx]);
            panelIdx++;
        }

        // MatOfInt parameters = new MatOfInt();
        // parameters.fromArray(Highgui.CV_IMWRITE_JPEG_QUALITY, 100);
        // Highgui.imwrite(".//backgrounds//" + "test.jpg", depthFrameBackgroundSubtracted, parameters);
    } catch (FileNotFoundException ex) {
        Logger.getLogger(ExperimentalDataProcessingUI.class.getName()).log(Level.SEVERE, null, ex);
    }
}
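The clone() calls above serve two distinct roles: hDepthImageCleaned.clone() gives a scratch copy to colorize and draw contours on, so the original single-channel mask stays usable for copyTo masking; hDepthImageFilledContours.clone() protects the filled mask from whatever getContourMasked does to its argument. The draw-on-a-copy half of that idiom, as a hedged sketch (MaskVisualization and visualizeContours are made-up names):

import java.util.List;

import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.Scalar;
import org.opencv.imgproc.Imgproc;

public class MaskVisualization {
    // Draw colored contours on a clone so the binary mask itself is untouched
    // and can still be used with Mat.copyTo(dst, mask) afterwards.
    public static Mat visualizeContours(Mat grayMask, List<MatOfPoint> contours) {
        Mat vis = grayMask.clone();
        Imgproc.cvtColor(vis, vis, Imgproc.COLOR_GRAY2BGR);
        Imgproc.drawContours(vis, contours, -1, new Scalar(0, 0, 255), 5);
        return vis;
    }
}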
From source file:readnumber.ImageProcessor.java
public Mat blur(Mat input, int numberOfTimes) {
    Mat sourceImage = new Mat();
    Mat destImage = input.clone();
    for (int i = 0; i < numberOfTimes; i++) {
        sourceImage = destImage.clone();
        Imgproc.blur(sourceImage, destImage, new Size(3.0, 3.0));
    }
    return destImage;
}
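Cloning inside the loop gives each blur pass distinct source and destination buffers, and cloning the input up front means the method returns a safe copy even when numberOfTimes is 0.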
From source file:Reconhecimento.SaltoSegmentacaoCirculo.java
public static void medirExtensao() {
    long tempoInicial = System.currentTimeMillis();

    Mat img = new Mat();
    Mat img2 = new Mat();
    VideoCapture cap = new VideoCapture(Video.videoAtual);

    // capture the first frame of the video
    cap.read(img);

    // capture the last frame of the video
    boolean continuar = true;
    Mat aux = new Mat();
    while (continuar) {
        cap.read(aux);
        if (aux.size().area() > 0) {
            img2 = aux.clone();
        } else {
            continuar = false;
        }
    }

    Point centroCirculoInicial = segmentarCirculo(img);
    Point centroCirculoFinal = segmentarCirculo(img2);

    // jump length in centimeters
    extensao = (centroCirculoInicial.x - centroCirculoFinal.x) * Regua.centimetrosPorPixel;
    // convert the jump length to meters
    extensao = extensao / 100.0;

    System.out.println(String.valueOf(extensao).substring(0, 4) + " m");

    Mat show1 = new Mat();
    Mat show2 = new Mat();
    Imgproc.resize(img, show1, new Size(420, 240));
    Imgproc.resize(img2, show2, new Size(420, 240));

    Highgui.imwrite("imagens/" + Video.nomeVideo + 420 + "x" + 2400 + ".jpg", show1);
    Highgui.imwrite("imagens/" + Video.nomeVideo + 420 + "x" + 240 + "u.jpg", show2);

    TelaMedirDistSegmentacaoCirculo tela = new TelaMedirDistSegmentacaoCirculo();
    tela.setLocation(200, 200);
    tela.jLabel1.setIcon(new ImageIcon("imagens/" + Video.nomeVideo + 420 + "x" + 2400 + ".jpg"));
    tela.jLabel2.setIcon(new ImageIcon("imagens/" + Video.nomeVideo + 420 + "x" + 240 + "u.jpg"));
    tela.jLabel4.setText(String.valueOf(extensao).substring(0, 4) + " m");
    tela.jLabel6.setText(String.valueOf(System.currentTimeMillis() - tempoInicial) + " ms");
    tela.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE);
    tela.setVisible(true);
}
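The clone() in the while loop is what makes the last-frame capture work: VideoCapture.read refills the same aux Mat on every call, so a frame must be copied out if it is to survive the next read. The same idea in isolation, as a hedged sketch against the 2.4-era org.opencv.highgui API used above (LastFrame is a made-up class):

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.highgui.VideoCapture;

public class LastFrame {
    // read() overwrites its buffer each call; clone() retains the frame.
    public static Mat lastFrame(String path) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        VideoCapture cap = new VideoCapture(path);
        Mat buffer = new Mat();
        Mat last = new Mat();
        while (cap.read(buffer)) {
            last = buffer.clone();
        }
        cap.release();
        return last;
    }
}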
From source file:saliency.saliency.java
/**
 * @param args the command line arguments
 */
public static void main(String[] args) {
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

    Mat input_img = imread("input_img/sea.jpg");
    // for temp test start
    Imgproc.resize(input_img, input_img, new Size(1980, 1080), 0, 0, Imgproc.INTER_LINEAR);
    // for temp test end
    if (input_img.cols() == 0) {
        return;
    }
    // benchmark

    /////////////////////// step 1 : Extraction of Early Visual Features ///////////////////////////////
    // intensity image: intensity_img
    Mat intensity_img = new Mat(input_img.rows(), input_img.cols(), CV_16UC1);
    // intensity = (R + G + B) / 3
    int img_width = intensity_img.cols();
    int img_height = intensity_img.rows();
    int x, y;
    int i, c, s;
    int max_intensity = 0;
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            int temp_intensity = ((int) input_img.get(y, x)[0] + (int) input_img.get(y, x)[1]
                    + (int) input_img.get(y, x)[2]) / 3;
            intensity_img.put(y, x, temp_intensity);
            if (max_intensity < temp_intensity) {
                max_intensity = temp_intensity;
            }
        }
    }

    // create Gaussian pyramid for intensity
    Mat[] i_gaussian_pyramid = new Mat[9];
    i_gaussian_pyramid[0] = intensity_img.clone();
    for (i = 0; i < 8; i++) {
        i_gaussian_pyramid[i + 1] = i_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(i_gaussian_pyramid[i + 1], i_gaussian_pyramid[i + 1], new Size());
    }

    // create intensity feature maps using center-surround differences
    Mat[][] intensity_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            intensity_feature_map[c][s] = center_surround.main(i_gaussian_pyramid[c + 2],
                    i_gaussian_pyramid[s + c + 5], 0);
        }
    }
    // benchmark
    // imwrite("intensity_feature_map_00.bmp", intensity_feature_map[0][0]);

    // get normalized color image by I.
    Mat norm_input_img = input_img.clone();
    norm_input_img.convertTo(norm_input_img, CV_64F);
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            // normalization is only applied at locations where I is larger than
            // 1/10 of its maximum over the entire image
            double[] temp = new double[3];
            if (intensity_img.get(y, x)[0] > (max_intensity / 10)) {
                temp[0] = norm_input_img.get(y, x)[0] / intensity_img.get(y, x)[0];
                temp[1] = norm_input_img.get(y, x)[1] / intensity_img.get(y, x)[0];
                temp[2] = norm_input_img.get(y, x)[2] / intensity_img.get(y, x)[0];
                norm_input_img.put(y, x, temp);
            } else {
                temp[0] = 0;
                temp[1] = 0;
                temp[2] = 0;
                norm_input_img.put(y, x, temp);
            }
        }
    }

    // get R G B Y (yellow) single color channel images
    Mat r_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat g_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat b_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat y_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    // [0]: b  [1]: g  [2]: r
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            // R = max(0, r - (g + b) / 2)
            double temp_chroma = max(0, (norm_input_img.get(y, x)[2]
                    - (norm_input_img.get(y, x)[1] + norm_input_img.get(y, x)[0]) / 2));
            r_img.put(y, x, temp_chroma);
            // G = max(0, g - (r + b) / 2)
            temp_chroma = max(0, (norm_input_img.get(y, x)[1]
                    - (norm_input_img.get(y, x)[2] + norm_input_img.get(y, x)[0]) / 2));
            g_img.put(y, x, temp_chroma);
            // B = max(0, b - (r + g) / 2)
            temp_chroma = max(0, (norm_input_img.get(y, x)[0]
                    - (norm_input_img.get(y, x)[2] + norm_input_img.get(y, x)[1]) / 2));
            b_img.put(y, x, temp_chroma);
            // Y = max(0, (r + g) / 2 - |r - g| / 2 - b)
            temp_chroma = max(0, ((norm_input_img.get(y, x)[2] + norm_input_img.get(y, x)[1]) / 2
                    - abs(norm_input_img.get(y, x)[2] - norm_input_img.get(y, x)[1]) / 2
                    - norm_input_img.get(y, x)[0]));
            y_img.put(y, x, temp_chroma);
        }
    }

    // create Gaussian pyramids for the 4 color channels
    Mat[] b_gaussian_pyramid = new Mat[9];
    b_gaussian_pyramid[0] = b_img.clone();
    for (i = 0; i < 8; i++) {
        b_gaussian_pyramid[i + 1] = b_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(b_gaussian_pyramid[i + 1], b_gaussian_pyramid[i + 1], new Size());
    }
    Mat[] g_gaussian_pyramid = new Mat[9];
    g_gaussian_pyramid[0] = g_img.clone();
    for (i = 0; i < 8; i++) {
        g_gaussian_pyramid[i + 1] = g_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(g_gaussian_pyramid[i + 1], g_gaussian_pyramid[i + 1], new Size());
    }
    Mat[] r_gaussian_pyramid = new Mat[9];
    r_gaussian_pyramid[0] = r_img.clone();
    for (i = 0; i < 8; i++) {
        r_gaussian_pyramid[i + 1] = r_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(r_gaussian_pyramid[i + 1], r_gaussian_pyramid[i + 1], new Size());
    }
    Mat[] y_gaussian_pyramid = new Mat[9];
    y_gaussian_pyramid[0] = y_img.clone();
    for (i = 0; i < 8; i++) {
        y_gaussian_pyramid[i + 1] = y_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(y_gaussian_pyramid[i + 1], y_gaussian_pyramid[i + 1], new Size());
    }

    // create color feature maps using center-surround differences
    // RG(c,s) = |(R(c) - G(c)) (-) (G(s) - R(s))|
    Mat[][] rg_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat r_minus_g = r_gaussian_pyramid[c + 2].clone();
            Core.subtract(r_gaussian_pyramid[c + 2], g_gaussian_pyramid[c + 2], r_minus_g);
            Mat g_minus_r = g_gaussian_pyramid[s + c + 5].clone();
            Core.subtract(g_gaussian_pyramid[s + c + 5], r_gaussian_pyramid[s + c + 5], g_minus_r);
            rg_feature_map[c][s] = center_surround.main(r_minus_g, g_minus_r, 1);
        }
    }
    // BY(c,s) = |(B(c) - Y(c)) (-) (Y(s) - B(s))|
    Mat[][] by_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat b_minus_y = b_gaussian_pyramid[c + 2].clone();
            Core.subtract(b_gaussian_pyramid[c + 2], y_gaussian_pyramid[c + 2], b_minus_y);
            Mat y_minus_b = y_gaussian_pyramid[s + c + 5].clone();
            Core.subtract(y_gaussian_pyramid[s + c + 5], b_gaussian_pyramid[s + c + 5], y_minus_b);
            by_feature_map[c][s] = center_surround.main(b_minus_y, y_minus_b, 1);
        }
    }
    // benchmark

    // create oriented Gabor pyramids from the intensity pyramid
    int kernel_size = 10; // 31; adjust value according to the reference
    double sigma = 3; // default 0.56; the larger sigma, the larger the support of the Gabor function
                      // and the more parallel excitatory/inhibitory stripe zones become visible
    double[] theta = new double[4];
    theta[0] = 0;
    theta[1] = Math.PI / 4;
    theta[2] = Math.PI / 2;
    theta[3] = Math.PI * 3 / 4;
    double lambda = 5; // 36; minimum 3
    double gamma = 0.5; // 0.02;
    // double psi = 0;
    Mat[][] gabor_pyramid = new Mat[4][9];
    int theta_index;
    for (theta_index = 0; theta_index < 4; theta_index++) {
        Mat gabor_kernel = Imgproc.getGaborKernel(new Size(kernel_size, kernel_size), sigma,
                theta[theta_index], lambda, gamma);
        // gabor_pyramid[theta_index][0] = intensity_img.clone();
        for (i = 0; i < 9; i++) {
            gabor_pyramid[theta_index][i] = i_gaussian_pyramid[i].clone();
            Imgproc.filter2D(i_gaussian_pyramid[i], gabor_pyramid[theta_index][i], -1, gabor_kernel);
            // Imgproc.resize(gabor_pyramid[theta_index][i], gabor_pyramid[theta_index][i],
            //         new Size(), 0.5, 0.5, Imgproc.INTER_AREA);
        }
    }
    // imwrite("gabor_pyramid_01.bmp", gabor_pyramid[0][1]);
    // imwrite("gabor_pyramid_11.bmp", gabor_pyramid[1][1]);
    // imwrite("gabor_pyramid_21.bmp", gabor_pyramid[2][1]);
    // imwrite("gabor_pyramid_31.bmp", gabor_pyramid[3][1]);
    // imwrite("gabor_pyramid_03.bmp", gabor_pyramid[0][3]);

    // get orientation feature maps using center-surround differences
    Mat[][][] orientation_feature_map = new Mat[4][3][2];
    for (theta_index = 0; theta_index < 4; theta_index++) {
        for (c = 0; c < 3; c++) {
            for (s = 0; s < 2; s++) {
                orientation_feature_map[theta_index][c][s] = center_surround.main(
                        gabor_pyramid[theta_index][c + 2], gabor_pyramid[theta_index][s + c + 5], 0);
            }
        }
    }
    // benchmark
    // imwrite("orientation_test_00.bmp", orientation_feature_map[0][0][0]);

    /////////////////////// step 2 : the saliency map ///////////////////////////////
    // get intensity conspicuity map
    Mat intensity_conspicuity_map = Mat.zeros(intensity_feature_map[2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(intensity_feature_map[c][s]);
            Mat resized_feature_map = Mat.zeros(intensity_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, intensity_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(intensity_conspicuity_map, 1, resized_feature_map, 1.0 / 6, 0,
                    intensity_conspicuity_map);
            /* if (c == 0 && s == 0) {
                imwrite("in.bmp", intensity_feature_map[c][s]);
                imwrite("map_norm.bmp", norm_out);
                imwrite("resized_feature_map.bmp", resized_feature_map);
            } */
        }
    }
    // benchmark
    // Core.normalize(intensity_conspicuity_map, intensity_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    // imwrite("intensity_conspicuity_map.bmp", intensity_conspicuity_map);

    // get color conspicuity map
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Core.normalize(rg_feature_map[c][s], rg_feature_map[c][s], 0, 255, Core.NORM_MINMAX);
            rg_feature_map[c][s].convertTo(rg_feature_map[c][s], CV_16UC1);
            Core.normalize(by_feature_map[c][s], by_feature_map[c][s], 0, 255, Core.NORM_MINMAX);
            by_feature_map[c][s].convertTo(by_feature_map[c][s], CV_16UC1);
        }
    }
    // imwrite("test_rg.bmp", rg_feature_map[0][0]);
    Mat color_conspicuity_map = Mat.zeros(rg_feature_map[2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(rg_feature_map[c][s]);
            Mat resized_feature_map = Mat.zeros(rg_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, rg_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(color_conspicuity_map, 1, resized_feature_map, 1.0 / 12, 0,
                    color_conspicuity_map);
            norm_out = map_norm.main(by_feature_map[c][s]);
            resized_feature_map = Mat.zeros(by_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, by_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(color_conspicuity_map, 1, resized_feature_map, 1.0 / 12, 0,
                    color_conspicuity_map);
        }
    }
    // benchmark

    // get orientation conspicuity map (one per orientation, then combined)
    Mat orientation_conspicuity_map_0 = Mat.zeros(orientation_feature_map[0][2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(orientation_feature_map[0][c][s]);
            Mat resized_feature_map = Mat.zeros(orientation_feature_map[0][2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, orientation_feature_map[0][2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(orientation_conspicuity_map_0, 1, resized_feature_map, 1.0 / 6, 0,
                    orientation_conspicuity_map_0);
        }
    }
    Mat orientation_conspicuity_map_1 = Mat.zeros(orientation_feature_map[1][2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(orientation_feature_map[1][c][s]);
            Mat resized_feature_map = Mat.zeros(orientation_feature_map[1][2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, orientation_feature_map[1][2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(orientation_conspicuity_map_1, 1, resized_feature_map, 1.0 / 6, 0,
                    orientation_conspicuity_map_1);
        }
    }
    Mat orientation_conspicuity_map_2 = Mat.zeros(orientation_feature_map[2][2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(orientation_feature_map[2][c][s]);
            Mat resized_feature_map = Mat.zeros(orientation_feature_map[2][2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, orientation_feature_map[2][2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(orientation_conspicuity_map_2, 1, resized_feature_map, 1.0 / 6, 0,
                    orientation_conspicuity_map_2);
        }
    }
    Mat orientation_conspicuity_map_3 = Mat.zeros(orientation_feature_map[3][2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(orientation_feature_map[3][c][s]);
            Mat resized_feature_map = Mat.zeros(orientation_feature_map[3][2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, orientation_feature_map[3][2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(orientation_conspicuity_map_3, 1, resized_feature_map, 1.0 / 6, 0,
                    orientation_conspicuity_map_3);
        }
    }
    Mat orientation_conspicuity_map = Mat.zeros(orientation_feature_map[0][2][0].size(), CV_16UC1);
    Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(orientation_conspicuity_map_0),
            1.0 / 4, 0, orientation_conspicuity_map);
    Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(orientation_conspicuity_map_1),
            1.0 / 4, 0, orientation_conspicuity_map);
    Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(orientation_conspicuity_map_2),
            1.0 / 4, 0, orientation_conspicuity_map);
    Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(orientation_conspicuity_map_3),
            1.0 / 4, 0, orientation_conspicuity_map);
    // benchmark

    // combine the three conspicuity maps into the saliency map
    Mat saliency = Mat.zeros(intensity_conspicuity_map.size(), CV_16UC1);
    Core.addWeighted(saliency, 1, map_norm.main(intensity_conspicuity_map), 1.0 / 3, 0, saliency);
    Core.addWeighted(saliency, 1, map_norm.main(color_conspicuity_map), 1.0 / 3, 0, saliency);
    Core.addWeighted(saliency, 1, map_norm.main(orientation_conspicuity_map), 1.0 / 3, 0, saliency);
    // benchmark
    Core.normalize(saliency, saliency, 0, 255, Core.NORM_MINMAX);
    // for temp test
    Imgproc.resize(saliency, saliency, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("saliency.bmp", saliency);
    Core.normalize(intensity_conspicuity_map, intensity_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(intensity_conspicuity_map, intensity_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("intensity_conspicuity_map.bmp", intensity_conspicuity_map);
    Core.normalize(color_conspicuity_map, color_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(color_conspicuity_map, color_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("color_conspicuity_map.bmp", color_conspicuity_map);
    Core.normalize(orientation_conspicuity_map, orientation_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(orientation_conspicuity_map, orientation_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map.bmp", orientation_conspicuity_map);
    Imgproc.resize(input_img, input_img, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("input_img.bmp", input_img);

    // for testing the algorithm
    /*
    Mat temp1 = Mat.zeros(intensity_conspicuity_map.size(), CV_16UC1);
    temp1 = map_norm.main(intensity_conspicuity_map);
    Core.normalize(temp1, temp1, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp1, temp1, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("intensity.bmp", temp1);
    temp1 = map_norm.main(color_conspicuity_map);
    Core.normalize(temp1, temp1, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp1, temp1, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("color.bmp", temp1);
    temp1 = map_norm.main(orientation_conspicuity_map);
    Core.normalize(temp1, temp1, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp1, temp1, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation.bmp", temp1);
    Mat temp2 = Mat.zeros(orientation_conspicuity_map_0.size(), CV_16UC1);
    temp2 = map_norm.main(orientation_conspicuity_map_0);
    Core.normalize(temp2, temp2, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp2, temp2, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map_0.bmp", temp2);
    temp2 = map_norm.main(orientation_conspicuity_map_1);
    Core.normalize(temp2, temp2, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp2, temp2, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map_1.bmp", temp2);
    temp2 = map_norm.main(orientation_conspicuity_map_2);
    Core.normalize(temp2, temp2, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp2, temp2, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map_2.bmp", temp2);
    temp2 = map_norm.main(orientation_conspicuity_map_3);
    Core.normalize(temp2, temp2, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp2, temp2, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map_3.bmp", temp2);
    */
}
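The pyramid construction repeated above for intensity and each color channel always follows the same clone-then-pyrDown shape. Factored out as a hedged sketch (PyramidSketch and gaussianPyramid are hypothetical names; here pyrDown writes into a fresh Mat, so only level 0 actually needs the clone()):

import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;

public class PyramidSketch {
    // Level 0 is a clone so pyrDown never touches the caller's Mat;
    // each further level is downsampled into a fresh Mat.
    public static Mat[] gaussianPyramid(Mat src, int levels) {
        Mat[] pyramid = new Mat[levels];
        pyramid[0] = src.clone();
        for (int i = 1; i < levels; i++) {
            pyramid[i] = new Mat();
            Imgproc.pyrDown(pyramid[i - 1], pyramid[i], new Size());
        }
        return pyramid;
    }
}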
From source file:samples.LWF.java
private static void save_meshed_images(double[][] puntos, File carpetaalmacen, File image, Mat mat,
        int[][] delaunay_triangles) {
    Mat mat_copy = mat.clone();
    int radii = 1000;
    for (double[] punto : puntos) {
        Imgproc.ellipse(mat_copy, new Point(punto), new Size(radii, radii), 0, 0, 0,
                new Scalar(0, 255, 0));
        // Imgproc.line(mat_copy, null, null, null);
    }
    for (int[] tri : faceTemplateTriangles) {
        Imgproc.line(mat_copy, new Point(puntos[tri[0] - 1]), new Point(puntos[tri[1] - 1]),
                new Scalar(0, 255, 0));
        Imgproc.line(mat_copy, new Point(puntos[tri[1] - 1]), new Point(puntos[tri[2] - 1]),
                new Scalar(0, 255, 0));
        Imgproc.line(mat_copy, new Point(puntos[tri[2] - 1]), new Point(puntos[tri[0] - 1]),
                new Scalar(0, 255, 0));
    }
    Imgcodecs.imwrite(carpetaalmacen.getAbsolutePath() + "\\" + image.getName(), mat_copy);
}
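Here clone() is purely for presentation: the mesh lines and landmark ellipses are drawn on mat_copy and written to disk, so the caller's Mat remains unmodified and can be reused for other overlays.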
From source file:servlets.FillAreaByScribble.java
/**
 * Processes requests for both HTTP <code>GET</code> and <code>POST</code>
 * methods.
 *
 * @param request servlet request
 * @param response servlet response
 * @throws ServletException if a servlet-specific error occurs
 * @throws IOException if an I/O error occurs
 */
protected void processRequest(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    response.setContentType("text/html;charset=UTF-8");
    try (PrintWriter out = response.getWriter()) {
        String imageForTextRecognition = request.getParameter("imageForTextRecognition") + ".png";
        String isSingleRegion = request.getParameter("isSingleRegion");
        boolean makeSingleRegion = isSingleRegion.toLowerCase().equals("true");

        Mat original = ImageUtils.loadImage(imageForTextRecognition, request);
        Mat image = original.clone();
        Mat mask = Mat.zeros(image.rows() + 2, image.cols() + 2, CvType.CV_8UC1);

        String samplingPoints = request.getParameter("samplingPoints");
        Gson gson = new Gson();
        Point[] tmpPoints = gson.fromJson(samplingPoints, Point[].class);
        ArrayList<Point> userPoints = new ArrayList<Point>(Arrays.asList(tmpPoints));

        Mat userPointsImage = image.clone();
        ArrayList<Mat> maskRegions = new ArrayList<>();

        Random random = new Random();
        int b = random.nextInt(256);
        int g = random.nextInt(256);
        int r = random.nextInt(256);
        Scalar newVal = new Scalar(b, g, r);

        FloodFillFacade floodFillFacade = new FloodFillFacade();

        int k = 0;
        for (int i = 0; i < userPoints.size(); i++) {
            Point point = userPoints.get(i);
            image = floodFillFacade.fill(image, mask, (int) point.x, (int) point.y, newVal);

            Mat seedImage = original.clone();
            Core.circle(seedImage, point, 9, new Scalar(0, 0, 255), -1);
            Core.putText(userPointsImage, "" + k, new Point(point.x + 5, point.y + 5), 3, 0.5,
                    new Scalar(0, 0, 0));
            // ImageUtils.saveImage(seedImage, "mask_" + k + "_seed" + imageForTextRecognition + ".png", request);

            if (!makeSingleRegion) {
                Mat element = new Mat(3, 3, CvType.CV_8U, new Scalar(1));
                Imgproc.morphologyEx(mask, mask, Imgproc.MORPH_CLOSE, element, new Point(-1, -1), 3);
                Imgproc.resize(mask, mask, original.size());
            }
            // ImageUtils.saveImage(mask, "mask_" + k + "" + imageForTextRecognition + ".png", request);

            Mat dilatedMask = new Mat();
            int elementSide = 21;
            Mat element = new Mat(elementSide, elementSide, CvType.CV_8U, new Scalar(1));
            Imgproc.morphologyEx(mask, dilatedMask, Imgproc.MORPH_DILATE, element, new Point(-1, -1), 1);
            Imgproc.resize(dilatedMask, dilatedMask, original.size());
            // ImageUtils.saveImage(dilatedMask, "mask_" + k + "_dilated" + imageForTextRecognition + ".png", request);

            maskRegions.add(mask);

            if (!makeSingleRegion) {
                int totalRemovedPoints = filterPoints(userPoints, dilatedMask);
                if (totalRemovedPoints > 0) {
                    i = -1; // so that the loop starts again at the first element of userPoints
                }
            } else {
                filterPoints(userPoints, mask);
            }
            // System.out.println("Total points after filtering: " + userPoints.size());

            if (!makeSingleRegion) {
                mask = Mat.zeros(original.rows() + 2, original.cols() + 2, CvType.CV_8UC1);
            }
            k++;
        }

        ArrayList<FindingResponse> findingResponses = new ArrayList<>();

        if (makeSingleRegion) {
            Mat element = new Mat(3, 3, CvType.CV_8U, new Scalar(1));
            Imgproc.morphologyEx(mask, mask, Imgproc.MORPH_CLOSE, element, new Point(-1, -1), 3);
            Imgproc.resize(mask, mask, image.size());

            List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
            Imgproc.findContours(mask.clone(), contours, new Mat(), Imgproc.RETR_EXTERNAL,
                    Imgproc.CHAIN_APPROX_NONE);
            MatOfPoint biggestContour = contours.get(0); // start with the first contour
            if (contours.size() > 1) {
                biggestContour = Collections.max(contours, new ContourComparator()); // keep the biggest one
            }
            double contourArea = Imgproc.contourArea(biggestContour);

            Point[] biggestContourPoints = biggestContour.toArray();
            String path = "M " + (int) biggestContourPoints[0].x + " " + (int) biggestContourPoints[0].y + " ";
            for (int i = 1; i < biggestContourPoints.length; ++i) {
                Point v = biggestContourPoints[i];
                path += "L " + (int) v.x + " " + (int) v.y + " ";
            }
            path += "Z";
            // System.out.println("path: " + path);

            Rect computedSearchWindow = Imgproc.boundingRect(biggestContour);
            Point massCenter = computedSearchWindow.tl();
            Scalar meanColor = Core.mean(original, mask);
            // ImageUtils.saveImage(mask, "single_mask_" + imageForTextRecognition + ".png", request);

            FindingResponse findingResponse = new FindingResponse(path, meanColor, massCenter, -1, contourArea);
            findingResponses.add(findingResponse);
        } else {
            float imageArea = image.cols() * image.rows();
            for (int j = 0; j < maskRegions.size(); j++) {
                Mat region = maskRegions.get(j);

                List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
                Imgproc.findContours(region.clone(), contours, new Mat(), Imgproc.RETR_EXTERNAL,
                        Imgproc.CHAIN_APPROX_NONE);

                MatOfPoint biggestContour = contours.get(0); // start with the first contour
                if (contours.size() > 1) {
                    biggestContour = Collections.max(contours, new ContourComparator()); // keep the biggest one
                }
                double contourArea = Imgproc.contourArea(biggestContour);
                if (contourArea / imageArea < 0.8) { // only accept areas smaller than 80% of the image
                    Point[] biggestContourPoints = biggestContour.toArray();
                    String path = "M " + (int) biggestContourPoints[0].x + " "
                            + (int) biggestContourPoints[0].y + " ";
                    for (int i = 1; i < biggestContourPoints.length; ++i) {
                        Point v = biggestContourPoints[i];
                        path += "L " + (int) v.x + " " + (int) v.y + " ";
                    }
                    path += "Z";

                    Rect computedSearchWindow = Imgproc.boundingRect(biggestContour);
                    Point massCenter = computedSearchWindow.tl();
                    // System.out.println("Contour area: " + contourArea);

                    Mat contoursImage = userPointsImage.clone();
                    Imgproc.drawContours(contoursImage, contours, 0, newVal, 1);
                    Scalar meanColor = Core.mean(original, region);
                    FindingResponse findingResponse = new FindingResponse(path, meanColor, massCenter, -1,
                            contourArea);
                    findingResponses.add(findingResponse);
                    // ImageUtils.saveImage(contoursImage, "mask_" + j + "_contourned" + imageForTextRecognition + ".png", request);
                }
            }
        }

        String jsonResponse = gson.toJson(findingResponses, ArrayList.class);
        out.println(jsonResponse);
    }
}
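As in the earlier examples, mask.clone() and region.clone() are passed to findContours because the call consumes its input in this OpenCV version. Note also that original is kept pristine throughout: the flood fill scribbles on the working copy, while Core.mean(original, mask) samples colors from the unmodified source.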
From source file:servlets.processScribble.java
/**
 * Processes requests for both HTTP <code>GET</code> and <code>POST</code>
 * methods.
 *
 * @param request servlet request
 * @param response servlet response
 * @throws ServletException if a servlet-specific error occurs
 * @throws IOException if an I/O error occurs
 */
protected void processRequest(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    response.setContentType("text/html;charset=UTF-8");
    try (PrintWriter out = response.getWriter()) {
        String imageForTextRecognition = request.getParameter("imageForTextRecognition") + ".png";
        Mat original = ImageUtils.loadImage(imageForTextRecognition, request);
        Mat image = original.clone();
        Mat mask = Mat.zeros(image.rows() + 2, image.cols() + 2, CvType.CV_8UC1);

        String samplingPoints = request.getParameter("samplingPoints");
        Gson gson = new Gson();
        Point[] userPoints = gson.fromJson(samplingPoints, Point[].class);

        MatOfPoint points = new MatOfPoint(new Mat(userPoints.length, 1, CvType.CV_32SC2));
        int cont = 0;
        for (Point point : userPoints) {
            int y = (int) point.y;
            int x = (int) point.x;
            int[] data = { x, y };
            points.put(cont++, 0, data);
        }

        MatOfInt hull = new MatOfInt();
        Imgproc.convexHull(points, hull);

        MatOfPoint mopOut = new MatOfPoint();
        mopOut.create((int) hull.size().height, 1, CvType.CV_32SC2);

        int totalPoints = (int) hull.size().height;
        Point[] convexHullPoints = new Point[totalPoints];
        ArrayList<Point> seeds = new ArrayList<>();
        for (int i = 0; i < totalPoints; i++) {
            int index = (int) hull.get(i, 0)[0];
            double[] point = new double[] { points.get(index, 0)[0], points.get(index, 0)[1] };
            mopOut.put(i, 0, point);
            convexHullPoints[i] = new Point(point[0], point[1]);
            seeds.add(new Point(point[0], point[1]));
        }

        MatOfPoint mop = new MatOfPoint();
        mop.fromArray(convexHullPoints);
        ArrayList<MatOfPoint> arrayList = new ArrayList<MatOfPoint>();
        arrayList.add(mop);

        Random random = new Random();
        int b = random.nextInt(256);
        int g = random.nextInt(256);
        int r = random.nextInt(256);
        Scalar newVal = new Scalar(b, g, r);
        FloodFillFacade floodFillFacade = new FloodFillFacade();

        for (int i = 0; i < seeds.size(); i++) {
            Point seed = seeds.get(i);
            image = floodFillFacade.fill(image, mask, (int) seed.x, (int) seed.y, newVal);
        }

        Imgproc.drawContours(image, arrayList, 0, newVal, -1);
        Imgproc.resize(mask, mask, image.size());
        Scalar meanColor = Core.mean(original, mask);
        // Highgui.imwrite("C:\\Users\\Gonzalo\\Documents\\NetBeansProjects\\iVoLVER\\uploads\\the_convexHull.png", image);
        ImageUtils.saveImage(image, imageForTextRecognition + "_the_convexHull.png", request);

        newVal = new Scalar(255, 255, 0);
        floodFillFacade.setMasked(false);
        System.out.println("Last one:");
        floodFillFacade.fill(image, mask, 211, 194, newVal); // hard-coded seed point
        Core.circle(image, new Point(211, 194), 5, new Scalar(0, 0, 0), -1);
        ImageUtils.saveImage(image, imageForTextRecognition + "_final.png", request);
        // Highgui.imwrite("C:\\Users\\Gonzalo\\Documents\\NetBeansProjects\\iVoLVER\\uploads\\final.png", image);

        Mat element = new Mat(3, 3, CvType.CV_8U, new Scalar(1));
        Imgproc.morphologyEx(mask, mask, Imgproc.MORPH_CLOSE, element, new Point(-1, -1), 3);
        Imgproc.resize(mask, mask, image.size());
        // ImageUtils.saveImage(mask, "final_mask_dilated.png", request);
        // Highgui.imwrite("C:\\Users\\Gonzalo\\Documents\\NetBeansProjects\\iVoLVER\\uploads\\final_mask_dilated.png", mask);

        List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
        Imgproc.findContours(mask.clone(), contours, new Mat(), Imgproc.RETR_EXTERNAL,
                Imgproc.CHAIN_APPROX_NONE);

        MatOfPoint biggestContour = contours.get(0); // start with the first contour
        if (contours.size() > 1) {
            biggestContour = Collections.max(contours, new ContourComparator()); // keep the biggest one
        }
        double contourArea = Imgproc.contourArea(biggestContour);

        Point[] biggestContourPoints = biggestContour.toArray();
        String path = "M " + (int) biggestContourPoints[0].x + " " + (int) biggestContourPoints[0].y + " ";
        for (int i = 1; i < biggestContourPoints.length; ++i) {
            Point v = biggestContourPoints[i];
            path += "L " + (int) v.x + " " + (int) v.y + " ";
        }
        path += "Z";
        System.out.println("path:");
        System.out.println(path);

        Rect computedSearchWindow = Imgproc.boundingRect(biggestContour);
        Point massCenter = computedSearchWindow.tl();

        FindingResponse findingResponse = new FindingResponse(path, meanColor, massCenter, -1, contourArea);
        String jsonResponse = gson.toJson(findingResponse, FindingResponse.class);
        out.println(jsonResponse);
        // String jsonResponse = gson.toJson(path);
        // out.println(jsonResponse);
    }
}
From source file:simeav.filtros.instanciaciones.DetectorConectoresEstandar.java
@Override
public Mat detectarConectores(Mat original, Mat mascaraModulos, Diagrama diagrama) {
    Mat sinCuadrados = Utils.borrarMascara(original, mascaraModulos);
    // dilate the connectors so they overlap the squares
    sinCuadrados = Utils.dilate(sinCuadrados);
    sinCuadrados = Utils.dilate(sinCuadrados);
    sinCuadrados = Utils.dilate(sinCuadrados);
    // remove stray points that may remain from deleting the squares
    ArrayList<MatOfPoint> contornos = Utils.detectarContornos(sinCuadrados);
    for (int i = 0; i < contornos.size(); i++) {
        double area = Imgproc.contourArea(contornos.get(i));
        if (area <= 50) {
            Imgproc.drawContours(sinCuadrados, contornos, i, new Scalar(0, 0, 0), -1);
        }
    }
    this.extremos = original.clone();
    Mat mascara;
    String tipo_extremo1, tipo_extremo2;
    // image on which the result will be drawn
    Mat conectores = Mat.zeros(sinCuadrados.size(), CvType.CV_8UC3);
    Mat contorno;
    contornos = Utils.detectarContornos(sinCuadrados);
    Mat intersec = new Mat();
    ArrayList<MatOfPoint> contornos_intersec;
    int r, g, b;
    for (int j = contornos.size() - 1; j >= 0; j--) {
        // draw the filled contour, to later take its intersection with the squares
        contorno = Mat.zeros(sinCuadrados.size(), CvType.CV_8UC3);
        Imgproc.drawContours(contorno, contornos, j, new Scalar(180, 255, 255), -1);
        Imgproc.cvtColor(contorno, contorno, Imgproc.COLOR_BGR2GRAY);
        // compute the intersection with the squares (drawn into intersec)
        Core.bitwise_and(contorno, mascaraModulos, intersec);
        // extract the contours of the intersections to know where they are
        contornos_intersec = Utils.detectarContornos(intersec);
        if (contornos_intersec.size() > 1) {
            Scalar color = Utils.getColorRandom();
            for (int z = 0; z < contornos_intersec.size(); z++) {
                Imgproc.drawContours(conectores, contornos_intersec, z, color, -1);
            }
            ArrayList<Point> centros_extremos = Utils.getCentros(contornos_intersec);
            for (Point centros_extremo : centros_extremos) {
                Core.circle(conectores, centros_extremo, 4, color, -1);
            }
            analizarExtremos(j, centros_extremos, diagrama);
            Conector c = diagrama.getConector(j);
            Core.rectangle(conectores, c.getModuloDesde().getRectangulo().tl(),
                    c.getModuloDesde().getRectangulo().br(), color, 3);
            Core.rectangle(conectores, c.getModuloHasta().getRectangulo().tl(),
                    c.getModuloHasta().getRectangulo().br(), color, 3);
            Point tl_desde = new Point(c.getDesde().x - 20, c.getDesde().y - 20);
            Point br_desde = new Point(c.getDesde().x + 20, c.getDesde().y + 20);
            Point tl_hasta = new Point(c.getHasta().x - 20, c.getHasta().y - 20);
            Point br_hasta = new Point(c.getHasta().x + 20, c.getHasta().y + 20);
            mascara = new Mat(sinCuadrados.size(), CvType.CV_8U, new Scalar(255, 255, 255));
            Core.rectangle(mascara, tl_desde, br_desde, new Scalar(0, 0, 0), -1);
            tipo_extremo1 = clasificarExtremo(Utils.borrarMascara(original, mascara));
            mascara = new Mat(sinCuadrados.size(), CvType.CV_8U, new Scalar(255, 255, 255));
            Core.rectangle(mascara, tl_hasta, br_hasta, new Scalar(0, 0, 0), -1);
            tipo_extremo2 = clasificarExtremo(Utils.borrarMascara(original, mascara));
            if (!tipo_extremo1.equals(tipo_extremo2)) {
                if (tipo_extremo1.equals("Normal"))
                    c.setTipo(tipo_extremo2);
                else if (tipo_extremo2.equals("Normal")) {
                    Modulo aux = c.getModuloDesde();
                    c.setDesde(c.getModuloHasta());
                    c.setHacia(aux);
                    Point p_aux = c.getDesde();
                    c.setDesde(c.getHasta());
                    c.setHasta(p_aux);
                    c.setTipo(tipo_extremo1);
                } else {
                    c.setTipo("Indeterminado");
                }
            } else {
                c.setTipo("Indeterminado");
            }
        }
    }
    return conectores;
}
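Here this.extremos = original.clone() appears to keep an untouched snapshot of the input diagram before any drawing happens; the connector endpoints are then classified from freshly masked copies of original, so nothing drawn into conectores ever touches the source image.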