List of usage examples for org.opencv.core.Mat.size()
public Size size()
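Before the examples from real projects, a minimal sketch of the call itself. The dimensions below are arbitrary, chosen only to show that Size reports width and height while the Mat constructor takes rows and columns:

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Size;

public class MatSizeDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat mat = new Mat(480, 640, CvType.CV_8UC1); // rows = 480, cols = 640
        Size size = mat.size();
        System.out.println(size.width + " x " + size.height); // prints 640.0 x 480.0
    }
}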
From source file:org.surmon.pattern.editor2d.components.Mapping.java
public static List<MatOfPoint> process(Mat source, List<Particle> particles) {
    Mat partImage = new Mat(source.size(), CvType.CV_8UC1);

    // Draw particles as single-pixel circles
    Point p;
    for (Particle part : particles) {
        p = new Point(part.getPosition().toArray());
        Core.circle(partImage, p, 1, new Scalar(255));
    }

    // Blur with Gaussian kernel
    Mat blured = new Mat();
    Imgproc.GaussianBlur(partImage, blured, new Size(101, 101), -1, -1);

    // Equalize histogram
    List<Mat> eqChannels = new ArrayList<>();
    List<Mat> channels = new ArrayList<>();
    Core.split(blured, channels);
    for (Mat channel : channels) {
        Mat eqImage = new Mat();
        Imgproc.equalizeHist(channel, eqImage);
        eqChannels.add(eqImage);
    }
    Mat eqResult = new Mat();
    Core.merge(eqChannels, eqResult);

    // Binary threshold (Otsu)
    Mat bin = new Mat();
    Imgproc.threshold(eqResult, bin, 0, 255, Imgproc.THRESH_OTSU);
    // Imgproc.threshold(eqResult, bin, 10, 255, Imgproc.THRESH_BINARY);

    // Find contours
    Mat imMat = bin.clone();
    Mat canny_output = new Mat();
    Mat hierarchy = new Mat();
    int thresh = 100;
    List<MatOfPoint> borders = new ArrayList<>();
    Imgproc.Canny(imMat, canny_output, thresh, thresh * 2);
    Imgproc.findContours(canny_output, borders, hierarchy, Imgproc.RETR_EXTERNAL,
            Imgproc.CHAIN_APPROX_SIMPLE);

    return borders;
    // Mat result = source.clone();
    // Imgproc.drawContours(result, borders, -1, new Scalar(255, 0, 255));
    // return result;
}
From source file:org.usfirst.frc.team2084.CMonster2016.vision.CoordinateMath.java
License:Open Source License
/**
 * Multiply two matrices.
 *
 * @param src1 the first matrix
 * @param src2 the second matrix
 * @param dest the output matrix
 */
public static void matMult(Mat src1, Mat src2, Mat dest) {
    Core.gemm(src1, src2, 1, Mat.zeros(dest.size(), dest.type()), 1, dest);
}
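Core.gemm computes alpha*src1*src2 + beta*src3, so passing a zero matrix with beta = 1 reduces the call to a plain matrix product; note that dest must be preallocated with the result's size and type, since it supplies the dimensions of the zero addend. A hypothetical call site (the matrix values are arbitrary, for illustration only):

// Hypothetical usage of matMult: (2x3) * (3x2) -> (2x2)
Mat a = new Mat(2, 3, CvType.CV_64F);
a.put(0, 0, 1, 2, 3, 4, 5, 6);
Mat b = new Mat(3, 2, CvType.CV_64F);
b.put(0, 0, 7, 8, 9, 10, 11, 12);
Mat product = new Mat(2, 2, CvType.CV_64F); // preallocated: gemm reads its size() and type()
CoordinateMath.matMult(a, b, product);
System.out.println(product.dump()); // [58, 64; 139, 154]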
From source file:processdata.depthDataProcessingUtilities.java
/**
 * Converts depth data to an OpenCV Mat object, keeping only depth values
 * that fall within the min and max thresholds.
 *
 * @param path path to the depth data file
 * @param minThreshold minimum depth value to keep
 * @param maxThreshold maximum depth value to keep
 * @return the processed depth image with a JET colormap applied
 * @throws FileNotFoundException
 */
public static Mat processDepthDataFile(String path, int minThreshold, int maxThreshold)
        throws FileNotFoundException {
    File depthData = new File(path);
    double[][] depthDataArray = new double[1][217088];

    // Read depth data into the array
    int count = 0;
    inDepthDataFile = new Scanner(depthData);
    while (inDepthDataFile.hasNext()) {
        String currentStr = inDepthDataFile.nextLine();
        if (!currentStr.isEmpty())
            depthDataArray[0][count++] = Double.parseDouble(currentStr);
    }

    double[][] depthDataMatrix = reshape(depthDataArray, 512, 424);
    Mat matDepthDataMatrix = new Mat(512, 424, CvType.CV_64F);

    // Zero out depth values outside the [minThreshold, maxThreshold] range
    for (int i = 0; i < depthDataMatrix.length; i++) {
        for (int j = 0; j < depthDataMatrix[0].length; j++) {
            if (depthDataMatrix[i][j] > maxThreshold || depthDataMatrix[i][j] < minThreshold)
                depthDataMatrix[i][j] = 0;
        }
    }

    // Find the maximum remaining depth value
    double max = 0;
    for (int i = 0; i < depthDataMatrix.length; i++) {
        for (int j = 0; j < depthDataMatrix[0].length; j++) {
            if (depthDataMatrix[i][j] > max)
                max = depthDataMatrix[i][j];
        }
    }

    // Fill the depth Mat, scaling values to the 0-255 range
    for (int i = 0; i < depthDataMatrix.length; i++) {
        for (int j = 0; j < depthDataMatrix[0].length; j++) {
            matDepthDataMatrix.put(i, j, depthDataMatrix[i][j] / max * 255.0);
        }
    }

    // Apply a colormap to visualize the depth image
    Mat processedMathDepthImage = new Mat(matDepthDataMatrix.size(), CvType.CV_8U);
    matDepthDataMatrix.convertTo(processedMathDepthImage, CvType.CV_8UC1);
    Core.transpose(processedMathDepthImage, processedMathDepthImage);
    org.opencv.contrib.Contrib.applyColorMap(processedMathDepthImage, processedMathDepthImage,
            org.opencv.contrib.Contrib.COLORMAP_JET);

    return processedMathDepthImage;
}
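A hedged usage sketch: the file path and threshold values below are hypothetical, and the imwrite call assumes the same OpenCV 2.4.x API (org.opencv.highgui.Highgui) that the snippet's org.opencv.contrib import implies:

// Hypothetical call site: visualize a 512x424 depth frame, keeping depths between 500 and 4500.
Mat depthImage = depthDataProcessingUtilities.processDepthDataFile("data/depth_frame.txt", 500, 4500);
org.opencv.highgui.Highgui.imwrite("depth_colormap.png", depthImage);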
From source file:qupath.opencv.classify.OpenCvClassifier.java
License:Open Source License
protected void createAndTrainClassifier() {
    // Create the required Mats
    int nMeasurements = measurements.size();
    Mat matTraining = new Mat(arrayTraining.length / nMeasurements, nMeasurements, CvType.CV_32FC1);
    matTraining.put(0, 0, arrayTraining);
    Mat matResponses = new Mat(arrayResponses.length, 1, CvType.CV_32SC1);
    matResponses.put(0, 0, arrayResponses);

    logger.info("Training size: " + matTraining.size());
    logger.info("Responses size: " + matResponses.size());

    // Create & train the classifier
    try {
        classifier = createClassifier();
        classifier.train(matTraining, Ml.ROW_SAMPLE, matResponses);
    } catch (CvException e) {
        // For reasons I haven't yet discerned, sometimes OpenCV throws an exception with the following message:
        // OpenCV Error: Assertion failed ((int)_sleft.size() < n && (int)_sright.size() < n) in calcDir,
        //   file /tmp/opencv320150620-1681-1u5iwhh/opencv-3.0.0/modules/ml/src/tree.cpp, line 1190
        // With one sample fewer, it can often recover... so attempt that, rather than failing miserably...
        logger.error("Classifier training error", e);
        logger.info("Will attempt retraining classifier with one sample fewer...");
        matTraining = matTraining.rowRange(0, matTraining.rows() - 1);
        matResponses = matResponses.rowRange(0, matResponses.rows() - 1);
        classifier = createClassifier();
        classifier.train(matTraining, Ml.ROW_SAMPLE, matResponses);
    }

    matTraining.release();
    matResponses.release();
    logger.info("Classifier trained with " + arrayResponses.length + " samples");
}
From source file:qupath.opencv.processing.OpenCVTools.java
License:Open Source License
public static void watershedDistanceTransformSplit(Mat matBinary, int maxFilterRadius) {
    Mat matWatershedSeedsBinary;

    // Create a background mask
    Mat matBackground = new Mat();
    Core.compare(matBinary, new Scalar(255), matBackground, Core.CMP_NE);

    // Separate by shape using the watershed transform
    Mat matDistanceTransform = new Mat();
    Imgproc.distanceTransform(matBinary, matDistanceTransform, Imgproc.CV_DIST_L2,
            Imgproc.CV_DIST_MASK_PRECISE);

    // Find local maxima
    matWatershedSeedsBinary = new Mat();
    Imgproc.dilate(matDistanceTransform, matWatershedSeedsBinary,
            OpenCVTools.getCircularStructuringElement(maxFilterRadius));
    Core.compare(matDistanceTransform, matWatershedSeedsBinary, matWatershedSeedsBinary, Core.CMP_EQ);
    matWatershedSeedsBinary.setTo(new Scalar(0), matBackground);

    // Dilate slightly to merge nearby maxima
    Imgproc.dilate(matWatershedSeedsBinary, matWatershedSeedsBinary,
            OpenCVTools.getCircularStructuringElement(2));

    // Create labels for watershed
    Mat matLabels = new Mat(matDistanceTransform.size(), CvType.CV_32F, new Scalar(0));
    labelImage(matWatershedSeedsBinary, matLabels, Imgproc.RETR_CCOMP);

    // Remove everything outside the thresholded region
    matLabels.setTo(new Scalar(0), matBackground);

    // Do watershed
    // 8-connectivity is essential for the watershed lines to be preserved -
    // otherwise OpenCV's findContours could not be used
    ProcessingCV.doWatershed(matDistanceTransform, matLabels, 0.1, true);

    // Update the binary image to remove the watershed lines
    Core.multiply(matBinary, matLabels, matBinary, 1, matBinary.type());
}
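A sketch of how this in-place helper might be invoked, assuming an 8-bit binary mask loaded elsewhere; the file name and filter radius are illustrative only:

// Hypothetical usage: split touching objects in a thresholded mask.
// matBinary must contain 0/255 values; it is modified in place.
Mat matBinary = Imgcodecs.imread("mask.png", Imgcodecs.IMREAD_GRAYSCALE);
OpenCVTools.watershedDistanceTransformSplit(matBinary, 5);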
From source file:qupath.opencv.processing.OpenCVTools.java
License:Open Source License
/**
 * Apply a watershed transform to refine a binary image, guided either by
 * a distance transform or a supplied intensity image.
 *
 * @param matBinary thresholded, 8-bit unsigned integer binary image
 * @param matWatershedIntensities intensity image guiding the watershed transform;
 *        if not set, the distance transform of the binary image will be used
 * @param threshold intensity threshold passed to the watershed transform
 * @param maximaRadius radius of the maximum filter used to find seed points
 */
public static void watershedIntensitySplit(Mat matBinary, Mat matWatershedIntensities,
        double threshold, int maximaRadius) {
    // Separate by intensity using the watershed transform
    // Find local maxima
    Mat matTemp = new Mat();
    Mat strel = getCircularStructuringElement(maximaRadius);
    Imgproc.dilate(matWatershedIntensities, matTemp, strel);
    Core.compare(matWatershedIntensities, matTemp, matTemp, Core.CMP_EQ);
    Imgproc.dilate(matTemp, matTemp, getCircularStructuringElement(2));
    Mat matWatershedSeedsBinary = matTemp;

    // Remove everything outside the thresholded region
    Core.min(matWatershedSeedsBinary, matBinary, matWatershedSeedsBinary);

    // Create labels for watershed
    Mat matLabels = new Mat(matWatershedIntensities.size(), CvType.CV_32F, new Scalar(0));
    labelImage(matWatershedSeedsBinary, matLabels, Imgproc.RETR_CCOMP);

    // Do watershed
    // 8-connectivity is essential for the watershed lines to be preserved -
    // otherwise OpenCV's findContours could not be used
    ProcessingCV.doWatershed(matWatershedIntensities, matLabels, threshold, true);

    // Update the binary image to remove the watershed lines
    Core.multiply(matBinary, matLabels, matBinary, 1, matBinary.type());
}
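A hedged call-site sketch for comparison with the distance-transform variant above; the image files and parameter values are hypothetical, and treating an 8-bit grayscale image as a valid guide is an assumption here:

// Hypothetical usage: split a binary mask along valleys in a separate intensity channel.
Mat matBinary = Imgcodecs.imread("mask.png", Imgcodecs.IMREAD_GRAYSCALE);       // 0/255 mask
Mat matIntensities = Imgcodecs.imread("dapi.png", Imgcodecs.IMREAD_GRAYSCALE);  // guide image
OpenCVTools.watershedIntensitySplit(matBinary, matIntensities, 0.0, 3);         // modifies matBinary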
From source file:qupath.opencv.processing.PixelImageCV.java
License:Open Source License
public PixelImageCV(Mat mat) {
    // Extract dimensions and pixels
    this.width = (int) mat.size().width;
    this.height = (int) mat.size().height;
    pixels = new float[(int) mat.total()];
    if (mat.depth() == CvType.CV_32F)
        mat.get(0, 0, pixels);
    else {
        // Convert to 32-bit float before copying the pixel values
        Mat mat2 = new Mat();
        mat.convertTo(mat2, CvType.CV_32F);
        mat2.get(0, 0, pixels);
    }
}
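A minimal construction sketch; the Mat contents are arbitrary, and only the constructor shown in this snippet is assumed:

// Hypothetical usage: wrap a 3x3 identity matrix; the constructor converts
// any non-CV_32F input to 32-bit float before copying the pixels.
Mat mat = Mat.eye(3, 3, CvType.CV_64F);
PixelImageCV image = new PixelImageCV(mat);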
From source file:Reconhecimento.Regua.java
public static void segmentarRegua() {
    long tempoInicio = System.currentTimeMillis();

    // Coordinates of the selection rectangle
    int x0 = TelaSegmentarRegua.localizarReguaPanel1.x0;
    int y0 = TelaSegmentarRegua.localizarReguaPanel1.y0;
    int x = TelaSegmentarRegua.localizarReguaPanel1.xf;
    int y = TelaSegmentarRegua.localizarReguaPanel1.yf;

    if (x0 > x) {
        int aux = x0;
        x0 = x;
        x = aux;
    }

    if (y0 > y) {
        int aux = y0;
        y0 = y;
        y = aux;
    }

    Mat bigImage = Highgui.imread(TelaSegmentarRegua.localizarReguaPanel1.imagem);

    // Crop the image according to the selection
    Mat img = new Mat(bigImage, new Rect(x0, y0, x - x0, y - y0));

    // Convert the image to grayscale
    Mat grayImg = new Mat();
    Imgproc.cvtColor(img, grayImg, Imgproc.COLOR_BGR2GRAY);

    // Thresholding
    Imgproc.threshold(grayImg, grayImg, 190, 255, THRESH_BINARY_INV);
    Core.bitwise_not(grayImg, grayImg);

    List<Point> pontos = new ArrayList<Point>();

    // Add every pixel belonging to the ruler to a list
    for (int i = 0; i < grayImg.rows(); i++) {
        for (int j = 0; j < grayImg.cols(); j++) {
            if (Arrays.toString(grayImg.get(i, j)).equals("[255.0]")) {
                pontos.add(new Point(j, i));
                Core.line(img, new Point(j, i), new Point(j, i), new Scalar(255, 0, 0));
            }
        }
    }

    String filename = "imagens/regua_segmentada" + Math.random() * 1000 + ".jpg";
    Mat img2 = new Mat();
    Imgproc.resize(img, img2, new Size(img.size().width * 3.0, img.size().height * 3.0));
    Highgui.imwrite(filename, img2);

    int xMin = 5000, yMin = 5000;
    int xMax = 0, yMax = 0;

    // Extreme points of the ruler
    for (Point ponto : pontos) {
        if (ponto.x > xMax) {
            xMax = (int) ponto.x;
        }
        if (ponto.x < xMin) {
            xMin = (int) ponto.x;
        }
        if (ponto.y > yMax) {
            yMax = (int) ponto.y;
        }
        if (ponto.y < yMin) {
            yMin = (int) ponto.y;
        }
    }

    if (xMax - xMin > yMax - yMin) {
        // Ruler in horizontal position.
        // The scale of the image used in processing makes it necessary
        // to multiply by 2 to keep the measurements proportional.
        larguraPixels = (xMax - xMin) * 2;
    } else {
        // Ruler in vertical position
        larguraPixels = (yMax - yMin) * 2;
    }

    long tempoFim = System.currentTimeMillis() - tempoInicio;

    centimetrosPorPixel = 30.0 / larguraPixels;

    TelaSegmentarRegua2 telaResposta = new TelaSegmentarRegua2();
    telaResposta.jLabel1.setIcon(new ImageIcon(filename));
    telaResposta.jLabel4.setText(larguraPixels + " pixels");
    telaResposta.jLabel5.setText(String.valueOf(centimetrosPorPixel).substring(0, 5));
    telaResposta.jLabel7.setText(tempoFim + " ms");
    telaResposta.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE);
    telaResposta.setLocation(200, 200);
    telaResposta.setVisible(true);
}
From source file:Reconhecimento.SaltoSegmentacaoCirculo.java
public static void medirExtensao() {
    long tempoInicial = System.currentTimeMillis();

    Mat img = new Mat();
    Mat img2 = new Mat();
    VideoCapture cap = new VideoCapture(Video.videoAtual);

    // Capture the first frame of the video
    cap.read(img);

    // Capture the last frame of the video
    boolean continuar = true;
    Mat aux = new Mat();
    while (continuar) {
        cap.read(aux);
        if (aux.size().area() > 0) {
            img2 = aux.clone();
        } else {
            continuar = false;
        }
    }

    Point centroCirculoInicial = segmentarCirculo(img);
    Point centroCirculoFinal = segmentarCirculo(img2);

    // Jump length in centimeters
    extensao = (centroCirculoInicial.x - centroCirculoFinal.x) * Regua.centimetrosPorPixel;

    // Convert the jump length to meters
    extensao = extensao / 100.0;

    System.out.println(String.valueOf(extensao).substring(0, 4) + " m");

    Mat show1 = new Mat();
    Mat show2 = new Mat();
    Imgproc.resize(img, show1, new Size(420, 240));
    Imgproc.resize(img2, show2, new Size(420, 240));
    Highgui.imwrite("imagens/" + Video.nomeVideo + 420 + "x" + 2400 + ".jpg", show1);
    Highgui.imwrite("imagens/" + Video.nomeVideo + 420 + "x" + 240 + "u.jpg", show2);

    TelaMedirDistSegmentacaoCirculo tela = new TelaMedirDistSegmentacaoCirculo();
    tela.setLocation(200, 200);
    tela.jLabel1.setIcon(new ImageIcon("imagens/" + Video.nomeVideo + 420 + "x" + 2400 + ".jpg"));
    tela.jLabel2.setIcon(new ImageIcon("imagens/" + Video.nomeVideo + 420 + "x" + 240 + "u.jpg"));
    tela.jLabel4.setText(String.valueOf(extensao).substring(0, 4) + " m");
    tela.jLabel6.setText(String.valueOf(System.currentTimeMillis() - tempoInicial) + " ms");
    tela.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE);
    tela.setVisible(true);
}
From source file:saliency.saliency.java
/**
 * @param args the command line arguments
 */
public static void main(String[] args) {
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

    Mat input_img = imread("input_img/sea.jpg");
    if (input_img.cols() == 0) {
        return;
    }
    // For temporary testing: work at a fixed resolution
    Imgproc.resize(input_img, input_img, new Size(1980, 1080), 0, 0, Imgproc.INTER_LINEAR);

    /////////////////////// Step 1: extraction of early visual features ///////////////////////

    // Intensity image: intensity = (R + G + B) / 3
    Mat intensity_img = new Mat(input_img.rows(), input_img.cols(), CV_16UC1);
    int img_width = intensity_img.cols();
    int img_height = intensity_img.rows();
    int x, y;
    int i, c, s;
    int max_intensity = 0;
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            int temp_intensity = ((int) input_img.get(y, x)[0] + (int) input_img.get(y, x)[1]
                    + (int) input_img.get(y, x)[2]) / 3;
            intensity_img.put(y, x, temp_intensity);
            if (max_intensity < temp_intensity) {
                max_intensity = temp_intensity;
            }
        }
    }

    // Create a Gaussian pyramid for intensity
    Mat[] i_gaussian_pyramid = new Mat[9];
    i_gaussian_pyramid[0] = intensity_img.clone();
    for (i = 0; i < 8; i++) {
        i_gaussian_pyramid[i + 1] = i_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(i_gaussian_pyramid[i + 1], i_gaussian_pyramid[i + 1], new Size());
    }

    // Create intensity feature maps using center-surround differences
    Mat[][] intensity_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            intensity_feature_map[c][s] = center_surround.main(i_gaussian_pyramid[c + 2],
                    i_gaussian_pyramid[s + c + 5], 0);
        }
    }

    // Get the color image normalized by I; normalization is only applied at locations
    // where I is larger than 1/10 of its maximum over the entire image
    Mat norm_input_img = input_img.clone();
    norm_input_img.convertTo(norm_input_img, CV_64F);
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            double[] temp = new double[3];
            if (intensity_img.get(y, x)[0] > (max_intensity / 10)) {
                temp[0] = norm_input_img.get(y, x)[0] / intensity_img.get(y, x)[0];
                temp[1] = norm_input_img.get(y, x)[1] / intensity_img.get(y, x)[0];
                temp[2] = norm_input_img.get(y, x)[2] / intensity_img.get(y, x)[0];
            } else {
                temp[0] = 0;
                temp[1] = 0;
                temp[2] = 0;
            }
            norm_input_img.put(y, x, temp);
        }
    }

    // Get R, G, B and Y (yellow) single color channel images; channel order is [0]: b, [1]: g, [2]: r
    Mat r_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat g_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat b_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat y_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            // R = max(0, r - (g + b) / 2)
            double temp_chroma = max(0, (norm_input_img.get(y, x)[2]
                    - (norm_input_img.get(y, x)[1] + norm_input_img.get(y, x)[0]) / 2));
            r_img.put(y, x, temp_chroma);
            // G = max(0, g - (r + b) / 2)
            temp_chroma = max(0, (norm_input_img.get(y, x)[1]
                    - (norm_input_img.get(y, x)[2] + norm_input_img.get(y, x)[0]) / 2));
            g_img.put(y, x, temp_chroma);
            // B = max(0, b - (r + g) / 2)
            temp_chroma = max(0, (norm_input_img.get(y, x)[0]
                    - (norm_input_img.get(y, x)[2] + norm_input_img.get(y, x)[1]) / 2));
            b_img.put(y, x, temp_chroma);
            // Y = max(0, (r + g) / 2 - |r - g| / 2 - b)
            temp_chroma = max(0, ((norm_input_img.get(y, x)[2] + norm_input_img.get(y, x)[1]) / 2
                    - abs(norm_input_img.get(y, x)[2] - norm_input_img.get(y, x)[1]) / 2
                    - norm_input_img.get(y, x)[0]));
            y_img.put(y, x, temp_chroma);
        }
    }

    // Create Gaussian pyramids for the 4 color channels
    Mat[] b_gaussian_pyramid = new Mat[9];
    b_gaussian_pyramid[0] = b_img.clone();
    for (i = 0; i < 8; i++) {
        b_gaussian_pyramid[i + 1] = b_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(b_gaussian_pyramid[i + 1], b_gaussian_pyramid[i + 1], new Size());
    }
    Mat[] g_gaussian_pyramid = new Mat[9];
    g_gaussian_pyramid[0] = g_img.clone();
    for (i = 0; i < 8; i++) {
        g_gaussian_pyramid[i + 1] = g_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(g_gaussian_pyramid[i + 1], g_gaussian_pyramid[i + 1], new Size());
    }
    Mat[] r_gaussian_pyramid = new Mat[9];
    r_gaussian_pyramid[0] = r_img.clone();
    for (i = 0; i < 8; i++) {
        r_gaussian_pyramid[i + 1] = r_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(r_gaussian_pyramid[i + 1], r_gaussian_pyramid[i + 1], new Size());
    }
    Mat[] y_gaussian_pyramid = new Mat[9];
    y_gaussian_pyramid[0] = y_img.clone();
    for (i = 0; i < 8; i++) {
        y_gaussian_pyramid[i + 1] = y_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(y_gaussian_pyramid[i + 1], y_gaussian_pyramid[i + 1], new Size());
    }

    // Create color feature maps using center-surround differences
    // RG(c,s) = |(R(c) - G(c)) (-) (G(s) - R(s))|
    Mat[][] rg_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat r_minus_g = r_gaussian_pyramid[c + 2].clone();
            Core.subtract(r_gaussian_pyramid[c + 2], g_gaussian_pyramid[c + 2], r_minus_g);
            Mat g_minus_r = g_gaussian_pyramid[s + c + 5].clone();
            Core.subtract(g_gaussian_pyramid[s + c + 5], r_gaussian_pyramid[s + c + 5], g_minus_r);
            rg_feature_map[c][s] = center_surround.main(r_minus_g, g_minus_r, 1);
        }
    }
    // BY(c,s) = |(B(c) - Y(c)) (-) (Y(s) - B(s))|
    Mat[][] by_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat b_minus_y = b_gaussian_pyramid[c + 2].clone();
            Core.subtract(b_gaussian_pyramid[c + 2], y_gaussian_pyramid[c + 2], b_minus_y);
            Mat y_minus_b = y_gaussian_pyramid[s + c + 5].clone();
            Core.subtract(y_gaussian_pyramid[s + c + 5], b_gaussian_pyramid[s + c + 5], y_minus_b);
            by_feature_map[c][s] = center_surround.main(b_minus_y, y_minus_b, 1);
        }
    }

    // Create an oriented Gabor pyramid from the intensity image
    int kernel_size = 10; // 31; adjust value according to the reference
    double sigma = 3; // default 0.56; the larger the value, the larger the support of the Gabor
                      // function and the number of visible parallel excitatory/inhibitory stripes
    double[] theta = new double[4];
    theta[0] = 0;
    theta[1] = Math.PI / 4;
    theta[2] = Math.PI / 2;
    theta[3] = Math.PI * 3 / 4;
    double lambda = 5; // 36; minimum 3
    double gamma = 0.5; // 0.02;
    Mat[][] gabor_pyramid = new Mat[4][9];
    int theta_index;
    for (theta_index = 0; theta_index < 4; theta_index++) {
        Mat gabor_kernel = Imgproc.getGaborKernel(new Size(kernel_size, kernel_size), sigma,
                theta[theta_index], lambda, gamma);
        for (i = 0; i < 9; i++) {
            gabor_pyramid[theta_index][i] = i_gaussian_pyramid[i].clone();
            Imgproc.filter2D(i_gaussian_pyramid[i], gabor_pyramid[theta_index][i], -1, gabor_kernel);
        }
    }

    // Get orientation feature maps using center-surround differences
    Mat[][][] orientation_feature_map = new Mat[4][3][2];
    for (theta_index = 0; theta_index < 4; theta_index++) {
        for (c = 0; c < 3; c++) {
            for (s = 0; s < 2; s++) {
                orientation_feature_map[theta_index][c][s] = center_surround.main(
                        gabor_pyramid[theta_index][c + 2], gabor_pyramid[theta_index][s + c + 5], 0);
            }
        }
    }

    /////////////////////// Step 2: the saliency map ///////////////////////

    // Get the intensity conspicuity map
    Mat intensity_conspicuity_map = Mat.zeros(intensity_feature_map[2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(intensity_feature_map[c][s]);
            Mat resized_feature_map = Mat.zeros(intensity_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, intensity_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(intensity_conspicuity_map, 1, resized_feature_map, 1.0 / 6, 0,
                    intensity_conspicuity_map);
        }
    }

    // Get the color conspicuity map
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Core.normalize(rg_feature_map[c][s], rg_feature_map[c][s], 0, 255, Core.NORM_MINMAX);
            rg_feature_map[c][s].convertTo(rg_feature_map[c][s], CV_16UC1);
            Core.normalize(by_feature_map[c][s], by_feature_map[c][s], 0, 255, Core.NORM_MINMAX);
            by_feature_map[c][s].convertTo(by_feature_map[c][s], CV_16UC1);
        }
    }
    Mat color_conspicuity_map = Mat.zeros(rg_feature_map[2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(rg_feature_map[c][s]);
            Mat resized_feature_map = Mat.zeros(rg_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, rg_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(color_conspicuity_map, 1, resized_feature_map, 1.0 / 12, 0,
                    color_conspicuity_map);
            norm_out = map_norm.main(by_feature_map[c][s]);
            resized_feature_map = Mat.zeros(by_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, by_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(color_conspicuity_map, 1, resized_feature_map, 1.0 / 12, 0,
                    color_conspicuity_map);
        }
    }

    // Get one orientation conspicuity map per orientation
    Mat[] orientation_conspicuity_maps = new Mat[4];
    for (theta_index = 0; theta_index < 4; theta_index++) {
        orientation_conspicuity_maps[theta_index] = Mat.zeros(
                orientation_feature_map[theta_index][2][0].size(), CV_16UC1);
        for (c = 0; c < 3; c++) {
            for (s = 0; s < 2; s++) {
                Mat norm_out = map_norm.main(orientation_feature_map[theta_index][c][s]);
                Mat resized_feature_map = Mat.zeros(
                        orientation_feature_map[theta_index][2][0].size(), CV_16UC1);
                Imgproc.resize(norm_out, resized_feature_map,
                        orientation_feature_map[theta_index][2][0].size(), 0, 0, Imgproc.INTER_LINEAR);
                Core.addWeighted(orientation_conspicuity_maps[theta_index], 1, resized_feature_map,
                        1.0 / 6, 0, orientation_conspicuity_maps[theta_index]);
            }
        }
    }
    // Combine the per-orientation maps
    Mat orientation_conspicuity_map = Mat.zeros(orientation_feature_map[0][2][0].size(), CV_16UC1);
    for (theta_index = 0; theta_index < 4; theta_index++) {
        Core.addWeighted(orientation_conspicuity_map, 1,
                map_norm.main(orientation_conspicuity_maps[theta_index]), 1.0 / 4, 0,
                orientation_conspicuity_map);
    }

    // Combine the three conspicuity maps into the saliency map
    Mat saliency = Mat.zeros(intensity_conspicuity_map.size(), CV_16UC1);
    Core.addWeighted(saliency, 1, map_norm.main(intensity_conspicuity_map), 1.0 / 3, 0, saliency);
    Core.addWeighted(saliency, 1, map_norm.main(color_conspicuity_map), 1.0 / 3, 0, saliency);
    Core.addWeighted(saliency, 1, map_norm.main(orientation_conspicuity_map), 1.0 / 3, 0, saliency);

    // Normalize, resize and write out the saliency and conspicuity maps
    Core.normalize(saliency, saliency, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(saliency, saliency, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("saliency.bmp", saliency);
    Core.normalize(intensity_conspicuity_map, intensity_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(intensity_conspicuity_map, intensity_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("intensity_conspicuity_map.bmp", intensity_conspicuity_map);
    Core.normalize(color_conspicuity_map, color_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(color_conspicuity_map, color_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("color_conspicuity_map.bmp", color_conspicuity_map);
    Core.normalize(orientation_conspicuity_map, orientation_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(orientation_conspicuity_map, orientation_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map.bmp", orientation_conspicuity_map);
    Imgproc.resize(input_img, input_img, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("input_img.bmp", input_img);
}