Example usage for org.opencv.core Mat convertTo

List of usage examples for org.opencv.core Mat convertTo

Introduction

This page lists example usages of org.opencv.core Mat.convertTo, collected from real-world projects.

Prototype

public void convertTo(Mat m, int rtype) 
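convertTo converts a Mat's elements to another data type, clamping out-of-range values with saturate_cast. Overloads convertTo(Mat m, int rtype, double alpha) and convertTo(Mat m, int rtype, double alpha, double beta) additionally scale and shift each element before the cast, and rtype = -1 keeps the source depth; several examples below rely on this. A minimal, self-contained sketch (the class name and values are illustrative only):

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Scalar;

public class ConvertToDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // 3x3 single-channel float Mat filled with 300.5
        Mat src = new Mat(3, 3, CvType.CV_32FC1, new Scalar(300.5));

        // Plain conversion: saturate_cast clamps 300.5 to 255 in CV_8U.
        Mat dst8u = new Mat();
        src.convertTo(dst8u, CvType.CV_8U);

        // Scaling overload: dst = src * alpha + beta, applied before the
        // cast; rtype = -1 keeps the source depth (CV_32F here).
        Mat scaled = new Mat();
        src.convertTo(scaled, -1, 0.5, 10.0);

        System.out.println(dst8u.dump());  // all 255
        System.out.println(scaled.dump()); // all 160.25
    }
}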

Usage

From source file:Recognizer.Recognizer.java

public void SIFT(Image imQ, Image imDB) {
    Mat Q = imQ.Image1CtoMat_CV();
    Mat DB = imDB.Image1CtoMat_CV();

    Mat matQ = new Mat();
    Mat matDB = new Mat();

    Q.convertTo(matQ, CvType.CV_8U);
    DB.convertTo(matDB, CvType.CV_8U);

    FeatureDetector siftDet = FeatureDetector.create(FeatureDetector.SIFT);
    DescriptorExtractor siftExt = DescriptorExtractor.create(DescriptorExtractor.SIFT);

    MatOfKeyPoint kpQ = new MatOfKeyPoint();
    MatOfKeyPoint kpDB = new MatOfKeyPoint();

    siftDet.detect(matQ, kpQ);
    siftDet.detect(matDB, kpDB);

    // compute() (re)allocates the descriptor Mats as needed, so plain new Mat() would also work
    Mat matDescriptorQ = new Mat(matQ.rows(), matQ.cols(), matQ.type());
    Mat matDescriptorDB = new Mat(matDB.rows(), matDB.cols(), matDB.type());

    siftExt.compute(matQ, kpQ, matDescriptorQ);
    siftExt.compute(matDB, kpDB, matDescriptorDB);

    MatOfDMatch matchs = new MatOfDMatch();

    DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);

    matcher.match(matDescriptorQ, matDescriptorDB, matchs);

    // keep the N best matches: sort by ascending distance first, and
    // guard against images that yield fewer than N matches
    int N = 10;

    DMatch[] tmp01 = matchs.toArray();
    java.util.Arrays.sort(tmp01, (a, b) -> Float.compare(a.distance, b.distance));
    DMatch[] tmp02 = java.util.Arrays.copyOf(tmp01, Math.min(N, tmp01.length));

    matchs.fromArray(tmp02);

    Mat matchedImage = new Mat(matQ.rows(), matQ.cols() * 2, matQ.type());
    Features2d.drawMatches(matQ, kpQ, matDB, kpDB, matchs, matchedImage);

    Highgui.imwrite("./descriptedImageBySIFT.jpg", matchedImage);

}
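The two convertTo calls at the top matter because FeatureDetector.detect expects 8-bit input, while Image1CtoMat_CV may return another depth. When the source range can exceed [0, 255], a bare convertTo saturates rather than rescales, so a guard like the following sketch is safer (toU8 is a hypothetical helper, not part of the project above):

// Hypothetical helper: make sure a Mat is 8-bit before feature detection.
static Mat toU8(Mat src) {
    if (src.depth() == CvType.CV_8U) {
        return src;
    }
    Mat dst = new Mat();
    // Map the full source range onto [0, 255] first, then cast;
    // a bare convertTo would clip out-of-range values instead.
    Core.normalize(src, dst, 0, 255, Core.NORM_MINMAX);
    dst.convertTo(dst, CvType.CV_8U);
    return dst;
}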

From source file:saliency.saliency.java

/**
 * @param args the command line arguments
 */
public static void main(String[] args) {
    // TODO code application logic here

    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

    Mat input_img = imread("input_img/sea.jpg");
    //for temp test start
    Imgproc.resize(input_img, input_img, new Size(1980, 1080), 0, 0, Imgproc.INTER_LINEAR);
    //for temp test end
    if (input_img.cols() == 0) {
        return;
    }

    //benchmark
    ///////////////////////step 1 : Extraction of Early Visual Features///////////////////////////////
    //intensity image: intensity_img
    Mat intensity_img = new Mat(input_img.rows(), input_img.cols(), CV_16UC1);
    //intensity = (R+G+B)/3
    int img_width = intensity_img.cols();
    int img_height = intensity_img.rows();
    int x, y;
    int i, c, s;
    int max_intensity = 0;
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            int temp_intensity = ((int) input_img.get(y, x)[0] + (int) input_img.get(y, x)[1]
                    + (int) input_img.get(y, x)[2]) / 3;
            intensity_img.put(y, x, temp_intensity);
            if (max_intensity < temp_intensity) {
                max_intensity = temp_intensity;
            }
        }
    }
    //create Gaussian pyramid for intensity
    Mat[] i_gaussian_pyramid = new Mat[9];
    i_gaussian_pyramid[0] = intensity_img.clone();
    for (i = 0; i < 8; i++) {
        i_gaussian_pyramid[i + 1] = i_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(i_gaussian_pyramid[i + 1], i_gaussian_pyramid[i + 1], new Size());
    }

    //create intensity feature map using center-surround differences
    Mat[][] intensity_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            intensity_feature_map[c][s] = center_surround.main(i_gaussian_pyramid[c + 2],
                    i_gaussian_pyramid[s + c + 5], 0);
        }
    }
    //benchmark
    //imwrite("intensity_feature_map_00.bmp", intensity_feature_map[0][0]);
    //get normalized color image by I.
    Mat norm_input_img = input_img.clone();
    norm_input_img.convertTo(norm_input_img, CV_64F);
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            //normalization is only applied at the locations where I is larger than 1/10 of its maximum over entire image
            double[] temp = new double[3];
            if (intensity_img.get(y, x)[0] > (max_intensity / 10)) {
                temp[0] = norm_input_img.get(y, x)[0] / intensity_img.get(y, x)[0];
                temp[1] = norm_input_img.get(y, x)[1] / intensity_img.get(y, x)[0];
                temp[2] = norm_input_img.get(y, x)[2] / intensity_img.get(y, x)[0];
                norm_input_img.put(y, x, temp);
            } else {
                temp[0] = 0;
                temp[1] = 0;
                temp[2] = 0;
                norm_input_img.put(y, x, temp);
            }
        }
    }
    //get R G B Y(Yellow) single color channel images
    Mat r_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat g_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat b_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat y_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    //[0]: b [1]:g [2]:r
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            //R = max(0,r-(g+b)/2)
            double temp_chroma = max(0, (norm_input_img.get(y, x)[2]
                    - (norm_input_img.get(y, x)[1] + norm_input_img.get(y, x)[0]) / 2));
            r_img.put(y, x, temp_chroma);
            //G = max(0,g-(r+b)/2)
            temp_chroma = max(0, (norm_input_img.get(y, x)[1]
                    - (norm_input_img.get(y, x)[2] + norm_input_img.get(y, x)[0]) / 2));
            g_img.put(y, x, temp_chroma);
            //B = max(0,b-(r+g)/2)
            temp_chroma = max(0, (norm_input_img.get(y, x)[0]
                    - (norm_input_img.get(y, x)[2] + norm_input_img.get(y, x)[1]) / 2));
            b_img.put(y, x, temp_chroma);
            //Y = max(0,(r+g)/2-|r-g|/2-b)
            temp_chroma = max(0,
                    ((norm_input_img.get(y, x)[2] + norm_input_img.get(y, x)[1]) / 2
                            - abs(norm_input_img.get(y, x)[2] - norm_input_img.get(y, x)[1]) / 2
                            - norm_input_img.get(y, x)[0]));
            y_img.put(y, x, temp_chroma);
        }
    }
    //create Gaussian pyramid for 4 color channels
    Mat[] b_gaussian_pyramid = new Mat[9];
    b_gaussian_pyramid[0] = b_img.clone();
    for (i = 0; i < 8; i++) {
        b_gaussian_pyramid[i + 1] = b_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(b_gaussian_pyramid[i + 1], b_gaussian_pyramid[i + 1], new Size());
    }
    Mat[] g_gaussian_pyramid = new Mat[9];
    g_gaussian_pyramid[0] = g_img.clone();
    for (i = 0; i < 8; i++) {
        g_gaussian_pyramid[i + 1] = g_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(g_gaussian_pyramid[i + 1], g_gaussian_pyramid[i + 1], new Size());
    }
    Mat[] r_gaussian_pyramid = new Mat[9];
    r_gaussian_pyramid[0] = r_img.clone();
    for (i = 0; i < 8; i++) {
        r_gaussian_pyramid[i + 1] = r_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(r_gaussian_pyramid[i + 1], r_gaussian_pyramid[i + 1], new Size());
    }
    Mat[] y_gaussian_pyramid = new Mat[9];
    y_gaussian_pyramid[0] = y_img.clone();
    for (i = 0; i < 8; i++) {
        y_gaussian_pyramid[i + 1] = y_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(y_gaussian_pyramid[i + 1], y_gaussian_pyramid[i + 1], new Size());
    }
    //create color feature map using center-surround differences
    //RG(c,s) = |(R(c)-G(c))(-)(G(c)-R(c))|
    Mat[][] rg_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat r_minus_g = r_gaussian_pyramid[c + 2].clone();
            Core.subtract(r_gaussian_pyramid[c + 2], g_gaussian_pyramid[c + 2], r_minus_g);
            Mat g_minus_r = g_gaussian_pyramid[s + c + 5].clone();
            Core.subtract(g_gaussian_pyramid[s + c + 5], r_gaussian_pyramid[s + c + 5], g_minus_r);
            rg_feature_map[c][s] = center_surround.main(r_minus_g, g_minus_r, 1);
        }
    }
    //BY(c,s) = |(B(c)-Y(c))(-)(Y(c)-B(c))|
    Mat[][] by_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat b_minus_y = b_gaussian_pyramid[c + 2].clone();
            Core.subtract(b_gaussian_pyramid[c + 2], y_gaussian_pyramid[c + 2], b_minus_y);
            Mat y_minus_b = y_gaussian_pyramid[s + c + 5].clone();
            Core.subtract(y_gaussian_pyramid[s + c + 5], b_gaussian_pyramid[s + c + 5], y_minus_b);
            by_feature_map[c][s] = center_surround.main(b_minus_y, y_minus_b, 1);
        }
    }
    //benchmark
    //create oriented Gabor pyramid from intensity image
    int kernel_size = 10;//31; adjust value according to reference
    double sigma = 3;//default: sigma = 0.56 * lambda; the larger sigma, the larger the support of the Gabor function, and the more visible parallel excitatory and inhibitory stripe zones appear.
    double[] theta = new double[4];
    theta[0] = 0;
    theta[1] = Math.PI / 4;
    theta[2] = Math.PI / 2;
    theta[3] = Math.PI * 3 / 4;
    double lambda = 5;//36; minimum 3
    double gamma = 0.5;//0.02;
    // double psi = 0;
    Mat[][] gabor_pyramid = new Mat[4][9];
    int theta_index;
    for (theta_index = 0; theta_index < 4; theta_index++) {
        Mat gabor_kernel = Imgproc.getGaborKernel(new Size(kernel_size, kernel_size), sigma, theta[theta_index],
                lambda, gamma);
        //gabor_pyramid[theta_index][0] = intensity_img.clone();
        for (i = 0; i < 9; i++) {
            //gabor_pyramid[theta_index][i] = gabor_pyramid[theta_index][i].clone();
            gabor_pyramid[theta_index][i] = i_gaussian_pyramid[i].clone();
            Imgproc.filter2D(i_gaussian_pyramid[i], gabor_pyramid[theta_index][i], -1, gabor_kernel);
            //Imgproc.resize(gabor_pyramid[theta_index][i], gabor_pyramid[theta_index][i], new Size(), 0.5, 0.5, Imgproc.INTER_AREA);
        }
    }
    //imwrite("gabor_pyramid_01.bmp", gabor_pyramid[0][1]);
    //imwrite("gabor_pyramid_11.bmp", gabor_pyramid[1][1]);
    //imwrite("gabor_pyramid_21.bmp", gabor_pyramid[2][1]);
    //imwrite("gabor_pyramid_31.bmp", gabor_pyramid[3][1]);
    //imwrite("gabor_pyramid_03.bmp", gabor_pyramid[0][3]);
    //get orientation feature map using center-surround differences
    Mat[][][] orientation_feature_map = new Mat[4][3][2];
    for (theta_index = 0; theta_index < 4; theta_index++) {
        for (c = 0; c < 3; c++) {
            for (s = 0; s < 2; s++) {
                orientation_feature_map[theta_index][c][s] = center_surround
                        .main(gabor_pyramid[theta_index][c + 2], gabor_pyramid[theta_index][s + c + 5], 0);
            }
        }
    }
    //benchmark
    //imwrite("orientation_test_00.bmp", orientation_feature_map[0][0][0]);
    ///////////////////////step 2 : the saliency map///////////////////////////////
    //get intensity conspicuity map
    Mat intensity_conspicuity_map = Mat.zeros(intensity_feature_map[2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(intensity_feature_map[c][s]);
            Mat resized_feature_map = Mat.zeros(intensity_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, intensity_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(intensity_conspicuity_map, 1, resized_feature_map, 1.0 / 6, 0,
                    intensity_conspicuity_map);
            /*if(c == 0 && s == 0){
            imwrite("in.bmp", intensity_feature_map[c][s]);
            imwrite("map_norm.bmp",norm_out);
            imwrite("resized_feature_map.bmp", resized_feature_map);
            }*/
        }
    }
    //benchmark
    //Core.normalize(intensity_conspicuity_map, intensity_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    //imwrite("intensity_conspicuity_map.bmp", intensity_conspicuity_map);
    //get color conspicuity map
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Core.normalize(rg_feature_map[c][s], rg_feature_map[c][s], 0, 255, Core.NORM_MINMAX);
            rg_feature_map[c][s].convertTo(rg_feature_map[c][s], CV_16UC1);
            Core.normalize(by_feature_map[c][s], by_feature_map[c][s], 0, 255, Core.NORM_MINMAX);
            by_feature_map[c][s].convertTo(by_feature_map[c][s], CV_16UC1);
        }
    }
    //imwrite("test_rg.bmp",rg_feature_map[0][0]);      
    Mat color_conspicuity_map = Mat.zeros(rg_feature_map[2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(rg_feature_map[c][s]);
            Mat resized_feature_map = Mat.zeros(rg_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, rg_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(color_conspicuity_map, 1, resized_feature_map, 1.0 / 12, 0, color_conspicuity_map);
            norm_out = map_norm.main(by_feature_map[c][s]);
            resized_feature_map = Mat.zeros(by_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, by_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(color_conspicuity_map, 1, resized_feature_map, 1.0 / 12, 0, color_conspicuity_map);
        }
    }
    //benchmark
    //get orientation conspicuity map
    Mat orientation_conspicuity_map_0 = Mat.zeros(orientation_feature_map[0][2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(orientation_feature_map[0][c][s]);
            Mat resized_feature_map = Mat.zeros(orientation_feature_map[0][2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, orientation_feature_map[0][2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(orientation_conspicuity_map_0, 1, resized_feature_map, 1.0 / 6, 0,
                    orientation_conspicuity_map_0);
        }
    }

    Mat orientation_conspicuity_map_1 = Mat.zeros(orientation_feature_map[1][2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(orientation_feature_map[1][c][s]);
            Mat resized_feature_map = Mat.zeros(orientation_feature_map[1][2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, orientation_feature_map[1][2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(orientation_conspicuity_map_1, 1, resized_feature_map, 1.0 / 6, 0,
                    orientation_conspicuity_map_1);
        }
    }
    Mat orientation_conspicuity_map_2 = Mat.zeros(orientation_feature_map[2][2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(orientation_feature_map[2][c][s]);
            Mat resized_feature_map = Mat.zeros(orientation_feature_map[2][2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, orientation_feature_map[2][2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(orientation_conspicuity_map_2, 1, resized_feature_map, 1.0 / 6, 0,
                    orientation_conspicuity_map_2);
        }
    }
    Mat orientation_conspicuity_map_3 = Mat.zeros(orientation_feature_map[3][2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(orientation_feature_map[3][c][s]);
            Mat resized_feature_map = Mat.zeros(orientation_feature_map[3][2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, orientation_feature_map[3][2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(orientation_conspicuity_map_3, 1, resized_feature_map, 1.0 / 6, 0,
                    orientation_conspicuity_map_3);
        }
    }
    Mat orientation_conspicuity_map = Mat.zeros(orientation_feature_map[0][2][0].size(), CV_16UC1);
    Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(orientation_conspicuity_map_0), 1.0 / 4, 0,
            orientation_conspicuity_map);
    Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(orientation_conspicuity_map_1), 1.0 / 4, 0,
            orientation_conspicuity_map);
    Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(orientation_conspicuity_map_2), 1.0 / 4, 0,
            orientation_conspicuity_map);
    Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(orientation_conspicuity_map_3), 1.0 / 4, 0,
            orientation_conspicuity_map);
    //benchmark
    Mat saliency = Mat.zeros(intensity_conspicuity_map.size(), CV_16UC1);
    Core.addWeighted(saliency, 1, map_norm.main(intensity_conspicuity_map), 1.0 / 3, 0, saliency);
    Core.addWeighted(saliency, 1, map_norm.main(color_conspicuity_map), 1.0 / 3, 0, saliency);
    Core.addWeighted(saliency, 1, map_norm.main(orientation_conspicuity_map), 1.0 / 3, 0, saliency);
    //benchmark
    Core.normalize(saliency, saliency, 0, 255, Core.NORM_MINMAX);
    //for temp test
    Imgproc.resize(saliency, saliency, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("saliency.bmp", saliency);

    Core.normalize(intensity_conspicuity_map, intensity_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(intensity_conspicuity_map, intensity_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("intensity_conspicuity_map.bmp", intensity_conspicuity_map);
    Core.normalize(color_conspicuity_map, color_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(color_conspicuity_map, color_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("color_conspicuity_map.bmp", color_conspicuity_map);
    Core.normalize(orientation_conspicuity_map, orientation_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(orientation_conspicuity_map, orientation_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map.bmp", orientation_conspicuity_map);
    Imgproc.resize(input_img, input_img, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("input_img.bmp", input_img);
    //for testing algorithm
    /*
    Mat temp1 = Mat.zeros(intensity_conspicuity_map.size(), CV_16UC1);
    temp1 = map_norm.main(intensity_conspicuity_map);
    Core.normalize(temp1, temp1, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp1, temp1, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("intensity.bmp", temp1);
    temp1 = map_norm.main(color_conspicuity_map);
    Core.normalize(temp1, temp1, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp1, temp1, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("color.bmp", temp1);
    temp1 = map_norm.main(orientation_conspicuity_map);
    Core.normalize(temp1, temp1, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp1, temp1, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation.bmp", temp1);
            
    Mat temp2 = Mat.zeros(orientation_conspicuity_map_0.size(), CV_16UC1);
    temp2 = map_norm.main(orientation_conspicuity_map_0);
    Core.normalize(temp2, temp2, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp2, temp2, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map_0.bmp", temp2);
    temp2 = map_norm.main(orientation_conspicuity_map_1);
    Core.normalize(temp2, temp2, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp2, temp2, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map_1.bmp", temp2);
    temp2 = map_norm.main(orientation_conspicuity_map_2);
    Core.normalize(temp2, temp2, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp2, temp2, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map_2.bmp", temp2);
    temp2 = map_norm.main(orientation_conspicuity_map_3);
    Core.normalize(temp2, temp2, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp2, temp2, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map_3.bmp", temp2);
    */
}
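A pattern repeated throughout this example: Core.normalize brings a feature map into [0, 255], then convertTo re-types it to CV_16UC1 so that Core.addWeighted, which requires both inputs to have the same type, can accumulate maps produced at different depths. The same idea as a standalone sketch (the helper name is illustrative, not from the project above):

// Illustrative helper: normalize a single-channel feature map into
// [0, 255] and re-type it so maps of different depths can be summed
// with Core.addWeighted, which needs matching input types.
static Mat toAccumulatorType(Mat featureMap) {
    Mat out = new Mat();
    Core.normalize(featureMap, out, 0, 255, Core.NORM_MINMAX);
    out.convertTo(out, CvType.CV_16UC1);
    return out;
}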

From source file:src.model.filters.DotsFilter.java

public void doGet(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    System.out.println("**______________DOTS_______________**");

    try {

        String imgInput = request.getParameter("name");
        String savePath = savePath(request);
        //____________________________________
        int elementSize = 2;
        int bsize = 10;
        Mat source = Imgcodecs.imread(savePath);

        Mat dst = zeros(source.size(), CV_8UC3);
        Mat cir = zeros(source.size(), CV_8UC1);
        Mat destination = new Mat(source.rows(), source.cols(), source.type());
        Mat element = Imgproc.getStructuringElement(Imgproc.CV_SHAPE_RECT,
                new Size(elementSize * 3 + 1, elementSize * 3 + 1), new Point(elementSize, elementSize));

        for (int i = 0; i < source.rows(); i += bsize) {
            for (int j = 0; j < source.cols(); j += bsize) {

                circle(cir, new Point(j + bsize / (double) 2, i + bsize / (double) 2), bsize / 2 - 1,
                        new Scalar(255, 255, 255), -1, Core.LINE_AA, 0);

            }
        }

        Imgproc.morphologyEx(source, dst, Imgproc.MORPH_CLOSE, element);

        Mat cir_32f = new Mat(source.rows(), source.cols(), CV_32F);
        cir.convertTo(cir_32f, CV_32F);
        normalize(cir_32f, cir_32f, 0, 1, NORM_MINMAX);

        Mat dst_32f = new Mat(source.rows(), source.cols(), CV_32F);
        dst.convertTo(dst_32f, CV_32F);

        Vector<Mat> channels = new Vector<>();
        split(dst_32f, channels);
        System.out.println(channels.size());
        for (int i = 0; i < channels.size(); ++i) {
            channels.set(i, channels.get(i).mul(cir_32f));
        }
        merge(channels, dst_32f);
        dst_32f.convertTo(dst, CV_8U);

        // Core.gemm(source, source, bsize, source, bsize, dst);
        // Core.gemm(cir, destination, 1, new Mat(), 0,dst , 0);
        //            Imgcodecs.imwrite("images\\outddput.jpg", dst);
        String output = savePath.substring(0, savePath.lastIndexOf(".")) + "_DOTS_temp.jpg";
        String imgOutput = imgInput.substring(0, imgInput.lastIndexOf(".")) + "_DOTS_temp.jpg";
        Imgcodecs.imwrite(output, dst);

        //____________________________________
        System.out.println("output: " + output);
        System.out.println("imgOutput: " + imgOutput);

        publishImg(response, imgOutput);

    } catch (Exception e) {
        System.out.println("Error: " + e.getMessage());
    }
}
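The filter's core idiom is worth isolating: the dot mask is converted to CV_32F and normalized to [0, 1], multiplied into each float channel, and the product converted back to CV_8U for writing. A compact sketch of the same idea (applyMask is a hypothetical helper; the three-argument convertTo folds the normalization into the conversion):

// Hypothetical helper showing the same mask-multiply idiom.
static Mat applyMask(Mat bgr, Mat mask8u) {
    // 0..255 mask -> 0..1 float weight, scaling during the conversion.
    Mat mask = new Mat();
    mask8u.convertTo(mask, CvType.CV_32F, 1.0 / 255.0);

    Mat img = new Mat();
    bgr.convertTo(img, CvType.CV_32F);

    List<Mat> channels = new ArrayList<>();
    Core.split(img, channels);
    for (int i = 0; i < channels.size(); i++) {
        channels.set(i, channels.get(i).mul(mask));
    }
    Core.merge(channels, img);

    // Back to 8-bit for imwrite.
    Mat out = new Mat();
    img.convertTo(out, CvType.CV_8U);
    return out;
}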

From source file:udp.server.ObjectTracker.java

private void trackColors() {

    while (true) {
        //capture.read(webcam_image);  
        //System.out.println(this.camCap.getFrame().size());
        webcam_image = this.camCap.getFrame().clone();
        if (!webcam_image.empty()) {

            //Adjusting brightness and contrast
            webcam_image.convertTo(webcam_image, -1, brightness, contrast);

            //Adding blur to remove noise
            Imgproc.blur(webcam_image, webcam_image, new Size(7, 7));

            // converting to HSV image
            Imgproc.cvtColor(webcam_image, hsv_image, Imgproc.COLOR_BGR2HSV);

            //Checking if the hsv image is in range.
            Core.inRange(hsv_image, hsv_min, hsv_max, thresholded);

            Imgproc.erode(thresholded, thresholded,
                    Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(8, 8)));
            Imgproc.dilate(thresholded, thresholded,
                    Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(8, 8)));
            Core.split(hsv_image, lhsv); // We get 3 2D one channel Mats  
            Mat S = lhsv.get(1);
            Mat V = lhsv.get(2);
            Core.subtract(array255, S, S);
            Core.subtract(array255, V, V);
            S.convertTo(S, CvType.CV_32F);
            V.convertTo(V, CvType.CV_32F);
            Core.magnitude(S, V, distance);
            Core.inRange(distance, new Scalar(0.0), new Scalar(200.0), thresholded2);

            Imgproc.GaussianBlur(thresholded, thresholded, new Size(9, 9), 0, 0);
            Imgproc.HoughCircles(thresholded, circles, Imgproc.CV_HOUGH_GRADIENT, 2, thresholded.height() / 8,
                    200, 100, 0, 0);
            Imgproc.findContours(thresholded, contours, thresholded2, Imgproc.RETR_LIST,
                    Imgproc.CHAIN_APPROX_SIMPLE);
            //------Imgproc.drawContours(webcam_image, contours, -1, new Scalar(255, 0, 0), 2);   

            //------Core.circle(webcam_image, new Point(210,210), 10, new Scalar(100,10,10),3);  
            data = webcam_image.get(210, 210);
            //------Core.putText(webcam_image,String.format("("+String.valueOf(data[0])+","+String.valueOf(data[1])+","+String.valueOf(data[2])+")"),new Point(30, 30) , 3 //FONT_HERSHEY_SCRIPT_SIMPLEX  
            //------   ,1.0,new Scalar(100,10,10,255),3); 
            //ArrayList<Float> errorAngles = new ArrayList<>();
            ArrayList<Float> errorAngles = getTargetError();

            if (errorAngles != null) {

                try {
                    semaphore.acquire();
                } catch (InterruptedException ex) {
                    Logger.getLogger(ObjectTracker.class.getName()).log(Level.SEVERE, null, ex);
                }
                float eXa = (errorAngles.get(0));
                float eYa = (errorAngles.get(1));
                this.dh.setPixyXvalue(eXa);
                this.dh.setPixyYvalue(eYa);

                //System.out.print("AngleErrorX: "+errorAngles.get(0));
                //System.out.println("       AngleErrorY: "+ errorAngles.get(1));

                semaphore.release();

            }
        }

        /*else {
            System.out.println(" --(!) No captured frame -- Break!");
        }*/
    }
}
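The convertTo call near the top of the loop uses the scaling overload: with rtype = -1 the depth is unchanged and each pixel becomes saturate_cast(src * alpha + beta), so the second argument is a gain and the third a bias. Note that the example passes its brightness field as the gain and contrast as the bias, the reverse of the usual convention. A minimal sketch (the input path is hypothetical):

// Gain/bias overload: rtype -1 keeps the depth, alpha scales each
// pixel (contrast), beta shifts it (brightness), and the result is
// saturate-cast back into range.
Mat frame = Imgcodecs.imread("frame.jpg"); // hypothetical input
Mat adjusted = new Mat();
frame.convertTo(adjusted, -1, 1.2, 25); // ~20% more contrast, +25 brightness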