Example usage for org.opencv.core Mat rows

List of usage examples for org.opencv.core Mat rows

Introduction

On this page you can find example usage for org.opencv.core Mat rows().

Prototype

public int rows() 
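
A minimal sketch of the call, not taken from the sources below (assumes the OpenCV native library has already been loaded via System.loadLibrary(Core.NATIVE_LIBRARY_NAME)):

    Mat mat = Mat.eye(3, 4, CvType.CV_8UC1);
    int rows = mat.rows(); // 3
    int cols = mat.cols(); // 4
    System.out.println("size: " + rows + "x" + cols);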

Usage

From source file:ru.caramel.juniperbot.web.service.OpenCVService.java

License:Open Source License

public BufferedImage blur(BufferedImage source, int radius) throws IOException {
    if (!initialized) {
        throw new IOException("OpenCV unavailable");
    }
    Mat sourceMat = getMat(source);
    Mat destination = new Mat(sourceMat.rows(), sourceMat.cols(), sourceMat.type());
    // GaussianBlur requires an odd kernel size, so force the radius to an odd value
    Imgproc.GaussianBlur(sourceMat, destination, new Size(radius | 1, radius | 1), 0);
    return getImage(destination);
}
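
The same pattern in a self-contained sketch, assuming the OpenCV 3.x Java API (Imgcodecs) and an illustrative file name; the destination Mat is sized from rows() and cols() before the blur:

    import org.opencv.core.Core;
    import org.opencv.core.Mat;
    import org.opencv.core.Size;
    import org.opencv.imgcodecs.Imgcodecs;
    import org.opencv.imgproc.Imgproc;

    public class BlurSketch {
        public static void main(String[] args) {
            System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
            Mat src = Imgcodecs.imread("input.png"); // illustrative path
            Mat dst = new Mat(src.rows(), src.cols(), src.type());
            Imgproc.GaussianBlur(src, dst, new Size(5, 5), 0); // kernel size must be odd
            Imgcodecs.imwrite("blurred.png", dst);
        }
    }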

From source file:saliency.saliency.java

/**
 * @param args the command line arguments
 */
public static void main(String[] args) {
    // TODO code application logic here

    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

    Mat input_img = imread("input_img/sea.jpg");
    //for temp test start
    Imgproc.resize(input_img, input_img, new Size(1980, 1080), 0, 0, Imgproc.INTER_LINEAR);
    //for temp test end
    if (input_img.cols() == 0) {
        return;
    }

    //benchmark
    ///////////////////////step 1 : Extraction of Early Visual Features///////////////////////////////
    //intensity image: intensity_img
    Mat intensity_img = new Mat(input_img.rows(), input_img.cols(), CV_16UC1);
    //intensity = (R+G+B)/3
    int img_width = intensity_img.cols();
    int img_height = intensity_img.rows();
    int x, y;
    int i, c, s;
    int max_intensity = 0;
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            int temp_intensity = ((int) input_img.get(y, x)[0] + (int) input_img.get(y, x)[1]
                    + (int) input_img.get(y, x)[2]) / 3;
            intensity_img.put(y, x, temp_intensity);
            if (max_intensity < temp_intensity) {
                max_intensity = temp_intensity;
            }
        }
    }
    //create Gaussian pyramid for intensity
    Mat[] i_gaussian_pyramid = new Mat[9];
    i_gaussian_pyramid[0] = intensity_img.clone();
    for (i = 0; i < 8; i++) {
        i_gaussian_pyramid[i + 1] = i_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(i_gaussian_pyramid[i + 1], i_gaussian_pyramid[i + 1], new Size());
    }

    //create intensity feature map using center-surround differences
    Mat[][] intensity_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            intensity_feature_map[c][s] = center_surround.main(i_gaussian_pyramid[c + 2],
                    i_gaussian_pyramid[s + c + 5], 0);
        }
    }
    //benchmark
    //imwrite("intensity_feature_map_00.bmp", intensity_feature_map[0][0]);
    //get normalized color image by I.
    Mat norm_input_img = input_img.clone();
    norm_input_img.convertTo(norm_input_img, CV_64F);
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            //normalization is only applied at the locations where I is larger than 1/10 of its maximum over entire image
            double[] temp = new double[3];
            if (intensity_img.get(y, x)[0] > (max_intensity / 10)) {
                temp[0] = norm_input_img.get(y, x)[0] / intensity_img.get(y, x)[0];
                temp[1] = norm_input_img.get(y, x)[1] / intensity_img.get(y, x)[0];
                temp[2] = norm_input_img.get(y, x)[2] / intensity_img.get(y, x)[0];
                norm_input_img.put(y, x, temp);
            } else {
                temp[0] = 0;
                temp[1] = 0;
                temp[2] = 0;
                norm_input_img.put(y, x, temp);
            }
        }
    }
    //get R G B Y(Yellow) single color channel images
    Mat r_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat g_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat b_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat y_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    //[0]: b [1]:g [2]:r
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            //R = max(0,r-(g+b)/2)
            double temp_chroma = max(0, (norm_input_img.get(y, x)[2]
                    - (norm_input_img.get(y, x)[1] + norm_input_img.get(y, x)[0]) / 2));
            r_img.put(y, x, temp_chroma);
            //G = max(0,g-(r+b)/2)
            temp_chroma = max(0, (norm_input_img.get(y, x)[1]
                    - (norm_input_img.get(y, x)[2] + norm_input_img.get(y, x)[0]) / 2));
            g_img.put(y, x, temp_chroma);
            //B = max(0,b-(r+g)/2)
            temp_chroma = max(0, (norm_input_img.get(y, x)[0]
                    - (norm_input_img.get(y, x)[2] + norm_input_img.get(y, x)[1]) / 2));
            b_img.put(y, x, temp_chroma);
            //Y = max(0,(r+g)/2-|r-g|/2-b)
            temp_chroma = max(0,
                    ((norm_input_img.get(y, x)[2] + norm_input_img.get(y, x)[1]) / 2
                            - abs(norm_input_img.get(y, x)[2] - norm_input_img.get(y, x)[1]) / 2
                            - norm_input_img.get(y, x)[0]));
            y_img.put(y, x, temp_chroma);
        }
    }
    //create Gaussian pyramid for 4 color channels
    Mat[] b_gaussian_pyramid = new Mat[9];
    b_gaussian_pyramid[0] = b_img.clone();
    for (i = 0; i < 8; i++) {
        b_gaussian_pyramid[i + 1] = b_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(b_gaussian_pyramid[i + 1], b_gaussian_pyramid[i + 1], new Size());
    }
    Mat[] g_gaussian_pyramid = new Mat[9];
    g_gaussian_pyramid[0] = g_img.clone();
    for (i = 0; i < 8; i++) {
        g_gaussian_pyramid[i + 1] = g_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(g_gaussian_pyramid[i + 1], g_gaussian_pyramid[i + 1], new Size());
    }
    Mat[] r_gaussian_pyramid = new Mat[9];
    r_gaussian_pyramid[0] = r_img.clone();
    for (i = 0; i < 8; i++) {
        r_gaussian_pyramid[i + 1] = r_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(r_gaussian_pyramid[i + 1], r_gaussian_pyramid[i + 1], new Size());
    }
    Mat[] y_gaussian_pyramid = new Mat[9];
    y_gaussian_pyramid[0] = y_img.clone();
    for (i = 0; i < 8; i++) {
        y_gaussian_pyramid[i + 1] = y_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(y_gaussian_pyramid[i + 1], y_gaussian_pyramid[i + 1], new Size());
    }
    //create color feature map using center-surround differences
    //RG(c,s) = |(R(c)-G(c))(-)(G(c)-R(c))|
    Mat[][] rg_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat r_minus_g = r_gaussian_pyramid[c + 2].clone();
            Core.subtract(r_gaussian_pyramid[c + 2], g_gaussian_pyramid[c + 2], r_minus_g);
            Mat g_minus_r = g_gaussian_pyramid[s + c + 5].clone();
            Core.subtract(g_gaussian_pyramid[s + c + 5], r_gaussian_pyramid[s + c + 5], g_minus_r);
            rg_feature_map[c][s] = center_surround.main(r_minus_g, g_minus_r, 1);
        }
    }
    //BY(c,s) = |(B(c)-Y(c))(-)(Y(c)-B(c))|
    Mat[][] by_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat b_minus_y = b_gaussian_pyramid[c + 2].clone();
            Core.subtract(b_gaussian_pyramid[c + 2], y_gaussian_pyramid[c + 2], b_minus_y);
            Mat y_minus_b = y_gaussian_pyramid[s + c + 5].clone();
            Core.subtract(y_gaussian_pyramid[s + c + 5], b_gaussian_pyramid[s + c + 5], y_minus_b);
            by_feature_map[c][s] = center_surround.main(b_minus_y, y_minus_b, 1);
        }
    }
    //benchmark
    //create oriented Gabor pyramid from intensity image
    int kernel_size = 10;//31; adjust value according to the reference
    double sigma = 3;//default: sigma = 0.56 * lambda. The larger sigma is, the larger the support of the Gabor function and the number of visible parallel excitatory and inhibitory stripe zones.
    double[] theta = new double[4];
    theta[0] = 0;
    theta[1] = Math.PI / 4;
    theta[2] = Math.PI / 2;
    theta[3] = Math.PI * 3 / 4;
    double lambda = 5;//36; minimum 3
    double gamma = 0.5;//0.02;
    // double psi = 0;
    Mat[][] gabor_pyramid = new Mat[4][9];
    int theta_index;
    for (theta_index = 0; theta_index < 4; theta_index++) {
        Mat gabor_kernel = Imgproc.getGaborKernel(new Size(kernel_size, kernel_size), sigma, theta[theta_index],
                lambda, gamma);
        //gabor_pyramid[theta_index][0] = intensity_img.clone();
        for (i = 0; i < 9; i++) {
            //gabor_pyramid[theta_index][i] = gabor_pyramid[theta_index][i].clone();
            gabor_pyramid[theta_index][i] = i_gaussian_pyramid[i].clone();
            Imgproc.filter2D(i_gaussian_pyramid[i], gabor_pyramid[theta_index][i], -1, gabor_kernel);
            //Imgproc.resize(gabor_pyramid[theta_index][i], gabor_pyramid[theta_index][i], new Size(), 0.5, 0.5, Imgproc.INTER_AREA);
        }
    }
    //imwrite("gabor_pyramid_01.bmp", gabor_pyramid[0][1]);
    //imwrite("gabor_pyramid_11.bmp", gabor_pyramid[1][1]);
    //imwrite("gabor_pyramid_21.bmp", gabor_pyramid[2][1]);
    //imwrite("gabor_pyramid_31.bmp", gabor_pyramid[3][1]);
    //imwrite("gabor_pyramid_03.bmp", gabor_pyramid[0][3]);
    //get orientation feature map using center-surround differences
    Mat[][][] orientation_feature_map = new Mat[4][3][2];
    for (theta_index = 0; theta_index < 4; theta_index++) {
        for (c = 0; c < 3; c++) {
            for (s = 0; s < 2; s++) {
                orientation_feature_map[theta_index][c][s] = center_surround
                        .main(gabor_pyramid[theta_index][c + 2], gabor_pyramid[theta_index][s + c + 5], 0);
            }
        }
    }
    //benchmark
    //imwrite("orientation_test_00.bmp", orientation_feature_map[0][0][0]);
    ///////////////////////step 2 : the saliency map///////////////////////////////
    //get intensity conspicuity map
    Mat intensity_conspicuity_map = Mat.zeros(intensity_feature_map[2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(intensity_feature_map[c][s]);
            Mat resized_feature_map = Mat.zeros(intensity_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, intensity_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(intensity_conspicuity_map, 1, resized_feature_map, 1.0 / 6, 0,
                    intensity_conspicuity_map);
            /*if(c == 0 && s == 0){
            imwrite("in.bmp", intensity_feature_map[c][s]);
            imwrite("map_norm.bmp",norm_out);
            imwrite("resized_feature_map.bmp", resized_feature_map);
            }*/
        }
    }
    //benchmark
    //Core.normalize(intensity_conspicuity_map, intensity_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    //imwrite("intensity_conspicuity_map.bmp", intensity_conspicuity_map);
    //get color conspicuity map
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Core.normalize(rg_feature_map[c][s], rg_feature_map[c][s], 0, 255, Core.NORM_MINMAX);
            rg_feature_map[c][s].convertTo(rg_feature_map[c][s], CV_16UC1);
            Core.normalize(by_feature_map[c][s], by_feature_map[c][s], 0, 255, Core.NORM_MINMAX);
            by_feature_map[c][s].convertTo(by_feature_map[c][s], CV_16UC1);
        }
    }
    //imwrite("test_rg.bmp",rg_feature_map[0][0]);      
    Mat color_conspicuity_map = Mat.zeros(rg_feature_map[2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(rg_feature_map[c][s]);
            Mat resized_feature_map = Mat.zeros(rg_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, rg_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(color_conspicuity_map, 1, resized_feature_map, 1.0 / 12, 0, color_conspicuity_map);
            norm_out = map_norm.main(by_feature_map[c][s]);
            resized_feature_map = Mat.zeros(by_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, by_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(color_conspicuity_map, 1, resized_feature_map, 1.0 / 12, 0, color_conspicuity_map);
        }
    }
    //benchmark
    //get orientation conspicuity map
    Mat orientation_conspicuity_map_0 = Mat.zeros(orientation_feature_map[0][2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(orientation_feature_map[0][c][s]);
            Mat resized_feature_map = Mat.zeros(orientation_feature_map[0][2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, orientation_feature_map[0][2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(orientation_conspicuity_map_0, 1, resized_feature_map, 1.0 / 6, 0,
                    orientation_conspicuity_map_0);
        }
    }

    Mat orientation_conspicuity_map_1 = Mat.zeros(orientation_feature_map[1][2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(orientation_feature_map[1][c][s]);
            Mat resized_feature_map = Mat.zeros(orientation_feature_map[1][2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, orientation_feature_map[1][2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(orientation_conspicuity_map_1, 1, resized_feature_map, 1.0 / 6, 0,
                    orientation_conspicuity_map_1);
        }
    }
    Mat orientation_conspicuity_map_2 = Mat.zeros(orientation_feature_map[2][2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(orientation_feature_map[2][c][s]);
            Mat resized_feature_map = Mat.zeros(orientation_feature_map[2][2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, orientation_feature_map[2][2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(orientation_conspicuity_map_2, 1, resized_feature_map, 1.0 / 6, 0,
                    orientation_conspicuity_map_2);
        }
    }
    Mat orientation_conspicuity_map_3 = Mat.zeros(orientation_feature_map[3][2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(orientation_feature_map[3][c][s]);
            Mat resized_feature_map = Mat.zeros(orientation_feature_map[3][2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, orientation_feature_map[3][2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(orientation_conspicuity_map_3, 1, resized_feature_map, 1.0 / 6, 0,
                    orientation_conspicuity_map_3);
        }
    }
    Mat orientation_conspicuity_map = Mat.zeros(orientation_feature_map[0][2][0].size(), CV_16UC1);
    Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(orientation_conspicuity_map_0), 1.0 / 4, 0,
            orientation_conspicuity_map);
    Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(orientation_conspicuity_map_1), 1.0 / 4, 0,
            orientation_conspicuity_map);
    Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(orientation_conspicuity_map_2), 1.0 / 4, 0,
            orientation_conspicuity_map);
    Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(orientation_conspicuity_map_3), 1.0 / 4, 0,
            orientation_conspicuity_map);
    //benchmark
    Mat saliency = Mat.zeros(intensity_conspicuity_map.size(), CV_16UC1);
    Core.addWeighted(saliency, 1, map_norm.main(intensity_conspicuity_map), 1.0 / 3, 0, saliency);
    Core.addWeighted(saliency, 1, map_norm.main(color_conspicuity_map), 1.0 / 3, 0, saliency);
    Core.addWeighted(saliency, 1, map_norm.main(orientation_conspicuity_map), 1.0 / 3, 0, saliency);
    //benchmark
    Core.normalize(saliency, saliency, 0, 255, Core.NORM_MINMAX);
    //for temp test
    Imgproc.resize(saliency, saliency, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("saliency.bmp", saliency);

    Core.normalize(intensity_conspicuity_map, intensity_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(intensity_conspicuity_map, intensity_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("intensity_conspicuity_map.bmp", intensity_conspicuity_map);
    Core.normalize(color_conspicuity_map, color_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(color_conspicuity_map, color_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("color_conspicuity_map.bmp", color_conspicuity_map);
    Core.normalize(orientation_conspicuity_map, orientation_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(orientation_conspicuity_map, orientation_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map.bmp", orientation_conspicuity_map);
    Imgproc.resize(input_img, input_img, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("input_img.bmp", input_img);
    //for testing algorithm
    /*
    Mat temp1 = Mat.zeros(intensity_conspicuity_map.size(), CV_16UC1);
    temp1 = map_norm.main(intensity_conspicuity_map);
    Core.normalize(temp1, temp1, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp1, temp1, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("intensity.bmp", temp1);
    temp1 = map_norm.main(color_conspicuity_map);
    Core.normalize(temp1, temp1, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp1, temp1, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("color.bmp", temp1);
    temp1 = map_norm.main(orientation_conspicuity_map);
    Core.normalize(temp1, temp1, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp1, temp1, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation.bmp", temp1);
            
    Mat temp2 = Mat.zeros(orientation_conspicuity_map_0.size(), CV_16UC1);
    temp2 = map_norm.main(orientation_conspicuity_map_0);
    Core.normalize(temp2, temp2, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp2, temp2, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map_0.bmp", temp2);
    temp2 = map_norm.main(orientation_conspicuity_map_1);
    Core.normalize(temp2, temp2, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp2, temp2, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map_1.bmp", temp2);
    temp2 = map_norm.main(orientation_conspicuity_map_2);
    Core.normalize(temp2, temp2, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp2, temp2, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map_2.bmp", temp2);
    temp2 = map_norm.main(orientation_conspicuity_map_3);
    Core.normalize(temp2, temp2, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp2, temp2, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map_3.bmp", temp2);
    */
}

From source file:samples.FtcTestOpenCv.java

License:Open Source License

/**
 * This method rotates the image to the specified angle.
 *
 * @param src specifies the image to be rotated.
 * @param dst specifies the destination to put the rotated image.
 * @param angle specifies the rotation angle.
 */
private void rotateImage(Mat src, Mat dst, double angle) {
    angle %= 360.0;
    if (angle == 0.0) {
        src.copyTo(dst);
    } else if (angle == 90.0 || angle == -270.0) {
        Core.transpose(src, dst);
        Core.flip(dst, dst, 1);
    } else if (angle == 180.0 || angle == -180.0) {
        Core.flip(src, dst, -1);
    } else if (angle == 270.0 || angle == -90.0) {
        Core.transpose(src, dst);
        Core.flip(dst, dst, 0);
    } else {
        Mat rotMat = Imgproc.getRotationMatrix2D(new Point(src.cols() / 2.0, src.rows() / 2.0), angle, 1.0);
        Imgproc.warpAffine(src, dst, rotMat, src.size());
    }
}
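
A hypothetical call site within the same class (the frame Mat and angle are illustrative, and Imgcodecs assumes the OpenCV 3.x API); for 90, 180 and 270 degrees the method uses transpose/flip, so no interpolation is involved:

    Mat frame = Imgcodecs.imread("camera_frame.png"); // illustrative path
    Mat rotated = new Mat();
    rotateImage(frame, rotated, 90.0);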

From source file:samples.FtcTestOpenCv.java

License:Open Source License

/**
 * This method combines an overlay image with the given background image at the specified location.
 * It expects both the background and overlay to be color images. It also expects the overlay
 * image to contain an alpha channel for opacity information.
 *
 * @param background specifies the background image.
 * @param overlay specifies the overlay image.
 * @param locX specifies the X location on the background image where the upper left corner of the overlay
 *        image should be at.
 * @param locY specifies the Y location on the background image where the upper left corner of the overlay
 *        image should be at.
 */
private void combineImage(Mat background, Mat overlay, int locX, int locY) {
    //
    // Make sure the background image has at least 3 channels and the overlay image has
    // at least 4 channels.
    //
    if (background.channels() >= 3 && overlay.channels() >= 4) {
        //
        // For each row of the overlay image.
        //
        for (int row = 0; row < overlay.rows(); row++) {
            //
            // Calculate the corresponding row number of the background image.
            // Skip the row if it is outside of the background image.
            //
            int destRow = locY + row;
            if (destRow < 0 || destRow >= background.rows())
                continue;
            //
            // For each column of the overlay image.
            //
            for (int col = 0; col < overlay.cols(); col++) {
                //
                // Calculate the corresponding column number of the background image.
                // Skip the column if it is outside of the background image.
                //
                int destCol = locX + col;
                if (destCol < 0 || destCol >= background.cols())
                    continue;
                //
                // Get the source pixel from the overlay image and the destination pixel from the
                // background image. Calculate the opacity as a percentage.
                //
                double[] srcPixel = overlay.get(row, col);
                double[] destPixel = background.get(destRow, destCol);
                double opacity = srcPixel[3] / 255.0;
                //
                // Merge the source pixel to the destination pixel with the proper opacity.
                // Each color pixel consists of 3 channels: BGR (Blue, Green, Red).
                // The fourth channel is opacity and is only applicable for the overlay image.
                //
                for (int channel = 0; channel < 3; channel++) {
                    destPixel[channel] = destPixel[channel] * (1.0 - opacity) + srcPixel[channel] * opacity;
                }
                //
                // Put the resulting pixel into the background image.
                //
                background.put(destRow, destCol, destPixel);
            }
        }
    } else {
        throw new RuntimeException(
                "Invalid image format (src=" + overlay.channels() + ",dest=" + background.channels() + ").");
    }
}
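
A hypothetical call within the same class, assuming a 3-channel BGR background and a 4-channel BGRA overlay (file names and coordinates are illustrative; IMREAD_UNCHANGED keeps the alpha channel):

    Mat background = Imgcodecs.imread("background.png");
    Mat overlay = Imgcodecs.imread("overlay.png", Imgcodecs.IMREAD_UNCHANGED);
    combineImage(background, overlay, 10, 20);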

From source file:sanntidvideo.Main.java

public BufferedImage toBufferedImage(Mat m) {
    int type = BufferedImage.TYPE_BYTE_GRAY;
    if (m.channels() > 1) {
        type = BufferedImage.TYPE_3BYTE_BGR;
    }
    int bufferSize = m.channels() * m.cols() * m.rows();
    byte[] b = new byte[bufferSize];
    m.get(0, 0, b); // get all the pixels
    BufferedImage image = new BufferedImage(m.cols(), m.rows(), type);
    final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(b, 0, targetPixels, 0, b.length);
    return image;
}
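
A hypothetical caller pairing the conversion with a capture device (device index is illustrative; VideoCapture here assumes org.opencv.videoio.VideoCapture from the OpenCV 3.x API):

    VideoCapture cap = new VideoCapture(0);
    Mat frame = new Mat();
    if (cap.read(frame)) {
        BufferedImage img = toBufferedImage(frame);
        // img.getWidth() == frame.cols(), img.getHeight() == frame.rows()
    }
    cap.release();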

From source file:sanntidvideo.VideoCap.java

public Image toBufferedImage(Mat m) {
    int type = BufferedImage.TYPE_BYTE_GRAY;
    if (m.channels() > 1) {
        type = BufferedImage.TYPE_3BYTE_BGR;
    }
    int bufferSize = m.channels() * m.cols() * m.rows();
    byte[] b = new byte[bufferSize];
    m.get(0, 0, b); // get all the pixels
    BufferedImage image = new BufferedImage(m.cols(), m.rows(), type);
    final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(b, 0, targetPixels, 0, b.length);
    return image;
}

From source file:SearchSystem.SearchingAlgorithms.CBIRSearchAlgorithm.java

@Override
public byte extractFeatures(Mat image, String outputFile) {
    int rows = image.rows();
    int cols = image.cols();
    int blockHeigth = rows / numberBlocksHeigth;
    int blockWidth = cols / numberBlocksWidth;
    int sizeOfBlocks = blockHeigth * blockWidth;
    rows = numberBlocksHeigth * blockHeigth;
    cols = numberBlocksWidth * blockWidth;
    Imgproc.resize(image, image, new Size(cols, rows));

    double[] vectors = new double[numberBlocksWidth * numberBlocksHeigth * 6]; // 3 channels, average and variance for each
    int counter = 0;

    for (int i = 0; i < rows; i += blockHeigth)
        for (int j = 0; j < cols; j += blockWidth) {
            for (int ii = i; ii < i + blockHeigth; ++ii)
                for (int jj = j; jj < j + blockWidth; ++jj) {
                    double pixel[] = image.get(ii, jj);
                    vectors[counter] += pixel[0]; // H mean
                    vectors[counter + 1] += pixel[1]; // S mean
                    vectors[counter + 2] += pixel[2]; // V mean
                }
            vectors[counter] /= sizeOfBlocks; // H mean
            vectors[counter + 1] /= sizeOfBlocks; // S mean
            vectors[counter + 2] /= sizeOfBlocks; // V mean

            for (int ii = i; ii < i + blockHeigth; ++ii)
                for (int jj = j; jj < j + blockWidth; ++jj) {
                    double pixel[] = image.get(ii, jj);
                    vectors[counter + 3] += Math.pow(pixel[0] - vectors[counter], 2); // H variation
                    vectors[counter + 4] += Math.pow(pixel[1] - vectors[counter + 1], 2); // S variation
                    vectors[counter + 5] += Math.pow(pixel[2] - vectors[counter + 2], 2); // V variation
                }
            vectors[counter + 3] = Math.sqrt(vectors[counter + 3] / sizeOfBlocks); // H variation
            vectors[counter + 4] = Math.sqrt(vectors[counter + 4] / sizeOfBlocks); // S variation
            vectors[counter + 5] = Math.sqrt(vectors[counter + 5] / sizeOfBlocks); // V variation

            counter += 6;
        }

    writeVectorToFile(vectors, outputFile);
    image.release();
    return 1;
}

From source file:SearchSystem.SearchingAlgorithms.CBIRSearchAlgorithm.java

public byte extractFeaturesHUE(Mat image, String outputFile) {
    int rows = image.rows();
    int cols = image.cols();
    int blockHeigth = rows / numberBlocksHeigth;
    int blockWidth = cols / numberBlocksWidth;
    int sizeOfBlocks = blockHeigth * blockWidth;
    rows = numberBlocksHeigth * blockHeigth;
    cols = numberBlocksWidth * blockWidth;
    Imgproc.resize(image, image, new Size(cols, rows));
    Imgproc.cvtColor(image, image, Imgproc.COLOR_BGR2HSV);

    double[] vectors = new double[numberBlocksWidth * numberBlocksHeigth * 6]; // 3 channels, average and variance for each
    int counter = 0;

    double Hmean = 0;
    double Smean = 0;
    double Vmean = 0;

    for (int i = 0; i < rows; i += blockHeigth)
        for (int j = 0; j < cols; j += blockWidth) {
            double pixel[] = image.get(i, j);
            Hmean = 0; // reset the per-block accumulators
            Smean = 0;
            Vmean = 0;
            for (int ii = i; ii < i + blockHeigth; ++ii)
                for (int jj = j; jj < j + blockWidth; ++jj) {
                    pixel = image.get(ii, jj);
                    if (vectors[counter] < pixel[0])
                        vectors[counter] = pixel[0]; // H max
                    if (vectors[counter + 1] < pixel[1])
                        vectors[counter + 1] = pixel[1]; // S max
                    if (vectors[counter + 2] < pixel[2])
                        vectors[counter + 2] = pixel[2]; // V max

                    Hmean += pixel[0]; // H mean
                    Smean += pixel[1]; // S mean
                    Vmean += pixel[2]; // V mean
                }
            vectors[counter] *= 2; // OpenCV scales H to H/2 to fit uchar;

            Hmean = Hmean * 2 / sizeOfBlocks; // H mean
            Smean /= sizeOfBlocks; // S mean
            Vmean /= sizeOfBlocks; // V mean

            for (int ii = i; ii < i + blockHeigth; ++ii)
                for (int jj = j; jj < j + blockWidth; ++jj) {
                    pixel = image.get(ii, jj);
                    vectors[counter + 3] += Math.pow(pixel[0] * 2 - Hmean, 2); // H variation
                    vectors[counter + 4] += Math.pow(pixel[1] - Smean, 2); // S variation
                    vectors[counter + 5] += Math.pow(pixel[2] - Vmean, 2); // V variation
                }
            vectors[counter + 3] = Math.sqrt(vectors[counter + 3] / sizeOfBlocks); // H variation
            vectors[counter + 4] = Math.sqrt(vectors[counter + 4] / sizeOfBlocks); // S variation
            vectors[counter + 5] = Math.sqrt(vectors[counter + 5] / sizeOfBlocks); // V variation

            counter += 6;
        }

    writeVectorToFile(vectors, outputFile);
    image.release();
    return 1;
}

From source file:SearchSystem.SearchingAlgorithms.CBIRSearchAlgorithm.java

public double[] extractFeaturesHUE(Mat image) {
    int rows = image.rows();
    int cols = image.cols();
    int blockHeigth = rows / numberBlocksHeigth;
    int blockWidth = cols / numberBlocksWidth;
    int sizeOfBlocks = blockHeigth * blockWidth;
    rows = numberBlocksHeigth * blockHeigth;
    cols = numberBlocksWidth * blockWidth;
    Imgproc.resize(image, image, new Size(cols, rows));
    Imgproc.cvtColor(image, image, Imgproc.COLOR_BGR2HSV);

    double[] vectors = new double[numberBlocksWidth * numberBlocksHeigth * 6]; // 3 channels, average and variance for each
    int counter = 0;

    double Hmean = 0;
    double Smean = 0;
    double Vmean = 0;

    for (int i = 0; i < rows; i += blockHeigth)
        for (int j = 0; j < cols; j += blockWidth) {
            double pixel[] = image.get(i, j);
            Hmean = 0; // reset the per-block accumulators
            Smean = 0;
            Vmean = 0;
            for (int ii = i; ii < i + blockHeigth; ++ii)
                for (int jj = j; jj < j + blockWidth; ++jj) {
                    pixel = image.get(ii, jj);
                    if (vectors[counter] < pixel[0])
                        vectors[counter] = pixel[0]; // H max
                    if (vectors[counter + 1] < pixel[1])
                        vectors[counter + 1] = pixel[1]; // S max
                    if (vectors[counter + 2] < pixel[2])
                        vectors[counter + 2] = pixel[2]; // V max

                    Hmean += pixel[0]; // H mean
                    Smean += pixel[1]; // S mean
                    Vmean += pixel[2]; // V mean
                }
            vectors[counter] *= 2; // OpenCV scales H to H/2 to fit uchar;

            Hmean = Hmean * 2 / sizeOfBlocks; // H mean
            Smean /= sizeOfBlocks; // S mean
            Vmean /= sizeOfBlocks; // V mean

            for (int ii = i; ii < i + blockHeigth; ++ii)
                for (int jj = j; jj < j + blockWidth; ++jj) {
                    pixel = image.get(ii, jj);
                    vectors[counter + 3] += Math.pow(pixel[0] * 2 - Hmean, 2); // H variation
                    vectors[counter + 4] += Math.pow(pixel[1] - Smean, 2); // S variation
                    vectors[counter + 5] += Math.pow(pixel[2] - Vmean, 2); // V variation
                }
            vectors[counter + 3] = Math.sqrt(vectors[counter + 3] / sizeOfBlocks); // H variation
            vectors[counter + 4] = Math.sqrt(vectors[counter + 4] / sizeOfBlocks); // S variation
            vectors[counter + 5] = Math.sqrt(vectors[counter + 5] / sizeOfBlocks); // V variation

            counter += 6;
        }

    image.release();
    return vectors;
}

From source file:SearchSystem.SearchingAlgorithms.CBIRSearchAlgorithm.java

public double[] extractFeatures(Mat image) {
    //       Imgproc.cvtColor(image, image, Imgproc.COLOR_BGR2HSV);
    int rows = image.rows();
    int cols = image.cols();
    int blockHeigth = rows / numberBlocksHeigth;
    int blockWidth = cols / numberBlocksWidth;
    int sizeOfBlocks = blockHeigth * blockWidth;
    System.out.println(sizeOfBlocks);
    rows = numberBlocksHeigth * blockHeigth;
    cols = numberBlocksWidth * blockWidth;
    Imgproc.resize(image, image, new Size(cols, rows));

    double[] vectors = new double[numberBlocksWidth * numberBlocksHeigth * 6]; // 3 channels, average and variance for each
    int counter = 0;

    for (int i = 0; i < rows; i += blockHeigth)
        for (int j = 0; j < cols; j += blockWidth) {
            for (int ii = i; ii < i + blockHeigth; ++ii)
                for (int jj = j; jj < j + blockWidth; ++jj) {
                    double pixel[] = image.get(ii, jj);
                    vectors[counter] += pixel[0]; // H mean
                    vectors[counter + 1] += pixel[1]; // S mean
                    vectors[counter + 2] += pixel[2]; // V mean
                }
            vectors[counter] /= sizeOfBlocks; // H mean
            vectors[counter + 1] /= sizeOfBlocks; // S mean
            vectors[counter + 2] /= sizeOfBlocks; // V mean

            for (int ii = i; ii < i + blockHeigth; ++ii)
                for (int jj = j; jj < j + blockWidth; ++jj) {
                    double pixel[] = image.get(ii, jj);
                    vectors[counter + 3] += Math.pow(pixel[0] - vectors[counter], 2); // H variation
                    vectors[counter + 4] += Math.pow(pixel[1] - vectors[counter + 1], 2); // S variation
                    vectors[counter + 5] += Math.pow(pixel[2] - vectors[counter + 2], 2); // V variation
                }
            vectors[counter + 3] = Math.sqrt(vectors[counter + 3] / sizeOfBlocks); // H variation
            vectors[counter + 4] = Math.sqrt(vectors[counter + 4] / sizeOfBlocks); // S variation
            vectors[counter + 5] = Math.sqrt(vectors[counter + 5] / sizeOfBlocks); // V variation

            counter += 6;
        }
    image.release();
    return vectors;
}
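
A hypothetical call (the image path is illustrative; numberBlocksWidth and numberBlocksHeigth are fields of the surrounding class). Note that the method resizes the Mat in place and releases it when done:

    Mat query = Imgcodecs.imread("query.jpg");
    if (!query.empty()) {
        double[] features = extractFeatures(query); // length = numberBlocksWidth * numberBlocksHeigth * 6
    }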