Example usage for org.opencv.core Mat put

List of usage examples for org.opencv.core Mat put

Introduction

On this page you can find example usage for org.opencv.core Mat put.

Prototype

public int put(int row, int col, byte[] data) 

Source Link

Usage

From source file:qupath.opencv.processing.PixelImageCV.java

License:Open Source License

/**
 * Writes this image's pixel values into the supplied OpenCV Mat.
 *
 * If the destination is already 32-bit float, the pixels are copied
 * directly; otherwise they are staged in a temporary CV_32F Mat and
 * converted to the destination depth.
 *
 * @param mat destination Mat; its depth determines whether a conversion is applied
 */
public void put(Mat mat) {
    if (mat.depth() == CvType.CV_32F) {
        mat.put(0, 0, pixels);
    } else {
        // Stage in a float Mat of the same geometry, then convert.
        Mat mat2 = new Mat(new Size(width, height), CvType.CV_32F);
        mat2.put(0, 0, pixels);
        mat2.convertTo(mat, mat.depth());
        // Release native memory promptly rather than waiting for the GC finalizer.
        mat2.release();
    }
}

From source file:qupath.opencv.TissueSegmentationCommand.java

License:Open Source License

/**
 * Re-trains a random trees classifier from the annotated regions and
 * classifies every pixel of the cached image whenever the object
 * hierarchy changes.
 *
 * Annotations classified as whitespace provide background samples; all
 * other annotations provide foreground samples. Training only proceeds
 * when at least one annotation of each kind is present.
 *
 * @param event the hierarchy event that triggered the update
 */
@Override
public void hierarchyChanged(PathObjectHierarchyEvent event) {
    // Skip events we caused ourselves, and transient "changing" notifications.
    if (img == null || isChanging || event.isChanging())
        return;

    List<PathObject> annotations = hierarchy.getObjects(null, PathAnnotationObject.class);
    if (annotation != null)
        annotations.remove(annotation);
    List<PathObject> background = new ArrayList<>();
    List<PathObject> foreground = new ArrayList<>();
    PathClass whitespaceClass = PathClassFactory.getDefaultPathClass(PathClasses.WHITESPACE);
    for (PathObject a : annotations) {
        if (a == annotation)
            continue;
        if (a.getPathClass() == whitespaceClass)
            background.add(a);
        else
            foreground.add(a);
    }

    // Need at least one sample of each class before training is possible.
    if (background.isEmpty() || foreground.isEmpty())
        return;

    // Create a label mask: black = unlabelled, gray = background, white = foreground.
    Graphics2D g2d = imgMask.createGraphics();
    g2d.setColor(Color.BLACK);
    g2d.fillRect(0, 0, img.getWidth(), img.getHeight());
    // Map annotation (full-resolution server) coordinates onto the downsampled mask.
    g2d.scale((double) img.getWidth() / imageData.getServer().getWidth(),
            (double) img.getHeight() / imageData.getServer().getHeight());
    g2d.setColor(Color.GRAY);
    for (PathObject a : background) {
        g2d.draw(PathROIToolsAwt.getShape(a.getROI()));
    }
    g2d.setColor(Color.WHITE);
    for (PathObject a : foreground) {
        g2d.draw(PathROIToolsAwt.getShape(a.getROI()));
    }
    g2d.dispose();

    // Gather training samples from the labelled mask pixels.
    RTrees trees = RTrees.create();

    byte[] bytes = ((DataBufferByte) imgMask.getRaster().getDataBuffer()).getData();
    int n = 0;
    for (int i = 0; i < bytes.length; i++) {
        byte b = bytes[i];
        if (b == (byte) 0)
            continue; // unlabelled pixel
        // White (255) = foreground (class 2); any other non-zero label = background (class 1).
        if (b == (byte) 255) {
            trainingResponses[n] = 2;
        } else {
            trainingResponses[n] = 1;
        }
        for (int k = 0; k < featureStride; k++)
            training[n * featureStride + k] = features[i * featureStride + k];
        n++;
    }

    Mat matTraining = new Mat(n, featureStride, CvType.CV_32FC1);
    matTraining.put(0, 0, Arrays.copyOf(training, n * featureStride));
    Mat matResponses = new Mat(n, 1, CvType.CV_32SC1);
    matResponses.put(0, 0, Arrays.copyOf(trainingResponses, n));

    trees.train(matTraining, Ml.ROW_SAMPLE, matResponses);

    matTraining.release();
    matResponses.release();

    // Classify every pixel of the image.
    Mat samples = new Mat(buf.length, featureStride, CvType.CV_32FC1);
    samples.put(0, 0, features);
    Mat results = new Mat(buf.length, 1, CvType.CV_32SC1);
    trees.predict(samples, results, RTrees.PREDICT_AUTO);
    BufferedImage imgOutput = new BufferedImage(img.getWidth(), img.getHeight(), BufferedImage.TYPE_INT_RGB);
    float[] resultsArray = new float[buf.length];
    results.get(0, 0, resultsArray);
    // Release native Mats promptly (the original leaked these until finalization).
    samples.release();
    results.release();

    // Paint the prediction: red = class 1 (background), white = class 2 (foreground).
    // NOTE(review): imgOutput is not used after this loop in the visible code —
    // presumably it is meant to be displayed or stored; confirm against the full class.
    for (int i = 0; i < resultsArray.length; i++) {
        if (resultsArray[i] == 1f)
            imgOutput.setRGB(i % img.getWidth(), i / img.getWidth(), ColorTools.makeRGB(255, 0, 0));
        else if (resultsArray[i] == 2f)
            imgOutput.setRGB(i % img.getWidth(), i / img.getWidth(), ColorTools.makeRGB(255, 255, 255));
    }

    // Guard against re-entering this handler from our own event.
    isChanging = true;
    hierarchy.fireHierarchyChangedEvent(this);
    isChanging = false;
}

From source file:saliency.saliency.java

/**
 * Computes a bottom-up saliency map following the Itti–Koch–Niebur model:
 * early visual features (intensity, color opponency, orientation) are
 * extracted over Gaussian pyramids, combined via center-surround
 * differences into feature maps, accumulated into per-channel conspicuity
 * maps, and summed into a single saliency map written to disk.
 *
 * @param args the command line arguments (unused)
 */
public static void main(String[] args) {
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

    Mat input_img = imread("input_img/sea.jpg");
    // Temporary resize for testing.
    Imgproc.resize(input_img, input_img, new Size(1980, 1080), 0, 0, Imgproc.INTER_LINEAR);
    if (input_img.cols() == 0) {
        return;
    }

    /////////////////// step 1 : extraction of early visual features ///////////////////
    // Intensity image: I = (R + G + B) / 3, tracking the global maximum.
    Mat intensity_img = new Mat(input_img.rows(), input_img.cols(), CV_16UC1);
    int img_width = intensity_img.cols();
    int img_height = intensity_img.rows();
    int x, y;
    int i, c, s;
    int max_intensity = 0;
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            int temp_intensity = ((int) input_img.get(y, x)[0] + (int) input_img.get(y, x)[1]
                    + (int) input_img.get(y, x)[2]) / 3;
            intensity_img.put(y, x, temp_intensity);
            if (max_intensity < temp_intensity) {
                max_intensity = temp_intensity;
            }
        }
    }
    // 9-level Gaussian pyramid for intensity.
    Mat[] i_gaussian_pyramid = buildGaussianPyramid(intensity_img);

    // Intensity feature maps from center-surround differences:
    // center scales c in {2,3,4}, surround scales c+s+3 with s in {2,3} (indices c+2 / s+c+5).
    Mat[][] intensity_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            intensity_feature_map[c][s] = center_surround.main(i_gaussian_pyramid[c + 2],
                    i_gaussian_pyramid[s + c + 5], 0);
        }
    }

    // Normalize the color image by intensity. Normalization is only applied
    // where I exceeds 1/10 of its maximum; elsewhere chroma is zeroed, since
    // hue variations are not perceivable at very low luminance.
    Mat norm_input_img = input_img.clone();
    norm_input_img.convertTo(norm_input_img, CV_64F);
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            double[] temp = new double[3];
            if (intensity_img.get(y, x)[0] > (max_intensity / 10)) {
                temp[0] = norm_input_img.get(y, x)[0] / intensity_img.get(y, x)[0];
                temp[1] = norm_input_img.get(y, x)[1] / intensity_img.get(y, x)[0];
                temp[2] = norm_input_img.get(y, x)[2] / intensity_img.get(y, x)[0];
            }
            norm_input_img.put(y, x, temp);
        }
    }

    // Broadly-tuned single color channels R, G, B, Y.
    // Channel order in the source image: [0]=b, [1]=g, [2]=r.
    Mat r_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat g_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat b_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat y_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            double blue = norm_input_img.get(y, x)[0];
            double green = norm_input_img.get(y, x)[1];
            double red = norm_input_img.get(y, x)[2];
            // R = max(0, r - (g+b)/2)
            r_img.put(y, x, max(0, red - (green + blue) / 2));
            // G = max(0, g - (r+b)/2)
            g_img.put(y, x, max(0, green - (red + blue) / 2));
            // B = max(0, b - (r+g)/2)
            b_img.put(y, x, max(0, blue - (red + green) / 2));
            // Y = max(0, (r+g)/2 - |r-g|/2 - b)
            // BUG FIX: the original computed abs(r + g)/2; the yellow channel
            // of the Itti-Koch model uses |r - g|/2.
            y_img.put(y, x, max(0, (red + green) / 2 - abs(red - green) / 2 - blue));
        }
    }

    // Gaussian pyramids for the four color channels.
    Mat[] b_gaussian_pyramid = buildGaussianPyramid(b_img);
    Mat[] g_gaussian_pyramid = buildGaussianPyramid(g_img);
    Mat[] r_gaussian_pyramid = buildGaussianPyramid(r_img);
    Mat[] y_gaussian_pyramid = buildGaussianPyramid(y_img);

    // Red/green opponency feature maps: RG(c,s) = |(R(c)-G(c)) (-) (G(s)-R(s))|.
    Mat[][] rg_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat r_minus_g = r_gaussian_pyramid[c + 2].clone();
            Core.subtract(r_gaussian_pyramid[c + 2], g_gaussian_pyramid[c + 2], r_minus_g);
            Mat g_minus_r = g_gaussian_pyramid[s + c + 5].clone();
            Core.subtract(g_gaussian_pyramid[s + c + 5], r_gaussian_pyramid[s + c + 5], g_minus_r);
            rg_feature_map[c][s] = center_surround.main(r_minus_g, g_minus_r, 1);
        }
    }
    // Blue/yellow opponency feature maps: BY(c,s) = |(B(c)-Y(c)) (-) (Y(s)-B(s))|.
    Mat[][] by_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat b_minus_y = b_gaussian_pyramid[c + 2].clone();
            Core.subtract(b_gaussian_pyramid[c + 2], y_gaussian_pyramid[c + 2], b_minus_y);
            Mat y_minus_b = y_gaussian_pyramid[s + c + 5].clone();
            Core.subtract(y_gaussian_pyramid[s + c + 5], b_gaussian_pyramid[s + c + 5], y_minus_b);
            by_feature_map[c][s] = center_surround.main(b_minus_y, y_minus_b, 1);
        }
    }

    // Oriented Gabor pyramids from the intensity pyramid (0, 45, 90, 135 degrees).
    int kernel_size = 10;
    double sigma = 3; // std dev of the Gabor envelope; larger widens the stripe support
    double[] theta = { 0, Math.PI / 4, Math.PI / 2, Math.PI * 3 / 4 };
    double lambda = 5; // wavelength of the sinusoidal factor (minimum useful value ~3)
    double gamma = 0.5; // spatial aspect ratio
    Mat[][] gabor_pyramid = new Mat[4][9];
    int theta_index;
    for (theta_index = 0; theta_index < 4; theta_index++) {
        Mat gabor_kernel = Imgproc.getGaborKernel(new Size(kernel_size, kernel_size), sigma, theta[theta_index],
                lambda, gamma);
        for (i = 0; i < 9; i++) {
            gabor_pyramid[theta_index][i] = i_gaussian_pyramid[i].clone();
            Imgproc.filter2D(i_gaussian_pyramid[i], gabor_pyramid[theta_index][i], -1, gabor_kernel);
        }
    }

    // Orientation feature maps from center-surround differences.
    Mat[][][] orientation_feature_map = new Mat[4][3][2];
    for (theta_index = 0; theta_index < 4; theta_index++) {
        for (c = 0; c < 3; c++) {
            for (s = 0; s < 2; s++) {
                orientation_feature_map[theta_index][c][s] = center_surround
                        .main(gabor_pyramid[theta_index][c + 2], gabor_pyramid[theta_index][s + c + 5], 0);
            }
        }
    }

    /////////////////// step 2 : the saliency map ///////////////////
    // Intensity conspicuity map: equally-weighted sum of the 6 normalized feature maps.
    Mat intensity_conspicuity_map = buildConspicuityMap(intensity_feature_map,
            intensity_feature_map[2][0].size(), 1.0 / 6);

    // Color conspicuity map. The RG/BY maps are first rescaled to [0,255]
    // and converted to CV_16UC1 so they can be accumulated together.
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Core.normalize(rg_feature_map[c][s], rg_feature_map[c][s], 0, 255, Core.NORM_MINMAX);
            rg_feature_map[c][s].convertTo(rg_feature_map[c][s], CV_16UC1);
            Core.normalize(by_feature_map[c][s], by_feature_map[c][s], 0, 255, Core.NORM_MINMAX);
            by_feature_map[c][s].convertTo(by_feature_map[c][s], CV_16UC1);
        }
    }
    // RG and BY contributions are interleaved deliberately to keep the exact
    // accumulation order (CV_16U addWeighted saturates, so order can matter).
    Mat color_conspicuity_map = Mat.zeros(rg_feature_map[2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(rg_feature_map[c][s]);
            Mat resized_feature_map = Mat.zeros(rg_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, rg_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(color_conspicuity_map, 1, resized_feature_map, 1.0 / 12, 0, color_conspicuity_map);
            norm_out = map_norm.main(by_feature_map[c][s]);
            resized_feature_map = Mat.zeros(by_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, by_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(color_conspicuity_map, 1, resized_feature_map, 1.0 / 12, 0, color_conspicuity_map);
        }
    }

    // Orientation conspicuity map: one per-orientation map, normalized and
    // combined with equal weights.
    Mat orientation_conspicuity_map = Mat.zeros(orientation_feature_map[0][2][0].size(), CV_16UC1);
    for (theta_index = 0; theta_index < 4; theta_index++) {
        Mat per_orientation = buildConspicuityMap(orientation_feature_map[theta_index],
                orientation_feature_map[theta_index][2][0].size(), 1.0 / 6);
        Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(per_orientation), 1.0 / 4, 0,
                orientation_conspicuity_map);
    }

    // Final saliency: equally-weighted sum of the three normalized conspicuity maps.
    Mat saliency = Mat.zeros(intensity_conspicuity_map.size(), CV_16UC1);
    Core.addWeighted(saliency, 1, map_norm.main(intensity_conspicuity_map), 1.0 / 3, 0, saliency);
    Core.addWeighted(saliency, 1, map_norm.main(color_conspicuity_map), 1.0 / 3, 0, saliency);
    Core.addWeighted(saliency, 1, map_norm.main(orientation_conspicuity_map), 1.0 / 3, 0, saliency);

    Core.normalize(saliency, saliency, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(saliency, saliency, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("saliency.bmp", saliency);

    // Write out the intermediate conspicuity maps for inspection.
    Core.normalize(intensity_conspicuity_map, intensity_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(intensity_conspicuity_map, intensity_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("intensity_conspicuity_map.bmp", intensity_conspicuity_map);
    Core.normalize(color_conspicuity_map, color_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(color_conspicuity_map, color_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("color_conspicuity_map.bmp", color_conspicuity_map);
    Core.normalize(orientation_conspicuity_map, orientation_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(orientation_conspicuity_map, orientation_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map.bmp", orientation_conspicuity_map);
    Imgproc.resize(input_img, input_img, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("input_img.bmp", input_img);
}

/**
 * Builds a 9-level Gaussian pyramid: level 0 is a clone of the base image,
 * each further level is the previous one downsampled with pyrDown.
 */
private static Mat[] buildGaussianPyramid(Mat base) {
    Mat[] pyramid = new Mat[9];
    pyramid[0] = base.clone();
    for (int level = 0; level < 8; level++) {
        pyramid[level + 1] = pyramid[level].clone();
        Imgproc.pyrDown(pyramid[level + 1], pyramid[level + 1], new Size());
    }
    return pyramid;
}

/**
 * Accumulates the 3x2 center-surround feature maps of one channel into a
 * single conspicuity map: each map is normalized (map_norm), resized to
 * refSize, and added with the given weight.
 */
private static Mat buildConspicuityMap(Mat[][] featureMaps, Size refSize, double weight) {
    Mat conspicuity = Mat.zeros(refSize, CV_16UC1);
    for (int c = 0; c < 3; c++) {
        for (int s = 0; s < 2; s++) {
            Mat normOut = map_norm.main(featureMaps[c][s]);
            Mat resized = Mat.zeros(refSize, CV_16UC1);
            Imgproc.resize(normOut, resized, refSize, 0, 0, Imgproc.INTER_LINEAR);
            Core.addWeighted(conspicuity, 1, resized, weight, 0, conspicuity);
        }
    }
    return conspicuity;
}

From source file:samples.FtcTestOpenCv.java

License:Open Source License

/**
 * Blends an overlay image onto a background image at the specified location,
 * using the overlay's alpha channel for per-pixel opacity. The background
 * must have at least 3 channels (BGR) and the overlay at least 4 (BGRA).
 * Overlay pixels falling outside the background bounds are skipped.
 *
 * @param background specifies the background image (modified in place).
 * @param overlay specifies the overlay image with an alpha channel.
 * @param locX specifies the X location on the background image where the
 *        upper left corner of the overlay image should be placed.
 * @param locY specifies the Y location on the background image where the
 *        upper left corner of the overlay image should be placed.
 */
private void combineImage(Mat background, Mat overlay, int locX, int locY) {
    // Guard clause: reject images without the required channel counts.
    if (background.channels() < 3 || overlay.channels() < 4) {
        throw new RuntimeException(
                "Invalid image format (src=" + overlay.channels() + ",dest=" + background.channels() + ").");
    }

    for (int row = 0; row < overlay.rows(); row++) {
        // Corresponding background row; skip rows outside the background.
        int destRow = locY + row;
        if (destRow < 0 || destRow >= background.rows()) {
            continue;
        }
        for (int col = 0; col < overlay.cols(); col++) {
            // Corresponding background column; skip columns outside the background.
            int destCol = locX + col;
            if (destCol < 0 || destCol >= background.cols()) {
                continue;
            }
            double[] srcPixel = overlay.get(row, col);
            double[] destPixel = background.get(destRow, destCol);
            // Opacity as a fraction in [0, 1], taken from the alpha channel.
            double opacity = srcPixel[3] / 255.0;
            // Alpha-blend the three BGR channels.
            for (int channel = 0; channel < 3; channel++) {
                destPixel[channel] = destPixel[channel] * (1.0 - opacity) + srcPixel[channel] * opacity;
            }
            background.put(destRow, destCol, destPixel);
        }
    }
}

From source file:shadowstrider.GeneratePlates.java

/**
 * Generates a batch of unique license-plate images: random 3-letter +
 * 3-digit plate strings are composed from character template images onto a
 * plate template, converted to an OpenCV Mat, greyscaled, and written to
 * the generatedPlatesOutput directory.
 *
 * @param frame the frame passed by the caller (display code is currently
 *              commented out, so it is unused here)
 */
public void loadAndDisplayImage(JFrame frame) {
    System.out.println("Plates will be output at: " + System.getProperty("user.dir")
            + "/src/shadowstrider/generatedPlateOutputs");
    System.out.println("Working..");
    long startTime = System.currentTimeMillis();
    // Load the OpenCV native library once, not once per generated plate.
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    Random r = new Random();
    // Tracks plates generated so far; duplicates are skipped, so the loop
    // runs until the target count of unique plates is reached.
    TreeSet<String> listOfPlates = new TreeSet<String>();
    final int targetPlateCount = 69;
    int ctr = 0;
    while (ctr < targetPlateCount) {
        JLayeredPane lpane = new JLayeredPane();
        // Three random uppercase letters followed by three random digits.
        String plateLetters = "";
        for (int i = 0; i < 6; i++) {
            plateLetters += (i < 3) ? Character.toString((char) (r.nextInt(26) + 65))
                    : Integer.toString(r.nextInt(10));
        }
        if (!listOfPlates.contains(plateLetters)) {
            listOfPlates.add(plateLetters);
            BufferedImage loadImg = Loadimage.loadImage(System.getProperty("user.dir")
                    + "/src/shadowstrider/resources/new_letters/TEMPLATE_NEW.png");
            // Load the template image for each character of the plate.
            BufferedImage[] plateChars = new BufferedImage[6];
            for (int i = 0; i < plateChars.length; i++) {
                plateChars[i] = Loadimage
                        .loadImage(System.getProperty("user.dir") + "/src/shadowstrider/resources/newer/"
                                + Character.toString(plateLetters.charAt(i)) + ".png");
            }
            BufferedImage generatedPlate = generateMaskedImage(loadImg, plateChars);
            ctr++;
            // Convert the composed plate to a Mat, greyscale it, and write it out.
            Image thresholder = new Image();
            byte[] pixels = ((DataBufferByte) generatedPlate.getRaster().getDataBuffer()).getData();
            Mat generatedPlateMat = new Mat(generatedPlate.getHeight(), generatedPlate.getWidth(),
                    CvType.CV_8UC3);
            generatedPlateMat.put(0, 0, pixels);
            thresholder.setImage(generatedPlateMat);
            Mat equalized = thresholder.generateGreyScaleImage();
            writeImage(thresholder.convertMatToBufferedImage(equalized), System.getProperty("user.dir")
                    + "/src/shadowstrider/generatedPlatesOutput/" + plateLetters + ".png", "png");
            System.out.println(Integer.toString(ctr) + ((ctr > 1) ? " plates generated" : " plate generated"));
        }
    }
    System.out.println("Done");
    long endTime = System.currentTimeMillis();
    System.out
            .println(Integer.toString(ctr) + " plates generated in " + (endTime - startTime) + " milliseconds");
}

From source file:spring2017ip.ConvolutionDemo.java

/**
 * Convolves a single-channel image with an arbitrary rectangular kernel.
 *
 * The original hard-coded 3x3 margins (the "have to fix this" TODO); the
 * margins are now derived from the kernel dimensions, so any odd-sized
 * kernel works. Border pixels where the kernel does not fully fit are left
 * unwritten, matching the original behavior for 3x3 kernels.
 *
 * @param inputImage single-channel source image
 * @param kernel     convolution weights, indexed [row][col]
 * @param anchorX    kernel anchor X (currently unused, kept for interface compatibility)
 * @param anchorY    kernel anchor Y (currently unused, kept for interface compatibility)
 * @return a new Mat of the same size/type containing the convolved interior
 */
public Mat convolute(Mat inputImage, double kernel[][], double anchorX, double anchorY) {
    Mat outputImage = new Mat(inputImage.rows(), inputImage.cols(), inputImage.type());

    // Half-extents of the kernel; the loop bounds keep the kernel fully inside the image.
    int halfRows = kernel.length / 2;
    int halfCols = kernel[0].length / 2;
    for (int i = halfRows; i < inputImage.rows() - (kernel.length - 1 - halfRows); i++)
        for (int j = halfCols; j < inputImage.cols() - (kernel[0].length - 1 - halfCols); j++) {
            double sum = 0;

            for (int r = 0; r < kernel.length; r++)
                for (int c = 0; c < kernel[r].length; c++) {
                    double pixel[] = inputImage.get(i - halfRows + r, j - halfCols + c);
                    sum += kernel[r][c] * pixel[0];
                }

            outputImage.put(i, j, sum);
        }
    return outputImage;
}

From source file:spring2017ip.ConvolutionDemo.java

/**
 * Combines horizontal and vertical gradient images into a gradient
 * magnitude image: m = sqrt(gx^2 + gy^2), per pixel (channel 0 only).
 *
 * @param gx horizontal gradient image
 * @param gy vertical gradient image (same size/type as gx)
 * @return new Mat of the same size/type containing the magnitudes
 */
public Mat combineGxGy(Mat gx, Mat gy) {
    Mat magnitude = new Mat(gx.rows(), gx.cols(), gx.type());
    int rows = gx.height();
    int cols = gx.width();
    for (int row = 0; row < rows; row++) {
        for (int col = 0; col < cols; col++) {
            double dx = gx.get(row, col)[0];
            double dy = gy.get(row, col)[0];
            magnitude.put(row, col, Math.sqrt(dx * dx + dy * dy));
        }
    }
    return magnitude;
}

From source file:us.cboyd.android.dicom.DcmInfoFragment.java

License:Open Source License

/**
 * Reads the currently-selected DICOM file, renders its pixel data into the
 * image view (scaled by pixel spacing), and refreshes the tag list. On any
 * failure, hides the image and shows an error message instead.
 */
public void updateDicomInfo() {
    mDicomObject = null;
    if ((mCurrDir != null) && (mFileList != null) && (mPosition >= 0) && (mPosition < mFileList.size())) {
        try {
            // try-with-resources: the original leaked the stream if
            // readDicomObject() threw before the explicit close().
            try (DicomInputStream dis = new DicomInputStream(new FileInputStream(getDicomFile()))) {
                //mDicomObject = dis.readFileMetaInformation();
                mDicomObject = dis.readDicomObject();
            }

            // Get the SOP Class element
            DicomElement de = mDicomObject.get(Tag.MediaStorageSOPClassUID);
            String SOPClass = "";
            if (de != null)
                SOPClass = de.getString(new SpecificCharacterSet(""), true);
            else
                SOPClass = "null";
            Log.i("cpb", "SOP Class: " + SOPClass);

            // TODO: DICOMDIR support
            if (SOPClass.equals(UID.MediaStorageDirectoryStorage)) {
                // Directory files carry no renderable pixel data.
                showImage(false);
                mErrText.setText(mRes.getString(R.string.err_dicomdir));
            } else {
                showImage(true);
                int rows = mDicomObject.getInt(Tag.Rows);
                int cols = mDicomObject.getInt(Tag.Columns);
                // Load raw pixel data into a 32-bit signed Mat for scaling.
                Mat temp = new Mat(rows, cols, CvType.CV_32S);
                temp.put(0, 0, mDicomObject.getInts(Tag.PixelData));
                // [Y, X] or [row, column]
                // NOTE(review): throws NPE if PixelSpacing is absent; the
                // outer catch turns that into the generic read error — confirm
                // this is the intended fallback.
                double[] spacing = mDicomObject.getDoubles(Tag.PixelSpacing);
                double scaleY2X = spacing[1] / spacing[0];

                // Window the full dynamic range into 8 bits.
                // NOTE(review): diff == 0 for a flat image → divide by zero;
                // confirm such images cannot occur here.
                Core.MinMaxLocResult minmax = Core.minMaxLoc(temp);
                double diff = minmax.maxVal - minmax.minVal;
                temp.convertTo(temp, CvType.CV_8UC1, 255.0d / diff, 0);

                // Set the image
                Bitmap imageBitmap = Bitmap.createBitmap(cols, rows, Bitmap.Config.ARGB_8888);
                Log.w("cpb", "test3");
                Utils.matToBitmap(temp, imageBitmap, true);
                Log.w("cpb", "test4");
                mImageView.setImageBitmap(imageBitmap);
                // Compensate for non-square pixels by stretching X.
                mImageView.setScaleX((float) scaleY2X);
            }

            // TODO: Add selector for info tag listing
            mTags = mRes.getStringArray(R.array.dcmtag_default);
            refreshTagList();

        } catch (Exception ex) {
            showImage(false);
            mErrText.setText(mRes.getString(R.string.err_file_read) + mFileList.get(mPosition) + "\n\n"
                    + ex.getMessage());
        }
    } else {
        showImage(false);
        mErrText.setText(mRes.getString(R.string.err_unknown_state));
    }
}

From source file:us.cboyd.android.shared.ImageContrastView.java

License:Open Source License

/**
 * Rebuilds the contrast-bar bitmap for the given brightness/contrast
 * window, optionally inverted and colormapped, and sets it on this view.
 *
 * @param brightness brightness as a percentage (0-100)
 * @param contrast   contrast as a percentage (0-100)
 * @param colormap   OpenCV Contrib colormap id, or negative for grayscale
 * @param inv        true to invert the ramp (white-to-black)
 */
public void setImageContrastCV(double brightness, double contrast, int colormap, boolean inv) {
    // Window width in pixels: 100% contrast -> zero-width window.
    double diff = getWidth();
    double ImWidth = (1 - (contrast / 100.0d)) * diff;
    // Linear mapping ramp: out = alpha * in + beta.
    double alpha = 255.0d / ImWidth;
    // NOTE(review): beta uses the PREVIOUS value of the field mMin, which is
    // reassigned three lines below — confirm this ordering is intentional.
    double beta = alpha * (-mMin);
    // Window center/level and the new min/max bounds of the window.
    mLevel = ImWidth / 2.0d + (diff - ImWidth) * (1.0d - (brightness / 100.0d));
    mMax = ImWidth + (diff - ImWidth) * (1.0d - (brightness / 100.0d));
    mMin = (diff - ImWidth) * (1.0d - (brightness / 100.0d));

    // Build a 1-row ramp 0..n-1, one entry per horizontal pixel.
    int i = 0;
    int n = (int) diff;
    Mat cmap = new Mat(1, n, CvType.CV_32S);
    for (i = 0; i < n; i++) {
        cmap.put(0, i, i);
    }
    if (inv) {
        // Mirror the ramp: out = 255 - (alpha * in + beta).
        alpha *= -1.0d;
        beta = 255.0d - beta;
    }
    // Apply the window mapping and clamp into 8-bit.
    cmap.convertTo(cmap, CvType.CV_8UC1, alpha, beta);
    if (colormap >= 0) {
        Contrib.applyColorMap(cmap, cmap, colormap);
        //applyColorMap returns a BGR image, but createBitmap expects RGB
        //do a conversion to swap blue and red channels:
        Imgproc.cvtColor(cmap, cmap, Imgproc.COLOR_RGB2BGR);
    }
    Bitmap cmapBitmap = Bitmap.createBitmap(n, 1, Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(cmap, cmapBitmap, false);
    setImageBitmap(cmapBitmap);
}

From source file:usefull.ColourQuery.java

License:LGPL

/**
 * Demonstrates per-pixel colour access on an OpenCV Mat: reads BGR values
 * at several coordinates, paints a short run of pixels blue, and displays
 * the result.
 */
public static void main(String[] args) {

    // load the Core OpenCV library by name
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

    // load an image from file (read and decode JPEG file)
    Mat inputImage = Highgui.imread("files/lena1.png");

    // create a display window using an Imshow object
    Imshow ims1 = new Imshow("My Image");

    // display some colour values (note the BGR colour channel order)
    int[][] queryPoints = { { 0, 0 }, { 50, 50 }, { 100, 25 }, { 17, 234 } };
    for (int[] point : queryPoints) {
        double[] bgr = inputImage.get(point[0], point[1]);
        System.out.println("colour @ (" + point[0] + "," + point[1] + ") = B: " + bgr[0] + " G: " + bgr[1]
                + " R: " + bgr[2]);
    }

    // set some pixel values to blue (i.e. BGR = (255,0,0)
    double[] colour = { 255, 0, 0 };
    for (int col = 25; col >= 22; col--) {
        inputImage.put(25, col, colour);
    }
    // ...

    // display image
    ims1.showImage(inputImage);

}