Example usage for org.opencv.core Mat get

List of usage examples for org.opencv.core Mat get

Introduction

On this page you can find example usages for org.opencv.core Mat get.

Prototype

public double[] get(int row, int col) 
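
Mat.get(int row, int col) reads one matrix element and returns it as a double[] holding one value per channel (three values for a CV_8UC3 color image, one for a grayscale image). A minimal sketch, assuming the OpenCV native library is loadable and "lena.jpg" is a placeholder path:

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.highgui.Highgui;

public class MatGetDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME); // native library must be on java.library.path
        Mat img = Highgui.imread("lena.jpg");  // loaded as CV_8UC3, channel order B, G, R
        // a missing file yields an empty Mat; check img.empty() in real code
        double[] pixel = img.get(0, 0);        // one double per channel
        System.out.println("B=" + pixel[0] + " G=" + pixel[1] + " R=" + pixel[2]);
    }
}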

Usage

From source file:overwatchteampicker.OverwatchTeamPicker.java

public static ReturnValues findImage(String template, String source, int flag) {
    File lib = null;
    BufferedImage image = null;
    try {
        image = ImageIO.read(new File(source));
    } catch (Exception e) {
        e.printStackTrace();
    }

    String os = System.getProperty("os.name");
    String bitness = System.getProperty("sun.arch.data.model");

    if (os.toUpperCase().contains("WINDOWS")) {
        if (bitness.endsWith("64")) {
            lib = new File("C:\\Users\\POWERUSER\\Downloads\\opencv\\build\\java\\x64\\"
                    + System.mapLibraryName("opencv_java2413"));
        } else {
            lib = new File("libs//x86//" + System.mapLibraryName("opencv_java2413"));
        }
    }
    System.load(lib.getAbsolutePath());
    String tempObject = "images\\hero_templates\\" + template + ".png";
    String source_pic = source;
    Mat objectImage = Highgui.imread(tempObject, Highgui.CV_LOAD_IMAGE_GRAYSCALE);
    Mat sceneImage = Highgui.imread(source_pic, Highgui.CV_LOAD_IMAGE_GRAYSCALE);

    MatOfKeyPoint objectKeyPoints = new MatOfKeyPoint();
    FeatureDetector featureDetector = FeatureDetector.create(FeatureDetector.SURF);
    featureDetector.detect(objectImage, objectKeyPoints);
    KeyPoint[] keypoints = objectKeyPoints.toArray();
    MatOfKeyPoint objectDescriptors = new MatOfKeyPoint();
    DescriptorExtractor descriptorExtractor = DescriptorExtractor.create(DescriptorExtractor.SURF);
    descriptorExtractor.compute(objectImage, objectKeyPoints, objectDescriptors);

    // Create the matrix for the output image (8-bit, 3-channel BGR).
    Mat outputImage = new Mat(objectImage.rows(), objectImage.cols(), CvType.CV_8UC3);
    Scalar newKeypointColor = new Scalar(255, 0, 0);
    Features2d.drawKeypoints(objectImage, objectKeyPoints, outputImage, newKeypointColor, 0);

    // Match object image with the scene image
    MatOfKeyPoint sceneKeyPoints = new MatOfKeyPoint();
    MatOfKeyPoint sceneDescriptors = new MatOfKeyPoint();
    featureDetector.detect(sceneImage, sceneKeyPoints);
    descriptorExtractor.compute(sceneImage, sceneKeyPoints, sceneDescriptors);

    Mat matchoutput = new Mat(sceneImage.rows() * 2, sceneImage.cols() * 2, CvType.CV_8UC3);
    Scalar matchesColor = new Scalar(0, 255, 25);

    List<MatOfDMatch> matches = new LinkedList<MatOfDMatch>();
    DescriptorMatcher descriptorMatcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
    descriptorMatcher.knnMatch(objectDescriptors, sceneDescriptors, matches, 2);

    LinkedList<DMatch> goodMatchesList = new LinkedList<DMatch>();

    float nndrRatio = .78f;

    for (int i = 0; i < matches.size(); i++) {
        MatOfDMatch matofDMatch = matches.get(i);
        DMatch[] dmatcharray = matofDMatch.toArray();
        DMatch m1 = dmatcharray[0];
        DMatch m2 = dmatcharray[1];

        if (m1.distance <= m2.distance * nndrRatio) {
            goodMatchesList.addLast(m1);

        }
    }

    if (goodMatchesList.size() >= 4) {

        List<KeyPoint> objKeypointlist = objectKeyPoints.toList();
        List<KeyPoint> scnKeypointlist = sceneKeyPoints.toList();

        LinkedList<Point> objectPoints = new LinkedList<>();
        LinkedList<Point> scenePoints = new LinkedList<>();

        for (int i = 0; i < goodMatchesList.size(); i++) {
            objectPoints.addLast(objKeypointlist.get(goodMatchesList.get(i).queryIdx).pt);
            scenePoints.addLast(scnKeypointlist.get(goodMatchesList.get(i).trainIdx).pt);
        }

        MatOfPoint2f objMatOfPoint2f = new MatOfPoint2f();
        objMatOfPoint2f.fromList(objectPoints);
        MatOfPoint2f scnMatOfPoint2f = new MatOfPoint2f();
        scnMatOfPoint2f.fromList(scenePoints);

        Mat homography = Calib3d.findHomography(objMatOfPoint2f, scnMatOfPoint2f, Calib3d.RANSAC, 3);

        Mat obj_corners = new Mat(4, 1, CvType.CV_32FC2);
        Mat scene_corners = new Mat(4, 1, CvType.CV_32FC2);

        obj_corners.put(0, 0, new double[] { 0, 0 });
        obj_corners.put(1, 0, new double[] { objectImage.cols(), 0 });
        obj_corners.put(2, 0, new double[] { objectImage.cols(), objectImage.rows() });
        obj_corners.put(3, 0, new double[] { 0, objectImage.rows() });

        Core.perspectiveTransform(obj_corners, scene_corners, homography);

        Mat img = Highgui.imread(source_pic, Highgui.CV_LOAD_IMAGE_COLOR);

        Core.line(img, new Point(scene_corners.get(0, 0)), new Point(scene_corners.get(1, 0)),
                new Scalar(0, 255, 255), 4);
        Core.line(img, new Point(scene_corners.get(1, 0)), new Point(scene_corners.get(2, 0)),
                new Scalar(255, 255, 0), 4);
        Core.line(img, new Point(scene_corners.get(2, 0)), new Point(scene_corners.get(3, 0)),
                new Scalar(0, 255, 0), 4);
        Core.line(img, new Point(scene_corners.get(3, 0)), new Point(scene_corners.get(0, 0)),
                new Scalar(0, 255, 0), 4);

        MatOfDMatch goodMatches = new MatOfDMatch();
        goodMatches.fromList(goodMatchesList);

        Features2d.drawMatches(objectImage, objectKeyPoints, sceneImage, sceneKeyPoints, goodMatches,
                matchoutput, matchesColor, newKeypointColor, new MatOfByte(), 2);
        if (new Point(scene_corners.get(0, 0)).x < new Point(scene_corners.get(1, 0)).x
                && new Point(scene_corners.get(0, 0)).y < new Point(scene_corners.get(2, 0)).y) {
            System.out.println("found " + template);
            Highgui.imwrite("points.jpg", outputImage);
            Highgui.imwrite("matches.jpg", matchoutput);
            Highgui.imwrite("final.jpg", img);

            if (flag == 0) {
                ReturnValues retVal = null;
                int y = (int) new Point(scene_corners.get(3, 0)).y;
                int yHeight = (int) new Point(scene_corners.get(3, 0)).y
                        - (int) new Point(scene_corners.get(2, 0)).y;
                if (y < image.getHeight() * .6) { // if the found hero is in the upper part of the image, use point (3,0)
                    retVal = new ReturnValues(y + (int) (image.getHeight() * .01), yHeight);
                } else { // if the found hero is in the lower part of the image, use point (2,0)
                    y = (int) new Point(scene_corners.get(2, 0)).y;
                    retVal = new ReturnValues(y + (int) (image.getHeight() * .3), yHeight);
                }
                return retVal;
            } else if (flag == 1) {
                int[] xPoints = new int[4];
                int[] yPoints = new int[4];

                xPoints[0] = (int) (new Point(scene_corners.get(0, 0)).x);
                xPoints[1] = (int) (new Point(scene_corners.get(1, 0)).x);
                xPoints[2] = (int) (new Point(scene_corners.get(2, 0)).x);
                xPoints[3] = (int) (new Point(scene_corners.get(3, 0)).x);

                yPoints[0] = (int) (new Point(scene_corners.get(0, 0)).y);
                yPoints[1] = (int) (new Point(scene_corners.get(1, 0)).y);
                yPoints[2] = (int) (new Point(scene_corners.get(2, 0)).y);
                yPoints[3] = (int) (new Point(scene_corners.get(3, 0)).y);

                ReturnValues retVal = new ReturnValues(xPoints, yPoints);
                return retVal;

            }
        }
    }
    return null;

}
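
A note on the Mat.get calls above: scene_corners is a 4x1 CV_32FC2 matrix, so each scene_corners.get(i, 0) returns a two-element double[] holding {x, y}, which the Point(double[]) constructor consumes directly. A standalone sketch of that pattern:

Mat corners = new Mat(4, 1, CvType.CV_32FC2); // two channels per element: x and y
corners.put(0, 0, new double[] { 10, 20 });
double[] xy = corners.get(0, 0); // {10.0, 20.0}
Point p = new Point(xy);         // Point(double[]) reads x from [0] and y from [1]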

From source file:qupath.opencv.processing.OpenCVTools.java

License:Open Source License

/**
 * Fill holes in a binary image (1-channel, 8-bit unsigned) with an area <= maxArea.
 *
 * @param matBinary the binary image (modified in place)
 * @param maxArea the maximum hole area to fill
 */
public static void fillSmallHoles(Mat matBinary, double maxArea) {
    Mat matHoles = new Mat();
    invertBinary(matBinary, matHoles);
    List<MatOfPoint> contours = new ArrayList<>();
    Mat hierarchy = new Mat();
    Imgproc.findContours(matHoles, contours, hierarchy, Imgproc.RETR_CCOMP, Imgproc.CHAIN_APPROX_SIMPLE);
    List<MatOfPoint> contoursTemp = new ArrayList<>(1);
    Scalar color = new Scalar(255);
    int ind = 0;
    for (MatOfPoint contour : contours) {
        // Only fill the small, inner contours
        if (hierarchy.get(0, ind)[3] >= 0 || Imgproc.contourArea(contour) > maxArea) {
            ind++;
            continue;
        }
        contoursTemp.clear();
        contoursTemp.add(contour);
        Imgproc.drawContours(matBinary, contoursTemp, 0, color, -1);
        ind++;
    }
}
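
A brief usage sketch, assuming the OpenCV native library is already loaded and using the Highgui I/O seen elsewhere on this page ("mask.png" is a placeholder path):

Mat gray = Highgui.imread("mask.png", Highgui.CV_LOAD_IMAGE_GRAYSCALE); // hypothetical input
Mat matBinary = new Mat();
Imgproc.threshold(gray, matBinary, 127, 255, Imgproc.THRESH_BINARY); // 1-channel, 8-bit binary
OpenCVTools.fillSmallHoles(matBinary, 100.0); // fills holes of area <= 100 pixels in place
Highgui.imwrite("mask_filled.png", matBinary);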

From source file:Reconhecimento.Circulo.java

/**
 * Segments the circle to track its movement.
 */
public static void segmentarCirculo(int minRaio, int maxRaio, int minThreshold, int maxThreshold,
        int medianBlurKernel) {

    class threadSegmentar extends Thread {

        public boolean closed = false;
        public double CentroX;
        public double CentroY;

        @Override
        public void run() {
            int contador = 0;

            File folder = new File("imagens/frames");
            if (folder.exists() == false) {
                folder.mkdir();
            }
            for (String file : folder.list()) {
                new File(folder, file).delete();
            }

            ind = (char) ((int) ind + 1);

            JFrame frame = new JFrame();
            JLabel label = new JLabel();
            frame.add(label);
            frame.setBounds(10, 10, 640, 480);
            label.setSize(640, 480);
            frame.setLocation(250, 250);
            frame.setVisible(true);
            closed = false;

            frame.addWindowListener(new WindowAdapter() {
                @Override
                public void windowClosing(WindowEvent e) {
                    closed = true;
                }
            });

            Mat img = new Mat();
            Mat circles = new Mat();
            Mat grayImg = new Mat();
            Mat gravar = new Mat();
            Mat element = new Mat();

            VideoCapture cap = new VideoCapture(Video.videoAtual);

            // capture the first frame of the video
            cap.read(img);

            Imgproc.cvtColor(img, grayImg, Imgproc.COLOR_BGR2GRAY);

            Imgproc.medianBlur(grayImg, grayImg, 5);

            Imgproc.HoughCircles(grayImg, circles, Imgproc.CV_HOUGH_GRADIENT, 1, 100, 220, 10, minRaio,
                    maxRaio);

            double Circle[] = circles.get(0, 0);

            Point center = new Point(Math.round(Circle[0]), Math.round(Circle[1]));

            int radius = (int) Math.round(Circle[2]);

            CentroX = center.x;
            CentroY = center.y;

            cap.read(img);

            boolean continuar = true;

            while (continuar) {

                // convert the image to grayscale
                Imgproc.cvtColor(img, grayImg, Imgproc.COLOR_BGR2GRAY);

                // thresholding
                Imgproc.threshold(grayImg, grayImg, minThreshold, maxThreshold, THRESH_BINARY_INV);

                Core.bitwise_not(grayImg, grayImg);

                // median filter
                Imgproc.medianBlur(grayImg, grayImg, medianBlurKernel);

                // edge detection (Canny)
                Imgproc.Canny(grayImg, grayImg, 100, 255);

                // apply the circular Hough transform
                Imgproc.HoughCircles(grayImg, circles, Imgproc.CV_HOUGH_GRADIENT, 1, 100, 220, 9, minRaio,
                        maxRaio);

                try {
                    for (int x = 0; x < circles.cols(); x++) {
                        double vCircle[] = circles.get(0, x);

                        center = new Point(Math.round(vCircle[0]), Math.round(vCircle[1]));
                        radius = (int) Math.round(vCircle[2]);

                        // check the distance between the circle in the previous frame and the current one
                        if (((center.x <= CentroX) || (center.x - CentroX <= 5))
                                && (Math.sqrt(CentroX * CentroX + CentroY * CentroY)
                                        - Math.sqrt(center.x * center.x + center.y * center.y) <= 70.0)
                                && (Math.sqrt(CentroX * CentroX + CentroY * CentroY)
                                        - Math.sqrt(center.x * center.x + center.y * center.y) >= -70.0)) {

                            Core.circle(img, center, radius, new Scalar(0, 0, 255), 3, 8, 0);

                            CentroX = center.x;
                            CentroY = center.y;
                        }
                    }
                } catch (Exception e) {
                    // ignore frames where no circle is detected
                }

                Imgproc.resize(img, gravar, new Size(640, 480));
                Highgui.imwrite("imagens/frames/houghcircles" + contador + ind + ".jpg", gravar);

                label.setIcon(new ImageIcon("imagens/frames/houghcircles" + contador + ind + ".jpg"));

                contador++;

                continuar = cap.read(img) && !closed;
            }
        }
    }

    if (Video.videoAtual == null) {
        JOptionPane.showMessageDialog(null, "Selecione um arquivo de video!", "Nenhum vídeo selecionado",
                JOptionPane.WARNING_MESSAGE);
        Video.abrirVideo();
    }

    threadSegmentar t = new threadSegmentar();
    t.start();
}
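
One caveat with the circles.get(0, 0) lookup above: Imgproc.HoughCircles leaves circles empty when it detects nothing in the first frame, so an unguarded get would fail. A minimal defensive sketch, reusing the parameters from the run() method:

Imgproc.HoughCircles(grayImg, circles, Imgproc.CV_HOUGH_GRADIENT, 1, 100, 220, 10, minRaio, maxRaio);
if (circles.cols() > 0) {
    double[] c = circles.get(0, 0); // {centerX, centerY, radius} as doubles
    Point center = new Point(Math.round(c[0]), Math.round(c[1]));
    int radius = (int) Math.round(c[2]);
} else {
    // no circle detected; skip this frame or keep the previous center
}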

From source file:Reconhecimento.Regua.java

public static void segmentarRegua() {

    long tempoInicio = System.currentTimeMillis();

    // coordinates of the selection rectangle
    int x0 = TelaSegmentarRegua.localizarReguaPanel1.x0;
    int y0 = TelaSegmentarRegua.localizarReguaPanel1.y0;
    int x = TelaSegmentarRegua.localizarReguaPanel1.xf;
    int y = TelaSegmentarRegua.localizarReguaPanel1.yf;

    if (x0 > x) {
        int aux = x0;
        x0 = x;
        x = aux;
    }

    if (y0 > y) {
        int aux = y0;
        y0 = y;
        y = aux;
    }

    Mat bigImage = Highgui.imread(TelaSegmentarRegua.localizarReguaPanel1.imagem);
    // crop the image according to the selection
    Mat img = new Mat(bigImage, new Rect(x0, y0, x - x0, y - y0));

    Mat grayImg = new Mat();
    // convert the image to grayscale
    Imgproc.cvtColor(img, grayImg, Imgproc.COLOR_BGR2GRAY);
    // thresholding
    Imgproc.threshold(grayImg, grayImg, 190, 255, THRESH_BINARY_INV);
    Core.bitwise_not(grayImg, grayImg);

    List<Point> pontos = new ArrayList<Point>();

    // add every point belonging to the ruler to a list
    for (int i = 0; i < grayImg.rows(); i++) {
        for (int j = 0; j < grayImg.cols(); j++) {
            if (grayImg.get(i, j)[0] == 255) { // white pixel in the 1-channel mask
                pontos.add(new Point(j, i));
                Core.line(img, new Point(j, i), new Point(j, i), new Scalar(255, 0, 0));
            }
        }
    }

    String filename = "imagens/regua_segmentada" + Math.random() * 1000 + ".jpg";

    Mat img2 = new Mat();
    Imgproc.resize(img, img2, new Size(img.size().width * 3.0, img.size().height * 3.0));
    Highgui.imwrite(filename, img2);

    int xMin = 5000, yMin = 5000;
    int xMax = 0, yMax = 0;

    // extreme points of the ruler
    for (Point ponto : pontos) {
        if (ponto.x > xMax) {
            xMax = (int) ponto.x;
        }
        if (ponto.x < xMin) {
            xMin = (int) ponto.x;
        }
        if (ponto.y > yMax) {
            yMax = (int) ponto.y;
        }
        if (ponto.y < yMin) {
            yMin = (int) ponto.y;
        }
    }

    // ruler in horizontal position
    if (xMax - xMin > yMax - yMin) {
        /*
        the scale of the image used in processing makes it necessary
        to multiply by 2 to keep the measurements in proportion
        */
        larguraPixels = (xMax - xMin) * 2;
    }
    // ruler in vertical position
    else {
        larguraPixels = (yMax - yMin) * 2;
    }

    long tempoFim = System.currentTimeMillis() - tempoInicio;

    centimetrosPorPixel = 30.0 / larguraPixels;

    TelaSegmentarRegua2 telaResposta = new TelaSegmentarRegua2();
    telaResposta.jLabel1.setIcon(new ImageIcon(filename));
    telaResposta.jLabel4.setText(larguraPixels + " pixels");
    telaResposta.jLabel5.setText(String.valueOf(centimetrosPorPixel).substring(0, 5));
    telaResposta.jLabel7.setText(tempoFim + " ms");
    telaResposta.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE);
    telaResposta.setLocation(200, 200);
    telaResposta.setVisible(true);

}

From source file:Reconhecimento.SaltoSegmentacaoCirculo.java

private static Point segmentarCirculo(Mat img) {
    Mat grayImg = new Mat();
    Mat circles = new Mat();
    Mat element = new Mat();

    Imgproc.cvtColor(img, grayImg, Imgproc.COLOR_BGR2GRAY);

    // median filter
    Imgproc.medianBlur(grayImg, grayImg, 5);

    // circular Hough transform
    Imgproc.HoughCircles(grayImg, circles, Imgproc.CV_HOUGH_GRADIENT, 1, 100, 220, 17, 5, 10);

    Point centro = new Point(0, 0);
    Point center;

    for (int x = 0; x < circles.cols(); x++) {
        double vCircle[] = circles.get(0, x);

        center = new Point(Math.round(vCircle[0]), Math.round(vCircle[1]));

        // keep the lowest circle in the image (largest y)
        if (centro.y < center.y) {
            centro = center.clone();
        }

        int radius = (int) Math.round(vCircle[2]);

        Core.circle(img, center, radius, new Scalar(0, 0, 255), 3, 8, 0);
    }
    return centro;
}

From source file:saliency.saliency.java

/**
 * @param args the command line arguments
 */
public static void main(String[] args) {

    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

    Mat input_img = imread("input_img/sea.jpg");
    //for temp test start
    Imgproc.resize(input_img, input_img, new Size(1980, 1080), 0, 0, Imgproc.INTER_LINEAR);
    //for temp test end
    if (input_img.cols() == 0) {
        return;
    }

    //benchmark
    ///////////////////////step 1 : Extraction of Early Visual Features///////////////////////////////
    //intensity image: intensity_img
    Mat intensity_img = new Mat(input_img.rows(), input_img.cols(), CV_16UC1);
    //intensity = (R+G+B)/3
    int img_width = intensity_img.cols();
    int img_height = intensity_img.rows();
    int x, y;
    int i, c, s;
    int max_intensity = 0;
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            int temp_intensity = ((int) input_img.get(y, x)[0] + (int) input_img.get(y, x)[1]
                    + (int) input_img.get(y, x)[2]) / 3;
            intensity_img.put(y, x, temp_intensity);
            if (max_intensity < temp_intensity) {
                max_intensity = temp_intensity;
            }
        }
    }
    //create Gaussian pyramid for intensity
    Mat[] i_gaussian_pyramid = new Mat[9];
    i_gaussian_pyramid[0] = intensity_img.clone();
    for (i = 0; i < 8; i++) {
        i_gaussian_pyramid[i + 1] = i_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(i_gaussian_pyramid[i + 1], i_gaussian_pyramid[i + 1], new Size());
    }

    //create intensity feature map using center-surround differences
    Mat[][] intensity_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            intensity_feature_map[c][s] = center_surround.main(i_gaussian_pyramid[c + 2],
                    i_gaussian_pyramid[s + c + 5], 0);
        }
    }
    //benchmark
    //imwrite("intensity_feature_map_00.bmp", intensity_feature_map[0][0]);
    //get the color image normalized by intensity I
    Mat norm_input_img = input_img.clone();
    norm_input_img.convertTo(norm_input_img, CV_64F);
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            //normalization is only applied at locations where I is larger than 1/10 of its maximum over the entire image
            double[] temp = new double[3];
            if (intensity_img.get(y, x)[0] > (max_intensity / 10)) {
                temp[0] = norm_input_img.get(y, x)[0] / intensity_img.get(y, x)[0];
                temp[1] = norm_input_img.get(y, x)[1] / intensity_img.get(y, x)[0];
                temp[2] = norm_input_img.get(y, x)[2] / intensity_img.get(y, x)[0];
                norm_input_img.put(y, x, temp);
            } else {
                temp[0] = 0;
                temp[1] = 0;
                temp[2] = 0;
                norm_input_img.put(y, x, temp);
            }
        }
    }
    //get R G B Y(Yellow) single color channel images
    Mat r_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat g_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat b_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    Mat y_img = new Mat(input_img.rows(), input_img.cols(), CV_64FC1);
    //[0]: b [1]:g [2]:r
    for (x = 0; x < img_width; x++) {
        for (y = 0; y < img_height; y++) {
            //R = max(0,r-(g+b)/2)
            double temp_chroma = max(0, (norm_input_img.get(y, x)[2]
                    - (norm_input_img.get(y, x)[1] + norm_input_img.get(y, x)[0]) / 2));
            r_img.put(y, x, temp_chroma);
            //G = max(0,g-(r+b)/2)
            temp_chroma = max(0, (norm_input_img.get(y, x)[1]
                    - (norm_input_img.get(y, x)[2] + norm_input_img.get(y, x)[0]) / 2));
            g_img.put(y, x, temp_chroma);
            //B = max(0,b-(r+g)/2)
            temp_chroma = max(0, (norm_input_img.get(y, x)[0]
                    - (norm_input_img.get(y, x)[2] + norm_input_img.get(y, x)[1]) / 2));
            b_img.put(y, x, temp_chroma);
            //Y = max(0,(r+g)/2-|r-g|/2-b)
            temp_chroma = max(0,
                    ((norm_input_img.get(y, x)[2] + norm_input_img.get(y, x)[1]) / 2
                            - abs(norm_input_img.get(y, x)[2] - norm_input_img.get(y, x)[1]) / 2
                            - norm_input_img.get(y, x)[0]));
            y_img.put(y, x, temp_chroma);
        }
    }
    //create Gaussian pyramid for 4 color channels
    Mat[] b_gaussian_pyramid = new Mat[9];
    b_gaussian_pyramid[0] = b_img.clone();
    for (i = 0; i < 8; i++) {
        b_gaussian_pyramid[i + 1] = b_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(b_gaussian_pyramid[i + 1], b_gaussian_pyramid[i + 1], new Size());
    }
    Mat[] g_gaussian_pyramid = new Mat[9];
    g_gaussian_pyramid[0] = g_img.clone();
    for (i = 0; i < 8; i++) {
        g_gaussian_pyramid[i + 1] = g_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(g_gaussian_pyramid[i + 1], g_gaussian_pyramid[i + 1], new Size());
    }
    Mat[] r_gaussian_pyramid = new Mat[9];
    r_gaussian_pyramid[0] = r_img.clone();
    for (i = 0; i < 8; i++) {
        r_gaussian_pyramid[i + 1] = r_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(r_gaussian_pyramid[i + 1], r_gaussian_pyramid[i + 1], new Size());
    }
    Mat[] y_gaussian_pyramid = new Mat[9];
    y_gaussian_pyramid[0] = y_img.clone();
    for (i = 0; i < 8; i++) {
        y_gaussian_pyramid[i + 1] = y_gaussian_pyramid[i].clone();
        Imgproc.pyrDown(y_gaussian_pyramid[i + 1], y_gaussian_pyramid[i + 1], new Size());
    }
    //create color feature map using center-surround differences
    //RG(c,s) = |(R(c)-G(c))(-)(G(c)-R(c))|
    Mat[][] rg_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat r_minus_g = r_gaussian_pyramid[c + 2].clone();
            Core.subtract(r_gaussian_pyramid[c + 2], g_gaussian_pyramid[c + 2], r_minus_g);
            Mat g_minus_r = g_gaussian_pyramid[s + c + 5].clone();
            Core.subtract(g_gaussian_pyramid[s + c + 5], r_gaussian_pyramid[s + c + 5], g_minus_r);
            rg_feature_map[c][s] = center_surround.main(r_minus_g, g_minus_r, 1);
        }
    }
    //BY(c,s) = |(B(c)-Y(c))(-)(Y(c)-B(c))|
    Mat[][] by_feature_map = new Mat[3][2];
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat b_minus_g = b_gaussian_pyramid[c + 2].clone();
            Core.subtract(b_gaussian_pyramid[c + 2], y_gaussian_pyramid[c + 2], b_minus_g);
            Mat y_minus_b = y_gaussian_pyramid[s + c + 5].clone();
            Core.subtract(y_gaussian_pyramid[s + c + 5], b_gaussian_pyramid[s + c + 5], y_minus_b);
            by_feature_map[c][s] = center_surround.main(b_minus_g, y_minus_b, 1);
        }
    }
    //benchmark
    //create oriented Gabor pyramid from intensity image
    int kernel_size = 10; //reference uses 31; adjust as needed
    double sigma = 3; //default sigma = 0.56 * lambda; the larger sigma, the larger the support of the Gabor function and the more visible parallel excitatory and inhibitory stripe zones
    double[] theta = new double[4];
    theta[0] = 0;
    theta[1] = Math.PI / 4;
    theta[2] = Math.PI / 2;
    theta[3] = Math.PI * 3 / 4;
    double lambda = 5; //reference uses 36; minimum 3
    double gamma = 0.5; //reference uses 0.02
    // double psi = 0;
    Mat[][] gabor_pyramid = new Mat[4][9];
    int theta_index;
    for (theta_index = 0; theta_index < 4; theta_index++) {
        Mat gabor_kernel = Imgproc.getGaborKernel(new Size(kernel_size, kernel_size), sigma, theta[theta_index],
                lambda, gamma);
        //gabor_pyramid[theta_index][0] = intensity_img.clone();
        for (i = 0; i < 9; i++) {
            //gabor_pyramid[theta_index][i] = gabor_pyramid[theta_index][i].clone();
            gabor_pyramid[theta_index][i] = i_gaussian_pyramid[i].clone();
            Imgproc.filter2D(i_gaussian_pyramid[i], gabor_pyramid[theta_index][i], -1, gabor_kernel);
            //Imgproc.resize(gabor_pyramid[theta_index][i], gabor_pyramid[theta_index][i], new Size(), 0.5, 0.5, Imgproc.INTER_AREA);
        }
    }
    //imwrite("gabor_pyramid_01.bmp", gabor_pyramid[0][1]);
    //imwrite("gabor_pyramid_11.bmp", gabor_pyramid[1][1]);
    //imwrite("gabor_pyramid_21.bmp", gabor_pyramid[2][1]);
    //imwrite("gabor_pyramid_31.bmp", gabor_pyramid[3][1]);
    //imwrite("gabor_pyramid_03.bmp", gabor_pyramid[0][3]);
    //get orientation feature map using center-surround differences
    Mat[][][] orientation_feature_map = new Mat[4][3][2];
    for (theta_index = 0; theta_index < 4; theta_index++) {
        for (c = 0; c < 3; c++) {
            for (s = 0; s < 2; s++) {
                orientation_feature_map[theta_index][c][s] = center_surround
                        .main(gabor_pyramid[theta_index][c + 2], gabor_pyramid[theta_index][s + c + 5], 0);
            }
        }
    }
    //benchmark
    //imwrite("orientation_test_00.bmp", orientation_feature_map[0][0][0]);
    ///////////////////////step 2 : the saliency map///////////////////////////////
    //get intensity conspicuity map
    Mat intensity_conspicuity_map = Mat.zeros(intensity_feature_map[2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(intensity_feature_map[c][s]);
            Mat resized_feature_map = Mat.zeros(intensity_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, intensity_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(intensity_conspicuity_map, 1, resized_feature_map, 1.0 / 6, 0,
                    intensity_conspicuity_map);
            /*if(c == 0 && s == 0){
            imwrite("in.bmp", intensity_feature_map[c][s]);
            imwrite("map_norm.bmp",norm_out);
            imwrite("resized_feature_map.bmp", resized_feature_map);
            }*/
        }
    }
    //benchmark
    //Core.normalize(intensity_conspicuity_map, intensity_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    //imwrite("intensity_conspicuity_map.bmp", intensity_conspicuity_map);
    //get color conspicuity map
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Core.normalize(rg_feature_map[c][s], rg_feature_map[c][s], 0, 255, Core.NORM_MINMAX);
            rg_feature_map[c][s].convertTo(rg_feature_map[c][s], CV_16UC1);
            Core.normalize(by_feature_map[c][s], by_feature_map[c][s], 0, 255, Core.NORM_MINMAX);
            by_feature_map[c][s].convertTo(by_feature_map[c][s], CV_16UC1);
        }
    }
    //imwrite("test_rg.bmp",rg_feature_map[0][0]);      
    Mat color_conspicuity_map = Mat.zeros(rg_feature_map[2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(rg_feature_map[c][s]);
            Mat resized_feature_map = Mat.zeros(rg_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, rg_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(color_conspicuity_map, 1, resized_feature_map, 1.0 / 12, 0, color_conspicuity_map);
            norm_out = map_norm.main(by_feature_map[c][s]);
            resized_feature_map = Mat.zeros(by_feature_map[2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, by_feature_map[2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(color_conspicuity_map, 1, resized_feature_map, 1.0 / 12, 0, color_conspicuity_map);
        }
    }
    //benchmark
    //get orientation conspicuity map
    Mat orientation_conspicuity_map_0 = Mat.zeros(orientation_feature_map[0][2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(orientation_feature_map[0][c][s]);
            Mat resized_feature_map = Mat.zeros(orientation_feature_map[0][2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, orientation_feature_map[0][2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(orientation_conspicuity_map_0, 1, resized_feature_map, 1.0 / 6, 0,
                    orientation_conspicuity_map_0);
        }
    }

    Mat orientation_conspicuity_map_1 = Mat.zeros(orientation_feature_map[1][2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(orientation_feature_map[1][c][s]);
            Mat resized_feature_map = Mat.zeros(orientation_feature_map[1][2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, orientation_feature_map[1][2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(orientation_conspicuity_map_1, 1, resized_feature_map, 1.0 / 6, 0,
                    orientation_conspicuity_map_1);
        }
    }
    Mat orientation_conspicuity_map_2 = Mat.zeros(orientation_feature_map[2][2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(orientation_feature_map[2][c][s]);
            Mat resized_feature_map = Mat.zeros(orientation_feature_map[2][2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, orientation_feature_map[2][2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(orientation_conspicuity_map_2, 1, resized_feature_map, 1.0 / 6, 0,
                    orientation_conspicuity_map_2);
        }
    }
    Mat orientation_conspicuity_map_3 = Mat.zeros(orientation_feature_map[3][2][0].size(), CV_16UC1);
    for (c = 0; c < 3; c++) {
        for (s = 0; s < 2; s++) {
            Mat norm_out = map_norm.main(orientation_feature_map[3][c][s]);
            Mat resized_feature_map = Mat.zeros(orientation_feature_map[3][2][0].size(), CV_16UC1);
            Imgproc.resize(norm_out, resized_feature_map, orientation_feature_map[3][2][0].size(), 0, 0,
                    Imgproc.INTER_LINEAR);
            Core.addWeighted(orientation_conspicuity_map_3, 1, resized_feature_map, 1.0 / 6, 0,
                    orientation_conspicuity_map_3);
        }
    }
    Mat orientation_conspicuity_map = Mat.zeros(orientation_feature_map[0][2][0].size(), CV_16UC1);
    Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(orientation_conspicuity_map_0), 1.0 / 4, 0,
            orientation_conspicuity_map);
    Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(orientation_conspicuity_map_1), 1.0 / 4, 0,
            orientation_conspicuity_map);
    Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(orientation_conspicuity_map_2), 1.0 / 4, 0,
            orientation_conspicuity_map);
    Core.addWeighted(orientation_conspicuity_map, 1, map_norm.main(orientation_conspicuity_map_3), 1.0 / 4, 0,
            orientation_conspicuity_map);
    //benchmark
    Mat saliency = Mat.zeros(intensity_conspicuity_map.size(), CV_16UC1);
    Core.addWeighted(saliency, 1, map_norm.main(intensity_conspicuity_map), 1.0 / 3, 0, saliency);
    Core.addWeighted(saliency, 1, map_norm.main(color_conspicuity_map), 1.0 / 3, 0, saliency);
    Core.addWeighted(saliency, 1, map_norm.main(orientation_conspicuity_map), 1.0 / 3, 0, saliency);
    //benchmark
    Core.normalize(saliency, saliency, 0, 255, Core.NORM_MINMAX);
    //for temp test
    Imgproc.resize(saliency, saliency, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("saliency.bmp", saliency);

    Core.normalize(intensity_conspicuity_map, intensity_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(intensity_conspicuity_map, intensity_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("intensity_conspicuity_map.bmp", intensity_conspicuity_map);
    Core.normalize(color_conspicuity_map, color_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(color_conspicuity_map, color_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("color_conspicuity_map.bmp", color_conspicuity_map);
    Core.normalize(orientation_conspicuity_map, orientation_conspicuity_map, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(orientation_conspicuity_map, orientation_conspicuity_map, new Size(720, 480), 0, 0,
            Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map.bmp", orientation_conspicuity_map);
    Imgproc.resize(input_img, input_img, new Size(720, 480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("input_img.bmp", input_img);
    //for testing algorithm
    /*
    Mat temp1 = Mat.zeros(intensity_conspicuity_map.size(), CV_16UC1);
    temp1 = map_norm.main(intensity_conspicuity_map);
    Core.normalize(temp1, temp1, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp1, temp1, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("intensity.bmp", temp1);
    temp1 = map_norm.main(color_conspicuity_map);
    Core.normalize(temp1, temp1, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp1, temp1, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("color.bmp", temp1);
    temp1 = map_norm.main(orientation_conspicuity_map);
    Core.normalize(temp1, temp1, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp1, temp1, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation.bmp", temp1);
            
    Mat temp2 = Mat.zeros(orientation_conspicuity_map_0.size(), CV_16UC1);
    temp2 = map_norm.main(orientation_conspicuity_map_0);
    Core.normalize(temp2, temp2, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp2, temp2, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map_0.bmp", temp2);
    temp2 = map_norm.main(orientation_conspicuity_map_1);
    Core.normalize(temp2, temp2, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp2, temp2, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map_1.bmp", temp2);
    temp2 = map_norm.main(orientation_conspicuity_map_2);
    Core.normalize(temp2, temp2, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp2, temp2, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map_2.bmp", temp2);
    temp2 = map_norm.main(orientation_conspicuity_map_3);
    Core.normalize(temp2, temp2, 0, 255, Core.NORM_MINMAX);
    Imgproc.resize(temp2, temp2, new Size(720,480), 0, 0, Imgproc.INTER_LINEAR);
    imwrite("orientation_conspicuity_map_3.bmp", temp2);
    */
}
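
The per-pixel Mat.get/Mat.put loops above are convenient but slow from Java, since every call crosses JNI. A hedged sketch of the bulk overloads (get(int, int, byte[]) and put(int, int, short[])), applied to the intensity step under the same CV_8UC3 input and CV_16UC1 output assumptions:

// Copy the whole image out once instead of calling input_img.get(y, x) per pixel.
byte[] bgr = new byte[(int) (input_img.total() * input_img.channels())];
input_img.get(0, 0, bgr);
short[] intensity = new short[(int) input_img.total()];
for (int p = 0; p < intensity.length; p++) {
    int b = bgr[3 * p] & 0xFF;     // Java bytes are signed; mask to 0..255
    int g = bgr[3 * p + 1] & 0xFF;
    int r = bgr[3 * p + 2] & 0xFF;
    intensity[p] = (short) ((r + g + b) / 3);
}
intensity_img.put(0, 0, intensity); // a CV_16UC1 Mat accepts a short[] buffer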

From source file:samples.FtcTestOpenCv.java

License:Open Source License

/**
 * This method combines an overlay image with the given background image at the specified location.
 * It expects both the background and the overlay to be color images. It also expects the overlay
 * image to contain an alpha channel for opacity information.
 *
 * @param background specifies the background image.
 * @param overlay specifies the overlay image.
 * @param locX specifies the X location on the background image where the upper left corner of the overlay
 *        image should be at.
 * @param locY specifies the Y location on the background image where the upper left corner of the overlay
 *        image should be at.
 */
private void combineImage(Mat background, Mat overlay, int locX, int locY) {
    //
    // Make sure the background image has at least 3 channels and the overlay image has
    // at least 4 channels.
    //
    if (background.channels() >= 3 && overlay.channels() >= 4) {
        //
        // For each row of the overlay image.
        //
        for (int row = 0; row < overlay.rows(); row++) {
            //
            // Calculate the corresponding row number of the background image.
            // Skip the row if it is outside of the background image.
            //
            int destRow = locY + row;
            if (destRow < 0 || destRow >= background.rows())
                continue;
            //
            // For each column of the overlay image.
            //
            for (int col = 0; col < overlay.cols(); col++) {
                //
                // Calculate the corresponding column number of background image.
                // Skip the column if it is outside of the background image.
                //
                int destCol = locX + col;
                if (destCol < 0 || destCol >= background.cols())
                    continue;
                //
                // Get the source pixel from the overlay image and the destination pixel from the
                // background image. Calculate the opacity as a percentage.
                //
                double[] srcPixel = overlay.get(row, col);
                double[] destPixel = background.get(destRow, destCol);
                double opacity = srcPixel[3] / 255.0;
                //
                // Merge the source pixel to the destination pixel with the proper opacity.
                // Each color pixel consists of 3 channels: BGR (Blue, Green, Red).
                // The fourth channel is opacity and is only applicable for the overlay image.
                //
                for (int channel = 0; channel < 3; channel++) {
                    destPixel[channel] = destPixel[channel] * (1.0 - opacity) + srcPixel[channel] * opacity;
                }
                //
                // Put the resulting pixel into the background image.
                //
                background.put(destRow, destCol, destPixel);
            }
        }
    } else {
        throw new RuntimeException(
                "Invalid image format (src=" + overlay.channels() + ",dest=" + background.channels() + ").");
    }
}
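
A hypothetical usage sketch from within the same class, assuming 2.4-style Highgui I/O and placeholder file names; CV_LOAD_IMAGE_UNCHANGED keeps the fourth (alpha) channel that a plain imread would drop:

Mat background = Highgui.imread("camera_frame.jpg", Highgui.CV_LOAD_IMAGE_COLOR); // placeholder 3-channel frame
Mat overlay = Highgui.imread("logo.png", Highgui.CV_LOAD_IMAGE_UNCHANGED);        // placeholder BGRA overlay
combineImage(background, overlay, 10, 10); // blend the overlay with its upper left corner at (10, 10)
Highgui.imwrite("combined.jpg", background);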

From source file:SearchSystem.SearchingAlgorithms.CBIRSearchAlgorithm.java

@Override
public byte extractFeatures(Mat image, String outputFile) {
    int rows = image.rows();
    int cols = image.cols();
    int blockHeigth = rows / numberBlocksHeigth;
    int blockWidth = cols / numberBlocksWidth;
    int sizeOfBlocks = blockHeigth * blockWidth;
    rows = numberBlocksHeigth * blockHeigth;
    cols = numberBlocksWidth * blockWidth;
    Imgproc.resize(image, image, new Size(cols, rows));

    double[] vectors = new double[numberBlocksWidth * numberBlocksHeigth * 6]; // 3 channels, average and variance for each
    int counter = 0;

    for (int i = 0; i < rows; i += blockHeigth)
        for (int j = 0; j < cols; j += blockWidth) {
            for (int ii = i; ii < i + blockHeigth; ++ii)
                for (int jj = j; jj < j + blockWidth; ++jj) {
                    double pixel[] = image.get(ii, jj);
                    vectors[counter] += pixel[0]; // H mean
                    vectors[counter + 1] += pixel[1]; // S mean
                    vectors[counter + 2] += pixel[2]; // V mean
                }
            vectors[counter] /= sizeOfBlocks; // H mean
            vectors[counter + 1] /= sizeOfBlocks; // S mean
            vectors[counter + 2] /= sizeOfBlocks; // V mean

            for (int ii = i; ii < i + blockHeigth; ++ii)
                for (int jj = j; jj < j + blockWidth; ++jj) {
                    double pixel[] = image.get(ii, jj);
                    vectors[counter + 3] += Math.pow(pixel[0] - vectors[counter], 2); // H variation
                    vectors[counter + 4] += Math.pow(pixel[1] - vectors[counter + 1], 2); // S variation
                    vectors[counter + 5] += Math.pow(pixel[2] - vectors[counter + 2], 2); // V variation
                }
            vectors[counter + 3] = Math.sqrt(vectors[counter + 3] / sizeOfBlocks); // H variation
            vectors[counter + 4] = Math.sqrt(vectors[counter + 4] / sizeOfBlocks); // S variation
            vectors[counter + 5] = Math.sqrt(vectors[counter + 5] / sizeOfBlocks); // V variation

            counter += 6;
        }

    writeVectorToFile(vectors, outputFile);
    image.release();
    return 1;
}
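
A hedged usage sketch (the no-arg constructor and file names are assumptions; the image is converted to HSV first to match the H/S/V channel labels in the comments, as the extractFeaturesHUE variants below do internally):

CBIRSearchAlgorithm cbir = new CBIRSearchAlgorithm(); // assumes a no-arg constructor
Mat img = Highgui.imread("query.jpg");                // placeholder path
Imgproc.cvtColor(img, img, Imgproc.COLOR_BGR2HSV);
cbir.extractFeatures(img, "query_features.txt");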

From source file:SearchSystem.SearchingAlgorithms.CBIRSearchAlgorithm.java

public byte extractFeaturesHUE(Mat image, String outputFile) {
    int rows = image.rows();
    int cols = image.cols();
    int blockHeigth = rows / numberBlocksHeigth;
    int blockWidth = cols / numberBlocksWidth;
    int sizeOfBlocks = blockHeigth * blockWidth;
    rows = numberBlocksHeigth * blockHeigth;
    cols = numberBlocksWidth * blockWidth;
    Imgproc.resize(image, image, new Size(cols, rows));
    Imgproc.cvtColor(image, image, Imgproc.COLOR_BGR2HSV);

    double[] vectors = new double[numberBlocksWidth * numberBlocksHeigth * 6]; // 3 channels, average and variance for each
    int counter = 0;

    double Hmean = 0;
    double Smean = 0;
    double Vmean = 0;

    for (int i = 0; i < rows; i += blockHeigth)
        for (int j = 0; j < cols; j += blockWidth) {
            double pixel[] = image.get(i, j);
            Hmean = 0; // reset the per-block accumulators
            Smean = 0;
            Vmean = 0;
            for (int ii = i; ii < i + blockHeigth; ++ii)
                for (int jj = j; jj < j + blockWidth; ++jj) {
                    pixel = image.get(ii, jj);
                    if (vectors[counter] < pixel[0])
                        vectors[counter] = pixel[0]; // H max
                    if (vectors[counter + 1] < pixel[1])
                        vectors[counter + 1] = pixel[1]; // S max
                    if (vectors[counter + 2] < pixel[2])
                        vectors[counter + 2] = pixel[2]; // V max

                    Hmean += pixel[0]; // H mean
                    Smean += pixel[1]; // S mean
                    Vmean += pixel[2]; // V mean
                }
            vectors[counter] *= 2; // OpenCV scales H to H/2 to fit uchar;

            Hmean = Hmean * 2 / sizeOfBlocks; // H mean
            Smean /= sizeOfBlocks; // S mean
            Vmean /= sizeOfBlocks; // V mean

            for (int ii = i; ii < i + blockHeigth; ++ii)
                for (int jj = j; jj < j + blockWidth; ++jj) {
                    pixel = image.get(ii, jj);
                    vectors[counter + 3] += Math.pow(pixel[0] * 2 - Hmean, 2); // H variation
                    vectors[counter + 4] += Math.pow(pixel[1] - Smean, 2); // S variation
                    vectors[counter + 5] += Math.pow(pixel[2] - Vmean, 2); // V variation
                }
            vectors[counter + 3] = Math.sqrt(vectors[counter + 3] / sizeOfBlocks); // H variation
            vectors[counter + 4] = Math.sqrt(vectors[counter + 4] / sizeOfBlocks); // S variation
            vectors[counter + 5] = Math.sqrt(vectors[counter + 5] / sizeOfBlocks); // V variation

            counter += 6;
        }

    writeVectorToFile(vectors, outputFile);
    image.release();
    return 1;
}

From source file:SearchSystem.SearchingAlgorithms.CBIRSearchAlgorithm.java

public double[] extractFeaturesHUE(Mat image) {
    int rows = image.rows();
    int cols = image.cols();
    int blockHeigth = rows / numberBlocksHeigth;
    int blockWidth = cols / numberBlocksWidth;
    int sizeOfBlocks = blockHeigth * blockWidth;
    rows = numberBlocksHeigth * blockHeigth;
    cols = numberBlocksWidth * blockWidth;
    Imgproc.resize(image, image, new Size(cols, rows));
    Imgproc.cvtColor(image, image, Imgproc.COLOR_BGR2HSV);

    double[] vectors = new double[numberBlocksWidth * numberBlocksHeigth * 6]; // 3 channels, average and variance for each
    int counter = 0;

    double Hmean = 0;
    double Smean = 0;
    double Vmean = 0;

    for (int i = 0; i < rows; i += blockHeigth)
        for (int j = 0; j < cols; j += blockWidth) {
            double pixel[] = image.get(i, j);
            Hmean = 0; // reset the per-block accumulators
            Smean = 0;
            Vmean = 0;
            for (int ii = i; ii < i + blockHeigth; ++ii)
                for (int jj = j; jj < j + blockWidth; ++jj) {
                    pixel = image.get(ii, jj);
                    if (vectors[counter] < pixel[0])
                        vectors[counter] = pixel[0]; // H max
                    if (vectors[counter + 1] < pixel[1])
                        vectors[counter + 1] = pixel[1]; // S max
                    if (vectors[counter + 2] < pixel[2])
                        vectors[counter + 2] = pixel[2]; // V max

                    Hmean += pixel[0]; // H mean
                    Smean += pixel[1]; // S mean
                    Vmean += pixel[2]; // V mean
                }
            vectors[counter] *= 2; // OpenCV scales H to H/2 to fit uchar;

            Hmean = Hmean * 2 / sizeOfBlocks; // H mean
            Smean /= sizeOfBlocks; // S mean
            Vmean /= sizeOfBlocks; // V mean

            for (int ii = i; ii < i + blockHeigth; ++ii)
                for (int jj = j; jj < j + blockWidth; ++jj) {
                    pixel = image.get(ii, jj);
                    vectors[counter + 3] += Math.pow(pixel[0] * 2 - Hmean, 2); // H variation
                    vectors[counter + 4] += Math.pow(pixel[1] - Smean, 2); // S variation
                    vectors[counter + 5] += Math.pow(pixel[2] - Vmean, 2); // V variation
                }
            vectors[counter + 3] = Math.sqrt(vectors[counter + 3] / sizeOfBlocks); // H variation
            vectors[counter + 4] = Math.sqrt(vectors[counter + 4] / sizeOfBlocks); // S variation
            vectors[counter + 5] = Math.sqrt(vectors[counter + 5] / sizeOfBlocks); // V variation

            counter += 6;
        }

    image.release();
    return vectors;
}
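
A hypothetical retrieval step built on the vector returned above, ranking a candidate image by Euclidean distance to the query (the no-arg constructor and file names are assumptions):

CBIRSearchAlgorithm cbir = new CBIRSearchAlgorithm(); // assumes a no-arg constructor
double[] queryVec = cbir.extractFeaturesHUE(Highgui.imread("query.jpg"));     // placeholder path
double[] candVec = cbir.extractFeaturesHUE(Highgui.imread("candidate.jpg")); // placeholder path
double dist = 0;
for (int k = 0; k < queryVec.length; k++) {
    double d = queryVec[k] - candVec[k];
    dist += d * d;
}
dist = Math.sqrt(dist); // smaller distance = more similar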