Example usage for org.opencv.core Mat reshape

Introduction

On this page you can find example usages of the org.opencv.core Mat reshape method, collected from open source projects.

Prototype

public Mat reshape(int cn, int rows) 
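
The first argument cn is the new number of channels (0 keeps the current count) and rows is the new number of rows; the total number of elements must stay the same, and no pixel data is copied, only the Mat header changes. A minimal sketch of the two layouts used throughout the examples below (the matrix size is made up for illustration):

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;

public class ReshapeDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // A hypothetical 4x6 image with 3 channels: 24 pixels, 72 values in total
        Mat bgr = Mat.zeros(4, 6, CvType.CV_8UC3);

        // Flatten to a single row, keeping the channel count (cn = 0)
        Mat row = bgr.reshape(0, 1);         // 1x24, still 3 channels

        // One pixel per row, one column per channel (the layout k-means expects)
        Mat samples = bgr.reshape(1, 4 * 6); // 24x3, 1 channel

        System.out.println(row.size() + " / " + samples.size());
    }
}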

Usage

From source file: ch.zhaw.facerecognitionlibrary.Recognition.Eigenfaces.java

License: Open Source License

public String recognize(Mat img, String expectedLabel) {
    // Flatten the image to a single row vector
    img = img.reshape(1, 1);
    // Subtract mean
    img.convertTo(img, CvType.CV_32F);
    Core.subtract(img, Psi, img);
    // Project to subspace
    Mat projected = getFeatureVector(img);
    // Save all points of image for tSNE
    img.convertTo(img, CvType.CV_8U);
    addImage(projected, expectedLabel, true);
    //addImage(projected, expectedLabel);
    Mat distance = new Mat(Omega.rows(), 1, CvType.CV_64FC1);
    for (int i = 0; i < Omega.rows(); i++) {
        double dist = Core.norm(projected.row(0), Omega.row(i), Core.NORM_L2);
        distance.put(i, 0, dist);
    }
    Mat sortedDist = new Mat(Omega.rows(), 1, CvType.CV_8UC1);
    Core.sortIdx(distance, sortedDist, Core.SORT_EVERY_COLUMN + Core.SORT_ASCENDING);
    // Give back the name of the found person
    int index = (int) (sortedDist.get(0, 0)[0]);
    return labelMap.getKey(labelList.get(index));
}

From source file: ch.zhaw.facerecognitionlibrary.Recognition.Eigenfaces.java

License: Open Source License

public void addImage(Mat img, String label, boolean featuresAlreadyExtracted) {
    // Ignore featuresAlreadyExtracted because with Eigenfaces all the original images are needed to extract the eigenfaces (feature vector)
    int iLabel = 0;
    if (method == TRAINING) {
        // Reshape image to have only 1 row, then add it to Gamma
        Gamma.push_back(img.reshape(1, 1));
        if (labelMap.containsKey(label)) {
            iLabel = labelMap.getValue(label);
        } else {
            iLabel = labelMap.size() + 1;
            labelMap.put(label, iLabel);
        }
        labelList.add(iLabel);
    } else {
        testList.push_back(img);
        if (labelMapTest.containsKey(label)) {
            iLabel = labelMapTest.getValue(label);
        } else {
            iLabel = labelMapTest.size() + 1;
            labelMapTest.put(label, iLabel);
        }
        labelListTest.add(iLabel);
    }

}
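
The push_back call above appends each flattened image as a new row of Gamma, so after training Gamma holds one image per row. A minimal sketch of that accumulation pattern (the class, method, image size, and list are illustrative, not from the source):

import java.util.List;

import org.opencv.core.CvType;
import org.opencv.core.Mat;

public class GammaBuilder {
    /** Stacks each flattened 64x64 grayscale image as one row of the returned matrix. */
    static Mat buildGamma(List<Mat> trainingImages) {
        Mat gamma = new Mat(0, 64 * 64, CvType.CV_8UC1); // empty matrix, rows are added below
        for (Mat img : trainingImages) {
            gamma.push_back(img.reshape(1, 1)); // append the flattened image as a new row
        }
        return gamma;
    }
}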

From source file: ch.zhaw.facerecognitionlibrary.Recognition.KNearestNeighbor.java

License: Open Source License

@Override
public void addImage(Mat img, String label, boolean featuresAlreadyExtracted) {
    // Ignore featuresAlreadyExtracted because KNN gets its features from either TensorFlow or Caffe
    int iLabel = 0;
    if (method == TRAINING) {
        // Reshape image to have only 1 row, then add it to trainingList
        trainingList.push_back(img.reshape(1, 1));
        if (labelMap.containsKey(label)) {
            iLabel = labelMap.getValue(label);
        } else {
            iLabel = labelMap.size() + 1;
            labelMap.put(label, iLabel);
        }
        labelList.add(iLabel);
    } else {
        testList.push_back(img);
        if (labelMapTest.containsKey(label)) {
            iLabel = labelMapTest.getValue(label);
        } else {
            iLabel = labelMapTest.size() + 1;
            labelMapTest.put(label, iLabel);
        }
        labelListTest.add(iLabel);
    }
}

From source file: ch.zhaw.facerecognitionlibrary.Recognition.KNearestNeighbor.java

License: Open Source License

@Override
public Mat getFeatureVector(Mat img) {
    return img.reshape(1, 1);
}

From source file: ch.zhaw.facerecognitionlibrary.Recognition.SupportVectorMachine.java

License: Open Source License

public Mat getFeatureVector(Mat img) {
    return img.reshape(1, 1);
}
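
In both the KNearestNeighbor and SupportVectorMachine recognizers the flattened pixel row itself serves as the feature vector, so getFeatureVector reduces to a bare reshape(1, 1); any real feature extraction happens before this point (for KNN, in TensorFlow or Caffe, as noted above).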

From source file: classes.TextRecognitionPreparer.java

public static Scalar cluster(Scalar userColor, Mat cutout, int k) {

    Mat samples = cutout.reshape(1, cutout.cols() * cutout.rows());
    Mat samples32f = new Mat();
    samples.convertTo(samples32f, CvType.CV_32F, 1.0 / 255.0);

    Mat labels = new Mat();
    TermCriteria criteria = new TermCriteria(TermCriteria.COUNT, 100, 1);
    Mat centers = new Mat();
    Core.kmeans(samples32f, k, labels, criteria, 1, Core.KMEANS_PP_CENTERS, centers);

    Scalar fillingColor = getFillingColor(userColor, cutout, labels, centers);

    return fillingColor;
}
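
Because the cutout is reshaped to one pixel per row with one column per channel, each row of labels assigns a pixel to a cluster and each row of centers holds a cluster colour in the 0..1 range. getFillingColor is not shown in this listing, so the following is only a hedged sketch of the usual way to read the dominant cluster back out (the same counting pattern appears in the CamShiftTracker example further down):

// Count how many pixels landed in each cluster
int[] counts = new int[k];
for (int i = 0; i < labels.rows(); i++) {
    counts[(int) labels.get(i, 0)[0]]++;
}

// Pick the cluster with the most members
int dominant = 0;
for (int i = 1; i < k; i++) {
    if (counts[i] > counts[dominant]) {
        dominant = i;
    }
}

// Scale the centre back to 0..255; columns are in BGR order for a BGR input
double b = centers.get(dominant, 0)[0] * 255.0;
double g = centers.get(dominant, 1)[0] * 255.0;
double r = centers.get(dominant, 2)[0] * 255.0;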

From source file: com.joravasal.keyface.EigenFacesActivity.java

License: Open Source License

@Override
public void onCreate(Bundle savedInstanceState) {
    Log.i("eigenFacesActivity::", "OnCreate");
    super.onCreate(savedInstanceState);

    setContentView(R.layout.eigenfaces);
    setTitle("Eigenfaces");
    Mat aver = ((PCAfaceRecog) KeyFaceActivity.recogAlgorithm).getAverage();
    Mat faces = ((PCAfaceRecog) KeyFaceActivity.recogAlgorithm).getEigenFaces();

    int size = Integer.parseInt(KeyFaceActivity.prefs.getString("savedFaceSize", "200"));
    Mat aux = new Mat();

    aver = aver.reshape(1, size);
    //aver.convertTo(aux, );
    aver = toGrayscale(aver);
    average = Bitmap.createBitmap(size, size, Bitmap.Config.ARGB_8888);
    Imgproc.cvtColor(aver, aux, Imgproc.COLOR_GRAY2RGBA, 4);
    Utils.matToBitmap(aux, average);
    LinearLayout layout = (LinearLayout) findViewById(id.eigenFacesHorizontalLayout);

    TextView avrgImgTV = new TextView(getApplicationContext());
    avrgImgTV.setText("Average image:");
    avrgImgTV.setPadding(5, 10, 10, 20);
    avrgImgTV.setGravity(Gravity.CENTER);

    TextView eigenfacesImgsTV = new TextView(getApplicationContext());
    eigenfacesImgsTV.setText("Eigenfaces:");
    eigenfacesImgsTV.setPadding(5, 10, 10, 20);
    eigenfacesImgsTV.setGravity(Gravity.CENTER);

    ImageView imgV = new ImageView(getApplicationContext());

    imgV.setClickable(false);
    imgV.setVisibility(View.VISIBLE);
    imgV.setPadding(0, 10, 10, 20);
    imgV.setImageBitmap(average);

    layout.addView(avrgImgTV);
    layout.addView(imgV);
    layout.addView(eigenfacesImgsTV);

    LinkedList<ImageView> variables = new LinkedList<ImageView>();
    eigenfacesList = new LinkedList<Bitmap>();
    for (int i = 0; i < faces.rows(); i++) {
        variables.add(new ImageView(getApplicationContext()));
        eigenfacesList.add(Bitmap.createBitmap(size, size, Bitmap.Config.ARGB_8888));

        aux = new Mat();
        aux = faces.row(i).reshape(1, size);
        aux = toGrayscale(aux);
        Mat auxGreyC4 = new Mat();
        Imgproc.cvtColor(aux, auxGreyC4, Imgproc.COLOR_GRAY2RGBA, 4);
        Utils.matToBitmap(auxGreyC4, eigenfacesList.get(i));

        variables.get(i).setClickable(false);
        variables.get(i).setVisibility(View.VISIBLE);
        variables.get(i).setPadding(0, 10, 10, 20);
        variables.get(i).setImageBitmap(eigenfacesList.get(i));
        layout.addView(variables.get(i));
    }

    Button save = (Button) findViewById(id.saveEigenfacesB);
    save.setOnClickListener(this);
}

From source file: com.joravasal.keyface.PCAfaceRecog.java

License: Open Source License

/**
 * Given a Mat object (a data structure from OpenCV) containing a face,
 * tries to find whether the face is recognized among the saved data.
 * The image is resized to match the size the algorithm needs.
 *
 * @return An AlgorithmReturnValue specifying which saved vector matches the given Mat
 * */
public AlgorithmReturnValue recognizeFace(Mat face) {
    if (numImages < 2) {
        return new AlgorithmReturnValue();
    }
    Imgproc.resize(face, face, imageSize); //Size must be equal to the size of the saved faces 

    Mat analyze = new Mat(1, imgLength, CvType.CV_32FC1);
    Mat X = analyze.row(0);
    try {
        face.reshape(1, 1).convertTo(X, CvType.CV_32FC1);
    } catch (CvException e) {
        return new AlgorithmReturnValue();
    }
    Mat res = new Mat();
    Core.PCAProject(analyze, average, eigenfaces, res);
    return findClosest(res);
}
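
Note the reshape(1, 1).convertTo(X, ...) idiom above: X is a header-only view of analyze.row(0), so convertTo writes the converted pixels directly into the preallocated analyze matrix. A self-contained sketch of the same idiom (the class and method names are illustrative):

import org.opencv.core.CvType;
import org.opencv.core.Mat;

public class RowViewDemo {
    /** Returns a 1xN float row holding the flattened contents of src. */
    static Mat flattenToFloatRow(Mat src) {
        // One column per value of src (src is assumed single-channel)
        Mat buffer = new Mat(1, (int) src.total(), CvType.CV_32FC1);
        Mat view = buffer.row(0);                           // shares data with buffer, no copy
        src.reshape(1, 1).convertTo(view, CvType.CV_32FC1); // fills buffer in place
        return buffer;
    }
}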

From source file: com.joravasal.keyface.PCAfaceRecog.java

License: Open Source License

/**
 * Adds the last image (when numerically ordered) to the array of images
 * and recalculates the PCA subspace.
 *
 * PCA won't work properly if newimage is true (see the comments in the code).
 *
 * @return A boolean that specifies whether everything went fine.
 * */
public boolean updateData(boolean newimage) {
    if (newimage) { //There is a bug in this branch when newimage is true,
        //probably in the Mat.create() calls: the later PCA projection comes out wrong.
        //For now this branch is never used; newimage should always be false.
        //The other branch takes more instructions, but until a fix is found it must stay this way.
        numImages++;
        try {
            File directory = new File(imagesDir);
            if (!directory.exists()) {
                throw new IOException("Path to file could not be opened.");
            }
            String lfile = imagesDir + "/Face" + (numImages - 1) + ".png";
            Mat img = Highgui.imread(lfile, 0);
            if (img.empty())
                throw new IOException("Opening image number " + (numImages - 1) + " failed.");
            //we adapt the old matrices to new sizes
            sum.create(numImages, imgLength, CvType.CV_32FC1);
            projectedTraining.create(numImages, numImages, CvType.CV_32FC1);

            //and add the new image to the array of images
            img.reshape(1, 1).convertTo(sum.row(numImages - 1), CvType.CV_32FC1);

        } catch (IOException e) {
            System.err.println(e.getMessage());
            return false;
        }
    } else {
        numImages = KeyFaceActivity.prefs.getInt("savedFaces", numImages);
        sum = new Mat(numImages, imgLength, CvType.CV_32FC1);
        projectedTraining = new Mat(numImages, numImages, CvType.CV_32FC1);

        for (int i = 0; i < numImages; i++) { //opens each image and appends it as a row in the matrix sum
            String lfile = imagesDir + "/Face" + i + ".png";
            try {
                Mat img = Highgui.imread(lfile, 0);
                //Other way of loading image data
                //Mat img = Utils.bitmapToMat(BitmapFactory.decodeFile(lfile));
                if (img.empty())
                    throw new IOException("Opening image number " + i + " failed.");
                //We add the image to the correspondent row in the matrix of images (sum)
                img.reshape(1, 1).convertTo(sum.row(i), CvType.CV_32FC1);
            } catch (IOException e) {
                System.err.println(e.getMessage());
                return false;
            }
        }
    }

    if (numImages > 1) {
        average = new Mat();
        eigenfaces = new Mat();
        Core.PCACompute(sum, average, eigenfaces);
        for (int i = 0; i < numImages; i++) {
            Core.PCAProject(sum.row(i), average, eigenfaces, projectedTraining.row(i));
        }
    }

    return true;
}

From source file: nz.ac.auckland.lablet.vision.CamShiftTracker.java

License: Open Source License

/**
 * Finds the dominant colour in an image, and returns two values in HSV colour space to represent similar colours,
 * e.g. so you can keep all colours similar to the dominant colour.
 *
 * How the algorithm works:
 *
 * 1. Scale the frame down so that the algorithm doesn't take too long.
 * 2. Segment the frame into different colours (number of colours determined by k)
 * 3. Find dominant cluster (largest area) and get its central colour point.
 * 4. Get range (min max) to represent similar colours.
 *
 * @param bgr The input frame, in BGR colour space.
 * @param k The number of segments to use (2 works well).
 * @return The min and max HSV colour values, which represent the colours similar to the dominant colour.
 */
private Pair<Scalar, Scalar> getMinMaxHsv(Mat bgr, int k) {
    //Copy the frame into BGRA (the HSV conversion happens later, on the cluster centre)
    Mat input = new Mat();
    Imgproc.cvtColor(bgr, input, Imgproc.COLOR_BGR2BGRA, 3);

    //Scale image
    Size bgrSize = bgr.size();
    Size newSize = new Size();

    if (bgrSize.width > CamShiftTracker.KMEANS_IMG_SIZE || bgrSize.height > CamShiftTracker.KMEANS_IMG_SIZE) {

        if (bgrSize.width > bgrSize.height) {
            newSize.width = CamShiftTracker.KMEANS_IMG_SIZE;
            newSize.height = CamShiftTracker.KMEANS_IMG_SIZE / bgrSize.width * bgrSize.height;
        } else {
            newSize.width = CamShiftTracker.KMEANS_IMG_SIZE / bgrSize.height * bgrSize.width;
            newSize.height = CamShiftTracker.KMEANS_IMG_SIZE;
        }

        Imgproc.resize(input, input, newSize);
    }

    //Image quantization using k-means, see here for details of k-means algorithm: http://bit.ly/1JIvrlB
    Mat clusterData = new Mat();

    Mat reshaped = input.reshape(1, input.rows() * input.cols());
    reshaped.convertTo(clusterData, CvType.CV_32F, 1.0 / 255.0);
    Mat labels = new Mat();
    Mat centres = new Mat();
    TermCriteria criteria = new TermCriteria(TermCriteria.COUNT, 50, 1);
    Core.kmeans(clusterData, k, labels, criteria, 1, Core.KMEANS_PP_CENTERS, centres);

    //Get num hits for each category
    int[] counts = new int[k];

    for (int i = 0; i < labels.rows(); i++) {
        int label = (int) labels.get(i, 0)[0];
        counts[label] += 1;
    }

    //Get cluster index with maximum number of members
    int maxCluster = 0;
    int index = -1;

    for (int i = 0; i < counts.length; i++) {
        int value = counts[i];

        if (value > maxCluster) {
            maxCluster = value;
            index = i;
        }
    }

    //Get the cluster centre colour (stored in BGR order, scaled back to 0..255)
    int r = (int) (centres.get(index, 2)[0] * 255.0);
    int g = (int) (centres.get(index, 1)[0] * 255.0);
    int b = (int) (centres.get(index, 0)[0] * 255.0);
    int sum = (r + g + b) / 3; // average intensity, used below to separate white and black

    //Get colour range
    Scalar min;
    Scalar max;

    int rg = Math.abs(r - g);
    int gb = Math.abs(g - b);
    int rb = Math.abs(r - b);
    int maxDiff = Math.max(Math.max(rg, gb), rb);

    if (maxDiff < 35 && sum > 120) { //white
        min = new Scalar(0, 0, 0);
        max = new Scalar(180, 40, 255);
    } else if (sum < 50 && maxDiff < 35) { //black
        min = new Scalar(0, 0, 0);
        max = new Scalar(180, 255, 40);
    } else {
        Mat bgrColour = new Mat(1, 1, CvType.CV_8UC3, new Scalar(r, g, b));
        Mat hsvColour = new Mat();

        Imgproc.cvtColor(bgrColour, hsvColour, Imgproc.COLOR_BGR2HSV, 3);
        double[] hsv = hsvColour.get(0, 0);

        int addition = 0;
        int minHue = (int) hsv[0] - colourRange;
        if (minHue < 0) {
            addition = Math.abs(minHue);
        }

        int maxHue = (int) hsv[0] + colourRange;

        min = new Scalar(Math.max(minHue, 0), 60, Math.max(35, hsv[2] - 30));
        max = new Scalar(Math.min(maxHue + addition, 180), 255, 255);
    }

    return new Pair<>(min, max);
}
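
The returned pair of HSV bounds is typically fed to Core.inRange to build a binary mask of the pixels similar to the dominant colour. How CamShiftTracker consumes the pair is not shown in this listing, so the following is only a hedged usage sketch (it assumes a Pair with public first and second fields, as in android.util.Pair, and a hypothetical BGR frame frameBgr):

Mat hsv = new Mat();
Imgproc.cvtColor(frameBgr, hsv, Imgproc.COLOR_BGR2HSV);
Pair<Scalar, Scalar> range = getMinMaxHsv(frameBgr, 2);
Mat mask = new Mat();
Core.inRange(hsv, range.first, range.second, mask); // 255 where the pixel colour is within range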