Example usage for org.opencv.core Mat rows

List of usage examples for org.opencv.core Mat rows

Introduction

On this page you can find example usages of org.opencv.core.Mat.rows.

Prototype

public int rows() 
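
As a minimal, self-contained sketch (not taken from the sources below; the 4x6 size is arbitrary), rows() reports the row count (height) of a Mat and is typically paired with cols() to allocate a destination of the same shape:

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;

public class MatRowsSketch {
    public static void main(String[] args) {
        // load the OpenCV native library before creating any Mat
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat src = Mat.eye(4, 6, CvType.CV_8UC1);
        // rows() is the row count (height); cols() is the column count (width)
        Mat dst = new Mat(src.rows(), src.cols(), src.type());
        System.out.println(src.rows() + "x" + src.cols()); // prints 4x6
    }
}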

Usage

From source file:pfe.Segmentation.java

@Override
public void dilate_erose_ocr() {
    // for the OCR step
    Mat rgbImage = Imgcodecs.imread(ocrReadFrom);
    Mat destination2 = new Mat(rgbImage.rows(), rgbImage.cols(), rgbImage.type());
    // the goal is to correct errors introduced by the black-and-white conversion
    int dilation_size1 = 2;

    // structuring element for the dilation; an elliptical kernel (Imgproc.MORPH_ELLIPSE) is used here
    Mat element11 = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE,
            new Size(dilation_size1 + 1, dilation_size1 + 1));
    // dilate the image
    Imgproc.dilate(rgbImage, destination2, element11);
    Imgcodecs.imwrite(ocrReadFrom, destination2);

}

From source file:pfe.Segmentation.java

/**
 * Dilation and erosion let us repair triangular shapes so they are easier
 * to detect; they also fill in the text so that it is easily detectable
 * by OCR engines.
 */
@Override
public void dilate_erose(JLabel jLabelErosion, JLabel jLabelDilated) {

    // read the image to process, for segmentation
    Mat source = Imgcodecs.imread(blackAndWhite, Imgcodecs.CV_LOAD_IMAGE_COLOR);
    // prepare the output image
    Mat destination = new Mat(source.rows(), source.cols(), source.type());

    // erosion size kept at 1: small, just enough to clean up the contours
    int erosion_size = 1;
    // dilation size below average, to avoid destroying details and shapes;
    // the goal is to correct errors introduced by the black-and-white conversion
    int dilation_size = 3;

    // structuring element for the dilation; a rectangular kernel (Imgproc.MORPH_RECT) is used here
    Mat element1 = Imgproc.getStructuringElement(Imgproc.MORPH_RECT,
            new Size(dilation_size + 1, dilation_size + 1));
    // dilate the image
    Imgproc.dilate(source, destination, element1);
    // save the result
    Imgcodecs.imwrite(dilation, destination);

    // now erode the dilated image (to repair the finish)
    source = Imgcodecs.imread(dilation, Imgcodecs.CV_LOAD_IMAGE_COLOR);

    // repair with an elliptical kernel to fix the corners of the writing
    Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE,
            new Size(erosion_size + 1, erosion_size + 1));
    Imgproc.erode(source, destination, element);
    Imgcodecs.imwrite(erosion, destination);

    jLabelErosion.setIcon(new ImageIcon(erosion));
    jLabelDilated.setIcon(new ImageIcon(dilation));
}

From source file:pfe.Segmentation.java

@Override
public void toB_A_W(JLabel jLabel) {
    Mat rgbImage = Imgcodecs.imread(original);
    Mat destination = new Mat(rgbImage.rows(), rgbImage.cols(), rgbImage.type());
    // the goal is to correct errors introduced by the black-and-white conversion
    int dilation_size = 2;

    // structuring element for the dilation: a rectangular kernel (Imgproc.MORPH_RECT); erosion follows
    Mat element1 = Imgproc.getStructuringElement(Imgproc.MORPH_RECT,
            new Size(dilation_size + 1, dilation_size + 1));
    // dilate the image, then erode
    Imgproc.dilate(rgbImage, destination, element1);

    // prepare a new matrix for the black-and-white image
    Mat labImage = new Mat();
    // convert the colors to grayscale (COLOR_BGR2GRAY) and store the result in labImage
    cvtColor(destination, labImage, Imgproc.COLOR_BGR2GRAY);
    Imgcodecs.imwrite(ocrReadFrom, labImage);
    jLabel.setIcon(new ImageIcon(ocrReadFrom));
    // JOptionPane.showConfirmDialog(null, "");
}

From source file:pipeline.TextRegion.java

public static String SplitFiles(File fileIn) {
    String result = "";
    try {
        String nomeFile = fileIn.getName();
        //System.out.println("the file name is " + nomeFile);
        FileInputStream in = new FileInputStream("src/pipeline/receivedImg/" + nomeFile);
        JPEGImageDecoder decoder = JPEGCodec.createJPEGDecoder(in);
        BufferedImage image = decoder.decodeAsBufferedImage();
        in.close();

        TextRecognition myget = new TextRecognition(image);
        LinkedList boxes = myget.getTextBoxes();

        String nomeFileOut = "src/pipeline/outputImg/" + Global.getJPGNameFile() + " out.jpg";
        FileOutputStream out = new FileOutputStream(nomeFileOut);
        JPEGImageEncoder encoder = JPEGCodec.createJPEGEncoder(out);
        encoder.encode(myget.isolateText(boxes));
        out.close();

        // OpenCV part

        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        File f = new File("src/pipeline/receivedImg/" + nomeFile);
        BufferedImage imageFile = ImageIO.read(f);

        byte[] data = ((DataBufferByte) imageFile.getRaster().getDataBuffer()).getData();
        Mat mat = new Mat(imageFile.getHeight(), imageFile.getWidth(), CvType.CV_8UC3);
        mat.put(0, 0, data);
        int tolleranza = 15;

        for (int i = 0; i < boxes.size(); i++) {
            TextRegion app = (TextRegion) boxes.get(i);
            //             System.out.println("ROW: " + i + "  -> " + app.x1 + " " + app.x2 + " " + app.y1 + " " + app.y2 + " ");
            Rect roi1 = new Rect(app.x1 - tolleranza, app.y1 - tolleranza, app.x2 - app.x1 + tolleranza,
                    app.y2 - app.y1 + 2 * tolleranza);
            Mat mat1 = new Mat(mat, roi1);

            byte[] data1 = new byte[mat1.rows() * mat1.cols() * (int) (mat1.elemSize())];
            mat1.get(0, 0, data1);
            BufferedImage image1 = new BufferedImage(mat1.cols(), mat1.rows(), BufferedImage.TYPE_3BYTE_BGR);
            image1.getRaster().setDataElements(0, 0, mat1.cols(), mat1.rows(), data1);

            String nomeFileUscrita = "src/pipeline/outputImg/" + i + Global.getJPGNameFile() + " uscita.jpg";
            File tmp = new File(nomeFileUscrita);
            File output = new File(nomeFileUscrita);
            ImageIO.write(image1, "jpg", output);
            result += (i + 1) + ")" + OCR_Processing.performOCR_String2Text(output);
            tmp.delete();

        }
        f.delete();
        File foo = new File(nomeFileOut);
        foo.delete();

    } catch (Exception e) {
        System.out.println("Exception: " + e);
    }

    return result;

}

From source file:processdata.ExperimentalDataProcessingUI.java

public static BufferedImage Mat2BufferedImage(Mat m) {
    // source: http://answers.opencv.org/question/10344/opencv-java-load-image-to-gui/
    // Fastest code
    // The output can be assigned either to a BufferedImage or to an Image

    int type = BufferedImage.TYPE_BYTE_GRAY;
    if (m.channels() > 1) {
        type = BufferedImage.TYPE_3BYTE_BGR;
    }
    int bufferSize = m.channels() * m.cols() * m.rows();
    byte[] b = new byte[bufferSize];
    m.get(0, 0, b); // get all the pixels
    BufferedImage image = new BufferedImage(m.cols(), m.rows(), type);
    final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(b, 0, targetPixels, 0, b.length);
    return image;

}
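
As a brief usage sketch (not from the original source; the frame path and the JLabel are hypothetical, and Highgui is the OpenCV 2.x I/O class this file already uses elsewhere):

    Mat m = Highgui.imread("frame.jpg", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
    BufferedImage img = Mat2BufferedImage(m);
    // show the converted frame on a Swing label
    frameLabel.setIcon(new ImageIcon(img));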

From source file:processdata.ExperimentalDataProcessingUI.java

private void jButtonProcessImageActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButtonProcessImageActionPerformed
    try {
        // TODO add your handling code here:
        //load library
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // TODO add your handling code here:
        folderName = textField2.getText();
        int currentFrameIndex = Integer.parseInt(initialFrameIndexBox.getText()) - 1;
        datasetIndex = Integer.parseInt(textField1.getText());
        String videoImageFileName = "./videoFrames//" + folderName + "//" + "frame_outVideo_"
                + currentFrameIndex + ".jpg";

        String depthFrameFileName = initialImagePath + datasetIndex + "//" + folderName + "//" + "depthData//"
                + "outDepthByte_" + currentFrameIndex;

        rgbFrame = Highgui.imread(videoImageFileName, Highgui.CV_LOAD_IMAGE_GRAYSCALE);

        depthFrame = depthDataProcessingUtilities.processDepthDataFile(depthFrameFileName, jSlider2.getValue(),
                jSlider1.getValue());

        Mat[] backgroundFrames = readBackground();
        rgbBackgroundFrame = backgroundFrames[0];
        depthBackgroundFrame = backgroundFrames[1];

        //subtract depth background
        Mat depthFrameBackgroundSubtracted = new Mat();
        Core.subtract(depthBackgroundFrame, depthFrame, depthFrameBackgroundSubtracted);
        Imgproc.threshold(depthFrameBackgroundSubtracted, depthFrameBackgroundSubtracted, 0, 255,
                Imgproc.THRESH_BINARY);
        displayImage(Mat2BufferedImage(
                videoProcessingUtilities.resizeImage(depthFrameBackgroundSubtracted, new Size(448, 234))),
                depthBckgSubtractedFrames);

        //remove the red-colored elements from depth image and leave only blue ones
        Mat depthImageCleaned = new Mat();
        Core.inRange(depthFrameBackgroundSubtracted, new Scalar(253, 0, 0), new Scalar(255, 0, 0),
                depthImageCleaned);

        //apply morphologic opening to remove noise
        Imgproc.morphologyEx(depthImageCleaned, depthImageCleaned, Imgproc.MORPH_OPEN,
                Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3)));
        displayImage(
                Mat2BufferedImage(videoProcessingUtilities.resizeImage(depthImageCleaned, new Size(448, 234))),
                depthCleanedFramesPanel);

        //apply the homographic transform to cleaned depth image
        Mat hDepthImageCleaned = videoProcessingUtilities.performHomographyTransformation(depthImageCleaned,
                new Size(1920, 1080));

        //extract all contours
        //sort all extracted contours and choose top 2
        //overlay top 2 contours on the image and fill them with white color
        //mask the rgb frame
        // do all necessary rotation operations
        //offer user to save the image

        //extract all suitable contours between MIN and MAX areas:
        MatOfPoint[] contours = videoProcessingUtilities.extractLargestContours(hDepthImageCleaned, 100000,
                160000);
        System.out.println("Number of contorus extracted " + contours.length);

        //draw contours
        List<MatOfPoint> tempContours = new ArrayList<MatOfPoint>();
        Mat hDepthImageCleanedContours = hDepthImageCleaned.clone();
        for (MatOfPoint cnt : contours) {
            System.out.println("Extracted Contour Area is " + Imgproc.contourArea(cnt));
            tempContours.add(cnt);
        }
        Imgproc.cvtColor(hDepthImageCleanedContours, hDepthImageCleanedContours, Imgproc.COLOR_GRAY2BGR);
        Imgproc.drawContours(hDepthImageCleanedContours, tempContours, -1, new Scalar(0, 0, 255), 5);
        displayImage(
                Mat2BufferedImage(
                        videoProcessingUtilities.resizeImage(hDepthImageCleanedContours, new Size(448, 234))),
                extractedContoursPanel);

        //prepare final mask
        Mat hDepthImageFilledContours = Mat.zeros(hDepthImageCleaned.rows(), hDepthImageCleaned.cols(),
                hDepthImageCleaned.type());
        Imgproc.drawContours(hDepthImageFilledContours, tempContours, -1, new Scalar(255, 255, 255), -1);
        displayImage(
                Mat2BufferedImage(
                        videoProcessingUtilities.resizeImage(hDepthImageFilledContours, new Size(448, 234))),
                maskedContoursPanel);

        //subtract video background
        //            Mat rgbFrameBackgroundSubtracted = new Mat();
        //            Core.subtract(rgbBackgroundFrame,rgbFrame, rgbFrameBackgroundSubtracted, hDepthImageCleaned);
        //            displayImage(Mat2BufferedImage(videoProcessingUtilities.resizeImage(rgbFrameBackgroundSubtracted, new Size(448,234))),videoBckgSubtractedFrames);
        //            
        //mask
        Mat preMaskedRGBFrame = new Mat();
        rgbFrame.copyTo(preMaskedRGBFrame, hDepthImageCleaned);
        displayImage(
                Mat2BufferedImage(videoProcessingUtilities.resizeImage(preMaskedRGBFrame, new Size(448, 234))),
                videoBckgSubtractedFrames);

        //postmask
        Mat betterMaskedRGBFrame = new Mat();
        rgbFrame.copyTo(betterMaskedRGBFrame, hDepthImageFilledContours);
        displayImage(
                Mat2BufferedImage(
                        videoProcessingUtilities.resizeImage(betterMaskedRGBFrame, new Size(448, 234))),
                videoMaskedPanel);

        //clear the ArrayList containing all processed images
        finalImages.clear();
        javax.swing.JLabel[] jLabArray = { extractedShoePanel1, extractedShoePanel2 };
        //segment all images
        int panelIdx = 0;
        for (MatOfPoint contour : tempContours) {
            MatOfPoint2f newMatOfPoint2fContour = new MatOfPoint2f(contour.toArray());
            RotatedRect finalROI = Imgproc.minAreaRect(newMatOfPoint2fContour);
            Mat newMask = videoProcessingUtilities.getContourMasked(hDepthImageFilledContours.clone(), contour);
            Mat imageROIRegistred = new Mat();
            betterMaskedRGBFrame.copyTo(imageROIRegistred, newMask);
            Mat maskedRGBFrameROI = videoProcessingUtilities.rotateExtractedShoeprint(imageROIRegistred,
                    finalROI, new Size(500, 750), 2);
            finalImages.add(maskedRGBFrameROI);
            displayImage(
                    Mat2BufferedImage(
                            videoProcessingUtilities.resizeImage(maskedRGBFrameROI, new Size(203, 250))),
                    jLabArray[panelIdx]);
            panelIdx++;
        }

        //MatOfInt parameters = new MatOfInt();
        //parameters.fromArray(Highgui.CV_IMWRITE_JPEG_QUALITY, 100);
        //Highgui.imwrite(".//backgrounds//"+"test.jpg", depthFrameBackgroundSubtracted, parameters);

    } catch (FileNotFoundException ex) {
        Logger.getLogger(ExperimentalDataProcessingUI.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file:qupath.opencv.classify.OpenCvClassifier.java

License:Open Source License

protected void createAndTrainClassifier() {

    // Create the required Mats
    int nMeasurements = measurements.size();

    Mat matTraining = new Mat(arrayTraining.length / nMeasurements, nMeasurements, CvType.CV_32FC1);
    matTraining.put(0, 0, arrayTraining);
    Mat matResponses = new Mat(arrayResponses.length, 1, CvType.CV_32SC1);
    matResponses.put(0, 0, arrayResponses);

    //      // Clear any existing classifier
    //      if (classifier != null)
    //         classifier.clear();

    logger.info("Training size: " + matTraining.size());
    logger.info("Responses size: " + matResponses.size());

    // Create & train the classifier
    try {
        classifier = createClassifier();
        classifier.train(matTraining, Ml.ROW_SAMPLE, matResponses);
    } catch (CvException e) {
        // For reasons I haven't yet discerned, sometimes OpenCV throws an exception with the following message:
        // OpenCV Error: Assertion failed ((int)_sleft.size() < n && (int)_sright.size() < n) in calcDir, file /tmp/opencv320150620-1681-1u5iwhh/opencv-3.0.0/modules/ml/src/tree.cpp, line 1190
        // With one sample fewer, it can often recover... so attempt that, rather than failing miserably...
        //         logger.error("Classifier training error", e);
        logger.info("Will attempt retraining classifier with one sample fewer...");
        matTraining = matTraining.rowRange(0, matTraining.rows() - 1);
        matResponses = matResponses.rowRange(0, matResponses.rows() - 1);
        classifier = createClassifier();
        classifier.train(matTraining, Ml.ROW_SAMPLE, matResponses);
    }

    matTraining.release();
    matResponses.release();

    logger.info("Classifier trained with " + arrayResponses.length + " samples");
}

From source file:Recognizer.Recognizer.java

public Image TemplateMatching(Image imQuery, Image imDB, int match_method) {
    System.out.println("Running Template Matching ...");

    //Mat img = Highgui.imread(inFile); // Image in which area has to be searched
    //Mat template_img = Highgui.imread(templateFile); // Search Image

    Mat matQuery = imQuery.Image3CtoMat_CV();
    Mat matDB = imDB.Image3CtoMat_CV();

    Mat hsvQ = new Mat(), hsvDB = new Mat();

    Imgproc.cvtColor(matQuery, hsvQ, COLOR_RGB2HSV);
    Imgproc.cvtColor(matDB, hsvDB, COLOR_RGB2HSV);

    // Create result image matrix
    int resultImg_cols = matDB.cols() - matQuery.cols() + 1;
    int resultImg_rows = matDB.rows() - matQuery.rows() + 1;

    Mat matRes = new Mat(resultImg_rows, resultImg_cols, CvType.CV_32FC1);

    // Template Matching with Normalization
    Imgproc.matchTemplate(hsvDB, hsvQ, matRes, match_method);
    Core.normalize(matRes, matRes, 0, 1, Core.NORM_MINMAX, -1, new Mat());

    // Localize the best match with minMaxLoc
    Core.MinMaxLocResult Location_Result = Core.minMaxLoc(matRes);
    Point matchLocation;

    if (match_method == Imgproc.TM_SQDIFF || match_method == Imgproc.TM_SQDIFF_NORMED) {
        matchLocation = Location_Result.minLoc;
    } else {
        matchLocation = Location_Result.maxLoc;
    }

    // Draw a rectangle around the matched area
    Core.rectangle(matDB, matchLocation,
            new Point(matchLocation.x + matQuery.cols(), matchLocation.y + matQuery.rows()),
            new Scalar(0, 255, 0));

    Image imOut = new Image(matDB.width(), matDB.height());
    //Image imOut = new Image(matQuery.cols(), matQuery.rows());

    //Mat m = new Mat(matDB);

    //m =//matDB.submat((int)matchLocation.y, (int)matchLocation.y + matQuery.rows(),(int)matchLocation.x, (int)matchLocation.x + matQuery.cols());

    imOut.Mat_CVtoImage3C(matDB);

    System.out.println("Location: " + Location_Result.minLoc.x + " " + Location_Result.minLoc.y + "   "
            + Location_Result.maxLoc.x + " " + Location_Result.maxLoc.y);

    return imOut;
}

From source file:Recognizer.Recognizer.java

public void SIFT(Image imQ, Image imDB) {
    Mat Q = imQ.Image1CtoMat_CV();
    Mat DB = imDB.Image1CtoMat_CV();

    Mat matQ = new Mat();
    Mat matDB = new Mat();

    Q.convertTo(matQ, CvType.CV_8U);
    DB.convertTo(matDB, CvType.CV_8U);

    FeatureDetector siftDet = FeatureDetector.create(FeatureDetector.SIFT);
    DescriptorExtractor siftExt = DescriptorExtractor.create(DescriptorExtractor.SIFT);

    MatOfKeyPoint kpQ = new MatOfKeyPoint();
    MatOfKeyPoint kpDB = new MatOfKeyPoint();

    siftDet.detect(matQ, kpQ);
    siftDet.detect(matDB, kpDB);

    // descriptor containers; compute() reallocates them to (#keypoints x descriptor length)
    Mat matDescriptorQ = new Mat(matQ.rows(), matQ.cols(), matQ.type());
    Mat matDescriptorDB = new Mat(matDB.rows(), matDB.cols(), matDB.type());

    siftExt.compute(matQ, kpQ, matDescriptorQ);
    siftExt.compute(matDB, kpDB, matDescriptorDB);

    MatOfDMatch matchs = new MatOfDMatch();

    DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);

    matcher.match(matDescriptorQ, matDescriptorDB, matchs);

    int N = 10;

    DMatch[] tmp01 = matchs.toArray();
    DMatch[] tmp02 = new DMatch[N];

    for (int i = 0; i < tmp02.length; i++) {
        tmp02[i] = tmp01[i];
    }

    matchs.fromArray(tmp02);

    Mat matchedImage = new Mat(matQ.rows(), matQ.cols() * 2, matQ.type());
    Features2d.drawMatches(matQ, kpQ, matDB, kpDB, matchs, matchedImage);

    Highgui.imwrite("./descriptedImageBySIFT.jpg", matchedImage);

}

From source file:Reconhecimento.Regua.java

public static void segmentarRegua() {

    long tempoInicio = System.currentTimeMillis();

    // coordinates of the selection rectangle
    int x0 = TelaSegmentarRegua.localizarReguaPanel1.x0;
    int y0 = TelaSegmentarRegua.localizarReguaPanel1.y0;
    int x = TelaSegmentarRegua.localizarReguaPanel1.xf;
    int y = TelaSegmentarRegua.localizarReguaPanel1.yf;

    if (x0 > x) {
        int aux = x0;
        x0 = x;
        x = aux;
    }

    if (y0 > y) {
        int aux = y0;
        y0 = y;
        y = aux;
    }

    Mat bigImage = Highgui.imread(TelaSegmentarRegua.localizarReguaPanel1.imagem);
    // crop the image according to the selection
    Mat img = new Mat(bigImage, new Rect(x0, y0, x - x0, y - y0));

    Mat grayImg = new Mat();
    // convert the image to grayscale
    Imgproc.cvtColor(img, grayImg, Imgproc.COLOR_BGR2GRAY);
    // thresholding (THRESH_BINARY_INV followed by bitwise_not amounts to a plain binary threshold)
    Imgproc.threshold(grayImg, grayImg, 190, 255, THRESH_BINARY_INV);
    Core.bitwise_not(grayImg, grayImg);

    List<Point> pontos = new ArrayList<Point>();

    // add all points belonging to the ruler to a list
    for (int i = 0; i < grayImg.rows(); i++) {
        for (int j = 0; j < grayImg.cols(); j++) {
            if (grayImg.get(i, j)[0] == 255.0) {
                pontos.add(new Point(j, i));
                Core.line(img, new Point(j, i), new Point(j, i), new Scalar(255, 0, 0));
            }
        }
    }

    String filename = "imagens/regua_segmentada" + Math.random() * 1000 + ".jpg";

    Mat img2 = new Mat();
    Imgproc.resize(img, img2, new Size(img.size().width * 3.0, img.size().height * 3.0));
    Highgui.imwrite(filename, img2);

    int xMin = 5000, yMin = 5000;
    int xMax = 0, yMax = 0;

    // extreme points of the ruler
    for (Point ponto : pontos) {
        if (ponto.x > xMax) {
            xMax = (int) ponto.x;
        }
        if (ponto.x < xMin) {
            xMin = (int) ponto.x;
        }
        if (ponto.y > yMax) {
            yMax = (int) ponto.y;
        }
        if (ponto.y < yMin) {
            yMin = (int) ponto.y;
        }
    }

    // ruler in horizontal position
    if (xMax - xMin > yMax - yMin) {
        /*
        the scale of the image used in processing makes it necessary to
        multiply by 2 to keep the measurements in proportion
        */
        larguraPixels = (xMax - xMin) * 2;
    }
    // ruler in vertical position
    else {
        larguraPixels = (yMax - yMin) * 2;
    }

    long tempoFim = System.currentTimeMillis() - tempoInicio;

    // the ruler is assumed to be 30 cm long
    centimetrosPorPixel = 30.0 / larguraPixels;

    TelaSegmentarRegua2 telaResposta = new TelaSegmentarRegua2();
    telaResposta.jLabel1.setIcon(new ImageIcon(filename));
    telaResposta.jLabel4.setText(larguraPixels + " pixels");
    telaResposta.jLabel5.setText(String.valueOf(centimetrosPorPixel).substring(0, 5));
    telaResposta.jLabel7.setText(tempoFim + " ms");
    telaResposta.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE);
    telaResposta.setLocation(200, 200);
    telaResposta.setVisible(true);

}