Example usage for org.apache.commons.csv CSVParser CSVParser

Introduction

On this page you can find example usage for the org.apache.commons.csv CSVParser constructor CSVParser(Reader, CSVFormat).

Prototype

public CSVParser(final Reader reader, final CSVFormat format) throws IOException 

Document

Customized CSV parser using the given CSVFormat.

If you do not read all records from the given reader, you should call #close() on the parser, unless you close the reader.
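
A minimal sketch of the typical pattern, assuming an illustrative file name ("data.csv") and CSVFormat.DEFAULT with a header row; try-with-resources closes both the parser and the reader:

import java.io.IOException;
import java.io.Reader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class CSVParserSketch {
    public static void main(String[] args) throws IOException {
        // "data.csv" is an illustrative path, not taken from the examples below
        try (Reader reader = Files.newBufferedReader(Paths.get("data.csv"), StandardCharsets.UTF_8);
                CSVParser parser = new CSVParser(reader, CSVFormat.DEFAULT.withHeader())) {
            for (CSVRecord record : parser) {
                // columns can be accessed by index or by header name
                System.out.println(record.get(0));
            }
        }
    }
}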

Usage

From source file:com.compomics.cell_coord.parser.impl.CSVFileParser.java

@Override
public Sample parseTrackFile(File trackFile) throws FileParserException {
    // create a new sample object -- watch out to set the relationships!
    Sample sample = new Sample(trackFile.getName());
    // initialize an empty list of tracks
    List<Track> list = new ArrayList<>();
    CSVParser csvFileParser;
    FileReader fileReader;
    CSVFormat csvFileFormat = CSVFormat.DEFAULT.withHeader(FILE_HEADER_MAPPING);
    try {
        // initialize the file reader
        fileReader = new FileReader(trackFile);
        //initialize CSVParser object
        csvFileParser = new CSVParser(fileReader, csvFileFormat);
        // get the csv records
        List<CSVRecord> csvRecords = csvFileParser.getRecords();
        Track currentTrack = null;
        List<TrackSpot> currentTrackPointList = new ArrayList<>();
        Long currentId = 0L;
        // read the CSV file records starting from the second record to skip the header
        for (int i = 1; i < csvRecords.size(); i++) {
            CSVRecord cSVRecord = csvRecords.get(i);
            // get the fields
            Long trackid = Long.parseLong(cSVRecord.get(TRACK_ID));
            if (!Objects.equals(currentId, trackid)) {
                currentTrack = new Track();
                currentTrack.setTrackid(trackid);
                list.add(currentTrack);
                currentId = trackid;
                currentTrackPointList = new ArrayList<>();
            }
            // create new Track Spot object
            Long spotid = Long.parseLong(cSVRecord.get(SPOT_ID));
            double x = Double.parseDouble(cSVRecord.get(X_COORD));
            double y = Double.parseDouble(cSVRecord.get(Y_COORD));
            double time = Double.parseDouble(cSVRecord.get(TIME));
            TrackSpot trackSpot = new TrackSpot(spotid, x, y, time, currentTrack);
            currentTrackPointList.add(trackSpot);
            currentTrack.setTrackSpots(currentTrackPointList);
            currentTrack.setSample(sample);
        }
    } catch (IOException ex) {
        LOG.error(ex.getMessage(), ex);
    } catch (NumberFormatException ex) {
        LOG.error(ex.getMessage(), ex);
        throw new FileParserException(
                "It seems like a line does not contain a number!\nPlease check your files!");
    }
    // set the tracks for the sample
    sample.setTracks(list);
    return sample;
}

From source file:br.edimarmanica.trinity.intrasitemapping.auto.MappingController.java

private void reading() {
    /**
     * Reading the first Run02.NR_SHARED_PAGES elements of each offset
     */
    File dir = new File(Paths.PATH_TRINITY + site.getPath() + "/offset");

    for (int nrOffset = 0; nrOffset < dir.listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return name.endsWith(".csv");
        }
    }).length; nrOffset++) {
        List<List<String>> offset = new ArrayList<>(); // each file is an offset

        try (Reader in = new FileReader(dir.getAbsoluteFile() + "/result_" + nrOffset + ".csv")) {
            try (CSVParser parser = new CSVParser(in, CSVFormat.EXCEL)) {
                int nrRegistro = 0;
                for (CSVRecord record : parser) {
                    if (nrRegistro >= Extract.NR_SHARED_PAGES) {
                        break;
                    }

                    for (int nrRegra = 0; nrRegra < record.size(); nrRegra++) {
                        if (nrRegistro == 0) {
                            List<String> regra = new ArrayList<>();
                            try {
                                regra.add(Preprocessing.filter(record.get(nrRegra)));
                            } catch (InvalidValue ex) {
                                regra.add("");
                            }
                            offset.add(regra);
                        } else {
                            try {
                                offset.get(nrRegra).add(Preprocessing.filter(record.get(nrRegra)));
                            } catch (InvalidValue ex) {
                                offset.get(nrRegra).add("");
                            }
                        }
                    }
                    nrRegistro++;
                }
            }
            offsets.add(offset);
        } catch (FileNotFoundException ex) {
            Logger.getLogger(MappingController.class.getName()).log(Level.SEVERE, null, ex);
        } catch (IOException ex) {
            Logger.getLogger(MappingController.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    /**
     * Displaying what was read
     */
    /*for (int i = 1; i < offsets.size(); i++) {
        for (int j = 0; j < 5; j++) {
            System.out.print(offsets.get(i).get(0).get(j) + " - ");
        }
        System.out.println("");
    }*/
}

From source file:de.tudarmstadt.ukp.dkpro.argumentation.sequence.report.TokenLevelBatchCrossValidationReport.java

protected void reportOnlyMacroFM() throws IOException {
    File aggregatedCSVFile = new File(
            getContext().getStorageLocation(Constants.TEST_TASK_OUTPUT_KEY, StorageService.AccessMode.READONLY),
            TokenLevelEvaluationReport.TOKEN_LEVEL_PREDICTIONS_CSV);

    // load the CSV
    CSVParser csvParser = new CSVParser(new FileReader(aggregatedCSVFile),
            CSVFormat.DEFAULT.withCommentMarker('#'));

    // compute confusion matrix
    ConfusionMatrix cm = new ConfusionMatrix();

    // and add the all rows
    for (CSVRecord csvRecord : csvParser) {
        // first item is the gold label
        String gold = csvRecord.get(0);
        // second item is the predicted label
        String predicted = csvRecord.get(1);

        cm.increaseValue(gold, predicted);
    }

    File evaluationFile = new File(getContext().getStorageLocation(Constants.TEST_TASK_OUTPUT_KEY,
            StorageService.AccessMode.READWRITE), RESULT_SUMMARY);

    ReportTools.printFMeasuresToFile(cm, evaluationFile);
}

From source file:com.linkedin.pinot.core.data.readers.CSVRecordReader.java

@Override
public void init() throws Exception {
    final Reader reader = new FileReader(_fileName);
    _parser = new CSVParser(reader, getFormat());

    _iterator = _parser.iterator();
}

From source file:javalibs.CSVExtractor.java

/**
 * Read a CSV file and return a list of records representing each row in the CSV
 * NOTE: This does not handle anything but plain CSV files with default formatting
 * @param csvPath The path to the CSV file
 * @return The list of CSVRecord objects
 */
public static List<CSVRecord> getCSVRecords(String csvPath) {
    CSVParser parser = null;
    List<CSVRecord> records = null;
    try {
        parser = new CSVParser(Files.newBufferedReader(Paths.get(csvPath)),
                CSVFormat.DEFAULT.withHeader().withIgnoreHeaderCase().withTrim());
        records = parser.getRecords();
    } catch (IOException e) {
        TSL.get().exception(e);
    }
    return records;
}

From source file:com.cloudera.science.ml.parallel.normalize.StringSplitFn.java

@Override
public void process(String line, Emitter<Record> emitter) {
    if (line == null || line.isEmpty()) {
        return;
    }
    try {
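        // Note: this snippet appears to target an older Commons CSV API (CSVStrategy and
        // getLine()), which differs from the CSVParser(Reader, CSVFormat) constructor shown above.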
        String[] pieces = new CSVParser(new StringReader(line), csvStrategy).getLine();
        if (pieces != null) {
            emitter.emit(new CSVRecord(pieces));
        }
    } catch (IOException e) {
        throw new CrunchRuntimeException(e);
    }
}

From source file:loternik.MyService.java

public ArrayList<CSVRecord> getRecords(String teryt, String okreg) throws FileNotFoundException, IOException {
    FileReader file = new FileReader("kandydaci.csv");
    CSVParser parser = new CSVParser(file, CSVFormat.EXCEL.withHeader().withDelimiter(';'));
    ArrayList<CSVRecord> output = new ArrayList<CSVRecord>();
    for (CSVRecord csvRecord : parser) {
        if (teryt.equals(csvRecord.get("teryt")) && okreg.equals(csvRecord.get("Okreg"))) {
            output.add(csvRecord);
        }
    }
    return output;
}

From source file:mtsar.resources.AnswerResource.java

@POST
@Consumes(MediaType.MULTIPART_FORM_DATA)
public Response postAnswersCSV(@Context UriInfo uriInfo, @FormDataParam("file") InputStream stream)
        throws IOException {
    try (final Reader reader = new InputStreamReader(stream, StandardCharsets.UTF_8)) {
        try (final CSVParser csv = new CSVParser(reader, AnswerCSV.FORMAT)) {
            answerDAO.insert(AnswerCSV.parse(stage, csv));
        }
    }
    answerDAO.resetSequence();
    return Response.seeOther(getAnswersURI(uriInfo)).build();
}

From source file:de.tudarmstadt.ukp.experiments.argumentation.sequence.evaluation.GenerateCrossDomainCVReport.java

/**
 * Merges id2outcome files from sub-folders with cross-domain and creates a new folder
 * with overall results
 *
 * @param folder folder
 * @throws java.io.IOException
 */
public static void aggregateDomainResults(File folder, String subDirPrefix, final String taskFolderSubText,
        String outputFolderName) throws IOException {
    // list all sub-folders
    File[] folders = folder.listFiles(new FileFilter() {
        @Override
        public boolean accept(File pathname) {
            return pathname.isDirectory() && pathname.getName().contains(taskFolderSubText);
        }
    });

    if (folders.length == 0) {
        throw new IllegalArgumentException("No sub-folders 'SVMHMMTestTask*' found in " + folder);
    }

    // write to a file
    File outFolder = new File(folder, outputFolderName);
    File output = new File(outFolder, subDirPrefix);
    output.mkdirs();

    File outCsv = new File(output, TOKEN_LEVEL_PREDICTIONS_CSV);

    CSVPrinter csvPrinter = new CSVPrinter(new FileWriter(outCsv), SVMHMMUtils.CSV_FORMAT);
    csvPrinter.printComment(SVMHMMUtils.CSV_COMMENT);

    ConfusionMatrix cm = new ConfusionMatrix();

    for (File domain : folders) {
        File tokenLevelPredictionsCsv = new File(domain, subDirPrefix + "/" + TOKEN_LEVEL_PREDICTIONS_CSV);

        if (!tokenLevelPredictionsCsv.exists()) {
            throw new IllegalArgumentException(
                    "Cannot locate tokenLevelPredictions.csv: " + tokenLevelPredictionsCsv);
        }

        CSVParser csvParser = new CSVParser(new FileReader(tokenLevelPredictionsCsv),
                CSVFormat.DEFAULT.withCommentMarker('#'));

        for (CSVRecord csvRecord : csvParser) {
            // copy record
            csvPrinter.printRecord(csvRecord);

            // update confusion matrix
            cm.increaseValue(csvRecord.get(0), csvRecord.get(1));
        }
    }

    // write to file
    FileUtils.writeStringToFile(new File(outFolder, "confusionMatrix.txt"), cm.toString() + "\n"
            + cm.printNiceResults() + "\n" + cm.printLabelPrecRecFm() + "\n" + cm.printClassDistributionGold());

    // write csv
    IOUtils.closeQuietly(csvPrinter);
}

From source file:GUI.ReadFile.java

public boolean readTrace(String fileName) {
    FileReader fileReader;
    CSVParser csvFileParser;
    boolean isSuccess = true;
    CSVFormat csvFileFormat = CSVFormat.DEFAULT.withHeader(TRACE_HEADER_MAPPING);

    try {
        ArrayList<String> Activity_set = new ArrayList<String>();
        HashSet<String> ID_set = new HashSet<String>();
        traces = new ArrayList<Trace>();
        //initialize FileReader object
        System.out.println(fileName);
        fileReader = new FileReader(fileName);

        //initialize CSVParser object
        csvFileParser = new CSVParser(fileReader, csvFileFormat);
        //Get a list of CSV file records
        List<CSVRecord> csvRecords = csvFileParser.getRecords();
        Trace t = new Trace("");
        //Read the CSV file records starting from the second record to skip the header
        for (int i = 1; i < csvRecords.size(); i++) {
            CSVRecord record = csvRecords.get(i);
            String ID = record.get(CaseID);
            if (!ID_set.contains(ID) || (i == csvRecords.size() - 1)) {
                //Discard void trace
                if (i != 1) {
                    traces.add(t);
                }
                ID_set.add(ID);
                t = new Trace(ID);
            }
            Activity ac = new Activity(record.get(Activity), record.get(StartTime), record.get(CompleteTime),
                    record.get(Timestamp));
            t.add_activity(ac);

            if (!Activity_set.contains(ac.get_name())) {
                Activity_set.add(ac.get_name());
            }
        }
        //sort activity set by string
        Collections.sort(Activity_set);

        //sort trace by ID
        Collections.sort(traces, new Comparator<Trace>() {
            @Override
            public int compare(Trace t1, Trace t2) {
                return Integer.parseInt(t1.get_ID()) < Integer.parseInt(t2.get_ID()) ? -1 : 1;
            }
        });
        //Set activity set for each trace
        for (Trace T : traces) {
            T.set_ActivitySet((List<String>) Activity_set.clone());
        }

    } catch (Exception e) {
        System.out.println("Error in CsvFileReader !!!");
        e.printStackTrace();
        isSuccess = false;
        return isSuccess;
    }
    if (isSuccess) {
        try {
            fileReader.close();
            csvFileParser.close();
        } catch (IOException e) {
            System.out.println("Error while closing fileReader/csvFileParser !!!");
        }
    }
    return isSuccess;
}