Example usage for org.apache.commons.csv CSVParser CSVParser

Introduction

On this page you can find example usages of the org.apache.commons.csv CSVParser constructor CSVParser(Reader, CSVFormat).

Prototype

public CSVParser(final Reader reader, final CSVFormat format) throws IOException 

Document

Customized CSV parser using the given CSVFormat.

If you do not read all records from the given reader, you should call #close() on the parser, unless you close the reader.
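
Below is a minimal, self-contained sketch of this constructor in use; the class name CsvParserSketch and the in-memory sample data are illustrative assumptions, not taken from the examples that follow. Creating the parser in try-with-resources ensures #close() is called even if not all records are read:

import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class CsvParserSketch {
    public static void main(String[] args) throws IOException {
        // Illustrative in-memory CSV; real code would usually wrap a FileReader or InputStreamReader.
        Reader in = new StringReader("name,age\nAlice,30\nBob,25\n");

        // try-with-resources closes the parser even if iteration stops early.
        try (CSVParser parser = new CSVParser(in, CSVFormat.DEFAULT.withHeader())) {
            for (CSVRecord record : parser) {
                System.out.println(record.get("name") + " is " + record.get("age"));
            }
        }
    }
}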

Usage

From source file:br.edimarmanica.weir2.rule.Loader.java

/**
 *
 * @param site
 * @return Map<Page,Entity>
 */
public static Map<String, String> loadEntityID(Site site) {
    Map<String, String> ids = new HashMap<>();

    try (Reader in = new FileReader(Paths.PATH_BASE + site.getEntityPath())) {
        try (CSVParser parser = new CSVParser(in, CSVFormat.EXCEL.withHeader())) {
            for (CSVRecord record : parser) {
                String url = formatURL(record.get("url"));
                ids.put(url, record.get("entityID"));
            }
        }
    } catch (FileNotFoundException ex) {
        Logger.getLogger(Loader.class.getName()).log(Level.SEVERE, null, ex);
    } catch (IOException ex) {
        Logger.getLogger(Loader.class.getName()).log(Level.SEVERE, null, ex);
    }

    return ids;
}

From source file:com.github.jferard.pgloaderutils.loader.CSVCleanerFileReader.java

public static CSVCleanerFileReader fromReader(Reader reader, CSVFormat format, CSVRecordCleaner recordCleaner)
        throws IOException {
    CSVParser parser = new CSVParser(reader, format);
    return new CSVCleanerFileReader(parser, recordCleaner);
}

From source file:br.edimarmanica.trinity.intrasitemapping.manual.Mapping.java

private List<Map<String, String>> readOffset(File offsetFile) {
    List<Map<String, String>> offset = new ArrayList<>(); // each file is an offset

    try (Reader in = new FileReader(offsetFile)) {
        try (CSVParser parser = new CSVParser(in, CSVFormat.EXCEL)) {
            int nrRegistro = 0;
            for (CSVRecord record : parser) {

                for (int nrRegra = 0; nrRegra < record.size(); nrRegra++) {
                    String value;
                    try {
                        value = Formatter.formatValue(Preprocessing.filter(record.get(nrRegra)));
                    } catch (InvalidValue ex) {
                        value = "";
                    }

                    if (nrRegistro == 0) {
                        Map<String, String> regra = new HashMap<>();
                        regra.put(Formatter.formatURL(record.get(0)), value);
                        offset.add(regra);
                    } else {
                        offset.get(nrRegra).put(Formatter.formatURL(record.get(0)), value);
                    }
                }
                nrRegistro++;
            }
        }
    } catch (FileNotFoundException ex) {
        Logger.getLogger(Mapping.class.getName()).log(Level.SEVERE, null, ex);
    } catch (IOException ex) {
        Logger.getLogger(Mapping.class.getName()).log(Level.SEVERE, null, ex);
    }

    return offset;
}

From source file:mtsar.csv.TaskCSVTest.java

@Test
public void testCSV() throws IOException {
    try (final PipedInputStream pis = new PipedInputStream()) {
        try (final PipedOutputStream pos = new PipedOutputStream(pis)) {
            TaskCSV.write(tasks, pos);

            try (final Reader reader = new InputStreamReader(pis, StandardCharsets.UTF_8)) {
                try (final CSVParser csv = new CSVParser(reader, TaskCSV.FORMAT)) {
                    final List<Task> parsed = Lists.newArrayList(TaskCSV.parse(stage, csv));
                    assertThat(parsed).hasSize(2);
                    assertThat(parsed).usingElementComparatorIgnoringFields("dateTime").isEqualTo(tasks);
                }
            }
        }
    }
}

From source file:edu.emory.mathcs.nlp.zzz.CSVRadiology.java

public void categorize(String inputFile) throws Exception {
    CSVParser parser = new CSVParser(IOUtils.createBufferedReader(inputFile), CSVFormat.DEFAULT);
    List<CSVRecord> records = parser.getRecords();
    StringJoiner join;
    CSVRecord record;

    for (int i = 1; i <= 500; i++) {
        record = records.get(i);
        join = new StringJoiner(" ");

        for (int j = 2; j < 7; j++)
            join.add(record.get(j));

        System.out.println(join.toString());
    }

    parser.close();
}

From source file:mtsar.csv.WorkerCSVTest.java

@Test
public void testCSV() throws IOException {
    try (final PipedInputStream pis = new PipedInputStream()) {
        try (final PipedOutputStream pos = new PipedOutputStream(pis)) {
            WorkerCSV.write(workers, pos);

            try (final Reader reader = new InputStreamReader(pis, StandardCharsets.UTF_8)) {
                try (final CSVParser csv = new CSVParser(reader, WorkerCSV.FORMAT)) {
                    final List<Worker> parsed = Lists.newArrayList(WorkerCSV.parse(stage, csv));
                    assertThat(parsed).hasSize(2);
                    assertThat(parsed).usingElementComparatorIgnoringFields("dateTime").isEqualTo(workers);
                }
            }
        }
    }
}

From source file:br.edimarmanica.trinity.intrasitemapping.auto.MergeOffsets.java

private void executeOffset(int indexOffset) {
    File dir = new File(Paths.PATH_TRINITY + site.getPath() + "/offset");
    try (Reader in = new FileReader(dir.getAbsoluteFile() + "/result_" + indexOffset + ".csv")) {
        List<List<String>> lines = new ArrayList<>();
        try (CSVParser parser = new CSVParser(in, CSVFormat.EXCEL)) {

            int indexRegistro = 0;
            for (CSVRecord record : parser) {
                if (indexOffset != 0 && indexRegistro < Extract.NR_SHARED_PAGES) { // otherwise it would extract duplicated records
                    indexRegistro++;
                    continue;
                }
                List<String> line = new ArrayList<>();
                for (int nrRegra = 0; nrRegra < record.size(); nrRegra++) {
                    try {
                        line.add(Preprocessing.filter(record.get(nrRegra)));
                    } catch (InvalidValue ex) {
                        line.add("");
                    }
                }
                lines.add(line);
                indexRegistro++;
            }

            print(indexOffset, lines);
        }
    } catch (FileNotFoundException ex) {
        Logger.getLogger(MergeOffsets.class.getName()).log(Level.SEVERE, null, ex);
    } catch (IOException ex) {
        Logger.getLogger(MergeOffsets.class.getName()).log(Level.SEVERE, null, ex);
    }

}

From source file:de.upb.wdqa.wdvd.revisiontags.TagDownloader.java

/**
 * Reads the CSV file of the TagDownloader.
 */
public static void readFile(File file) {
    try {
        logger.info("Starting to read file of TagDownloader ...");
        BufferedReader reader = new BufferedReader(new InputStreamReader(
                new BZip2CompressorInputStream(new BufferedInputStream(new FileInputStream(file))), "UTF-8"));

        CSVParser parser = new CSVParser(reader, CSVFormat.RFC4180);

        dataStore.connect();

        for (CSVRecord csvRecord : parser) {
            parseRecord(csvRecord);
            if (csvRecord.getRecordNumber() % 1000000 == 0) {
                logger.info("Current Record: " + csvRecord.getRecordNumber());
            }
        }

        dataStore.disconnect();
        parser.close();
        logger.info("Tag Distribution:\n" + FrequencyUtils.formatFrequency(tagDistribution));
        logger.info("Finished");
    } catch (Exception e) {
        logger.error("", e);
    }
}

From source file:com.compomics.cell_coord.parser.impl.TSVFileParser.java

@Override
public Sample parseTrackFile(File trackFile) throws FileParserException {
    // create a new sample object -- watch out to set the relationships!
    Sample sample = new Sample(trackFile.getName());
    // initialize an empty list of tracks
    List<Track> list = new ArrayList<>();
    CSVParser tsvFileParser;
    FileReader fileReader;
    CSVFormat csvFileFormat = CSVFormat.TDF.withHeader(FILE_HEADER_MAPPING);
    try {
        // initialize the file reader
        fileReader = new FileReader(trackFile);
        //initialize CSVParser object
        tsvFileParser = new CSVParser(fileReader, csvFileFormat);
        // get the csv records
        List<CSVRecord> csvRecords = tsvFileParser.getRecords();
        Track currentTrack = null;
        List<TrackSpot> currentTrackPointList = new ArrayList<>();
        Long currentId = 0L;

        //Read the CSV file records starting from the second record to skip the header
        for (int i = 1; i < csvRecords.size(); i++) {
            CSVRecord cSVRecord = csvRecords.get(i);
            // get the fields
            Long trackid = Long.parseLong(cSVRecord.get(TRACK_ID));
            if (!Objects.equals(currentId, trackid)) {
                currentTrack = new Track();
                currentTrack.setTrackid(trackid);
                list.add(currentTrack);
                currentId = trackid;
                currentTrackPointList = new ArrayList<>();
            }
            // create new Track Spot object
            Long spotid = Long.parseLong(cSVRecord.get(SPOT_ID));
            double x = Double.parseDouble(cSVRecord.get(X_COORD));
            double y = Double.parseDouble(cSVRecord.get(Y_COORD));
            double time = Double.parseDouble(cSVRecord.get(TIME));
            TrackSpot trackSpot = new TrackSpot(spotid, x, y, time, currentTrack);
            currentTrackPointList.add(trackSpot);
            currentTrack.setTrackSpots(currentTrackPointList);
            currentTrack.setSample(sample);
        }
    } catch (IOException ex) {
        LOG.error(ex.getMessage(), ex);
    } catch (NumberFormatException ex) {
        LOG.error(ex.getMessage(), ex);
        throw new FileParserException(
                "It seems like a line does not contain a number!\nPlease check your files!");
    }
    sample.setTracks(list);
    return sample;
}

From source file:com.publictransitanalytics.scoregenerator.datalayer.directories.GTFSReadingStopDetailsDirectory.java

public GTFSReadingStopDetailsDirectory(final Store<StopIdKey, StopDetails> stopDetailsStore,
        final Reader stopDetailsReader) throws IOException, InterruptedException {

    this.stopDetailsStore = stopDetailsStore;
    try {
        if (stopDetailsStore.isEmpty()) {
            log.info("Building stop details directory.");

            final CSVParser parser = new CSVParser(stopDetailsReader, CSVFormat.DEFAULT.withHeader());
            final List<CSVRecord> stopDetailsRecords = parser.getRecords();
            for (CSVRecord record : stopDetailsRecords) {
                final String latitude = record.get("stop_lat");
                final String longitude = record.get("stop_lon");
                final String stopId = record.get("stop_id");
                final StopDetails stopDetails = new StopDetails(stopId, record.get("stop_name"),
                        new Coordinate(latitude, longitude));
                stopDetailsStore.put(new StopIdKey(stopId), stopDetails);
            }
        }
    } catch (final BitvantageStoreException e) {
        throw new ScoreGeneratorFatalException(e);
    }
}