List of usage examples for org.apache.commons.csv CSVParser getRecords
public List<CSVRecord> getRecords() throws IOException
From source file:com.publictransitanalytics.scoregenerator.datalayer.directories.GTFSReadingServiceTypeCalendar.java
/**
 * Parses a GTFS calendar.txt stream and expands each service row into
 * per-date entries in the supplied multimap.
 *
 * For each record, the set of active weekdays is collected (a column value
 * of "1" marks the day active) and every date in [start_date, end_date]
 * falling on one of those weekdays is mapped to the record's service_id.
 *
 * Note: the original implementation also tracked the earliest start and
 * latest end dates, but those values were never read — that dead
 * computation has been removed.
 *
 * @param calendarReader  reader over calendar.txt; caller retains ownership
 *                        and is responsible for closing it
 * @param serviceTypesMap receives date -&gt; service_id mappings
 * @throws IOException if the CSV cannot be read
 */
private void parseCalendarFile(final Reader calendarReader,
        final Multimap<LocalDate, String> serviceTypesMap) throws IOException {
    final CSVParser calendarParser
            = new CSVParser(calendarReader, CSVFormat.DEFAULT.withHeader());
    final List<CSVRecord> calendarRecords = calendarParser.getRecords();

    for (final CSVRecord record : calendarRecords) {
        final String serviceType = record.get("service_id");
        // GTFS dates use the compact yyyyMMdd form.
        final LocalDate start = LocalDate.parse(record.get("start_date"),
                DateTimeFormatter.BASIC_ISO_DATE);
        final LocalDate end = LocalDate.parse(record.get("end_date"),
                DateTimeFormatter.BASIC_ISO_DATE);

        // Collect the weekdays on which this service runs.
        final EnumSet<DayOfWeek> daysOfWeek = EnumSet.noneOf(DayOfWeek.class);
        if (record.get("monday").equals("1")) {
            daysOfWeek.add(DayOfWeek.MONDAY);
        }
        if (record.get("tuesday").equals("1")) {
            daysOfWeek.add(DayOfWeek.TUESDAY);
        }
        if (record.get("wednesday").equals("1")) {
            daysOfWeek.add(DayOfWeek.WEDNESDAY);
        }
        if (record.get("thursday").equals("1")) {
            daysOfWeek.add(DayOfWeek.THURSDAY);
        }
        if (record.get("friday").equals("1")) {
            daysOfWeek.add(DayOfWeek.FRIDAY);
        }
        if (record.get("saturday").equals("1")) {
            daysOfWeek.add(DayOfWeek.SATURDAY);
        }
        if (record.get("sunday").equals("1")) {
            daysOfWeek.add(DayOfWeek.SUNDAY);
        }

        // Walk the inclusive date range, emitting one entry per active day.
        LocalDate targetDate = start;
        while (!targetDate.isAfter(end)) {
            if (daysOfWeek.contains(targetDate.getDayOfWeek())) {
                serviceTypesMap.put(targetDate, serviceType);
            }
            targetDate = targetDate.plusDays(1);
        }
    }
}
From source file:com.garethahealy.quotalimitsgenerator.cli.parsers.DefaultCLIParser.java
/**
 * Loads the instance-type CSV (either the bundled classpath resource or a
 * file referenced by URI) and returns a map keyed by the value of column 1
 * to a pair of the integer values in columns 2 and 3.
 *
 * @param instanceTypeCsv "classpath" to load the bundled instancetypes.csv,
 *                        otherwise a file URI to read from
 * @return map of instance-type name to (request, limit) integer pair
 * @throws IOException        if the CSV cannot be read
 * @throws URISyntaxException if {@code instanceTypeCsv} is a malformed URI
 * @throws ParseException     if the CSV contains no records
 */
private Map<String, Pair<Integer, Integer>> parseLines(String instanceTypeCsv)
        throws IOException, URISyntaxException, ParseException {
    InputStreamReader inputStreamReader;
    if (instanceTypeCsv.equalsIgnoreCase("classpath")) {
        // Bundled default data set shipped with the tool.
        inputStreamReader = new InputStreamReader(
                getClass().getClassLoader().getResourceAsStream("instancetypes.csv"),
                Charset.forName("UTF-8"));
    } else {
        URI uri = new URI(instanceTypeCsv);
        inputStreamReader = new InputStreamReader(new FileInputStream(new File(uri)),
                Charset.forName("UTF-8"));
    }

    List<CSVRecord> lines;
    // try-with-resources replaces the original manual finally block and also
    // guarantees the reader is closed if parser construction itself fails.
    try (Reader reader = inputStreamReader;
            CSVParser parser = CSVFormat.DEFAULT.parse(new BufferedReader(reader))) {
        lines = parser.getRecords();
    }

    if (lines == null || lines.isEmpty()) {
        throw new ParseException("instance-type-csv data is empty");
    }

    Map<String, Pair<Integer, Integer>> linesMap = new HashMap<>();
    for (CSVRecord current : lines) {
        // Column 1 is the instance-type name; columns 2 and 3 are numeric quotas.
        linesMap.put(current.get(1), new ImmutablePair<>(
                Integer.parseInt(current.get(2)),
                Integer.parseInt(current.get(3))));
    }
    return linesMap;
}
From source file:com.publictransitanalytics.scoregenerator.datalayer.directories.GTFSReadingStopTimesDirectory.java
/**
 * Reads GTFS frequencies.txt records and registers one FrequencyRecord per
 * row, keyed by trip id, into the supplied builder.
 *
 * @param builder           accumulates trip id -&gt; frequency record entries
 * @param frequenciesReader reader over frequencies.txt; not closed here
 * @throws IOException if the CSV cannot be read
 */
private void parseFrequenciesFile(
        final ImmutableMultimap.Builder<String, FrequencyRecord> builder,
        final Reader frequenciesReader) throws FileNotFoundException, IOException {
    final CSVParser parser
            = new CSVParser(frequenciesReader, CSVFormat.DEFAULT.withHeader());
    for (final CSVRecord row : parser.getRecords()) {
        final String trip = row.get("trip_id");
        // Headway is given in whole seconds; start/end use the GTFS
        // transit-time format understood by TransitTime.parse.
        builder.put(trip, new FrequencyRecord(
                trip,
                TransitTime.parse(row.get("start_time")),
                TransitTime.parse(row.get("end_time")),
                Duration.ofSeconds(Long.parseLong(row.get("headway_secs")))));
    }
}
From source file:io.github.seiferma.jameica.hibiscus.dkb.creditcard.synchronize.csvparser.DKBCsvParser.java
public Iterable<DKBTransaction> getTransactions() throws IOException { CSVParser parser = CSVParser.parse(contentCsv, csvFormat.withFirstRecordAsHeader()); List<DKBTransaction> transactions = new ArrayList<>(); for (CSVRecord csvRecord : parser.getRecords()) { transactions.add(createTransaction(csvRecord)); }//from w ww . j a va 2 s. c om return transactions; }
From source file:edu.washington.gs.skyline.model.quantification.QuantificationTest.java
private Map<RecordKey, Double> readExpectedRows(String filename) throws Exception { Map<RecordKey, Double> map = new HashMap<>(); Reader reader = new InputStreamReader(QuantificationTest.class.getResourceAsStream(filename)); try {//from w ww .j av a2s . co m CSVParser parser = new CSVParser(reader, CSVFormat.EXCEL.withHeader()); for (CSVRecord record : parser.getRecords()) { map.put(new RecordKey(record), parseNullableDouble(record.get("NormalizedArea"))); } } finally { reader.close(); } return map; }
From source file:edu.washington.gs.skyline.model.quantification.QuantificationTest.java
private List<InputRecord> readInputRecords(String filename) throws Exception { List<InputRecord> list = new ArrayList<>(); Reader reader = new InputStreamReader(QuantificationTest.class.getResourceAsStream(filename)); try {//from w w w . ja v a 2s. c o m CSVParser parser = new CSVParser(reader, CSVFormat.EXCEL.withHeader()); for (CSVRecord record : parser.getRecords()) { list.add(new InputRecord(record)); } } finally { reader.close(); } return list; }
From source file:edu.washington.gs.skyline.model.quantification.QuantificationTest.java
/**
 * Reads a CSV test-resource file and groups rows into ReplicateData objects
 * keyed by their "FileName" column.
 *
 * NOTE: this method is an unfinished stub — it creates an (empty)
 * ReplicateData per distinct file name but never populates or returns it,
 * and unconditionally throws NotImplementedException at the end. That
 * behavior is preserved here.
 *
 * @param filename classpath resource name, resolved relative to QuantificationTest
 * @return never returns normally
 * @throws Exception always (NotImplementedException once parsing succeeds)
 */
private List<ReplicateData> readReplicates(String filename) throws Exception {
    Map<String, ReplicateData> replicates = new LinkedHashMap<>();
    // try-with-resources replaces the original try/finally and additionally
    // closes the parser, which the original never did.
    try (Reader reader = new InputStreamReader(
                    QuantificationTest.class.getResourceAsStream(filename));
            CSVParser parser = new CSVParser(reader, CSVFormat.EXCEL.withHeader())) {
        for (CSVRecord record : parser.getRecords()) {
            String fileName = record.get("FileName");
            ReplicateData replicate = replicates.get(fileName);
            if (replicate == null) {
                replicate = new ReplicateData();
                replicates.put(fileName, replicate);
            }
        }
    }
    throw new NotImplementedException();
}
From source file:com.github.douglasjunior.simpleCSVEditor.FXMLController.java
/**
 * Loads a CSV file into observable rows for the table view and records the
 * widest row's column count in {@code numbeColumns}.
 *
 * NOTE(review): the reader uses the platform default charset — confirm
 * whether UTF-8 should be forced for cross-platform consistency.
 *
 * @param csvFile file to read
 * @return one CSVRow per CSV record, each holding a string property per cell
 * @throws IOException if the file cannot be read or parsed
 */
private ObservableList<CSVRow> readFile(File csvFile) throws IOException {
    ObservableList<CSVRow> rows = FXCollections.observableArrayList();
    Integer widest = 0;
    try (Reader source = new InputStreamReader(new FileInputStream(csvFile));) {
        CSVParser parsed = csvFormat.parse(source);
        for (CSVRecord csvRecord : parsed.getRecords()) {
            // Track the maximum column count across all rows.
            if (widest < csvRecord.size()) {
                widest = csvRecord.size();
            }
            CSVRow tableRow = new CSVRow();
            for (int col = 0; col < csvRecord.size(); col++) {
                tableRow.getColumns().add(new SimpleStringProperty(csvRecord.get(col)));
            }
            rows.add(tableRow);
        }
        this.numbeColumns = widest;
    }
    return rows;
}
From source file:co.cask.hydrator.transforms.CSVParser2.java
@Override public void transform(StructuredRecord in, Emitter<StructuredRecord> emitter) throws Exception { // Field has to string to be parsed correctly. For others throw an exception. String body = in.get(config.field); // If decoder is not NONE, then apply decoder. byte[] decodedPayLoad; if (!config.decoder.equalsIgnoreCase("NONE")) { decodedPayLoad = decodePayLoad(body); } else {// w w w. j a va2 s. c om decodedPayLoad = body.getBytes(); } // If decompess is not NONE, then apply decompressor. byte[] uncompressedPayLoad = decodedPayLoad.clone(); if (!config.decompress.equalsIgnoreCase("NONE")) { if (config.decompress.equalsIgnoreCase("SNAPPY")) { uncompressedPayLoad = Snappy.uncompress(decodedPayLoad); } else if (config.decompress.equalsIgnoreCase("GZIP")) { uncompressedPayLoad = ungzip(decodedPayLoad); } else if (config.decompress.equalsIgnoreCase("ZIP")) { uncompressedPayLoad = unzip(decodedPayLoad); } } // Parse the text as CSV and emit it as structured record. try { CSVParser parser = CSVParser.parse(new String(uncompressedPayLoad), csvFormat); List<CSVRecord> records = parser.getRecords(); for (CSVRecord record : records) { if (fields.size() == record.size()) { StructuredRecord sRecord = createStructuredRecord(record); emitter.emit(sRecord); } else { // Write the record to error Dataset. } } } catch (IOException e) { } }
From source file:edu.clemson.lph.utils.CSVParserWrapper.java
/**
 * Drains an Apache Commons CSVParser into this wrapper's in-memory row list.
 * The supplied parser is fully consumed and closed, even on failure.
 * A null parser leaves the wrapper empty.
 *
 * @param pIn parser to copy records from; may be null
 * @throws IOException if reading the records fails
 */
public CSVParserWrapper(CSVParser pIn) throws IOException {
    if (pIn == null) {
        return;
    }
    try {
        for (CSVRecord record : pIn.getRecords()) {
            // Copy each record's cells into a plain string list.
            List<String> rowValues = new ArrayList<String>();
            for (int col = 0; col < record.size(); col++) {
                rowValues.add(record.get(col));
            }
            aRows.add(rowValues);
        }
        iRows = aRows.size();
        // Cursor starts at 1, skipping the header row.
        iCurrent = 1;
    } finally {
        pIn.close();
    }
}