List of usage examples for org.apache.commons.csv CSVFormat.DEFAULT
CSVFormat.DEFAULT
Each example below is taken from the source file named directly above it.
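Before the project examples, here is a minimal, self-contained sketch (not taken from any of the source files below; the class name and sample values are made up for illustration) showing the basic round trip with CSVFormat.DEFAULT, the RFC 4180-style format with a comma delimiter and CRLF record separator:

import java.io.StringReader;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVPrinter;
import org.apache.commons.csv.CSVRecord;

public class CsvFormatDefaultDemo {
    public static void main(String[] args) throws Exception {
        // Write two records with CSVFormat.DEFAULT.
        StringBuilder out = new StringBuilder();
        try (CSVPrinter printer = new CSVPrinter(out, CSVFormat.DEFAULT)) {
            printer.printRecord("UserId", "ScreenName");
            printer.printRecord("u1", "welcome");
        }

        // Parse the same text back; quoting and embedded commas are handled by the format.
        try (CSVParser parser = new CSVParser(new StringReader(out.toString()), CSVFormat.DEFAULT)) {
            for (CSVRecord record : parser) {
                System.out.println(record.get(0) + " -> " + record.get(1));
            }
        }
    }
}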
From source file:nl.mpi.tg.eg.frinex.rest.CsvController.java
private byte[] getParticipantsCsv() throws IOException {
    final StringBuilder stringBuilder = new StringBuilder();
    CSVPrinter printer = new CSVPrinter(stringBuilder, CSVFormat.DEFAULT);
    final ParticipantCsvExporter participantCsvExporter = new ParticipantCsvExporter();
    participantCsvExporter.appendCsvHeader(printer);
    ArrayList<String> insertedUserIds = new ArrayList<>();
    for (Participant participant : participantRepository.findAllByOrderBySubmitDateDesc()) {
        if (!insertedUserIds.contains(participant.getUserId())) {
            // here we are relying on the last user data submission being the most complete,
            // because that data is only added to in the experiment GUI
            participantCsvExporter.appendCsvRow(printer, participant);
            insertedUserIds.add(participant.getUserId());
        }
    }
    printer.close();
    return stringBuilder.toString().getBytes();
}
From source file:nl.mpi.tg.eg.frinex.rest.CsvController.java
private byte[] getScreenDataCsv() throws IOException {
    final StringBuilder stringBuilder = new StringBuilder();
    CSVPrinter printer = new CSVPrinter(stringBuilder, CSVFormat.DEFAULT);
    printer.printRecord("UserId", "ScreenName", "ViewDate");
    for (ScreenData screenData : screenDataRepository.findAllDistinctRecords()) {
        printer.printRecord(screenData.getUserId(), screenData.getScreenName(), screenData.getViewDate());
    }
    printer.close();
    return stringBuilder.toString().getBytes();
}
From source file:nl.mpi.tg.eg.frinex.rest.CsvController.java
private byte[] getTimeStampDataCsv() throws IOException {
    final StringBuilder stringBuilder = new StringBuilder();
    CSVPrinter printer = new CSVPrinter(stringBuilder, CSVFormat.DEFAULT);
    printer.printRecord("UserId", "EventTag", "EventMs", "TagDate");
    for (TimeStamp timeStamp : timeStampRepository.findAllDistinctRecords()) {
        printer.printRecord(timeStamp.getUserId(), timeStamp.getEventTag(), timeStamp.getEventMs(),
                timeStamp.getTagDate());
    }
    printer.close();
    return stringBuilder.toString().getBytes();
}
From source file:nl.mpi.tg.eg.frinex.rest.CsvController.java
private byte[] getTagDataCsv() throws IOException {
    final StringBuilder stringBuilder = new StringBuilder();
    CSVPrinter printer = new CSVPrinter(stringBuilder, CSVFormat.DEFAULT);
    printer.printRecord("UserId", "EventTag", "TagValue", "EventMs", "TagDate");
    for (TagData tagData : tagRepository.findAllDistinctRecords()) {
        printer.printRecord(tagData.getUserId(), tagData.getEventTag(), tagData.getTagValue(),
                tagData.getEventMs(), tagData.getTagDate());
    }
    printer.close();
    return stringBuilder.toString().getBytes();
}
From source file:nl.mpi.tg.eg.frinex.rest.CsvController.java
private byte[] getTagPairDataCsv() throws IOException {
    final StringBuilder stringBuilder = new StringBuilder();
    CSVPrinter printer = new CSVPrinter(stringBuilder, CSVFormat.DEFAULT);
    printer.printRecord("UserId", "EventTag", "TagValue1", "TagValue2", "EventMs", "TagDate");
    for (TagPairData tagPairData : tagPairRepository.findAllDistinctRecords()) {
        printer.printRecord(tagPairData.getUserId(), tagPairData.getEventTag(), tagPairData.getTagValue1(),
                tagPairData.getTagValue2(), tagPairData.getEventMs(), tagPairData.getTagDate());
    }
    printer.close();
    return stringBuilder.toString().getBytes();
}
From source file:nl.utwente.ewi.caes.tactiletriana.simulation.devices.UncontrollableLoad.java
/**
 * @param profileNumber - A number between 0 and 5 (inclusive) which selects
 *                        the profile data on which this instance is based
 */
public UncontrollableLoad(int profileNumber, Simulation simulation) {
    super("Uncontrollable Load", simulation);
    if (profileNumber < 0 || profileNumber > 5) {
        throw new IllegalArgumentException("profileNumber must be in the range of 0 to 5");
    }
    this.profileNumber = profileNumber;
    // Load the profile data into an array from the CSV file containing power consumption for 6 houses.
    if (profile == null) {
        profile = new double[6][525608];
        try {
            File csvData = new File("src/main/resources/datasets/watt_house_profiles_year.csv");
            // Jan Harm: you can simply create a format :)
            CSVFormat format = CSVFormat.DEFAULT.withDelimiter(';');
            CSVParser parser = CSVParser.parse(csvData, Charset.defaultCharset(), format);
            for (CSVRecord csvRecord : parser) {
                for (int p = 0; p < 6; p++) {
                    profile[p][(int) parser.getRecordNumber()] = Double.parseDouble(csvRecord.get(p));
                }
            }
        } catch (IOException e) {
            throw new RuntimeException("Error while parsing house profile dataset", e);
        }
    }
}
From source file:nl.utwente.trafficanalyzer.GeoTagger.java
public static List readCsvFile(File fileName) {
    FileReader fileReader = null;
    CSVParser csvFileParser = null;
    // Create the CSVFormat object
    CSVFormat csvFileFormat = CSVFormat.DEFAULT;
    try {
        // initialize FileReader object
        fileReader = new FileReader(fileName);
        // initialize CSVParser object
        csvFileParser = new CSVParser(fileReader, csvFileFormat);
        // Get a list of CSV file records
        List csvRecords = csvFileParser.getRecords();
        return csvRecords;
    } catch (Exception e) {
        System.out.println("Error in CsvFileReader !!!");
        e.printStackTrace();
    } finally {
        try {
            fileReader.close();
            csvFileParser.close();
        } catch (IOException e) {
            System.out.println("Error while closing fileReader/csvFileParser !!!");
            e.printStackTrace();
        }
    }
    return null;
}
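Note that the finally block above would throw a NullPointerException if the FileReader constructor itself failed. A shorter sketch of the same read using try-with-resources (the method name readCsvRecords is made up for illustration and assumes the same org.apache.commons.csv imports as the example above):

public static List<CSVRecord> readCsvRecords(File fileName) throws IOException {
    // Both the reader and the parser are closed automatically, even if parsing fails.
    try (FileReader fileReader = new FileReader(fileName);
         CSVParser csvFileParser = new CSVParser(fileReader, CSVFormat.DEFAULT)) {
        return csvFileParser.getRecords();
    }
}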
From source file:no.packdrill.android.sparkledroid.lib.parse.csvtsv.CSVParse.java
@Override
public void parse(Reader input) throws IOException
//------------------------------------------------
{
    CSVFormat format = CSVFormat.DEFAULT.withHeader();
    parser = new CSVParser(input, format);
    headerMap = parser.getHeaderMap();
    Set<Map.Entry<String, Integer>> ss = headerMap.entrySet();
    columns = new String[headerMap.size()];
    for (Map.Entry<String, Integer> e : ss)
        columns[e.getValue()] = e.getKey();
    it = parser.iterator();
}
From source file:notaql.engines.csv.CSVEngineEvaluator.java
/**
 * Evaluates the given transformation.
 *
 * This first parses the document (with the first line being the header) and then evaluates on our framework.
 *
 * TODO: this assumes a header line. It might happen that it is not provided.
 *
 * @param transformation
 * @return
 */
@Override
public JavaRDD<ObjectValue> evaluate(Transformation transformation) {
    final SparkTransformationEvaluator evaluator = new SparkTransformationEvaluator(transformation);
    final JavaSparkContext sc = NotaQL.SparkFactory.getSparkContext();
    final CSVFormat format = CSVFormat.DEFAULT;

    final JavaRDD<String> csv = sc.textFile(path);
    final String first = csv.first();

    final CSVRecord header;
    try {
        header = format.parse(new StringReader(first)).iterator().next();
    } catch (IOException e) {
        e.printStackTrace();
        throw new AssertionError("Header could not be read for some reason.");
    }

    String[] headerCols = new String[header.size()];
    for (int i = 0; i < header.size(); i++) {
        headerCols[i] = header.get(i);
    }

    final CSVFormat headerFormat = CSVFormat.DEFAULT.withHeader(headerCols);

    final JavaRDD<CSVRecord> records = csv.filter(f -> !f.equals(first))
            .map(line -> headerFormat.parse(new StringReader(line)).iterator().next());

    final JavaRDD<Value> converted = records.map(ValueConverter::convertToNotaQL);
    final JavaRDD<Value> filtered = converted.filter(o -> transformation.satisfiesInPredicate((ObjectValue) o));

    return evaluator.process(filtered);
}
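The TODO in the Javadoc above points out that this evaluator assumes a header line in the input. As a rough sketch of one way to handle headerless data (the column names below are placeholders, not part of NotaQL), the columns can be declared on the format itself via withHeader(String...), in which case every line, including the first, is treated as a data record:

// Placeholder column names for illustration only.
CSVFormat headerlessFormat = CSVFormat.DEFAULT.withHeader("col0", "col1", "col2");

try (CSVParser parser = headerlessFormat.parse(new StringReader("1,2,3"))) {
    CSVRecord record = parser.iterator().next();
    System.out.println(record.get("col1")); // prints "2"
}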
From source file:notaql.engines.csv.CSVEngineEvaluator.java
/**
 * Stores the resulting rows to disk.
 *
 * @param result
 */
@Override
public void store(JavaRDD<ObjectValue> result) {
    final JavaSparkContext sc = NotaQL.SparkFactory.getSparkContext();
    final String[] header = getHeader(result);
    final CSVFormat format = CSVFormat.DEFAULT;

    final JavaRDD<String> headerRDD = sc.parallelize(Arrays.asList(format.format(header)));
    final JavaRDD<String> lines = result.map(o -> ValueConverter.convertFromNotaQL(o, header))
            .map(strings -> format.format(strings.toArray()));

    sc.union(headerRDD, lines).saveAsTextFile(path);
}