Usage examples for the Apache Commons CSV method org.apache.commons.csv.CSVPrinter#printRecord, collected from open-source projects.
public void printRecord(final Object... values) throws IOException
From source file:org.cricketmsf.in.http.CsvFormatter.java
public String format(List list) { StringBuilder sb = new StringBuilder(); try {// w w w .j a v a 2 s . c om CSVPrinter printer = new CSVPrinter(sb, CSVFormat.DEFAULT); if (list.size() > 0) { printer.printRecord((List) list.get(0)); for (int i = 1; i < list.size(); i++) { printer.printRecord((List) list.get(i)); } } } catch (IOException e) { sb.append(e.getMessage()); } return sb.toString(); }
From source file:org.cricketmsf.in.http.CsvFormatter.java
/**
 * Serializes a map as two CSV records: the first holds the keys (header row),
 * the second holds the corresponding values, using {@link CSVFormat#DEFAULT}.
 *
 * <p>Key/value column alignment relies on the map's iteration order being
 * consistent between {@code keySet()} and {@code values()}, which the Map
 * contract guarantees for a single unmodified map instance.
 *
 * @param data map to serialize
 * @return the CSV text, or partial text plus the error message on failure
 */
public String format(Map data) {
    StringBuilder sb = new StringBuilder();
    // try-with-resources closes (and thus flushes) the printer; the original
    // leaked it.
    try (CSVPrinter printer = new CSVPrinter(sb, CSVFormat.DEFAULT)) {
        printer.printRecord(data.keySet());
        printer.printRecord(data.values());
    } catch (IOException e) {
        // Best-effort: surface the failure in the returned text, as before.
        sb.append(e.getMessage());
    }
    return sb.toString();
}
From source file:org.easybatch.extensions.apache.common.csv.ApacheCommonCsvRecordMarshaller.java
/**
 * Marshals a generic record into a single CSV line.
 *
 * <p>The configured {@code fieldExtractor} turns the payload into an ordered
 * field sequence, which is printed with the configured {@code csvFormat}.
 *
 * @param record the record to marshal
 * @return a {@code StringRecord} carrying the original header and the CSV text
 * @throws RecordMarshallingException wrapping any failure (extraction or I/O)
 */
@Override
public StringRecord processRecord(final GenericRecord record) throws RecordMarshallingException {
    // try-with-resources closes both resources; the original leaked them.
    try (StringWriter stringWriter = new StringWriter();
            CSVPrinter csvPrinter = new CSVPrinter(stringWriter, csvFormat)) {
        Iterable<Object> iterable = fieldExtractor.extractFields(record.getPayload());
        csvPrinter.printRecord(iterable);
        // Flush before toString() so the buffer content is visible; the
        // writer's toString() is evaluated before close() runs.
        csvPrinter.flush();
        return new StringRecord(record.getHeader(), stringWriter.toString());
    } catch (Exception e) {
        // Preserve the cause for diagnosis, as the original did.
        throw new RecordMarshallingException(e);
    }
}
From source file:org.gitia.jdataanalysis.JDataAnalysis.java
public void save(String[][] data, String[] headers, String folder, String fileName) { String NEW_LINE_SEPARATOR = "\n"; FileWriter fileWriter = null; CSVPrinter csvFilePrinter = null; //Create the CSVFormat object with "\n" as a record delimiter CSVFormat csvFileFormat = CSVFormat.DEFAULT.withRecordSeparator(NEW_LINE_SEPARATOR); try {/* w ww . j a v a 2 s . c o m*/ //initialize FileWriter object File file = new File(folder + "/" + fileName); fileWriter = new FileWriter(file); //initialize CSVPrinter object csvFilePrinter = new CSVPrinter(fileWriter, csvFileFormat); //Create CSV file header csvFilePrinter.printRecord(headers); //Write a new student object list to the CSV file for (int i = 0; i < data.length; i++) { //List studentDataRecord = new ArrayList(); csvFilePrinter.printRecord(data[i]); } System.out.println("CSV file was created successfully !!!"); System.out.println(folder + "/" + fileName); } catch (Exception e) { System.out.println("Error in CsvFileWriter !!!"); e.printStackTrace(); } finally { try { fileWriter.flush(); fileWriter.close(); csvFilePrinter.close(); } catch (IOException e) { System.out.println("Error while flushing/closing fileWriter/csvPrinter !!!"); e.printStackTrace(); } } }
From source file:org.gitia.jdataanalysis.JDataAnalysis.java
/** * * @param list/*from w w w .j a va 2 s .c om*/ * @param folder * @param fileName */ public void save(List<String> list, String folder, String fileName) { String NEW_LINE_SEPARATOR = "\n"; FileWriter fileWriter = null; CSVPrinter csvFilePrinter = null; //Create the CSVFormat object with "\n" as a record delimiter CSVFormat csvFileFormat = CSVFormat.DEFAULT.withRecordSeparator(NEW_LINE_SEPARATOR); try { //initialize FileWriter object File file = new File(folder + "/" + fileName); fileWriter = new FileWriter(file); //initialize CSVPrinter object csvFilePrinter = new CSVPrinter(fileWriter, csvFileFormat); //Create CSV file header //csvFilePrinter.printRecord(headers); //Write a new student object list to the CSV file for (int i = 0; i < list.size(); i++) { //List studentDataRecord = new ArrayList(); csvFilePrinter.printRecord(list.get(i)); } System.out.println("CSV file was created successfully !!!"); System.out.println(folder + "/" + fileName); } catch (Exception e) { System.out.println("Error in CsvFileWriter !!!"); } finally { try { fileWriter.flush(); fileWriter.close(); csvFilePrinter.close(); } catch (IOException e) { System.out.println("Error while flushing/closing fileWriter/csvPrinter !!!"); e.printStackTrace(); } } }
From source file:org.kuali.test.runner.execution.TestExecutionContext.java
private void writePerformanceDataFile(File f) { FileWriter fileWriter = null; CSVPrinter csvFilePrinter = null; //Create the CSVFormat object with "\n" as a record delimiter CSVFormat csvFileFormat = CSVFormat.EXCEL.withRecordSeparator("\n"); try {/*from ww w. ja v a2 s .com*/ //initialize FileWriter object fileWriter = new FileWriter(f); //initialize CSVPrinter object csvFilePrinter = new CSVPrinter(fileWriter, csvFileFormat); //Create CSV file header csvFilePrinter.printRecord(Arrays.asList(PERFORMANCE_DATA_HEADER)); //Write a new student object list to the CSV file for (String[] rec : performanceData) { csvFilePrinter.printRecord(Arrays.asList(rec)); } } catch (Exception ex) { LOG.error(ex.toString(), ex); } finally { try { if (fileWriter != null) { fileWriter.flush(); fileWriter.close(); } } catch (Exception e) { } try { if (csvFilePrinter != null) { csvFilePrinter.close(); } } catch (Exception e) { } } }
From source file:org.languagetool.rules.spelling.suggestions.SuggestionChangesTest.java
/**
 * Runs the suggestion-changes experiments described by the JSON config file
 * (system property "config", default "SuggestionChangesTestConfig.json").
 *
 * Pipeline: a pool of SuggestionTestThread workers consumes SuggestionTestData
 * tasks from a bounded queue; a daemon logger thread drains their results,
 * writing a log file and a CSV dataset file. The main thread parses the input
 * CSV dataset(s) and feeds the task queue, then joins the workers.
 *
 * @throws IOException on config/log/dataset I/O failure
 * @throws InterruptedException if interrupted while queueing or joining
 */
public void testChanges() throws IOException, InterruptedException {
    File configFile = new File(System.getProperty("config", "SuggestionChangesTestConfig.json"));
    // Jackson mapper configured to tolerate comments in the JSON config.
    ObjectMapper mapper = new ObjectMapper(new JsonFactory().enable(JsonParser.Feature.ALLOW_COMMENTS));
    SuggestionChangesTestConfig config = mapper.readValue(configFile, SuggestionChangesTestConfig.class);
    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd_HH:mm:ss");
    String timestamp = dateFormat.format(new Date());
    // Timestamped output files: human-readable log plus machine-readable CSV.
    Path loggingFile = Paths.get(config.logDir, String.format("suggestionChangesExperiment_%s.log", timestamp));
    Path datasetFile = Paths.get(config.logDir, String.format("suggestionChangesExperiment_%s.csv", timestamp));
    BufferedWriter writer = Files.newBufferedWriter(loggingFile);
    CSVPrinter datasetWriter = new CSVPrinter(Files.newBufferedWriter(datasetFile),
            CSVFormat.DEFAULT.withEscape('\\'));
    List<String> datasetHeader = new ArrayList<>(
            Arrays.asList("sentence", "correction", "covered", "replacement", "dataset_id"));
    SuggestionsChanges.init(config, writer);
    writer.write("Evaluation configuration: \n");
    // Echo the raw config into the log for reproducibility.
    String configContent = String.join("\n", Files.readAllLines(configFile.toPath()));
    writer.write(configContent);
    writer.write("\nRunning experiments: \n");
    int experimentId = 0;
    // Three extra CSV columns per experiment: suggestions, match metadata,
    // per-suggestion metadata.
    for (SuggestionChangesExperiment experiment : SuggestionsChanges.getInstance().getExperiments()) {
        experimentId++;
        writer.write(String.format("#%d: %s%n", experimentId, experiment));
        datasetHeader.add(String.format("experiment_%d_suggestions", experimentId));
        datasetHeader.add(String.format("experiment_%d_metadata", experimentId));
        datasetHeader.add(String.format("experiment_%d_suggestions_metadata", experimentId));
    }
    writer.newLine();
    datasetWriter.printRecord(datasetHeader);
    // Bounded queue applies back-pressure on the producer (this thread).
    BlockingQueue<SuggestionTestData> tasks = new LinkedBlockingQueue<>(1000);
    ConcurrentLinkedQueue<Pair<SuggestionTestResultData, String>> results = new ConcurrentLinkedQueue<>();
    List<SuggestionTestThread> threads = new ArrayList<>();
    for (int i = 0; i < Runtime.getRuntime().availableProcessors(); i++) {
        SuggestionTestThread worker = new SuggestionTestThread(tasks, results);
        worker.start();
        threads.add(worker);
    }
    // Thread for writing results from worker threads into CSV.
    Thread logger = new Thread(() -> {
        try {
            long messages = 0;
            //noinspection InfiniteLoopStatement
            while (true) { // busy-polls; terminated externally via daemon status + interrupt
                Pair<SuggestionTestResultData, String> message = results.poll();
                if (message != null) {
                    writer.write(message.getRight());
                    SuggestionTestResultData result = message.getLeft();
                    // NOTE(review): result is dereferenced here, but the guard
                    // below still tests result != null — the check is dead (or
                    // this line can NPE first). Verify messages never carry a
                    // null left element.
                    int datasetId = 1 + config.datasets.indexOf(result.getInput().getDataset());
                    // Only rows where every experiment produced non-empty
                    // suggestions are written to the dataset CSV.
                    if (result != null && result.getSuggestions() != null && !result.getSuggestions().isEmpty()
                            && result.getSuggestions().stream().noneMatch(m -> m.getSuggestedReplacements() == null
                                    || m.getSuggestedReplacements().isEmpty())) {
                        List<Object> record = new ArrayList<>(Arrays.asList(result.getInput().getSentence(),
                                result.getInput().getCorrection(), result.getInput().getCovered(),
                                result.getInput().getReplacement(), datasetId));
                        for (RuleMatch match : result.getSuggestions()) {
                            List<String> suggestions = match.getSuggestedReplacements();
                            record.add(mapper.writeValueAsString(suggestions));
                            // features extracted by SuggestionsOrdererFeatureExtractor
                            record.add(mapper.writeValueAsString(match.getFeatures()));
                            List<SortedMap<String, Float>> suggestionsMetadata = new ArrayList<>();
                            for (SuggestedReplacement replacement : match.getSuggestedReplacementObjects()) {
                                suggestionsMetadata.add(replacement.getFeatures());
                            }
                            record.add(mapper.writeValueAsString(suggestionsMetadata));
                        }
                        datasetWriter.printRecord(record);
                    }
                    // Periodic flush + progress report every 1000 messages.
                    if (++messages % 1000 == 0) {
                        writer.flush();
                        System.out.printf("Evaluated %d corrections.%n", messages);
                    }
                }
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    });
    logger.setDaemon(true);
    logger.start();
    // Column layout straight from the database dump format.
    String[] header = { "id", "sentence", "correction", "language", "rule_id", "suggestion_pos",
            "accept_language", "country", "region", "created_at", "updated_at", "covered", "replacement",
            "text_session_id", "client" };
    int datasetId = 0;
    // Read data, send to worker threads via the queue.
    for (SuggestionChangesDataset dataset : config.datasets) {
        writer.write(String.format("Evaluating dataset #%d: %s.%n", ++datasetId, dataset));
        CSVFormat format = CSVFormat.DEFAULT;
        if (dataset.type.equals("dump")) {
            // DB dumps: explicit header, backslash escapes, "\N" means null.
            format = format.withEscape('\\').withNullString("\\N").withHeader(header);
        } else if (dataset.type.equals("artificial")) {
            format = format.withEscape('\\').withFirstRecordAsHeader();
        }
        try (CSVParser parser = new CSVParser(new FileReader(dataset.path), format)) {
            for (CSVRecord record : parser) {
                String lang = record.get("language");
                String rule = dataset.type.equals("dump") ? record.get("rule_id") : "";
                String covered = record.get("covered");
                String replacement = record.get("replacement");
                String sentence = record.get("sentence");
                String correction = record.isSet("correction") ? record.get("correction") : "";
                String acceptLanguage = dataset.type.equals("dump") ? record.get("accept_language") : "";
                // Skip blank sentences and rows not matching the configured
                // language / rule.
                if (sentence == null || sentence.trim().isEmpty()) {
                    continue;
                }
                if (!config.language.equals(lang)) {
                    continue; // TODO handle auto maybe?
                }
                if (dataset.type.equals("dump") && !config.rule.equals(rule)) {
                    continue;
                }
                // correction column missing in export from doccano; workaround
                if (dataset.enforceCorrect && !record.isSet("correction")) {
                    throw new IllegalStateException("enforceCorrect in dataset configuration enabled,"
                            + " but column 'correction' is not set for entry " + record);
                }
                if (dataset.type.equals("dump") && dataset.enforceAcceptLanguage) {
                    if (acceptLanguage != null) {
                        String[] entries = acceptLanguage.split(",", 2);
                        if (entries.length == 2) {
                            String userLanguage = entries[0]; // TODO: what to do with e.g. de-AT,de-DE;...
                            if (!config.language.equals(userLanguage)) {
                                continue;
                            }
                        }
                    }
                }
                tasks.put(new SuggestionTestData(lang, sentence, covered, replacement, correction, dataset));
            }
        }
    }
    // Wait for the workers to drain the queue; then give the logger thread up
    // to 10s to flush remaining results before interrupting it.
    for (Thread t : threads) {
        t.join();
    }
    logger.join(10000L);
    logger.interrupt();
    datasetWriter.close();
}
From source file:org.oneandone.gitter.out.CSVConsumer.java
/**
 * Writes per-project results as CSV to the printer's target {@code p}: a header
 * row ("Key" followed by sorted project names), then one row per distinct key
 * (union across all projects, natural order) with one formatted cell per project.
 *
 * @param perProjectResults project name -> (key -> value) result maps
 * @param keyFormatter renders a key for the first column
 * @param valueFormatter renders each project's value
 * @param nullValue supplies a substitute when a project lacks a value for a key
 * @throws IOException on header-write failure; row-write failures surface as
 *         {@code RuntimeException} wrapping the {@code IOException} (thrown from
 *         inside the lambda)
 */
public void consume(Map<String, Map<?, ?>> perProjectResults, Function<Object, String> keyFormatter,
        Function<Object, String> valueFormatter, Supplier<Object> nullValue) throws IOException {
    // Sorted project names become the CSV columns after the leading "Key" column.
    List<String> projects = perProjectResults.keySet().stream().sorted().collect(Collectors.toList());
    List<String> headers = new ArrayList<>(projects);
    headers.add(0, "Key");
    CSVPrinter printer = CSVFormat.EXCEL.withHeader(headers.toArray(new String[0])).print(p);
    // Union of every project's keys, emitted in natural order via TreeSet.
    Set<Object> keys = perProjectResults.values().stream().flatMap(m -> m.keySet().stream())
            .collect(Collectors.toSet());
    TreeSet<Object> sortedKeys = new TreeSet<>(keys);
    sortedKeys.stream().forEachOrdered(key -> {
        List<String> values = new ArrayList<>();
        values.add(keyFormatter.apply(key));
        projects.forEach(project -> {
            Object obj = perProjectResults.get(project).get(key);
            if (obj == null) {
                obj = nullValue.get();
            }
            // Fail fast (NPE) when even the null-substitute is null, as before.
            Objects.requireNonNull(obj, () -> "Object at key " + keyFormatter.apply(key) + " for project "
                    + project + " is null");
            // Fix: obj is guaranteed non-null past requireNonNull, so the old
            // `obj != null ? ... : "<null>"` ternary was dead code.
            values.add(valueFormatter.apply(obj));
        });
        try {
            printer.printRecord(values);
        } catch (IOException ex) {
            // Checked IOException cannot escape the lambda; rethrow unchecked.
            throw new RuntimeException(ex);
        }
    });
}
From source file:org.openmrs.projectbuendia.servlet.DataExportServlet.java
/**
 * Streams a CSV export of all patients' encounter observations to the response.
 *
 * Each CSV line carries fixed patient/encounter columns followed by three
 * columns per indexed concept. Consecutive encounters of the same patient that
 * fall within "interval" minutes of each other are merged into one line when
 * their observation values do not conflict; interval=0 disables merging.
 *
 * Request parameter: "interval" (minutes, >= 0; default DEFAULT_INTERVAL_MINS).
 */
@Override
protected void doGet(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    // Set the default merge mode.
    boolean merge = true;
    // Defines the interval in minutes that will be used to merge encounters.
    int interval = DEFAULT_INTERVAL_MINS;
    String intervalParameter = request.getParameter("interval");
    if (intervalParameter != null) {
        // NOTE(review): Integer.valueOf throws NumberFormatException on a
        // non-numeric parameter, escaping as a 500 — confirm intended.
        int newInterval = Integer.valueOf(intervalParameter);
        if (newInterval >= 0) {
            interval = newInterval;
            if (interval == 0) {
                merge = false; // interval 0 means "never merge"
            }
        } else {
            log.error("Interval value is less then 0. Default used.");
        }
    }
    CSVPrinter printer = new CSVPrinter(response.getWriter(), CSVFormat.EXCEL.withDelimiter(','));
    // Check for authenticated users.
    if (!XformsUtil.isAuthenticated(request, response, null))
        return;
    Date now = new Date();
    DateFormat format = new SimpleDateFormat("yyyyMMdd_HHmmss");
    String filename = String.format("buendiadata_%s.csv", format.format(now));
    String contentDispositionHeader = String.format("attachment; filename=%s;", filename);
    response.addHeader("Content-Disposition", contentDispositionHeader);
    PatientService patientService = Context.getPatientService();
    EncounterService encounterService = Context.getEncounterService();
    List<Patient> patients = new ArrayList<>(patientService.getAllPatients());
    Collections.sort(patients, PATIENT_COMPARATOR);
    // We may want to get the observations displayed in the chart/xform, in
    // which case there are a few sensible orders:
    // 1: UUID
    // 2: Order in chart
    // 3: Order in Xform
    // Order in Xform/chart is not good as stuff changes every time we change
    // the xform, so instead we use UUID order but take the concepts to display
    // from the Chart form.
    Set<Concept> questionConcepts = new HashSet<>();
    for (Form form : ChartResource.getCharts(Context.getFormService())) {
        TreeMap<Integer, TreeSet<FormField>> formStructure = FormUtil.getFormStructure(form);
        for (FormField groupField : formStructure.get(0)) {
            for (FormField fieldInGroup : formStructure.get(groupField.getId())) {
                questionConcepts.add(fieldInGroup.getField().getConcept());
            }
        }
    }
    FixedSortedConceptIndexer indexer = new FixedSortedConceptIndexer(questionConcepts);
    // Write English headers.
    writeHeaders(printer, indexer);
    Calendar calendar = Calendar.getInstance();
    // Loop through all the patients and get their encounters.
    for (Patient patient : patients) {
        // Define an array that will represent the line that will be inserted in the CSV.
        Object[] previousCSVLine = new Object[FIXED_HEADERS.length + indexer.size() * COLUMNS_PER_OBS];
        Date deadLine = new Date(0);
        ArrayList<Encounter> encounters = new ArrayList<>(encounterService.getEncountersByPatient(patient));
        Collections.sort(encounters, ENCOUNTER_COMPARATOR);
        // TODO: For now patients with no encounters are ignored. List them in the future.
        if (encounters.size() == 0)
            continue;
        // Loop through all the encounters for this patient to get the observations.
        for (Encounter encounter : encounters) {
            try {
                // Flag to whether we will use the merged version of the
                // encounter or the single version.
                boolean useMerged = merge;
                // Array that will be used to merge the previous encounter with the current one.
                Object[] mergedCSVLine = new Object[previousCSVLine.length];
                // Duplicate previous encounter into the (future to be) merged one.
                System.arraycopy(previousCSVLine, 0, mergedCSVLine, 0, previousCSVLine.length);
                // Define the array to be used to store the current encounter.
                Object[] currentCSVLine = new Object[FIXED_HEADERS.length + indexer.size() * COLUMNS_PER_OBS];
                // If the current encounter is more than "interval" minutes from
                // the previous one, print the previous line and reset it.
                Date encounterTime = encounter.getEncounterDatetime();
                if (encounterTime.after(deadLine)) {
                    // NOTE(review): on the first encounter of each patient,
                    // previousCSVLine is still all-null, so this prints an
                    // empty record — confirm whether the blank row is intended.
                    printer.printRecord(previousCSVLine);
                    previousCSVLine = new Object[FIXED_HEADERS.length + indexer.size() * COLUMNS_PER_OBS];
                    useMerged = false;
                }
                // Set the next deadline as the current encounter time plus "interval" minutes.
                calendar.setTime(encounterTime);
                calendar.add(Calendar.MINUTE, interval);
                deadLine = calendar.getTime();
                // Fill the fixed columns values.
                currentCSVLine[0] = patient.getUuid();
                currentCSVLine[1] = patient.getPatientIdentifier("MSF");
                if (patient.getBirthdate() != null) {
                    currentCSVLine[2] = Utils.YYYYMMDD_UTC_FORMAT.format(patient.getBirthdate());
                }
                currentCSVLine[3] = encounter.getUuid();
                currentCSVLine[4] = encounterTime.getTime();
                currentCSVLine[5] = Utils.toIso8601(encounterTime);
                currentCSVLine[6] = Utils.SPREADSHEET_FORMAT.format(encounterTime);
                // All the values of the fixed columns saved in the current
                // encounter line are also saved to the merged line.
                System.arraycopy(currentCSVLine, 0, mergedCSVLine, 0, 7);
                // Loop through all the observations for this encounter.
                for (Obs obs : encounter.getAllObs()) {
                    Integer index = indexer.getIndex(obs.getConcept());
                    if (index == null)
                        continue; // concept not part of the export
                    // For each observation there are three columns: if the
                    // value of the observation is a concept, then the three
                    // columns contain the English name, the OpenMRS ID, and
                    // the UUID of the concept; otherwise all three columns
                    // contain the formatted value.
                    int valueColumn = FIXED_HEADERS.length + index * COLUMNS_PER_OBS;
                    // Coded values are treated differently.
                    if (obs.getValueCoded() != null) {
                        Concept value = obs.getValueCoded();
                        currentCSVLine[valueColumn] = NAMER.getClientName(value);
                        currentCSVLine[valueColumn + 1] = value.getId();
                        currentCSVLine[valueColumn + 2] = value.getUuid();
                        if (useMerged) {
                            // If we are still merging the current encounter
                            // values into the previous one, get the previous
                            // value and see if it had something in it.
                            String previousValue = (String) mergedCSVLine[valueColumn];
                            if ((previousValue == null) || (previousValue.isEmpty())) {
                                // If the previous value was empty copy the current value into it.
                                mergedCSVLine[valueColumn] = currentCSVLine[valueColumn];
                                mergedCSVLine[valueColumn + 1] = currentCSVLine[valueColumn + 1];
                                mergedCSVLine[valueColumn + 2] = currentCSVLine[valueColumn + 2];
                            } else {
                                // The previous encounter has a value stored for
                                // this observation; we cannot merge anymore.
                                useMerged = false;
                            }
                        }
                    }
                    // All values except the coded ones are treated equally.
                    else {
                        // Render the current observation's value via the visitor.
                        String value = (String) VisitObsValue.visit(obs, stringVisitor);
                        // Check if we have values stored for this observation.
                        if ((value != null) && (!value.isEmpty())) {
                            // Save the value of the observation on the current encounter line.
                            currentCSVLine[valueColumn] = value;
                            currentCSVLine[valueColumn + 1] = value;
                            currentCSVLine[valueColumn + 2] = value;
                            if (useMerged) {
                                // Since we are still merging this encounter
                                // with the previous one, check whether the
                                // previous one had something stored here.
                                String previousValue = (String) mergedCSVLine[valueColumn];
                                if ((previousValue != null) && (!previousValue.isEmpty())) {
                                    // Yes, the previous encounter has a value
                                    // for this observation.
                                    if (obs.getValueText() != null) {
                                        // We only continue merging if the
                                        // observation is of type text, so we
                                        // concatenate it.
                                        // TODO: add timestamps to the merged
                                        // values that are of type text.
                                        previousValue += "\n" + value;
                                        value = previousValue;
                                    } else {
                                        // Any other type of value stops the merging.
                                        useMerged = false;
                                    }
                                }
                                mergedCSVLine[valueColumn] = value;
                                mergedCSVLine[valueColumn + 1] = value;
                                mergedCSVLine[valueColumn + 2] = value;
                            }
                        }
                    }
                }
                if (useMerged) {
                    // No overlapping values after visiting every observation:
                    // keep the merged line as the running "previous" line.
                    previousCSVLine = mergedCSVLine;
                } else {
                    // We had overlapped values, so print the previous line and
                    // make the current encounter the previous one — only if the
                    // previous line is not empty.
                    if (previousCSVLine[0] != null) {
                        printer.printRecord(previousCSVLine);
                    }
                    previousCSVLine = currentCSVLine;
                }
            } catch (Exception e) {
                // Best-effort export: log and move on to the next encounter.
                log.error("Error exporting encounter", e);
            }
        }
        // For the last encounter we print the remaining line.
        printer.printRecord(previousCSVLine);
    }
}
From source file:org.openo.client.cli.fw.output.print.OpenOCommandPrint.java
/** * Print output in csv format./*from w w w .j a v a 2 s .c om*/ * * @return string * @throws OpenOCommandOutputPrintingFailed * exception */ public String printCsv() throws OpenOCommandOutputPrintingFailed { StringWriter writer = new StringWriter(); CSVPrinter printer = null; try { CSVFormat formattor = CSVFormat.DEFAULT.withRecordSeparator(System.getProperty("line.separator")); printer = new CSVPrinter(writer, formattor); List<List<Object>> rows = this.formRows(false); for (int i = 0; i < this.findMaxRows(); i++) { printer.printRecord(rows.get(i)); } return writer.toString(); } catch (IOException e) { throw new OpenOCommandOutputPrintingFailed(e); } finally { try { if (printer != null) { printer.close(); } writer.close(); } catch (IOException e) { throw new OpenOCommandOutputPrintingFailed(e); // NOSONAR } } }