Example usage for org.apache.commons.csv CSVPrinter CSVPrinter

Introduction

On this page you can find usage examples for the org.apache.commons.csv CSVPrinter constructor CSVPrinter(Appendable, CSVFormat).

Prototype

public CSVPrinter(final Appendable out, final CSVFormat format) throws IOException 

Document

Creates a printer that will print values to the given stream following the CSVFormat.
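
Before the full usage examples below, here is a minimal, self-contained sketch of this constructor in action. It is illustrative only: the class name CsvPrinterExample and the column names are made up, and it writes to a StringWriter, but any Appendable (a Writer, a StringBuilder, an open file writer) works the same way.

import java.io.IOException;
import java.io.StringWriter;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;

public class CsvPrinterExample {
    public static void main(String[] args) throws IOException {
        StringWriter out = new StringWriter();
        // The printer formats every record according to the supplied CSVFormat.
        try (CSVPrinter printer = new CSVPrinter(out, CSVFormat.DEFAULT.withHeader("id", "name"))) {
            printer.printRecord(1, "Alice");
            printer.printRecord(2, "Bob");
        } // closing the printer flushes any buffered output
        System.out.print(out); // prints: id,name / 1,Alice / 2,Bob (records separated by CRLF)
    }
}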

Usage

From source file:org.languagetool.dev.RuleDetails.java

public static void main(String[] args) throws ParseException, IOException {
    Options options = new Options();
    options.addRequiredOption("l", "language", true, "Language for rules");
    options.addRequiredOption("f", "file", true, "Input file");
    options.addRequiredOption("o", "output", true, "Output file");
    options.addRequiredOption("c", "column", true, "Column in input file");
    options.addOption("n", "ngramPath", true, "Ngram path to activate ngram rules");

    CommandLine cmd = new DefaultParser().parse(options, args);

    String langCode = cmd.getOptionValue('l');
    String inputFile = cmd.getOptionValue('f');
    String outputFile = cmd.getOptionValue('o');
    String column = cmd.getOptionValue('c');
    String ngramPath = cmd.hasOption('n') ? cmd.getOptionValue('n') : null;

    RuleDetails details = new RuleDetails(Languages.getLanguageForShortCode(langCode), ngramPath);

    CSVFormat format = CSVFormat.RFC4180.withFirstRecordAsHeader();

    try (CSVParser parser = CSVParser.parse(new File(inputFile), Charset.defaultCharset(), format)) {
        try (CSVPrinter printer = new CSVPrinter(new BufferedWriter(new FileWriter(outputFile)), format)) {
            Map<String, Integer> oldHeader = parser.getHeaderMap();
            List<String> newHeader = new ArrayList<>(Collections.nCopies(oldHeader.size(), null));

            for (Map.Entry<String, Integer> entry : oldHeader.entrySet()) {
                newHeader.set(entry.getValue(), entry.getKey());
            }
            newHeader.add("description");
            newHeader.add("category");
            printer.printRecord(newHeader);

            if (!oldHeader.containsKey(column)) {
                throw new RuntimeException("Input file does not contain specified column " + column);
            }

            List<CSVRecord> records = parser.getRecords();

            records.stream().sequential().map(record -> {
                String ruleId = record.get(column);
                Rule rule = details.getRuleById(ruleId);
                List<String> transformedValues = new ArrayList<>();
                record.iterator().forEachRemaining(transformedValues::add);
                if (rule == null) {
                    transformedValues.add("");
                    transformedValues.add("");
                } else {
                    transformedValues.add(rule.getDescription());
                    transformedValues.add(rule.getCategory().getId().toString());
                }
                return transformedValues;
            }).forEachOrdered(values -> {
                try {
                    printer.printRecord(values);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            });
        }
    }

}

From source file:org.languagetool.rules.spelling.suggestions.SuggestionChangesTest.java

public void testChanges() throws IOException, InterruptedException {

    File configFile = new File(System.getProperty("config", "SuggestionChangesTestConfig.json"));
    ObjectMapper mapper = new ObjectMapper(new JsonFactory().enable(JsonParser.Feature.ALLOW_COMMENTS));
    SuggestionChangesTestConfig config = mapper.readValue(configFile, SuggestionChangesTestConfig.class);

    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd_HH:mm:ss");
    String timestamp = dateFormat.format(new Date());
    Path loggingFile = Paths.get(config.logDir, String.format("suggestionChangesExperiment_%s.log", timestamp));
    Path datasetFile = Paths.get(config.logDir, String.format("suggestionChangesExperiment_%s.csv", timestamp));

    BufferedWriter writer = Files.newBufferedWriter(loggingFile);
    CSVPrinter datasetWriter = new CSVPrinter(Files.newBufferedWriter(datasetFile),
            CSVFormat.DEFAULT.withEscape('\\'));
    List<String> datasetHeader = new ArrayList<>(
            Arrays.asList("sentence", "correction", "covered", "replacement", "dataset_id"));

    SuggestionsChanges.init(config, writer);
    writer.write("Evaluation configuration: \n");
    String configContent = String.join("\n", Files.readAllLines(configFile.toPath()));
    writer.write(configContent);
    writer.write("\nRunning experiments: \n");
    int experimentId = 0;
    for (SuggestionChangesExperiment experiment : SuggestionsChanges.getInstance().getExperiments()) {
        experimentId++;
        writer.write(String.format("#%d: %s%n", experimentId, experiment));
        datasetHeader.add(String.format("experiment_%d_suggestions", experimentId));
        datasetHeader.add(String.format("experiment_%d_metadata", experimentId));
        datasetHeader.add(String.format("experiment_%d_suggestions_metadata", experimentId));
    }
    writer.newLine();
    datasetWriter.printRecord(datasetHeader);

    BlockingQueue<SuggestionTestData> tasks = new LinkedBlockingQueue<>(1000);
    ConcurrentLinkedQueue<Pair<SuggestionTestResultData, String>> results = new ConcurrentLinkedQueue<>();
    List<SuggestionTestThread> threads = new ArrayList<>();
    for (int i = 0; i < Runtime.getRuntime().availableProcessors(); i++) {
        SuggestionTestThread worker = new SuggestionTestThread(tasks, results);
        worker.start();
        threads.add(worker);
    }

    // Thread for writing results from worker threads into CSV
    Thread logger = new Thread(() -> {
        try {
            long messages = 0;
            //noinspection InfiniteLoopStatement
            while (true) {
                Pair<SuggestionTestResultData, String> message = results.poll();
                if (message != null) {
                    writer.write(message.getRight());

                    SuggestionTestResultData result = message.getLeft();
                    if (result != null && result.getSuggestions() != null && !result.getSuggestions().isEmpty()
                            && result.getSuggestions().stream()
                                    .noneMatch(m -> m.getSuggestedReplacements() == null
                                            || m.getSuggestedReplacements().isEmpty())) {
                        // Only resolve the dataset id after the null checks above have passed.
                        int datasetId = 1 + config.datasets.indexOf(result.getInput().getDataset());

                        List<Object> record = new ArrayList<>(Arrays.asList(result.getInput().getSentence(),
                                result.getInput().getCorrection(), result.getInput().getCovered(),
                                result.getInput().getReplacement(), datasetId));
                        for (RuleMatch match : result.getSuggestions()) {
                            List<String> suggestions = match.getSuggestedReplacements();
                            record.add(mapper.writeValueAsString(suggestions));
                            // features extracted by SuggestionsOrdererFeatureExtractor
                            record.add(mapper.writeValueAsString(match.getFeatures()));
                            List<SortedMap<String, Float>> suggestionsMetadata = new ArrayList<>();
                            for (SuggestedReplacement replacement : match.getSuggestedReplacementObjects()) {
                                suggestionsMetadata.add(replacement.getFeatures());
                            }
                            record.add(mapper.writeValueAsString(suggestionsMetadata));
                        }
                        datasetWriter.printRecord(record);
                    }

                    if (++messages % 1000 == 0) {
                        writer.flush();
                        System.out.printf("Evaluated %d corrections.%n", messages);
                    }
                }
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    });
    logger.setDaemon(true);
    logger.start();

    // format straight from database dump
    String[] header = { "id", "sentence", "correction", "language", "rule_id", "suggestion_pos",
            "accept_language", "country", "region", "created_at", "updated_at", "covered", "replacement",
            "text_session_id", "client" };

    int datasetId = 0;
    // read data, send to worker threads via queue
    for (SuggestionChangesDataset dataset : config.datasets) {

        writer.write(String.format("Evaluating dataset #%d: %s.%n", ++datasetId, dataset));

        CSVFormat format = CSVFormat.DEFAULT;
        if (dataset.type.equals("dump")) {
            format = format.withEscape('\\').withNullString("\\N").withHeader(header);
        } else if (dataset.type.equals("artificial")) {
            format = format.withEscape('\\').withFirstRecordAsHeader();
        }
        try (CSVParser parser = new CSVParser(new FileReader(dataset.path), format)) {
            for (CSVRecord record : parser) {

                String lang = record.get("language");
                String rule = dataset.type.equals("dump") ? record.get("rule_id") : "";
                String covered = record.get("covered");
                String replacement = record.get("replacement");
                String sentence = record.get("sentence");
                String correction = record.isSet("correction") ? record.get("correction") : "";
                String acceptLanguage = dataset.type.equals("dump") ? record.get("accept_language") : "";

                if (sentence == null || sentence.trim().isEmpty()) {
                    continue;
                }

                if (!config.language.equals(lang)) {
                    continue; // TODO handle auto maybe?
                }
                if (dataset.type.equals("dump") && !config.rule.equals(rule)) {
                    continue;
                }

                // correction column missing in export from doccano; workaround
                if (dataset.enforceCorrect && !record.isSet("correction")) {
                    throw new IllegalStateException("enforceCorrect in dataset configuration enabled,"
                            + " but column 'correction' is not set for entry " + record);
                }

                if (dataset.type.equals("dump") && dataset.enforceAcceptLanguage) {
                    if (acceptLanguage != null) {
                        String[] entries = acceptLanguage.split(",", 2);
                        if (entries.length == 2) {
                            String userLanguage = entries[0]; // TODO: what to do with e.g. de-AT,de-DE;...
                            if (!config.language.equals(userLanguage)) {
                                continue;
                            }
                        }
                    }
                }

                tasks.put(new SuggestionTestData(lang, sentence, covered, replacement, correction, dataset));
            }
        }

    }

    for (Thread t : threads) {
        t.join();
    }
    logger.join(10000L);
    logger.interrupt();
    datasetWriter.close();
}

From source file:org.logstash.dependencies.ReportGenerator.java

public boolean generateReport(InputStream licenseMappingStream, InputStream acceptableLicensesStream,
        InputStream rubyDependenciesStream, InputStream[] javaDependenciesStreams, Writer output)
        throws IOException {

    SortedSet<Dependency> dependencies = new TreeSet<>();
    Dependency.addDependenciesFromRubyReport(rubyDependenciesStream, dependencies);

    for (InputStream stream : javaDependenciesStreams) {
        Dependency.addDependenciesFromJavaReport(stream, dependencies);
    }

    Map<String, LicenseUrlPair> licenseMapping = new HashMap<>();
    readLicenseMapping(licenseMappingStream, licenseMapping);
    List<String> acceptableLicenses = new ArrayList<>();
    readAcceptableLicenses(acceptableLicensesStream, acceptableLicenses);
    for (Dependency dependency : dependencies) {
        String nameAndVersion = dependency.name + ":" + dependency.version;
        if (licenseMapping.containsKey(nameAndVersion)) {
            LicenseUrlPair pair = licenseMapping.get(nameAndVersion);

            if (pair.url != null && !pair.url.equals("")
                    && (acceptableLicenses.stream().anyMatch(pair.license::equalsIgnoreCase))) {
                dependency.spdxLicense = pair.license;
                dependency.url = pair.url;
            } else {
                // unacceptable license or missing URL
                UNKNOWN_LICENSES.add(dependency);
            }
        } else {
            dependency.spdxLicense = UNKNOWN_LICENSE;
            UNKNOWN_LICENSES.add(dependency);
        }
    }

    try (CSVPrinter csvPrinter = new CSVPrinter(output, CSVFormat.DEFAULT.withHeader(CSV_HEADERS))) {
        for (Dependency dependency : dependencies) {
            csvPrinter.printRecord(dependency.toCsvReportRecord());
        }
        csvPrinter.flush();
    }

    String msg = "Generated report with %d dependencies (%d unknown or unacceptable licenses).";
    System.out.println(String.format(msg + "\n", dependencies.size(), UNKNOWN_LICENSES.size()));

    if (UNKNOWN_LICENSES.size() > 0) {
        String errMsg = "Add complying licenses (using the SPDX license ID from https://spdx.org/licenses) "
                + "with URLs for the libraries listed below to tools/dependencies-report/src/main/resources/"
                + "licenseMapping.csv:";
        System.out.println(errMsg);
        for (Dependency dependency : UNKNOWN_LICENSES) {
            System.out.println(String.format("\"%s:%s\"", dependency.name, dependency.version));
        }
    }

    return UNKNOWN_LICENSES.size() == 0;
}

From source file:org.nuxeo.theme.presets.PaletteParser.java

public static String renderPaletteAsCsv(byte[] bytes, String fileName) {
    StringWriter sw = new StringWriter();
    try (CSVPrinter writer = new CSVPrinter(sw, CSVFormat.DEFAULT.withDelimiter('\t'))) {
        for (Map.Entry<String, String> entry : parse(bytes, fileName).entrySet()) {
            writer.printRecord(entry.getKey(), entry.getValue());
        }
    } catch (PaletteIdentifyException e) {
        log.warn("Could not identify palette type: " + fileName);
    } catch (PaletteParseException e) {
        log.warn("Could not parse palette: " + fileName);
    } catch (IOException e) {
        log.error(e.getMessage(), e);
    }
    return sw.toString();
}

From source file:org.nuxeo.theme.Utils.java

public static String listToCsv(List<String> list) {
    StringWriter sw = new StringWriter();
    try (CSVPrinter writer = new CSVPrinter(sw,
            CSVFormat.DEFAULT.withDelimiter(',').withQuoteMode(QuoteMode.ALL))) {
        writer.printRecord(list);
    } catch (IOException e) {
        log.error(e.getMessage(), e);
    }
    return sw.toString();
}

From source file:org.openestate.io.core.CsvFormat.java

/**
 * Creates a {@link CsvPrinter} that writes CSV data into an
 * {@link Appendable} object.
 * <p>
 * Use this method to override the print methods through inheritance.
 *
 * @param output
 * where CSV is written to
 *
 * @return
 * created printer
 *
 * @throws IOException
 * if CSV is not writable
 */
protected Printer print(Appendable output) throws IOException {
    return newPrinter(new CSVPrinter(output, this.getFormat()));
}

From source file:org.openmrs.projectbuendia.servlet.DataExportServlet.java

@Override
protected void doGet(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {

    // Set the default merge mode
    boolean merge = true;

    // Defines the interval in minutes that will be used to merge encounters.
    int interval = DEFAULT_INTERVAL_MINS;
    String intervalParameter = request.getParameter("interval");
    if (intervalParameter != null) {
        int newInterval = Integer.valueOf(intervalParameter);
        if (newInterval >= 0) {
            interval = newInterval;
            if (interval == 0) {
                merge = false;
            }
        } else {
            log.error("Interval value is less then 0. Default used.");
        }
    }

    CSVPrinter printer = new CSVPrinter(response.getWriter(), CSVFormat.EXCEL.withDelimiter(','));

    //check for authenticated users
    if (!XformsUtil.isAuthenticated(request, response, null))
        return;

    Date now = new Date();
    DateFormat format = new SimpleDateFormat("yyyyMMdd_HHmmss");
    String filename = String.format("buendiadata_%s.csv", format.format(now));
    String contentDispositionHeader = String.format("attachment; filename=%s;", filename);
    response.addHeader("Content-Disposition", contentDispositionHeader);

    PatientService patientService = Context.getPatientService();
    EncounterService encounterService = Context.getEncounterService();

    List<Patient> patients = new ArrayList<>(patientService.getAllPatients());
    Collections.sort(patients, PATIENT_COMPARATOR);

    // We may want to get the observations displayed in the chart/xform, in which case there
    // are a few sensible orders:
    // 1: UUID
    // 2: Order in chart
    // 3: Order in Xform
    // Order in the xform/chart is not good, as it changes every time we change the xform.
    // So instead we use UUID order, but use the chart form to choose the concepts to display.
    Set<Concept> questionConcepts = new HashSet<>();
    for (Form form : ChartResource.getCharts(Context.getFormService())) {
        TreeMap<Integer, TreeSet<FormField>> formStructure = FormUtil.getFormStructure(form);
        for (FormField groupField : formStructure.get(0)) {
            for (FormField fieldInGroup : formStructure.get(groupField.getId())) {
                questionConcepts.add(fieldInGroup.getField().getConcept());
            }
        }
    }
    FixedSortedConceptIndexer indexer = new FixedSortedConceptIndexer(questionConcepts);

    // Write English headers.
    writeHeaders(printer, indexer);

    Calendar calendar = Calendar.getInstance();

    // Loop through all the patients and get their encounters.
    for (Patient patient : patients) {

        // Define an array that will represent the line that will be inserted in the CSV.
        Object[] previousCSVLine = new Object[FIXED_HEADERS.length + indexer.size() * COLUMNS_PER_OBS];

        Date deadLine = new Date(0);

        ArrayList<Encounter> encounters = new ArrayList<>(encounterService.getEncountersByPatient(patient));
        Collections.sort(encounters, ENCOUNTER_COMPARATOR);

        // TODO: For now patients with no encounters are ignored. List them in the future.
        if (encounters.isEmpty())
            continue;

        // Loop through all the encounters for this patient to get the observations.
        for (Encounter encounter : encounters) {
            try {
                // Flag for whether we will use the merged version of the encounter
                // or the single version.
                boolean useMerged = merge;

                // Array that will be used to merge in previous encounter with the current one.
                Object[] mergedCSVLine = new Object[previousCSVLine.length];

                // Duplicate previous encounter into the (future to be) merged one.
                System.arraycopy(previousCSVLine, 0, mergedCSVLine, 0, previousCSVLine.length);

                // Define the array to be used to store the current encounter.
                Object[] currentCSVLine = new Object[FIXED_HEADERS.length + indexer.size() * COLUMNS_PER_OBS];

                // If the current encounter is more than "interval" minutes after the previous
                // one, print the previous line and reset it.
                Date encounterTime = encounter.getEncounterDatetime();
                if (encounterTime.after(deadLine)) {
                    if (previousCSVLine[0] != null) { // skip the initial, still-empty line
                        printer.printRecord(previousCSVLine);
                    }
                    previousCSVLine = new Object[FIXED_HEADERS.length + indexer.size() * COLUMNS_PER_OBS];
                    useMerged = false;
                }
                // Set the next deadline as the current encounter time plus "interval" minutes.
                calendar.setTime(encounterTime);
                calendar.add(Calendar.MINUTE, interval);
                deadLine = calendar.getTime();

                // Fill the fixed columns values.
                currentCSVLine[0] = patient.getUuid();
                currentCSVLine[1] = patient.getPatientIdentifier("MSF");
                if (patient.getBirthdate() != null) {
                    currentCSVLine[2] = Utils.YYYYMMDD_UTC_FORMAT.format(patient.getBirthdate());
                }
                currentCSVLine[3] = encounter.getUuid();
                currentCSVLine[4] = encounterTime.getTime();
                currentCSVLine[5] = Utils.toIso8601(encounterTime);
                currentCSVLine[6] = Utils.SPREADSHEET_FORMAT.format(encounterTime);

                // All the values of the fixed columns saved in the current encounter line
                // will also be saved to the merged line.
                System.arraycopy(currentCSVLine, 0, mergedCSVLine, 0, 7);

                // Loop through all the observations for this encounter
                for (Obs obs : encounter.getAllObs()) {
                    Integer index = indexer.getIndex(obs.getConcept());
                    if (index == null)
                        continue;
                    // For each observation there are three columns: if the value of the
                    // observation is a concept, then the three columns contain the English
                    // name, the OpenMRS ID, and the UUID of the concept; otherwise all
                    // three columns contain the formatted value.
                    int valueColumn = FIXED_HEADERS.length + index * COLUMNS_PER_OBS;

                    // Coded values are treated differently
                    if (obs.getValueCoded() != null) {
                        Concept value = obs.getValueCoded();
                        currentCSVLine[valueColumn] = NAMER.getClientName(value);
                        currentCSVLine[valueColumn + 1] = value.getId();
                        currentCSVLine[valueColumn + 2] = value.getUuid();
                        if (useMerged) {
                            // If we are still merging the current encounter values into
                            // the previous one, get the previous value and see if it had
                            // something in it.
                            String previousValue = (String) mergedCSVLine[valueColumn];
                            if ((previousValue == null) || (previousValue.isEmpty())) {
                                // If the previous value was empty copy the current value into it.
                                mergedCSVLine[valueColumn] = currentCSVLine[valueColumn];
                                mergedCSVLine[valueColumn + 1] = currentCSVLine[valueColumn + 1];
                                mergedCSVLine[valueColumn + 2] = currentCSVLine[valueColumn + 2];
                            } else {
                                // If the previous encounter has values stored for this
                                // observation, we cannot merge them anymore.
                                useMerged = false;
                            }
                        }
                    }
                    // All values except the coded ones will be treated equally.
                    else {
                        // Get the value of the current observation using the visitor.
                        String value = (String) VisitObsValue.visit(obs, stringVisitor);
                        // Check if we have values stored for this observation
                        if ((value != null) && (!value.isEmpty())) {
                            // Save the value of the observation on the current encounter line.
                            currentCSVLine[valueColumn] = value;
                            currentCSVLine[valueColumn + 1] = value;
                            currentCSVLine[valueColumn + 2] = value;
                            if (useMerged) {
                                // Since we are still merging this encounter with the previous
                                // one, let's get the previous value to see if it had something
                                // stored in it.
                                String previousValue = (String) mergedCSVLine[valueColumn];
                                if ((previousValue != null) && (!previousValue.isEmpty())) {
                                    // Yes, we had information stored for this observation on
                                    // the previous encounter
                                    if (obs.getValueText() != null) {
                                        // We only continue merging if the observation is of
                                        // type text, so we concatenate it.
                                        // TODO: add timestamps to the merged values that are of type text
                                        previousValue += "\n" + value;
                                        value = previousValue;
                                    } else {
                                        // For any other type of value we stop merging.
                                        useMerged = false;
                                    }
                                }
                                mergedCSVLine[valueColumn] = value;
                                mergedCSVLine[valueColumn + 1] = value;
                                mergedCSVLine[valueColumn + 2] = value;
                            }
                        }
                    }
                }
                if (useMerged) {
                    // If, after looping through all the observations, we didn't have any
                    // overlapping values, we keep the merged line.
                    previousCSVLine = mergedCSVLine;
                } else {
                    // We had overlapping values, so print the previous line (only if it is
                    // not empty) and make the current encounter the previous one.
                    if (previousCSVLine[0] != null) {
                        printer.printRecord(previousCSVLine);
                    }
                    previousCSVLine = currentCSVLine;
                }
            } catch (Exception e) {
                log.error("Error exporting encounter", e);
            }
        }
        // For the last encounter we print the remaining line.
        printer.printRecord(previousCSVLine);
    }
}

From source file:org.openo.client.cli.fw.output.print.OpenOCommandPrint.java

/**
 * Print output in CSV format.
 *
 * @return string
 * @throws OpenOCommandOutputPrintingFailed
 *             exception
 */
public String printCsv() throws OpenOCommandOutputPrintingFailed {
    StringWriter writer = new StringWriter();
    CSVPrinter printer = null;
    try {
        CSVFormat formatter = CSVFormat.DEFAULT.withRecordSeparator(System.getProperty("line.separator"));
        printer = new CSVPrinter(writer, formatter);

        List<List<Object>> rows = this.formRows(false);

        for (int i = 0; i < this.findMaxRows(); i++) {
            printer.printRecord(rows.get(i));
        }

        return writer.toString();
    } catch (IOException e) {
        throw new OpenOCommandOutputPrintingFailed(e);
    } finally {
        try {
            if (printer != null) {
                printer.close();
            }
            writer.close();
        } catch (IOException e) {
            throw new OpenOCommandOutputPrintingFailed(e); // NOSONAR
        }
    }
}

From source file:org.ow2.proactive_grid_cloud_portal.scheduler.server.ExportUsageServlet.java

private String csvExport(String sessionId, String user, Date startDate, Date endDate)
        throws ServiceException, RestServerException, IOException {
    Object[] header = { "Owner", "Project", "Job Id", "Job Name", "Job Duration", "Task Id", "Task Name",
            "Task Node Number", "Task Start Time", "Task Finished Time", "Task Duration" };
    List<JobUsage> jobUsages = ((SchedulerServiceImpl) Service.get()).getUsage(sessionId, user, startDate,
            endDate);
    StringBuilder sb = new StringBuilder();
    CSVFormat csvFileFormat = CSVFormat.DEFAULT.withRecordSeparator(LINE_SEPARATOR);
    CSVPrinter csvFilePrinter = new CSVPrinter(sb, csvFileFormat);
    csvFilePrinter.printRecord(header);
    for (JobUsage jobUsage : jobUsages) {
        for (TaskUsage taskUsage : jobUsage.getTaskUsages()) {
            csvFilePrinter.printRecord(jobUsage.getOwner(), jobUsage.getProject(), jobUsage.getJobId(),
                    jobUsage.getJobName(), jobUsage.getJobDuration(), taskUsage.getTaskId(),
                    taskUsage.getTaskName(), taskUsage.getTaskNodeNumber(), taskUsage.getTaskStartTime(),
                    taskUsage.getTaskFinishedTime(), taskUsage.getTaskExecutionDuration());
        }
    }
    csvFilePrinter.close();
    return sb.toString();
}

From source file:org.qamatic.mintleaf.tools.CsvExportFlavour.java

@Override
public void export(ResultSet resultSet) throws MintleafException {
    // try-with-resources closes the printer even if printing fails.
    try (CSVPrinter printer = new CSVPrinter(writer, CSVFormat.EXCEL.withHeader(resultSet))) {
        printer.printRecords(resultSet);
    } catch (SQLException | IOException e) {
        throw new MintleafException(e);
    }
}