Example usage for org.apache.commons.csv CSVPrinter CSVPrinter

Introduction

On this page you can find example usage for the org.apache.commons.csv CSVPrinter constructor CSVPrinter(Appendable, CSVFormat).

Prototype

public CSVPrinter(final Appendable out, final CSVFormat format) throws IOException 

Document

Creates a printer that will print values to the given stream following the CSVFormat.
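
Below is a minimal, self-contained sketch of the constructor in use (the file name example.csv and the record values are illustrative only):

import java.io.FileWriter;
import java.io.IOException;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;

public class CSVPrinterExample {
    public static void main(String[] args) throws IOException {
        // try-with-resources closes the printer, which in turn flushes
        // and closes the underlying FileWriter
        try (CSVPrinter printer = new CSVPrinter(new FileWriter("example.csv"), CSVFormat.DEFAULT)) {
            printer.printRecord("NAME", "AGE"); // one record per call
            printer.printRecord("Alice", 30);
        }
    }
}

The constructor itself declares IOException because, for formats configured with a header (for example CSVFormat.DEFAULT.withHeader(...)), it writes the header record immediately.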

Usage

From source file:br.edimarmanica.weir2.rule.filter.RulesFilter.java

private void persiste() {
    File dirOutput = new File(Paths.PATH_WEIR_V2 + "/" + site.getPath());
    dirOutput.mkdirs();

    File file = new File(dirOutput.getAbsolutePath() + "/filter.csv");
    String[] HEADER = { "RULE" };
    CSVFormat format = CSVFormat.EXCEL.withHeader(HEADER);

    try (Writer out = new FileWriter(file)) {
        try (CSVPrinter csvFilePrinter = new CSVPrinter(out, format)) {
            for (String rule : remainingRules) {
                List<String> dataRecord = new ArrayList<>();
                dataRecord.add(rule);
                csvFilePrinter.printRecord(dataRecord);
            }
        }
    } catch (IOException ex) {
        Logger.getLogger(RulesFilter.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file:com.ge.research.semtk.load.DataCleaner.java

/**
 * Constructor that takes the cleaning spec as JSON
 * @param dataset the dataset to clean
 * @param cleanedFilePathStr the file to write cleaned data to
 * @param cleanSpecJson the cleaning spec in JSON format, e.g. {"LOWERCASE":["child_names","has_pool"],"SPLIT":{"pet_names":"##","child_names":"~"},"REMOVE_NULLS":"true"}
 */
public DataCleaner(Dataset dataset, String cleanedFilePathStr, JSONObject cleanSpecJson) throws Exception {
    this.dataset = dataset;
    this.headers = dataset.getColumnNamesinOrder();
    this.writer = new BufferedWriter(new FileWriter(cleanedFilePathStr));
    this.csvPrinter = new CSVPrinter(this.writer, CSVFormat.DEFAULT);

    // add the specs for cleaning
    parseCleanSpecJson(cleanSpecJson);

    // write the headers
    this.csvPrinter.printRecord(this.headers);
}

From source file:br.edimarmanica.trinity.intrasitemapping.auto.MergeOffsets.java

private void print(int indexOffset, List<List<String>> lines) {
    for (int indexRule = 1; indexRule < lines.get(0).size(); indexRule++) {
        int map = mc.getSpecificMap(indexOffset, indexRule);
        if (map == -1) { // no mapping
            continue;
        }

        File dir = new File(Paths.PATH_TRINITY + site.getPath() + "/extracted_values");
        dir.mkdirs();
        File file = new File(dir.getAbsolutePath() + "/rule_" + map + ".csv");

        CSVFormat format;
        if (append) {
            format = CSVFormat.EXCEL;
        } else {
            String[] header = { "URL", "EXTRACTED VALUE" };
            format = CSVFormat.EXCEL.withHeader(header);
        }

        try (Writer out = new FileWriter(file, append)) {
            try (CSVPrinter csvFilePrinter = new CSVPrinter(out, format)) {

                for (int indexRegistro = 0; indexRegistro < lines.size(); indexRegistro++) {
                    String page = lines.get(indexRegistro).get(0);
                    String value = lines.get(indexRegistro).get(indexRule);

                    if (value.isEmpty()) { // no value
                        continue;
                    }

                    List<String> dataRecord = new ArrayList<>();
                    dataRecord.add(page);
                    dataRecord.add(value);
                    csvFilePrinter.printRecord(dataRecord);
                }
            }
        } catch (IOException ex) {
            Logger.getLogger(MergeOffsets.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
    append = true;
}

From source file:com.itemanalysis.psychometrics.irt.estimation.ItemResponseSimulator.java

/**
 * Generates a comma separated file (CSV file) of item responses.
 *
 * @param outputFile complete path and file name of output file
 * @param includeID include an examinee ID number in the first column if true. Omits the ID if false.
 * @param includeHeader if true will include variable names in first row of CSV file.
 * @throws IOException
 */
public void generateData(String outputFile, boolean includeID, boolean includeHeader) throws IOException {
    byte[][] x = generateData();
    int baseID = nPeople * 10 + 1;

    Writer writer = null;
    CSVPrinter printer = null;
    File file = new File(outputFile);

    try {
        writer = new OutputStreamWriter(new FileOutputStream(file));
        printer = new CSVPrinter(writer, CSVFormat.DEFAULT.withCommentMarker('#'));

        if (includeHeader) {
            if (includeID)
                printer.print("ID");
            for (int j = 0; j < nItems; j++) {
                printer.print("V" + (j + 1));
            }
            printer.println();
        }

        for (int i = 0; i < nPeople; i++) {
            if (includeID)
                printer.print(baseID);
            for (int j = 0; j < nItems; j++) {
                printer.print(x[i][j]);
            }
            printer.println();
            baseID++;
        }
    } finally {
        // close the printer first; it flushes and closes the underlying writer
        if (printer != null) {
            printer.close();
        } else if (writer != null) {
            writer.close();
        }
    }

}

From source file:com.itemanalysis.jmetrik.file.JmetrikOutputWriter.java

private void saveCsvFile(File outputFile, Outputter outputter) throws IOException {

    ArrayList<VariableAttributes> variables = outputter.getColumnAttributes();
    LinkedHashMap<VariableName, VariableAttributes> variableAttributeMap = new LinkedHashMap<VariableName, VariableAttributes>();
    String[] header = new String[variables.size()];
    int hIndex = 0;
    for (VariableAttributes v : variables) {
        variableAttributeMap.put(v.getName(), v);
        header[hIndex] = v.getName().toString();
        hIndex++;
    }

    Writer writer = null;
    CSVPrinter printer = null;

    try {
        //Ensure that file is a csv file.
        String fname = FilenameUtils.removeExtension(outputFile.getAbsolutePath());
        outputFile = new File(fname + ".csv");

        writer = new OutputStreamWriter(new FileOutputStream(outputFile));
        printer = new CSVPrinter(writer, CSVFormat.DEFAULT.withCommentMarker('#').withHeader(header));

        Iterator<Object[][]> iter = outputter.iterator();
        Object[][] outputChunk = null;

        while (iter.hasNext()) {
            outputChunk = iter.next();

            for (int i = 0; i < outputChunk.length; i++) {
                printer.printRecord(outputChunk[i]);
            }

        }

    } finally {
        // printer may be null if an earlier step failed; closing it also closes the writer
        if (printer != null) {
            printer.close();
        } else if (writer != null) {
            writer.close();
        }
    }

}

From source file:javalibs.CSVDataNormalizer.java

public void normalize() {
    BufferedWriter bw = null;
    CSVPrinter printer = null;

    try {
        bw = Files.newBufferedWriter(Paths.get(this.savePath));
        printer = new CSVPrinter(bw, CSVFormat.DEFAULT.withHeader(this.headersInOrder));
    } catch (IOException e) {
        log_.die(e);
    }

    for (CSVRecord rec : this.allRecords) {
        List<String> writerCells = new ArrayList<>();
        for (int i = 0; i < this.numCols; ++i) {
            String colName = this.colNumToName.get(i);
            if (columnsToNormalize.contains(colName)) {
                double curVal = NumUtils.getDoubleFromStr(rec.get(colName));
                Pair<Double, Double> maxMin = this.colsToMaxMinPairs.get(colName);
                double normal = NumUtils.normalizeBetweenZeroOne(maxMin.right(), maxMin.left(), curVal);
                if (normal > 1.0) {
                    log_.warn("Normalized value greater than 1.0: " + normal + " from curVal: " + curVal
                            + " setting normal to 1.");
                    normal = 1.0;
                } else if (normal < 0.0) {
                    log_.warn("Normalized value less than 0.0: " + normal + " from curVal : " + curVal
                            + " setting normal to 0.");
                    normal = 0.0;
                }

                writerCells.add(Double.toString(normal));
            } else
                writerCells.add(rec.get(i));
        }
        try {
            printer.printRecord(writerCells.toArray());
        } catch (IOException e) {
            log_.die(e);
        }
    }
    try {
        printer.close(); // close() flushes before closing the underlying writer
    } catch (IOException e) {
        log_.die(e);
    }
}

From source file:de.tudarmstadt.ukp.dkpro.tc.svmhmm.report.SVMHMMOutcomeIDReport.java

@Override
public void execute() throws Exception {
    // load gold and predicted labels
    loadGoldAndPredictedLabels();

    File testFile = locateTestFile();

    // original tokens
    List<String> originalTokens = SVMHMMUtils.extractOriginalTokens(testFile);

    // sequence IDs
    List<Integer> sequenceIDs = SVMHMMUtils.extractOriginalSequenceIDs(testFile);

    // sanity check
    if (goldLabels.size() != originalTokens.size() || goldLabels.size() != sequenceIDs.size()) {
        throw new IllegalStateException("Gold labels, original tokens or sequenceIDs differ in size!");
    }

    File evaluationFile = new File(
            getContext().getStorageLocation(TEST_TASK_OUTPUT_KEY, StorageService.AccessMode.READWRITE),
            SVMHMMUtils.GOLD_PREDICTED_OUTCOMES_CSV);

    // write results into CSV
    // form: gold;predicted;token;seqID

    CSVPrinter csvPrinter = new CSVPrinter(new FileWriter(evaluationFile), SVMHMMUtils.CSV_FORMAT);
    csvPrinter.printComment(SVMHMMUtils.CSV_COMMENT);

    for (int i = 0; i < goldLabels.size(); i++) {
        csvPrinter.printRecord(goldLabels.get(i), predictedLabels.get(i), originalTokens.get(i),
                sequenceIDs.get(i).toString());
    }

    IOUtils.closeQuietly(csvPrinter);
}

From source file:com.goeuro.goeurotest.service.Services.java

/**
 * Write CSV file using list of records and pre defined static header
 *
 * @param recordsList the list of records to write
 * @throws Exception
 */
public void writeCSV(List recordsList) throws Exception {
    FileWriter fileWriter = null;
    CSVPrinter csvFilePrinter = null;
    try {
        CSVFormat csvFileFormat = CSVFormat.DEFAULT.withRecordSeparator(Defines.NEW_LINE_SEPARATOR);
        fileWriter = new FileWriter(Defines.FILE_NAME);
        csvFilePrinter = new CSVPrinter(fileWriter, csvFileFormat);
        csvFilePrinter.printRecord(Defines.FILE_HEADER);
        for (Object recordList : recordsList) {
            csvFilePrinter.printRecords(recordList);
        }
        csvFilePrinter.flush();
        csvFilePrinter.close(); // closing the printer also closes the underlying fileWriter
    } catch (IOException ex) {
        throw new Exception("IOException occured while writing CSV file " + ex.getMessage());
    }
}

From source file:de.speexx.jira.jan.command.issuequery.CsvCreator.java

void printIssueData(final IssueData issueData, final List<FieldNamePath> currentFieldNames,
        final List<FieldName> historyFieldNames, final TemporalChangeOutput temporalOutput) {
    assert !Objects.isNull(issueData);
    assert !Objects.isNull(currentFieldNames);
    assert !Objects.isNull(historyFieldNames);
    assert !Objects.isNull(temporalOutput);

    final List<String> currentFieldEntries = fetchCurrentFieldEntries(issueData, currentFieldNames);

    try {
        final CSVPrinter csvPrinter = new CSVPrinter(new OutputStreamWriter(System.out, StandardCharsets.UTF_8),
                RFC4180);

        if (issueData.getHistoricalCount() == 0) {
            final int fieldsPerChangeEntry = calculateHistoricalFieldSize(temporalOutput);
            final int max = historyFieldNames.size() * fieldsPerChangeEntry;
            final List<String> out = new ArrayList<>(currentFieldEntries);
            addEmptyChangeData(out, max);
            csvPrinter.printRecord(out);

        } else {
            final int fieldsPerChangeEntry = calculateHistoricalFieldSize(temporalOutput);
            final int historyFieldNamesSize = historyFieldNames.size();

            for (int idx = 0; idx < historyFieldNamesSize; idx++) {
                final FieldName fieldName = historyFieldNames.get(idx);

                final List<HistoricalDataEntry> historicalData = issueData.getHistoricalIssueData(fieldName);
                LocalDateTime lastChangeDate = issueData.getCreatedDate()
                        .orElseThrow(() -> new IllegalStateException("No createdDate available"));

                for (final HistoricalDataEntry entry : historicalData) {
                    final List<String> out = new ArrayList<>();
                    for (int i = 0; i < historyFieldNamesSize; i++) {
                        if (i != idx) {
                            addEmptyChangeData(out, fieldsPerChangeEntry);
                        } else {
                            lastChangeDate = addChangeData(out, entry, temporalOutput, lastChangeDate);
                        }
                    }
                    final List<String> outList = new ArrayList<>(currentFieldEntries);
                    outList.addAll(out);
                    csvPrinter.printRecord(outList.toArray());
                }
            }
        }

        csvPrinter.flush();
    } catch (final IOException e) {
        throw new JiraAnalyzeException(e);
    }
}

From source file:com.bigtester.ate.tcg.controller.TrainingFileDB.java

/**
 * Write test csv file.
 *
 * @param mlInputs
 *            the ml inputs
 * @param append
 *            the append
 * @throws IOException
 */
public static void writeTestCsvFile(List<String> mlInputs, boolean append) throws IOException {

    if (mlInputs.isEmpty())
        return;

    // Create training record objects from the raw inputs
    List<UserInputTrainingRecord> trainings = new ArrayList<UserInputTrainingRecord>();
    for (int index = 0; index < mlInputs.size(); index++) {
        String temp = mlInputs.get(index);
        if (null != temp) {
            trainings.add(new UserInputTrainingRecord(" ", temp));
        }
    }

    FileWriter fileWriter = null; // NOPMD

    CSVPrinter csvFilePrinter = null; // NOPMD

    // Create the CSVFormat object with "\n" as a record delimiter
    CSVFormat csvFileFormat = getCSVFormat();
    try {

        // initialize FileWriter object
        // FileSystemResource testFile = new
        // FileSystemResource(UserInputsTrainer.TESTFILE);

        fileWriter = new FileWriter(UserInputsTrainer.TESTFILE, append);

        // initialize CSVPrinter object
        csvFilePrinter = new CSVPrinter(fileWriter, csvFileFormat);// NOPMD

        // Write the training records to the CSV file
        for (UserInputTrainingRecord student : trainings) {
            List<String> studentDataRecord = new ArrayList<String>();
            studentDataRecord.add(student.getInputLabelName());
            studentDataRecord.add(student.getInputMLHtmlCode());
            csvFilePrinter.printRecord(studentDataRecord);
        }

        // System.out.println("CSV file was created successfully !!!");

    } catch (Exception e) {// NOPMD
        throw new IOException("Error in CsvFileWriter !!!");// NOPMD
        // e.printStackTrace();
    } finally {// NOPMD
        try {
            // close the printer first; it flushes and closes the underlying fileWriter
            if (null != csvFilePrinter) {
                csvFilePrinter.close();
            } else if (null != fileWriter) {
                fileWriter.close();
            }
        } catch (IOException e) { // NOPMD
            throw new IOException(//NOPMD
                    "Error while flushing/closing fileWriter/csvPrinter !!!", e); // NOPMD
        }
    }
}