Example usage for org.apache.commons.csv CSVFormat RFC4180

Introduction

This page collects example usages of org.apache.commons.csv CSVFormat.RFC4180.

Prototype

CSVFormat RFC4180

Document

Comma separated format as defined by RFC 4180 (http://tools.ietf.org/html/rfc4180).
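
A minimal, self-contained sketch of what this format does when parsing and printing; the input and values here are illustrative and not taken from the examples below:

import java.io.StringReader;
import java.io.StringWriter;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVPrinter;
import org.apache.commons.csv.CSVRecord;

public class Rfc4180Sketch {
    public static void main(String[] args) throws Exception {
        // Parse RFC 4180 input; quoted fields may contain delimiters and doubled quotes.
        CSVParser parser = CSVFormat.RFC4180.parse(new StringReader("a,\"b,c\",\"d\"\"e\"\n1,2,3"));
        for (CSVRecord record : parser) {
            System.out.println(record.get(0) + " | " + record.get(1) + " | " + record.get(2));
        }

        // Print a record; fields containing delimiters or quotes are quoted automatically.
        StringWriter out = new StringWriter();
        CSVPrinter printer = new CSVPrinter(out, CSVFormat.RFC4180);
        printer.printRecord("x", "y,z", "q\"q");
        printer.close();
        System.out.print(out);
    }
}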

Usage

From source file:net.javacrumbs.codecamp.boot.common.CsvFileLogger.java

@Override
@CacheEvict(value = "messages", key = "'messages'")
public void addMessage(Message message) {
    try {
        String newRow = CSVFormat.RFC4180.format(message.getSeverity(), message.getText(), message.getTime());
        Files.write(file.toPath(), singletonList(newRow), APPEND, CREATE);
    } catch (IOException e) {
        throw new IllegalStateException(e);
    }
}
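
CSVFormat.format(Object...) renders its arguments as a single CSV line, quoting only where RFC 4180 requires it, so each call above appends exactly one record to the log file. For illustration (the values are made up):

// Produces: ERROR,"hello, world",2016-01-01T00:00
String newRow = CSVFormat.RFC4180.format("ERROR", "hello, world", "2016-01-01T00:00");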

From source file:com.github.r351574nc3.amex.assignment1.csv.DefaultInterpreter.java

/**
 * Convert records to {@link TestData}.
 *
 * @return a {@link Hashtable} instance which makes record lookup by name easier. Records that belong to a given name are
 * indexed within the {@link Hashtable} instance. When a name has more than one record, the value stored in the
 * {@link Hashtable} is a {@link LinkedList} that can be iterated quickly.
 */
public Hashtable interpret(final File input) throws IOException {
    final CSVParser parser = CSVParser.parse(input, Charset.defaultCharset(),
            CSVFormat.RFC4180.withDelimiter('|'));

    // Using a {@link Hashtable} with the name field of the CSV record as the key. A low load factor is used to
    // favor lookup time over space.
    final Hashtable<String, LinkedList<TestData>> index = new Hashtable<String, LinkedList<TestData>>(2, 0.5f);

    for (final CSVRecord record : parser) {
        final EmailNotificationTestData data = toTestData(record);

        LinkedList<TestData> data_ls = index.get(data.getName());
        if (data_ls == null) {
            data_ls = new LinkedList<TestData>();
            index.put(data.getName(), data_ls);
        }
        data_ls.add(data);
    }

    return index;
}
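
A hypothetical caller of interpret(File); the file name and lookup key are assumptions for illustration, and interpreter stands in for a DefaultInterpreter instance:

// Unchecked assignment: interpret() is declared to return a raw Hashtable.
final Hashtable<String, LinkedList<TestData>> index = interpreter.interpret(new File("test-data.csv"));
final LinkedList<TestData> records = index.get("some name");
if (records != null) {
    for (final TestData data : records) {
        // process each pipe-delimited record that carried this name
    }
}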

From source file:co.cask.hydrator.transforms.ParseCSV.java

@Override
public void initialize(TransformContext context) throws Exception {
    super.initialize(context);

    String csvFormatString = config.format.toLowerCase();
    switch (csvFormatString) {
    case "default":
        csvFormat = CSVFormat.DEFAULT;
        break;

    case "excel":
        csvFormat = CSVFormat.EXCEL;
        break;

    case "mysql":
        csvFormat = CSVFormat.MYSQL;
        break;

    case "rfc4180":
        csvFormat = CSVFormat.RFC4180;
        break;

    case "tdf":
        csvFormat = CSVFormat.TDF;
        break;

    default:
        throw new IllegalArgumentException(
                "Format '" + csvFormatString + "' is not one of the allowed formats. Allowed formats are "
                        + "DEFAULT, EXCEL, MYSQL, RFC4180 and TDF.");
    }

    try {
        outSchema = Schema.parseJson(config.schema);
        fields = outSchema.getFields();
    } catch (IOException e) {
        throw new IllegalArgumentException("Format of schema specified is invalid. Please check the format.");
    }
}
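
The switch above maps configuration strings to the predefined formats by hand. As a possible simplification (assuming Commons CSV 1.2 or later, which this class may not use), a predefined format can also be resolved by its exact enum name:

// Names must match CSVFormat.Predefined exactly, e.g. "Default", "Excel", "MySQL", "RFC4180", "TDF";
// unknown names throw IllegalArgumentException, so the incoming string would need normalizing first.
CSVFormat byName = CSVFormat.valueOf("RFC4180");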

From source file:com.awesheet.managers.CSVManager.java

/**
 * Exports the given Sheet to a CSV file in the specified path.
 * @param sheet the Sheet to export.
 * @param path the target path of the CSV file.
 * @return whether the export was successful
 */
public boolean exportSheet(Sheet sheet, String path) {
    FileWriter writer = null;
    CSVPrinter printer = null;

    try {
        writer = new FileWriter(path);
        printer = new CSVPrinter(writer, CSVFormat.RFC4180);

        // Write records.
        for (int y = 0; y < sheet.getMaxRow(); ++y) {
            List<String> values = new ArrayList<String>();

            for (int x = 0; x < sheet.getMaxColumn(); ++x) {
                Cell cell = sheet.getCell(x, y);
                values.add(cell == null ? "" : cell.getDisplayValue());
            }

            printer.printRecord(values);
        }
    } catch (Exception e) {
        return false;
    } finally {
        try {
            if (writer != null) {
                writer.flush();
                writer.close();
            }

            if (printer != null) {
                printer.close();
            }
        } catch (Exception ignored) {
        }
    }

    return true;
}

From source file:de.upb.wdqa.wdvd.processors.decorators.GeolocationFeatureProcessor.java

@Override
public void startRevisionProcessing() {
    logger.debug("Starting...");
    try {
        BufferedReader csvReader;

        csvReader = new BufferedReader(
                new InputStreamReader(
                        new BZip2CompressorInputStream(
                                new BufferedInputStream(new FileInputStream(geolocationFeatureFile))),
                        "UTF-8"));

        csvParser = new CSVParser(csvReader, CSVFormat.RFC4180.withHeader());
        iterator = csvParser.iterator();

        processor.startRevisionProcessing();

    } catch (IOException e) {
        logger.error("", e);
    }
}
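
Because the format is built with withHeader() and no explicit column names, the parser reads the first line of the file as the header, and later records can be accessed by column name. A hypothetical use of the iterator above (the column name is an assumption):

while (iterator.hasNext()) {
    CSVRecord record = iterator.next();
    // Column names come from the first line of the bzip2-compressed file.
    String revisionId = record.get("revisionId");
}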

From source file:com.github.jferard.pgloaderutils.sniffer.csd.CSDSchemaSnifferTest.java

@Test
public void sniffBadHeader2() throws Exception {
    CSVParser p = CSVFormat.RFC4180.parse(new StringReader("a,b,c\n1,2,3\n4,5,6"));

    EasyMock.expect(this.vh.validateHeader(EasyMock.isA(CSDValidationResult.class), EasyMock.eq(this.sp),
            EasyMock.isA(CSVRecord.class))).andReturn(-1);

    PowerMock.replayAll();
    CSDSchema<CSDFieldPattern> s = this.sniffer.sniff(this.sp, p, 10);
    Assert.assertNull(s);
    PowerMock.verifyAll();
}

From source file:de.upb.wdqa.wdvd.processors.output.CsvFeatureWriter.java

@Override
public void startRevisionProcessing() {
    logger.debug("Starting (" + featureFile + ")...");

    try {
        OutputStreamWriter writer = new OutputStreamWriter(
                getPipedOutputStreamStream(new BZip2CompressorOutputStream(
                        new BufferedOutputStream(new FileOutputStream(featureFile)), BZIP2_BLOCKSIZE)),
                "utf-8");

        String[] header = new String[features.size()];

        for (int i = 0; i < features.size(); i++) {
            header[i] = features.get(i).getName();
        }

        csvPrinter = CSVFormat.RFC4180.withHeader(header).print(writer);

    } catch (IOException e) {
        logger.error("", e);
    }
}
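
withHeader(header).print(writer) returns a CSVPrinter whose constructor immediately writes the header row (skipHeaderRecord is false by default), so the feature names become the first line of the output. The call is roughly equivalent to:

csvPrinter = new CSVPrinter(writer, CSVFormat.RFC4180.withHeader(header));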

From source file:co.cask.hydrator.transforms.CSVParser2.java

@Override
public void initialize(TransformContext context) throws Exception {
    super.initialize(context);

    String csvFormatString = config.format.toLowerCase();
    switch (csvFormatString) {
    case "default":
        csvFormat = CSVFormat.DEFAULT;
        break;

    case "excel":
        csvFormat = CSVFormat.EXCEL;
        break;

    case "mysql":
        csvFormat = CSVFormat.MYSQL;
        break;

    case "rfc4180":
        csvFormat = CSVFormat.RFC4180;
        break;

    case "tdf":
        csvFormat = CSVFormat.TDF;
        break;

    default:
        throw new IllegalArgumentException(
                "Format '" + csvFormatString + "' is not one of the allowed formats. Allowed formats are "
                        + "DEFAULT, EXCEL, MYSQL, RFC4180 and TDF.");
    }

    if (config.field == null || config.field.isEmpty()) {
        throw new IllegalArgumentException("Field for applying transformation is not specified.");
    }

    try {
        outSchema = Schema.parseJson(config.schema);
        fields = outSchema.getFields();
    } catch (IOException e) {
        throw new IllegalArgumentException("Format of schema specified is invalid. Please check the format.");
    }
}

From source file:com.ibm.g11n.pipeline.example.CSVFilter.java

@Override
public void merge(InputStream baseStream, OutputStream outStream, LanguageBundle languageBundle,
        FilterOptions options) throws IOException, ResourceFilterException {
    // create key-value map
    Map<String, String> kvMap = new HashMap<String, String>();
    for (ResourceString resString : languageBundle.getResourceStrings()) {
        kvMap.put(resString.getKey(), resString.getValue());
    }

    CSVParser parser = CSVParser.parse(baseStream, StandardCharsets.UTF_8,
            CSVFormat.RFC4180.withHeader("key", "value").withSkipHeaderRecord(true));
    BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(outStream, StandardCharsets.UTF_8));
    CSVPrinter printer = CSVFormat.RFC4180.withHeader("key", "value").print(writer);
    for (CSVRecord record : parser) {
        String key = record.get(0);
        String value = record.get(1);
        String trValue = kvMap.get(key);
        if (trValue != null) {
            value = trValue;
        }
        printer.printRecord(key, value);
    }
    printer.flush();
}
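
withSkipHeaderRecord(true) tells the parser to treat the first line as the header without returning it from the iterator, so the loop above sees only data rows; the printer side, built without it, re-emits the key/value header. A sketch of the parsing half on an illustrative two-line input:

CSVParser parser = CSVParser.parse(
        new ByteArrayInputStream("key,value\ngreeting,Hello".getBytes(StandardCharsets.UTF_8)),
        StandardCharsets.UTF_8, CSVFormat.RFC4180.withHeader("key", "value").withSkipHeaderRecord(true));
for (CSVRecord record : parser) {
    // Only the data row is returned: record.get("key") is "greeting".
    System.out.println(record.get("key") + "=" + record.get("value"));
}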

From source file:com.ibm.g11n.pipeline.example.MultiBundleCSVFilter.java

@Override
public void write(OutputStream outStream, Map<String, LanguageBundle> languageBundles, FilterOptions options)
        throws IOException, ResourceFilterException {
    BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(outStream, StandardCharsets.UTF_8));
    CSVPrinter printer = CSVFormat.RFC4180.withHeader("module", "key", "value").print(writer);

    // Sort by bundle
    TreeMap<String, LanguageBundle> sortedBundles = new TreeMap<>(languageBundles);

    for (Entry<String, LanguageBundle> bundleEntry : sortedBundles.entrySet()) {
        String module = bundleEntry.getKey();
        LanguageBundle languageBundle = bundleEntry.getValue();
        for (ResourceString resString : languageBundle.getSortedResourceStrings()) {
            printer.printRecord(module, resString.getKey(), resString.getValue());
        }
    }
    printer.flush();
}