Example usage for org.apache.commons.csv CSVFormat RFC4180

List of usage examples for org.apache.commons.csv CSVFormat RFC4180

Introduction

On this page you can find example usage for org.apache.commons.csv CSVFormat RFC4180.

Prototype

public static final CSVFormat RFC4180

To view the source code for org.apache.commons.csv CSVFormat RFC4180, click the Source Link.

Document

Comma-separated format as defined by RFC 4180 (http://tools.ietf.org/html/rfc4180).
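
Before the project-specific examples below, here is a minimal, self-contained sketch (not taken from any of the listed sources) of parsing quoted, comma-separated input with CSVFormat.RFC4180:

import java.io.IOException;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class Rfc4180ParseSketch {
    public static void main(String[] args) throws IOException {
        // RFC 4180 uses comma delimiters, double-quote quoting, and CRLF line endings.
        String csv = "name,comment\r\nalice,\"says \"\"hi\"\"\"\r\n";
        try (CSVParser parser = CSVParser.parse(csv, CSVFormat.RFC4180)) {
            // Without a header configured, the first row is returned as an ordinary record.
            for (CSVRecord record : parser) {
                System.out.println(record.get(0) + " -> " + record.get(1));
            }
        }
    }
}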

Usage

From source file: ddf.catalog.transformer.csv.CsvQueryResponseTransformer.java

private Appendable writeSearchResultsToCsv(final SourceResponse upstreamResponse,
        Map<String, String> columnAliasMap, List<AttributeDescriptor> sortedAttributeDescriptors)
        throws CatalogTransformerException {
    StringBuilder stringBuilder = new StringBuilder();

    try {
        CSVPrinter csvPrinter = new CSVPrinter(stringBuilder, CSVFormat.RFC4180);
        printColumnHeaders(csvPrinter, sortedAttributeDescriptors, columnAliasMap);

        upstreamResponse.getResults().stream().map(Result::getMetacard)
                .forEach(mc -> printMetacard(csvPrinter, mc, sortedAttributeDescriptors));

        return csvPrinter.getOut();
    } catch (IOException ioe) {
        throw new CatalogTransformerException(ioe.getMessage(), ioe);
    }
}
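
The pattern above (printing into a StringBuilder through a CSVPrinter and returning the printer's underlying Appendable) reduces to a minimal sketch; the record values are made up for illustration:

import java.io.IOException;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;

public class Rfc4180PrintSketch {
    public static void main(String[] args) throws IOException {
        StringBuilder out = new StringBuilder();
        CSVPrinter printer = new CSVPrinter(out, CSVFormat.RFC4180);
        printer.printRecord("id", "title");                // header row
        printer.printRecord("1", "a value, with a comma"); // quoted automatically
        // getOut() returns the Appendable the printer writes to -- here, the StringBuilder.
        System.out.print(printer.getOut());
        printer.close();
    }
}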

From source file: net.iaeste.iws.core.services.ExchangeCSVFetchService.java

private static CSVPrinter getDefaultCsvPrinter(final Appendable output) {
    try {
        return CSVFormat.RFC4180.withDelimiter(DELIMITER.getDescription()).withNullString("").print(output);
    } catch (IOException e) {
        throw new IWSException(IWSErrors.PROCESSING_FAILURE, "Creating CSVPrinter failed", e);
    }
}
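
In the snippet above, DELIMITER.getDescription() and the IWS exception types are project-specific. A standalone sketch of the same Commons CSV calls, assuming a literal ';' delimiter:

import java.io.IOException;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;

public class DelimiterNullStringSketch {
    public static void main(String[] args) throws IOException {
        // withNullString("") makes null values print as empty fields.
        CSVPrinter printer = CSVFormat.RFC4180.withDelimiter(';').withNullString("").print(System.out);
        printer.printRecord("first", null, "third"); // prints: first;;third
        printer.close();
    }
}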

From source file: co.cask.hydrator.plugin.CSVParser.java

@Override
public void initialize(TransformContext context) throws Exception {
    super.initialize(context);

    String csvFormatString = config.format.toLowerCase();
    switch (csvFormatString) {
    case "default":
        csvFormat = CSVFormat.DEFAULT;
        break;

    case "excel":
        csvFormat = CSVFormat.EXCEL;
        break;

    case "mysql":
        csvFormat = CSVFormat.MYSQL;
        break;

    case "rfc4180":
        csvFormat = CSVFormat.RFC4180;
        break;

    case "tdf":
        csvFormat = CSVFormat.TDF;
        break;

    case "pdl":
        csvFormat = PDL;
        break;

    default:
        throw new IllegalArgumentException(
                "Format '" + csvFormatString + "' is not one of the allowed formats. Allowed formats are "
                        + "DEFAULT, EXCEL, MYSQL, RFC4180, PDL and TDF");
    }

    try {
        outSchema = Schema.parseJson(config.schema);
        fields = outSchema.getFields();
    } catch (IOException e) {
        throw new IllegalArgumentException("Format of schema specified is invalid. Please check the format.");
    }
}
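
As an aside, when the accepted names match Commons CSV's predefined format names, much of such a switch can be replaced by CSVFormat.valueOf (available since Commons CSV 1.2); PDL above is a plugin-local constant and would still need special-casing. A hedged sketch:

import org.apache.commons.csv.CSVFormat;

public class FormatLookupSketch {
    // Resolves "Default", "Excel", "MySQL", "RFC4180", "TDF" (case-sensitive,
    // matching the CSVFormat.Predefined enum names) to a format.
    static CSVFormat lookup(String name) {
        try {
            return CSVFormat.valueOf(name);
        } catch (IllegalArgumentException e) {
            throw new IllegalArgumentException("Unknown CSV format: " + name, e);
        }
    }

    public static void main(String[] args) {
        System.out.println(lookup("RFC4180"));
    }
}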

From source file: edu.ucla.cs.scai.swim.qa.ontology.dbpedia.DBpediaOntologyOld.java

private void processFile(BufferedReader in, DBpediaCategory category,
        HashMap<String, HashSet<DBpediaAttribute>> map) throws IOException {

    //The first header contains the properties labels.
    //The second header contains the properties URIs.
    //The third header contains the properties range labels.
    //The fourth header contains the properties range URIs.
    String l1 = in.readLine();
    String l2 = in.readLine();
    String l3 = in.readLine();
    String l4 = in.readLine();

    Iterator<CSVRecord> it = CSVParser.parse(l1 + "\n" + l2 + "\n" + l3 + "\n" + l4, CSVFormat.RFC4180)
            .iterator();
    Iterator<String> r1 = it.next().iterator();
    Iterator<String> r2 = it.next().iterator();
    Iterator<String> r3 = it.next().iterator();
    Iterator<String> r4 = it.next().iterator();

    while (r1.hasNext() && r2.hasNext() && r3.hasNext() && r4.hasNext()) {

        String name = r1.next();
        String uri = r2.next();
        String range = r3.next();
        String rangeUri = r4.next();

        HashSet<DBpediaAttribute> as = map.get(name);
        if (as == null) {
            as = new HashSet<>();
            map.put(name, as);
        }

        DBpediaAttribute a = attributesByUri.get(uri);

        if (a == null) {
            a = new DBpediaAttribute();
            a.setLabel(name);
            a.setRange(range);
            a.rangeUri.add(rangeUri);
            a.setUri(uri);
            attributesByUri.put(a.getUri(), a);
        }
        as.add(a);

        if (abstractAttribute == null && uri.equals("http://www.w3.org/2000/01/rdf-schema#comment")) {
            abstractAttribute = a;
            System.out.println("Abstract attribute found");
        }

        category.domainOfAttributes.add(a);
    }

    if (r1.hasNext() || r2.hasNext() || r3.hasNext() || r4.hasNext()) {
        System.out.println(
                "Error: number of columns not matching in first rows of " + category.getLabel() + " csv file");
    }
}

From source file: edu.ucla.cs.scai.swim.qa.ontology.dbpedia.tipicality.Test.java

private static ArrayList<HashSet<String>> extractEntities(File csvData, int nOfAttributes) throws IOException {
    CSVParser parser = CSVParser.parse(csvData, Charset.defaultCharset(), CSVFormat.RFC4180);
    int r = 0;
    ArrayList<Integer> attributePositions = new ArrayList<>();
    ArrayList<String> attributeNames = new ArrayList<>();
    ArrayList<HashSet<String>> res = new ArrayList<>();
    for (CSVRecord csvRecord : parser) {
        if (r == 0) {
            Iterator<String> it = csvRecord.iterator();
            it.next(); //skip URI
            if (!it.hasNext()) { //it is an empty file
                return res;
            }
            it.next(); //skip rdf-schema#label
            it.next(); //skip rdf-schema#comment
            int c = 2;
            while (it.hasNext()) {
                c++;
                String attr = it.next();
                if (!attr.endsWith("_label")) {
                    attributePositions.add(c);
                }
            }
        } else if (r == 1) {
            Iterator<String> it = csvRecord.iterator();
            it.next(); //skip uri
            it.next(); //skip rdf-schema#label
            it.next(); //skip rdf-schema#comment
            int c = 2;
            int i = 0;
            while (i < attributePositions.size()) {
                c++;
                String attr = it.next();
                if (attributePositions.get(i) == c) {
                    if (!stopAttributes.contains(attr)) {
                        attributes.add(attr);
                    }
                    attributeNames.add(attr);
                    i++;
                }
            }
        } else if (r > 3) {
            ArrayList<String> attributesOfThisEntity = new ArrayList<>();
            Iterator<String> it = csvRecord.iterator();
            String uri = it.next();
            it.next(); //skip rdf-schema#label
            it.next(); //skip rdf-schema#comment
            int c = 2;
            int i = 0;
            while (i < attributePositions.size()) {
                c++;
                String val = it.next();
                if (attributePositions.get(i) == c) {
                    if (!val.equalsIgnoreCase("null")) {
                        String attribute = attributeNames.get(i);
                        if (!stopAttributes.contains(attribute)) {
                            attributesOfThisEntity.add(attribute);
                        }
                    }
                    i++;
                }
            }
            Collections.shuffle(attributesOfThisEntity);
            HashSet<String> s = new HashSet<>();
            for (int k = 0; k < Math.min(nOfAttributes, attributesOfThisEntity.size()); k++) {
                s.add(attributesOfThisEntity.get(k));
            }
            res.add(s);
        }
        r++;
    }
    return res;
}
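
The core Commons CSV call in this example, streaming records from a file with CSVParser.parse(File, Charset, CSVFormat), reduces to the following sketch (the file name is hypothetical):

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class FileParseSketch {
    public static void main(String[] args) throws IOException {
        File csvData = new File("entities.csv"); // hypothetical input file
        try (CSVParser parser = CSVParser.parse(csvData, StandardCharsets.UTF_8, CSVFormat.RFC4180)) {
            for (CSVRecord record : parser) {
                System.out.println(record.size() + " fields, first: " + record.get(0));
            }
        }
    }
}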

From source file: com.ggvaidya.scinames.ui.DatasetImporterController.java

private Dataset loadDataset() throws IOException {
    String format = fileFormatComboBox.getSelectionModel().getSelectedItem();
    CSVFormat csvFormat = null;
    if (format == null) {
        csvFormat = CSVFormat.DEFAULT;
    } else {
        switch (format) {
        case "List of names":
            return Checklist.fromListInFile(currentFile);
        case "Default CSV":
            csvFormat = CSVFormat.DEFAULT;
            break;
        case "Microsoft Excel CSV":
            csvFormat = CSVFormat.EXCEL;
            break;
        case "RFC 4180 CSV":
            csvFormat = CSVFormat.RFC4180;
            break;
        case "Oracle MySQL CSV":
            csvFormat = CSVFormat.MYSQL;
            break;
        case "Tab-delimited file":
            csvFormat = CSVFormat.TDF;
            break;
        case "TaxDiff file":
            return ChecklistDiff.fromTaxDiffFile(currentFile);
        case "Excel file":
            return new ExcelImporter(currentFile).asDataset(0);
        }
    }

    if (csvFormat == null) {
        LOGGER.info("Could not determine CSV format from format '" + format + "', using CSV default.");
        csvFormat = CSVFormat.DEFAULT;
    }

    return Dataset.fromCSV(csvFormat, currentFile);
}

From source file: de.speexx.csv.table.app.Application.java

CSVPrinter createCsvPrinter(final RowReader rows, final Configuration conf) throws IOException {
    final List<EntryDescriptor> descriptors = rows.getEntryDescriptors();
    if (conf.isWithoutHeader()) {
        return CSVFormat.RFC4180.print(System.out);
    }
    final List<String> headers = descriptors.stream().map(desc -> desc.getName()).collect(Collectors.toList());
    return CSVFormat.RFC4180.withHeader(headers.toArray(new String[headers.size()])).print(System.out);
}
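
A self-contained reduction of the header-printing call above, with hard-coded header names in place of the EntryDescriptor lookup:

import java.io.IOException;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;

public class HeaderPrintSketch {
    public static void main(String[] args) throws IOException {
        // withHeader(...) makes the printer emit the header row automatically
        // before the first data record.
        CSVPrinter printer = CSVFormat.RFC4180.withHeader("id", "name").print(System.out);
        printer.printRecord("1", "alice");
        printer.close();
    }
}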

From source file: com.hurence.logisland.service.cache.CSVKeyValueCacheService.java

@Override
// @OnEnabled
public void init(ControllerServiceInitializationContext context) throws InitializationException {
    super.init(context);
    try {

        if (context.getPropertyValue(DATABASE_FILE_URI).isSet()) {
            dbUri = context.getPropertyValue(DATABASE_FILE_URI).asString();
        }

        if (context.getPropertyValue(DATABASE_FILE_PATH).isSet()) {
            dbPath = context.getPropertyValue(DATABASE_FILE_PATH).asString();
        }

        if ((dbUri == null) && (dbPath == null)) {
            throw new Exception(
                    "You must declare " + DATABASE_FILE_URI.getName() + " or " + DATABASE_FILE_PATH.getName());
        }

        InputStream is = null;
        if (dbUri != null) {
            logger.info("opening csv database from hdfs : " + dbUri);
            is = initFromUri(dbUri);
        }

        if (dbPath != null) {
            logger.info("opening csv database from local fs : " + dbPath);
            is = initFromPath(context, dbPath);
        }

        if (is == null) {
            throw new InitializationException("Something went wrong while initializing csv db from "
                    + DATABASE_FILE_URI.getName() + " or " + DATABASE_FILE_PATH.getName());
        }

        CSVFormat format = CSVFormat.DEFAULT;
        if (context.getPropertyValue(CSV_FORMAT).asString().equals(CSV_EXCEL.getValue())) {
            format = CSVFormat.EXCEL;
        } else if (context.getPropertyValue(CSV_FORMAT).asString().equals(CSV_EXCEL_FR.getValue())) {
            format = CSVFormat.EXCEL.withDelimiter(';');
        } else if (context.getPropertyValue(CSV_FORMAT).asString().equals(CSV_MYSQL.getValue())) {
            format = CSVFormat.MYSQL;
        } else if (context.getPropertyValue(CSV_FORMAT).asString().equals(CSV_RFC4180.getValue())) {
            format = CSVFormat.RFC4180;
        } else if (context.getPropertyValue(CSV_FORMAT).asString().equals(CSV_TDF.getValue())) {
            format = CSVFormat.TDF;
        }

        if (context.getPropertyValue(CSV_HEADER).isSet()) {
            String[] columnNames = context.getPropertyValue(CSV_HEADER).asString().split(",");
            for (String name : columnNames) {
                headers.get().put(name, "string");
            }
            format = format.withHeader(columnNames);
        } else if (context.getPropertyValue(FIRST_LINE_HEADER).isSet()) {
            format = format.withFirstRecordAsHeader();
        } else {
            throw new InitializationException("unable to get headers from somewhere");
        }

        Charset charset = Charset.forName("UTF-8");
        if (context.getPropertyValue(ENCODING_CHARSET).isSet()) {
            String encoding = context.getPropertyValue(ENCODING_CHARSET).asString();
            charset = Charset.forName(encoding);
        }

        rowKey = context.getPropertyValue(ROW_KEY).asString();
        CSVParser parser = CSVParser.parse(is, charset, format);

        long count = 0;
        try {
            final Set<String> columnNames = parser.getHeaderMap().keySet();
            for (final CSVRecord record : parser) {

                Record logislandRecord = new StandardRecord();
                for (final String column : columnNames) {
                    logislandRecord.setStringField(column, record.get(column));
                }

                set(logislandRecord.getField(rowKey).asString(), logislandRecord);
                count++;
            }
        } finally {
            parser.close();
            is.close();
        }
        logger.info("successfully loaded " + count + " records from CSV file");

    } catch (Exception e) {
        getLogger().error("Could not load database file: {}", new Object[] { e.getMessage() });
        throw new InitializationException(e);
    }
}
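
Stripped of the controller-service plumbing, the header handling above combines withFirstRecordAsHeader() (or withHeader(String...)) with getHeaderMap() to address columns by name. A minimal sketch:

import java.io.IOException;
import java.io.StringReader;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class FirstRecordHeaderSketch {
    public static void main(String[] args) throws IOException {
        String csv = "key,value\r\na,1\r\nb,2\r\n";
        CSVFormat format = CSVFormat.RFC4180.withFirstRecordAsHeader();
        try (CSVParser parser = format.parse(new StringReader(csv))) {
            // Header names mapped to column indices, read from the first record.
            System.out.println(parser.getHeaderMap().keySet());
            for (CSVRecord record : parser) {
                System.out.println(record.get("key") + " -> " + record.get("value"));
            }
        }
    }
}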

From source file: com.ggvaidya.scinames.complexquery.ComplexQueryViewController.java

@FXML
private void exportToCSV(ActionEvent evt) {
    FileChooser chooser = new FileChooser();
    chooser.getExtensionFilters().setAll(new FileChooser.ExtensionFilter("CSV file", "*.csv"),
            new FileChooser.ExtensionFilter("Tab-delimited file", "*.txt"));
    File file = chooser.showSaveDialog(scene.getWindow());
    if (file != null) {
        CSVFormat format = CSVFormat.RFC4180;

        String outputFormat = chooser.getSelectedExtensionFilter().getDescription();
        if (outputFormat.equalsIgnoreCase("Tab-delimited file"))
            format = CSVFormat.TDF;

        try {
            List<List<String>> dataAsTable = getDataAsTable();
            fillCSVFormat(format, new FileWriter(file), dataAsTable);

            Alert window = new Alert(Alert.AlertType.CONFIRMATION,
                    "CSV file '" + file + "' saved with " + (dataAsTable.get(0).size() - 1) + " rows.");
            window.showAndWait();

        } catch (IOException e) {
            Alert window = new Alert(Alert.AlertType.ERROR, "Could not save CSV to '" + file + "': " + e);
            window.showAndWait();
        }
    }
}
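
fillCSVFormat and getDataAsTable are application methods; writing a table of rows through Commons CSV itself could be sketched as follows (the output file name is made up):

import java.io.FileWriter;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;

public class TableExportSketch {
    public static void main(String[] args) throws IOException {
        List<List<String>> table = Arrays.asList(
                Arrays.asList("name", "status"),
                Arrays.asList("alpha", "ok"));
        // Each inner list becomes one CSV record.
        try (CSVPrinter printer = new CSVPrinter(new FileWriter("export.csv"), CSVFormat.RFC4180)) {
            for (List<String> row : table) {
                printer.printRecord(row);
            }
        }
    }
}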

From source file: net.iaeste.iws.core.services.ExchangeCSVService.java

private static CSVParser getDefaultCsvParser(final Reader input, final char delimiter) {
    try {
        return CSVFormat.RFC4180.withDelimiter(delimiter).withHeader().parse(input);
    } catch (IOException e) {
        throw new IWSException(IWSErrors.PROCESSING_FAILURE, "Creating CSVParser failed", e);
    }
}
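
Isolated from the IWS types, the parse-side counterpart chains a delimiter, header inference, and parse. A sketch with assumed input:

import java.io.IOException;
import java.io.StringReader;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class DelimitedHeaderParseSketch {
    public static void main(String[] args) throws IOException {
        String csv = "id;name\r\n1;alice\r\n";
        // withHeader() with no arguments reads column names from the first record.
        try (CSVParser parser = CSVFormat.RFC4180.withDelimiter(';').withHeader()
                .parse(new StringReader(csv))) {
            for (CSVRecord record : parser) {
                System.out.println(record.get("id") + ": " + record.get("name"));
            }
        }
    }
}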