Example usage for org.apache.commons.csv CSVFormat EXCEL

Introduction

This page collects example usages of org.apache.commons.csv CSVFormat.EXCEL from a variety of open-source projects.

Prototype

CSVFormat EXCEL

To view the source code for org.apache.commons.csv CSVFormat EXCEL, click the Source link.

Document

The Excel file format (using a comma as the value delimiter).
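
Before the per-project examples below, here is a minimal, self-contained sketch of the typical pattern: parsing a file with CSVFormat.EXCEL and treating the first row as the header. The file name data.csv is a placeholder, not taken from any example on this page.

import java.io.Reader;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class ExcelFormatExample {
    public static void main(String[] args) throws Exception {
        // "data.csv" is a placeholder; the first row is assumed to be a header row.
        try (Reader reader = Files.newBufferedReader(Paths.get("data.csv"));
                CSVParser parser = new CSVParser(reader, CSVFormat.EXCEL.withHeader())) {
            for (CSVRecord record : parser) {
                // With withHeader(), values can be looked up by column name or by index.
                System.out.println(record.get(0));
            }
        }
    }
}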

Usage

From source file:nz.ac.waikato.cms.supernova.SupernovaCSV.java

public static void main(String[] args) throws Exception {
    ArgumentParser parser;

    parser = ArgumentParsers.newArgumentParser("I am supernova");
    parser.description("Generates output according to 'I am supernova' by Keith Soo.\n"
            + "Loads scores/percentiles from a CSV file to generate multiple outputs at once.\n"
            + "Expected four columns (name of column is irrelevant):\n"
            + "- ID: the filename (excluding path and extension)\n" + "- Measure: the measure (" + MEASURE_LIST
            + ")\n" + "- Score: the score of the measure\n" + "- Percentile: the percentile of the measure\n"
            + "\n" + "Project homepage:\n" + "https://github.com/fracpete/i-am-supernova");

    // colors
    parser.addArgument("--" + AbstractOutputGenerator.OPENNESS + COLOR_SUFFIX)
            .metavar(AbstractOutputGenerator.OPENNESS + COLOR_SUFFIX).type(String.class)
            .setDefault(ColorHelper.toHex(Color.ORANGE))
            .help("The color for '" + AbstractOutputGenerator.OPENNESS + "' in hex format (e.g., "
                    + ColorHelper.toHex(Color.ORANGE) + ").");
    parser.addArgument("--" + AbstractOutputGenerator.EXTRAVERSION + COLOR_SUFFIX)
            .metavar(AbstractOutputGenerator.EXTRAVERSION + COLOR_SUFFIX).type(String.class)
            .setDefault(ColorHelper.toHex(Color.YELLOW))
            .help("The color for '" + AbstractOutputGenerator.EXTRAVERSION + "' in hex format (e.g., "
                    + ColorHelper.toHex(Color.YELLOW) + ").");
    parser.addArgument("--" + AbstractOutputGenerator.AGREEABLENESS + COLOR_SUFFIX)
            .metavar(AbstractOutputGenerator.AGREEABLENESS + COLOR_SUFFIX).type(String.class)
            .setDefault(ColorHelper.toHex(Color.GREEN))
            .help("The color for '" + AbstractOutputGenerator.AGREEABLENESS + "' in hex format (e.g., "
                    + ColorHelper.toHex(Color.GREEN) + ").");
    parser.addArgument("--" + AbstractOutputGenerator.CONSCIENTIOUSNESS + COLOR_SUFFIX)
            .metavar(AbstractOutputGenerator.CONSCIENTIOUSNESS + COLOR_SUFFIX).type(String.class)
            .setDefault(ColorHelper.toHex(Color.BLUE))
            .help("The color for '" + AbstractOutputGenerator.CONSCIENTIOUSNESS + "' in hex format (e.g., "
                    + ColorHelper.toHex(Color.BLUE) + ").");
    parser.addArgument("--" + AbstractOutputGenerator.NEUROTICISM + COLOR_SUFFIX)
            .metavar(AbstractOutputGenerator.NEUROTICISM + COLOR_SUFFIX).type(String.class)
            .setDefault(ColorHelper.toHex(Color.RED))
            .help("The color for '" + AbstractOutputGenerator.NEUROTICISM + "' in hex format (e.g., "
                    + ColorHelper.toHex(Color.RED) + ").");

    // other parameters
    parser.addArgument("--" + CSV).metavar(CSV).type(String.class).required(true)
            .help("The CSV file containing the scores/percentiles (header must be present).");

    parser.addArgument("--" + ID).metavar(ID).type(Integer.class).setDefault(1)
            .help("The 1-based index of the column in the CSV file containing the ID for the output file.");

    parser.addArgument("--" + MEASURE).metavar(MEASURE).type(Integer.class).setDefault(2)
            .help("The 1-based index of the column in the CSV file containing the measure name.\n"
                    + "Allowed values: " + MEASURE_LIST);

    parser.addArgument("--" + SCORE).metavar(SCORE).type(Integer.class).setDefault(3)
            .help("The 1-based index of the column in the CSV file containing the scores.");

    parser.addArgument("--" + PERCENTILE).metavar(PERCENTILE).type(Integer.class).setDefault(4)
            .help("The 1-based index of the column in the CSV file containing the percentiles.");

    parser.addArgument("--" + BACKGROUND).metavar(BACKGROUND).type(String.class)
            .setDefault(ColorHelper.toHex(Color.BLACK)).help("The background color.");

    parser.addArgument("--" + OPACITY).metavar(OPACITY).type(Double.class).setDefault(0.1)
            .help("The opacity (0-1).");

    parser.addArgument("--" + MARGIN).metavar(MARGIN).type(Double.class).setDefault(0.2)
            .help("The margin in the output (0-1).");

    parser.addArgument("--" + WIDTH).metavar(WIDTH).type(Integer.class).setDefault(2000)
            .help("The width of the output.");

    parser.addArgument("--" + HEIGHT).metavar(HEIGHT).type(Integer.class).setDefault(2000)
            .help("The height of the output.");

    parser.addArgument("--" + CENTER).metavar(CENTER).type(String.class).setDefault(Incenter.class.getName())
            .help("The name of the algorithm for calculating the center of a triangle.\n" + "Available: "
                    + Registry.toString(Registry.getCenters(), true));

    parser.addArgument("--" + GENERATOR).metavar(GENERATOR).type(String.class).setDefault(PNG.class.getName())
            .help("The name of the generator class to use.\n" + "Available: "
                    + Registry.toString(Registry.getGenerators(), true));

    parser.addArgument("--" + OUTPUT).metavar(OUTPUT).type(String.class)
            .help("The directory to store the output in.");

    parser.addArgument("--" + VERBOSE).metavar(VERBOSE).type(Boolean.class).action(Arguments.storeTrue())
            .help("Whether to output logging information.");

    Namespace namespace;
    try {
        namespace = parser.parseArgs(args);
    } catch (Exception e) {
        if (!(e instanceof HelpScreenException))
            parser.printHelp();
        return;
    }

    // colors
    Map<String, Color> colors = new HashMap<>();
    colors.put(AbstractOutputGenerator.OPENNESS, ColorHelper
            .valueOf(namespace.getString(AbstractOutputGenerator.OPENNESS + COLOR_SUFFIX), Color.ORANGE));
    colors.put(AbstractOutputGenerator.EXTRAVERSION, ColorHelper
            .valueOf(namespace.getString(AbstractOutputGenerator.EXTRAVERSION + COLOR_SUFFIX), Color.YELLOW));
    colors.put(AbstractOutputGenerator.AGREEABLENESS, ColorHelper
            .valueOf(namespace.getString(AbstractOutputGenerator.AGREEABLENESS + COLOR_SUFFIX), Color.GREEN));
    colors.put(AbstractOutputGenerator.CONSCIENTIOUSNESS, ColorHelper.valueOf(
            namespace.getString(AbstractOutputGenerator.CONSCIENTIOUSNESS + COLOR_SUFFIX), Color.BLUE));
    colors.put(AbstractOutputGenerator.NEUROTICISM, ColorHelper
            .valueOf(namespace.getString(AbstractOutputGenerator.NEUROTICISM + COLOR_SUFFIX), Color.RED));

    File outdir = new File(namespace.getString(OUTPUT));

    String centerCls = namespace.getString(CENTER);
    if (!centerCls.contains("."))
        centerCls = AbstractTriangleCenterCalculation.class.getPackage().getName() + "." + centerCls;
    String generatorCls = namespace.getString(GENERATOR);
    if (!generatorCls.contains("."))
        generatorCls = AbstractOutputGenerator.class.getPackage().getName() + "." + generatorCls;
    AbstractOutputGenerator generator = (AbstractOutputGenerator) Class.forName(generatorCls).newInstance();
    generator.setVerbose(namespace.getBoolean(VERBOSE));
    generator.setColors(colors);
    generator.setBackground(ColorHelper.valueOf(namespace.getString(BACKGROUND), Color.BLACK));
    generator.setOpacity(namespace.getDouble(OPACITY));
    generator.setMargin(namespace.getDouble(MARGIN));
    generator.setCenter((AbstractTriangleCenterCalculation) Class.forName(centerCls).newInstance());
    if (generator instanceof AbstractOutputGeneratorWithDimensions) {
        AbstractOutputGeneratorWithDimensions pixel = (AbstractOutputGeneratorWithDimensions) generator;
        pixel.setWidth(namespace.getInt(WIDTH));
        pixel.setHeight(namespace.getInt(HEIGHT));
    }

    int colID = namespace.getInt(ID) - 1;
    int colMeasure = namespace.getInt(MEASURE) - 1;
    int colScore = namespace.getInt(SCORE) - 1;
    int colPercentile = namespace.getInt(PERCENTILE) - 1;
    Reader reader = new FileReader(namespace.getString(CSV));
    CSVParser csvparser = new CSVParser(reader, CSVFormat.EXCEL.withHeader());
    String oldID = "";
    Map<String, List<Double>> test = new HashMap<>();
    for (CSVRecord rec : csvparser) {
        if (rec.size() < 4)
            continue;
        String id = rec.get(colID);
        if (!id.equals(oldID)) {
            if (!test.isEmpty()) {
                File outfile = new File(outdir + File.separator + oldID + "." + generator.getExtension());
                String msg = generator.generate(test, outfile);
                if (msg != null)
                    System.err.println("Failed to generate output for ID: " + oldID + ": " + msg);
            }
            test.clear();
            oldID = id;
        }
        String measure = rec.get(colMeasure);
        double score = Double.parseDouble(rec.get(colScore));
        double percentile = Double.parseDouble(rec.get(colPercentile));
        test.put(measure, new ArrayList<>(Arrays.asList(new Double[] { score, percentile })));
    }
    if (!test.isEmpty()) {
        File outfile = new File(outdir + File.separator + oldID + "." + generator.getExtension());
        String msg = generator.generate(test, outfile);
        if (msg != null)
            System.err.println("Failed to generate output for ID: " + oldID + ": " + msg);
    }
}
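
The example above never closes the reader or the parser. Since CSVParser implements Closeable, the same read loop can scope both with try-with-resources; a minimal sketch of that variant (per-record processing elided):

    try (Reader reader = new FileReader(namespace.getString(CSV));
            CSVParser csvparser = new CSVParser(reader, CSVFormat.EXCEL.withHeader())) {
        for (CSVRecord rec : csvparser) {
            // ... same per-record handling as in the loop above ...
        }
    }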

From source file:nzilbb.agcsv.AgCsvDeserializer.java

/**
 * Loads the serialized form of the graph, using the given set of named streams.
 * @param streams A list of named streams that contain all the transcription/annotation data required.
 * @param schema The layer schema, defining layers and the way they interrelate.
 * @return A list of parameters that require setting before {@link IDeserializer#deserialize()} can be invoked. This may be an empty list, and may include parameters with the value already set to a workable default. If there are parameters, and user interaction is possible, then the user may be presented with an interface for setting/confirming these parameters, before they are then passed to {@link IDeserializer#setParameters(ParameterSet)}.
 * @throws SerializationException If the graph could not be loaded.
 * @throws IOException On IO error.
 * @throws SerializerNotConfiguredException If the configuration is not sufficient for deserialization.
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
public ParameterSet load(NamedStream[] streams, Schema schema)
        throws IOException, SerializationException, SerializerNotConfiguredException {
    if (getFieldDelimiter() == null)
        throw new SerializerNotConfiguredException("fieldDelimiter must be set.");
    ParameterSet parameters = new ParameterSet();

    // take the first csv stream, ignore all others.
    NamedStream csv = Utility.FindSingleStream(streams, ".csv", "text/csv");
    if (csv == null)
        throw new SerializationException("No CSV stream found");
    setName(csv.getName());
    setName(getName().replaceFirst("\\.csv$", "").replaceFirst("\\.ag$", ""));

    reset();

    CSVParser parser = new CSVParser(new InputStreamReader(csv.getStream()),
            CSVFormat.EXCEL.withDelimiter(fieldDelimiter.charAt(0)));
    mDiscoveredLayers = new HashMap<String, Layer>();
    Vector<CSVRecord> vRecords = new Vector<CSVRecord>();
    mCsvData.put("anchor", vRecords); // start with anchors

    // read all the lines, and extract the layer names
    for (CSVRecord line : parser) {
        // is this a layer definition line?
        if (line.get(0).equals("layer")) {
            Layer layer = new Layer(line.get(1), line.get(2), Integer.parseInt(line.get(5)), true, // peers
                    false, // peersOverlap
                    false, // saturated
                    line.get(4).equals("W") ? schema.getWordLayerId() // parentId
                            : line.get(4).equals("M") ? schema.getTurnLayerId() // parentId
                                    : line.get(4).equals("F") ? "graph" : "segments", // parentId
                    true); // parentIncludes
            int layerId = Integer.parseInt(line.get(6));
            if (layerId == 11) // turn
            {
                layer.setParentId(schema.getParticipantLayerId());
            } else if (layerId == 12) // utterance
            {
                layer.setSaturated(true);
            } else if (layerId == 0) // transcription
            {
                layer.setParentId(schema.getTurnLayerId());
            } else if (layerId == 2) // orthography
            {
                layer.setPeers(false);
                layer.setSaturated(true);
            } else if (layerId == 1) // segments
            {
                layer.setSaturated(true);
            }
            layer.put("@layer_id", layerId);
            layer.put("@type", line.get(3));
            layer.put("@scope", line.get(4));
            mDiscoveredLayers.put(line.get(1), layer);
            Parameter p = new Parameter(layer.getId(), Layer.class, layer.getId(), layer.getDescription(),
                    true);
            p.setValue(schema.getLayer(layer.getId()));
            p.setPossibleValues(schema.getLayers().values());
            parameters.addParameter(p);

            // start a new set of records
            vRecords = new Vector<CSVRecord>();
            mCsvData.put(layer.getId(), vRecords);
        }
        vRecords.add(line);
    } // next line
    parser.close();

    return parameters;
}
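
The javadoc above describes a two-phase protocol: load(...) returns a ParameterSet for the caller to confirm before deserialization proceeds. A hedged sketch of how a caller might drive it, assuming the IDeserializer methods named in the javadoc (the variables streams and schema, and the return type of deserialize(), are assumptions here):

    AgCsvDeserializer deserializer = new AgCsvDeserializer();
    // Phase 1: discover layers in the stream and collect parameters to confirm.
    ParameterSet parameters = deserializer.load(streams, schema);
    // ... present the parameters to the user, or accept the defaults ...
    deserializer.setParameters(parameters);
    // Phase 2: perform the actual deserialization (return type assumed).
    Graph[] graphs = deserializer.deserialize();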

From source file:nzilbb.csv.CsvDeserializer.java

/**
 * Loads the serialized form of the graph, using the given set of named streams.
 * @param streams A list of named streams that contain all the
 *  transcription/annotation data required, and possibly (a) stream(s) for the media annotated.
 * @param schema The layer schema, defining layers and the way they interrelate.
 * @return A list of parameters that require setting before {@link IDeserializer#deserialize()}
 * can be invoked. This may be an empty list, and may include parameters with the value already
 * set to a workable default. If there are parameters, and user interaction is possible, then
 * the user may be presented with an interface for setting/confirming these parameters, before
 * they are then passed to {@link IDeserializer#setParameters(ParameterSet)}.
 * @throws SerializationException If the graph could not be loaded.
 * @throws IOException On IO error.
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
public ParameterSet load(NamedStream[] streams, Schema schema) throws SerializationException, IOException {
    // take the first stream, ignore all others.
    NamedStream csv = Utility.FindSingleStream(streams, ".csv", "text/csv");
    if (csv == null)
        throw new SerializationException("No CSV stream found");
    setName(csv.getName());

    setSchema(schema);

    // create a list of layers we need and possible matching layer names
    LinkedHashMap<Parameter, List<String>> layerToPossibilities = new LinkedHashMap<Parameter, List<String>>();
    HashMap<String, LinkedHashMap<String, Layer>> layerToCandidates = new HashMap<String, LinkedHashMap<String, Layer>>();

    LinkedHashMap<String, Layer> metadataLayers = new LinkedHashMap<String, Layer>();
    for (Layer layer : schema.getRoot().getChildren().values()) {
        if (layer.getAlignment() == Constants.ALIGNMENT_NONE) {
            metadataLayers.put(layer.getId(), layer);
        }
    } // next root child layer

    // look for person attributes
    for (Layer layer : schema.getParticipantLayer().getChildren().values()) {
        if (layer.getAlignment() == Constants.ALIGNMENT_NONE) {
            metadataLayers.put(layer.getId(), layer);
        }
    } // next participant child layer
    LinkedHashMap<String, Layer> utteranceAndMetadataLayers = new LinkedHashMap<String, Layer>(metadataLayers);
    utteranceAndMetadataLayers.put(getUtteranceLayer().getId(), getUtteranceLayer());
    LinkedHashMap<String, Layer> whoAndMetadataLayers = new LinkedHashMap<String, Layer>(metadataLayers);
    whoAndMetadataLayers.put(getParticipantLayer().getId(), getParticipantLayer());

    // read the header line

    setParser(CSVParser.parse(csv.getStream(), java.nio.charset.Charset.forName("UTF-8"),
            CSVFormat.EXCEL.withHeader()));
    setHeaderMap(parser.getHeaderMap());
    Vector<String> possibleIDHeaders = new Vector<String>();
    Vector<String> possibleUtteranceHeaders = new Vector<String>();
    Vector<String> possibleParticipantHeaders = new Vector<String>();
    for (String header : getHeaderMap().keySet()) {
        if (header.trim().length() == 0)
            continue;
        Vector<String> possibleMatches = new Vector<String>();
        possibleMatches.add("transcript" + header);
        possibleMatches.add("participant" + header);
        possibleMatches.add("speaker" + header);
        possibleMatches.add(header);

        // special cases
        if (header.equalsIgnoreCase("id") || header.equalsIgnoreCase("transcript")) {
            possibleIDHeaders.add(header);
        } else if (header.equalsIgnoreCase("text") || header.equalsIgnoreCase("document")) {
            possibleUtteranceHeaders.add(header);
        } else if (header.equalsIgnoreCase("name") || header.equalsIgnoreCase("participant")
                || header.equalsIgnoreCase("participantid")) {
            possibleParticipantHeaders.add(header);
        }

        layerToPossibilities.put(new Parameter("header_" + getHeaderMap().get(header), Layer.class, header),
                possibleMatches);
        layerToCandidates.put("header_" + getHeaderMap().get(header), metadataLayers);
    } // next header

    ParameterSet parameters = new ParameterSet();

    // add utterance/participant parameters
    int defaultUtterancePossibilityIndex = 0;

    // if there are no obvious ID column possibilities...
    Parameter idColumn = new Parameter("id", String.class, "ID Column", "Column containing the ID of the text.",
            false);
    if (possibleIDHeaders.size() == 0) { // ...include all columns
        possibleIDHeaders.addAll(getHeaderMap().keySet());
    } else {
        idColumn.setValue(possibleIDHeaders.firstElement());
    }
    idColumn.setPossibleValues(possibleIDHeaders);
    parameters.addParameter(idColumn);

    // if there are no obvious participant column possibilities...      
    if (possibleParticipantHeaders.size() == 0) { // ...include all columns
        possibleParticipantHeaders.addAll(getHeaderMap().keySet());
        // default participant column will be the first column,
        // so default utterance should be the second (if we didn't find obvious possible text column)
        if (possibleParticipantHeaders.size() > 1) // but only if there's more than one column
        {
            defaultUtterancePossibilityIndex = 1;
        }
    }
    Parameter participantColumn = new Parameter("who", "Participant Column",
            "Column containing the ID of the author of the text.", true,
            possibleParticipantHeaders.firstElement());
    participantColumn.setPossibleValues(possibleParticipantHeaders);
    parameters.addParameter(participantColumn);

    // if there are no obvious text column possibilities...
    if (possibleUtteranceHeaders.size() == 0) { // ...include all columns
        possibleUtteranceHeaders.addAll(getHeaderMap().keySet());
    } else {
        // we found a possible text column, so run with it regardless of whether we also found
        // a possible participant column
        defaultUtterancePossibilityIndex = 0;
    }
    Parameter utteranceColumn = new Parameter("text", "Text Column", "Column containing the transcript text.",
            true, possibleUtteranceHeaders.elementAt(defaultUtterancePossibilityIndex));
    utteranceColumn.setPossibleValues(possibleUtteranceHeaders);
    parameters.addParameter(utteranceColumn);

    // add column-mapping parameters, and set possible/default values
    for (Parameter p : layerToPossibilities.keySet()) {
        List<String> possibleNames = layerToPossibilities.get(p);
        LinkedHashMap<String, Layer> candidateLayers = layerToCandidates.get(p.getName());
        parameters.addParameter(p);
        if (p.getValue() == null && candidateLayers != null && possibleNames != null) {
            p.setValue(Utility.FindLayerById(candidateLayers, possibleNames));
        }
        if (p.getPossibleValues() == null && candidateLayers != null) {
            p.setPossibleValues(candidateLayers.values());
        }
    }
    return parameters;
}

From source file:org.andresoft.datasource.FileReformatTest.java

@Test
public void testReformatChicagoFoodInpectionCsv() throws IOException {
    // Columns: Inspection ID | DBA Name | AKA Name | License # | Facility Type | Risk | Address |
    // City | State | Zip | Inspection Date | Inspection Type | Results | Violations | Latitude |
    // Longitude | Location
    System.setProperty("line.separator", "\n");
    Reader in = new FileReader("/Development/andresoft/hadoop_data/Food_Inspections_chicago.csv");
    File file = new File("/Development/andresoft/hadoop_data/Food_Inspections_chicago_nomalized-2.csv");
    FileWriter fw = new FileWriter(file);

    final CSVPrinter printer = CSVFormat.DEFAULT
            .withHeader("Inspection ID", "DBA Name", "AKA Name", "License #", "Facility Type", "Risk",
                    "Address", "City", "State", "Zip", "Inspection Date", "Inspection Type", "Results",
                    "Violation Number", "Violation", "Comments", "Latitude", "Longitude", "Loacation")
            .print(fw);

    final CSVParser parser = new CSVParser(in, CSVFormat.EXCEL.withHeader());

    // Iterable<CSVRecord> records = CSVFormat.EXCEL.parse(in);
    for (CSVRecord record : parser) {
        String inspectionId = record.get("Inspection ID");
        String dbaName = record.get("DBA Name");
        String akaName = record.get("AKA Name");
        String licenseNum = record.get("License #");
        String facility = record.get("Facility Type");
        String risk = record.get("Risk");
        String address = record.get("Address");
        String city = record.get("City");
        String state = record.get("State");
        String zip = record.get("Zip");
        String inspectionDate = record.get("Inspection Date");
        String inspectionType = record.get("Inspection Type");
        String results = record.get("Results");
        String violations = record.get("Violations");
        String latitude = record.get("Latitude");
        String longitude = record.get("Longitude");
        String location = record.get("Location");

        String[] violationsArray = violations.split("\\|");
        for (String v : violationsArray) {
            String comments = "None";
            String violation = "None";
            String[] violationWithComment = v.split("Comments:");
            if (violationWithComment.length == 2) {
                violation = violationWithComment[0];
                comments = violationWithComment[1];
            } else {
                violation = violationWithComment[0];
            }
            if (!StringUtils.isBlank(violation)) {
                int violationNumberEndIndex = violation.indexOf('.');
                int violationNumber = Integer.parseInt(violation.substring(0, violationNumberEndIndex).trim());

                printer.printRecord(inspectionId, dbaName, akaName, licenseNum, facility, risk, address, city,
                        state, zip, inspectionDate, inspectionType, results, violationNumber,
                        violation.substring(violationNumberEndIndex + 1), comments, latitude, longitude,
                        location);
            }
        }

    }
    printer.close();
    in.close();
}

From source file:org.apache.ambari.view.hive.resources.uploads.CSVParser.java

public CSVParser(Reader reader, ParseOptions parseOptions) throws IOException {
    this.originalReader = reader;
    this.parseOptions = parseOptions;
    // always create without headers
    parser = new org.apache.commons.csv.CSVParser(reader, CSVFormat.EXCEL);
    iterator = new CSVIterator(parser.iterator());
}

From source file:org.apache.ambari.view.hive.resources.uploads.parsers.csv.CSVParser.java

public CSVParser(Reader reader, ParseOptions parseOptions) throws IOException {
    super(reader, parseOptions);
    parser = new org.apache.commons.csv.CSVParser(this.reader, CSVFormat.EXCEL);
    iterator = new CSVIterator(parser.iterator());
}

From source file:org.apache.beam.sdk.extensions.sql.meta.provider.text.TextTableProviderTest.java

@Test
public void testBuildBeamSqlTable_customizedFormat() throws Exception {
    Table table = mockTable("hello", "Excel");
    BeamSqlTable sqlTable = provider.buildBeamSqlTable(table);

    assertNotNull(sqlTable);
    assertTrue(sqlTable instanceof BeamTextCSVTable);

    BeamTextCSVTable csvTable = (BeamTextCSVTable) sqlTable;
    assertEquals(CSVFormat.EXCEL, csvTable.getCsvFormat());
}

From source file:org.apache.camel.component.salesforce.AnalyticsApiIntegrationTest.java

@Override
protected RouteBuilder doCreateRouteBuilder() throws Exception {
    return new RouteBuilder() {
        @Override
        public void configure() throws Exception {

            // get Report SObject by DeveloperName
            from("direct:queryReport")
                    .to("salesforce:query?sObjectClass=" + QueryRecordsReport.class.getName());

            from("direct:getRecentReports").to("salesforce:getRecentReports");

            from("direct:getReportDescription").to("salesforce:getReportDescription");

            from("direct:executeSyncReport").to("salesforce:executeSyncReport");

            from("direct:executeAsyncReport").to("salesforce:executeAsyncReport?includeDetails=true");

            from("direct:getReportInstances").to("salesforce:getReportInstances");

            from("direct:getReportResults").to("salesforce:getReportResults");

            CsvDataFormat csv = new CsvDataFormat(CSVFormat.EXCEL);

            // type converter test
            from("direct:convertResults").convertBodyTo(List.class).marshal(csv);
        }
    };
}
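
The route above only marshals a List to CSV. The same CsvDataFormat instance can also unmarshal CSV text back into rows; a hedged sketch of one extra route inside the same configure() method (the endpoint URIs are hypothetical, and camel-csv is assumed to yield a List of rows):

            // hypothetical additional route reusing the `csv` data format above
            from("direct:parseResults").unmarshal(csv).to("mock:parsedResults");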

From source file:org.apache.camel.dataformat.csv.CsvDataFormatTest.java

@Test
public void shouldUseFormatFromConstructor() {
    CsvDataFormat dataFormat = new CsvDataFormat(CSVFormat.EXCEL);

    // Properly initialized
    assertSame(CSVFormat.EXCEL, dataFormat.getFormat());

    // Properly used
    assertEquals(CSVFormat.EXCEL, dataFormat.getActiveFormat());
}

From source file:org.apache.camel.dataformat.csv.CsvDataFormatTest.java

@Test
public void shouldFallbackToDefaultFormat() {
    CsvDataFormat dataFormat = new CsvDataFormat(CSVFormat.EXCEL).setFormat(null);

    // Properly saved
    assertSame(CSVFormat.DEFAULT, dataFormat.getFormat());

    // Properly used
    assertEquals(CSVFormat.DEFAULT, dataFormat.getActiveFormat());
}