Example usage for org.apache.commons.csv CSVParser getHeaderMap

List of usage examples for org.apache.commons.csv CSVParser getHeaderMap

Introduction

On this page you can find example usages of org.apache.commons.csv CSVParser#getHeaderMap.

Prototype

public Map<String, Integer> getHeaderMap() 

Source Link

Document

Returns a copy of the header map that iterates in column order.

Usage

From source file:com.chargebee.Application.MappingHeaders.java

/**
 * Extracts header-mapping data from the JSON configuration and rewrites the CSV header
 * map accordingly, then prints the resulting headers and triggers validation/writing.
 *
 * For each JSON key, the mapped array's first element names the source column(s) and the
 * second element names the action: "print" renames that column to the JSON key, while
 * "merge" removes the listed columns and replaces them with a single "merge" column.
 *
 * @param jobj    mapping configuration: JSON key -> [column name(s), action]
 * @param printer destination CSV printer
 * @param parser  source CSV parser whose header map drives the mapping
 * @throws Exception if the JSON is malformed or downstream printing/validation fails
 */
private void extractJsonData(JSONObject jobj, CSVPrinter printer, CSVParser parser) throws Exception {
    ArrayList<String> listMerger = new ArrayList<>();
    // getHeaderMap() returns a copy, so mutating it here does not affect the parser.
    // Values start out as Integer column indices but are overwritten with String JSON
    // keys below, hence the widened value type (the original used a raw Map).
    @SuppressWarnings("unchecked")
    Map<String, Object> map = (Map<String, Object>) (Map<?, ?>) parser.getHeaderMap();
    System.out.println(map);
    Iterator<?> iterator = jobj.keys();

    while (iterator.hasNext()) {
        String key = (String) iterator.next();
        JSONArray tempo = jobj.getJSONArray(key);

        if (tempo.getString(1).equals("print")) {
            if (map.containsKey(tempo.getString(0))) {
                // remember the original column index, then rename the column to the JSON key
                colNumber.add((int) map.get(tempo.getString(0)));
                map.put(tempo.getString(0), key);
            }
        } else if (tempo.getString(1).equals("merge")) {
            // Columns listed for merging are dropped individually and replaced by a
            // single synthetic "merge" column mapped to this JSON key.
            JSONArray merging = tempo.getJSONArray(0);
            for (int i = 0; i < merging.length(); i++) {
                listMerger.add(merging.getString(i));
                // remove() is a no-op for absent keys, so no containsKey guard is needed
                map.remove(merging.getString(i));
            }
            map.put("merge", key);
        }
    }
    System.out.println(map);
    // raw Collection kept to match the (unseen) print(...) signature — TODO confirm
    Collection headers = map.values();
    System.out.println(headers);
    System.out.println(colNumber);
    print(headers, printer);
    validateAndWrite(parser, printer, listMerger);
}

From source file:io.mindmaps.migration.csv.CSVDataMigrator.java

/**
 * Initialises this migrator with the name of the entity to create and the CSV source
 * whose header map and record iterator are captured for the migration run.
 *
 * @param entityName name to be given to the migrated entity
 * @param parser CSV parser of the file to migrate
 * @return this migrator, to allow call chaining
 */
public CSVDataMigrator configure(String entityName, CSVParser parser) {
    this.headers = parser.getHeaderMap();
    this.records = parser.iterator();
    this.entityName = entityName;
    return this;
}

From source file:ch.eitchnet.csvrestendpoint.marshaller.CsvDataToJsonMarshaller.java

/**
 * Marshals the CSV data into a JSON object: filters the records, optionally sorts them by
 * a single column, applies paging, and emits the requested page together with paging
 * metadata. Column pruning via {@code returnFields} is applied before serialisation.
 *
 * @param csvParser the parser providing header map and records
 * @return the JSON response envelope with metadata and a "data" array
 */
@Override
public JsonObject marshall(CSVParser csvParser) {

    // make sure every configured column reference exists in this CSV's header
    Map<String, Integer> headerMap = csvParser.getHeaderMap();
    validate(headerMap);

    // collect the records that pass the filter, counting the full data set as we go
    List<CSVRecord> selected = new ArrayList<>();
    long dataSetSize = 0;
    for (CSVRecord record : csvParser) {
        dataSetSize++;
        if (isSelected(headerMap, record)) {
            selected.add(record);
        }
    }

    // optional single-column sort, ascending or descending
    if (StringHelper.isNotEmpty(sortBy)) {
        Integer sortColumn = headerMap.get(sortBy);
        if (this.ascending) {
            selected.sort((a, b) -> a.get(sortColumn).compareTo(b.get(sortColumn)));
        } else {
            selected.sort((a, b) -> b.get(sortColumn).compareTo(a.get(sortColumn)));
        }
    }

    // apply paging and fetch the requested page
    Paging<CSVRecord> paging = Paging.asPage(selected, this.pageSize, this.page);
    List<CSVRecord> pageRecords = paging.getPage();

    // assemble the JSON envelope (property insertion order is part of the response shape)
    JsonObject root = new JsonObject();
    root.addProperty("msg", "-");
    root.addProperty("draw", this.draw);
    root.addProperty("dataSetSize", dataSetSize);
    root.addProperty("nrOfElements", paging.getNrOfElements());

    if (StringHelper.isNotEmpty(sortBy)) {
        root.addProperty("sortBy", this.sortBy);
    }
    root.addProperty("ascending", this.ascending);
    root.addProperty("nrOfPages", paging.getNrOfPages());
    root.addProperty("pageSize", paging.getPageSize());
    root.addProperty("page", paging.getPageToReturn());

    // restrict the (copied) header map to the requested return fields, if any
    if (!this.returnFields.isEmpty()) {
        headerMap.keySet().retainAll(this.returnFields);
    }

    // one JSON object per record on the page, keyed by column name
    JsonArray data = new JsonArray();
    for (CSVRecord record : pageRecords) {
        JsonObject element = new JsonObject();
        for (Entry<String, Integer> column : headerMap.entrySet()) {
            element.addProperty(column.getKey(), record.get(column.getValue()));
        }
        data.add(element);
    }
    root.add("data", data);

    return root;
}

From source file:com.edu.duke.FileResource.java

/**
 * Allows access to the column names of the header row of a CSV file (the first line in
 * the file) one at a time. If the CSV file did not have a header row, an empty
 * <code>Iterable</code> is returned.
 *
 * @param parser the <code>CSVParser</code> that has been created for this file
 * @return an <code>Iterable</code> that allows access one header name at a time
 */
public Iterable<String> getCSVHeaders(CSVParser parser) {
    // CSVParser.getHeaderMap() returns null when the format was built without a header;
    // guard against that so the documented empty-Iterable contract actually holds
    // (the original would have thrown a NullPointerException here).
    java.util.Map<String, Integer> headerMap = parser.getHeaderMap();
    if (headerMap == null) {
        return java.util.Collections.<String>emptyList();
    }
    return headerMap.keySet();
}

From source file:com.ge.research.semtk.load.dataset.CSVDataset.java

/**
 * Constructor that takes a string (either a file path or file content).
 *
 * @param filePathOrContent file path, or file content
 * @param isFileContent true for file contents, false for file path
 * @throws Exception if the file cannot be read or the CSV parser cannot be created
 */
public CSVDataset(String filePathOrContent, boolean isFileContent) throws Exception {

    if (isFileContent) {
        this.csvString = filePathOrContent;
    } else {
        // NOTE(review): reads with the platform default charset; confirm the expected
        // file encoding before switching to an explicit charset overload.
        this.csvString = FileUtils.readFileToString(new File(filePathOrContent));
    }
    CSVParser parser = getParser(new StringReader(this.csvString));
    this.recordIterator = parser.iterator();

    // Get and set the header info. getHeaderMap() maps column name -> column index,
    // so invert it into a positional array of column names.
    // (A duplicate/empty-header check used to live here but was removed because it
    // caused other problems; re-add proper validation if that ever becomes an issue.)
    Map<String, Integer> headerMap = parser.getHeaderMap();
    this.headers = new String[headerMap.size()];

    for (Map.Entry<String, Integer> entry : headerMap.entrySet()) {
        this.headers[entry.getValue()] = entry.getKey();
    }
}

From source file:com.ibm.util.merge.directive.provider.ProviderTag.java

/**
 * Reset the table, and if the Tag exists, add a row with the tag name/value
 * @param cf/*from w w  w  .j  ava 2s  . c o  m*/
 */
@Override
public void getData(MergeContext rtc) throws MergeException {
    reset();
    DataTable table = addNewTable();
    Template template = getDirective().getTemplate();
    String theTag = Template.wrap(tag);
    log.info("Getting Tag Data for " + tag);

    switch (condition) {
    case ProviderTag.CONDITION_EXISTS:
        if (!template.hasReplaceKey(theTag)) {
            log.info("Tag not found for Exists Condition");
            return;
        }
        break;
    case ProviderTag.CONDITION_BLANK:
        if (!template.hasReplaceKey(theTag) || template.hasReplaceValue(theTag)) {
            log.info("Tag not found or Data found for Blank Condition");
            return;
        }
        break;
    case ProviderTag.CONDITION_NONBLANK:
        if (!template.hasReplaceKey(theTag) || !template.hasReplaceValue(theTag)) {
            log.info("Tag or Empty Data found for Non-Blank Condition");
            return;
        }
        break;
    case ProviderTag.CONDITION_EQUALS:
        if (!template.hasReplaceKey(theTag) || !template.hasReplaceValue(theTag)
                || !template.getReplaceValue(theTag).equals(value)) {
            log.info("Tag not Equals or not found");
            return;
        }
        break;
    }

    // We have a match, so add data
    String data = template.getReplaceValue(Template.wrap(tag));
    log.info("Data Found: " + data);
    table.addCol(tag);
    if (isList()) {
        CSVParser parser;
        try {
            parser = new CSVParser(new StringReader(data), CSVFormat.EXCEL.withHeader());
            for (String colName : parser.getHeaderMap().keySet()) {
                ArrayList<String> row = table.addNewRow();
                row.add(colName);
            }
            parser.close();
        } catch (IOException e) {
            throw new MergeException(this, e, "CSV Parser Stringreader IO Exception", data);
        }
    } else {
        ArrayList<String> row = table.addNewRow();
        row.add(data);
    }
}

From source file:javalibs.CSVExtractor.java

/**
 * Reads the CSV file at {@code this.inCSV}, capturing its header map and all records for
 * later extraction. Any I/O failure is fatal and is routed to the logger.
 */
private void readCSV() {
    // try-with-resources closes the parser (and its underlying reader) even on failure;
    // the original only closed it on the success path.
    try (CSVParser parser = new CSVParser(Files.newBufferedReader(Paths.get(this.inCSV)),
            CSVFormat.DEFAULT.withHeader().withIgnoreHeaderCase().withTrim())) {

        // Header map (column name -> index) is a copy, safe to use after the parser closes
        Map<String, Integer> rawHeaders = parser.getHeaderMap();

        // Materialize all records while the parser is still open
        this.inRecords = parser.getRecords();

        orderHeaders(rawHeaders);
    } catch (IOException e) {
        log_.die(e);
    }
}

From source file:ch.silviowangler.i18n.ResourceBundler.java

/**
 * Generates one {@code .properties} resource bundle per language from the configured CSV
 * file. The first CSV record supplies the header (one column per language); each further
 * record contributes one key/value pair per language bundle.
 *
 * @throws IOException if the CSV file cannot be read or a properties file cannot be written
 */
public void generateResourceBundle() throws IOException {

    final long recordCount;

    // try-with-resources closes the parser and the underlying file stream
    // (the original never closed either of them).
    try (CSVParser records = CSVFormat.RFC4180.withDelimiter(separator.charAt(0)).withFirstRecordAsHeader()
            .withQuoteMode(QuoteMode.ALL)
            .parse(new InputStreamReader(new FileInputStream(this.csvFile), this.inputEncoding))) {

        // header map iterates in column order
        final Map<String, Integer> headers = records.getHeaderMap();
        processHeader(headers.keySet());

        for (CSVRecord record : records) {
            processData(record);
        }

        recordCount = records.getRecordNumber();
    }

    final int propertiesFilesAmount = this.propertiesStore.size();
    LOGGER.info("Will generate {} properties files with {} records each", propertiesFilesAmount,
            recordCount);

    // Write the properties files, one per language
    for (int i = 0; i < propertiesFilesAmount; i++) {
        Map<String, String> properties = this.propertiesStore.get(i);
        File outputFile = new File(this.outputDir,
                this.bundleBaseName + "_" + this.languages.get(i) + ".properties");

        LOGGER.info("Writing {} to {}", outputFile.getName(), outputFile.getParentFile().getAbsolutePath());

        FileOutputStream outputStream = new FileOutputStream(outputFile);

        // closing the writer also closes the wrapped FileOutputStream
        try (OutputStreamWriter writer = new OutputStreamWriter(outputStream,
                this.native2ascii ? Consts.ASCII : this.outputEncoding)) {
            properties.forEach((key, value) -> {
                try {
                    writer.append(key).append("=").append(value).append("\n");
                } catch (IOException e) {
                    // NOTE(review): a failed append is only printed, not propagated —
                    // confirm whether a partially written bundle should abort instead.
                    e.printStackTrace();
                }
            });
            writer.flush();
        }
    }
}

From source file:javalibs.CSVDataNormalizer.java

/**
 * Reads the CSV file at {@code this.csvPath}, capturing its header map and all records
 * for later normalization. Any I/O failure is fatal and is routed to the logger.
 */
private void readCSV() {
    // try-with-resources closes the parser (and its underlying reader) even on failure;
    // the original only closed it on the success path.
    try (CSVParser parser = new CSVParser(Files.newBufferedReader(Paths.get(this.csvPath)),
            CSVFormat.DEFAULT.withHeader().withIgnoreHeaderCase().withTrim())) {

        // Header map (column name -> index) is a copy, safe to keep after the parser closes;
        // it is used later when writing the file back out
        this.headerMap = parser.getHeaderMap();

        // Materialize all records while the parser is still open
        this.allRecords = parser.getRecords();

        reverseHeaderMap();
    } catch (IOException e) {
        log_.die(e);
    }
}

From source file:com.xceptance.xlt.common.tests.AbstractURLTestCase.java

/**
 * Loads the CSV-driven test data before each test run. Reads the data file configured
 * for the current user, validates all header fields, and converts every consistent CSV
 * line into a {@code CSVBasedURLAction}. The test fails on unsupported header fields
 * and, after parsing completes, aborts when any line was incorrectly formatted.
 *
 * @throws IOException if the data file cannot be opened or read
 */
@Before
public void loadData() throws IOException {
    login = getProperty("login", getProperty("com.xceptance.xlt.auth.userName"));
    password = getProperty("password", getProperty("com.xceptance.xlt.auth.password"));

    // load the data. Ideally we would offload the file searching to
    // XltProperties.getDataFile(String name)
    // or XltProperties.getDataFile(String name, String locale)
    // or XltProperties.getDataFile(String name, Locale locale)
    final String dataDirectory = XltProperties.getInstance().getProperty(
            XltConstants.XLT_PACKAGE_PATH + ".data.directory", "config" + File.separatorChar + "data");
    // default data file is "<current user name>.csv" unless overridden via "filename"
    final File file = new File(dataDirectory,
            getProperty("filename", Session.getCurrent().getUserName() + ".csv"));

    BufferedReader br = null;
    boolean incorrectLines = false;

    try {
        br = new BufferedReader(new InputStreamReader(new FileInputStream(file), "UTF-8"));

        // permit # as comment, empty lines, set comma as separator, and activate the header
        final CSVFormat csvFormat = CSVFormat.RFC4180.toBuilder().withIgnoreEmptyLines(true)
                .withCommentStart('#').withHeader().withIgnoreSurroundingSpaces(true).build();
        final CSVParser parser = new CSVParser(br, csvFormat);
        final Iterator<CSVRecord> csvRecords = parser.iterator();

        // verify header fields to avoid problems with incorrect spelling or spaces
        final Map<String, Integer> headerMap = parser.getHeaderMap();

        for (final String headerField : headerMap.keySet()) {
            if (!CSVBasedURLAction.isPermittedHeaderField(headerField)) {
                Assert.fail(MessageFormat.format("Unsupported or misspelled header field: {0}", headerField));
            }
        }

        // go over all lines; this is a little odd, because we have to catch the iterator exception
        while (true) {
            try {
                final boolean hasNext = csvRecords.hasNext();
                if (!hasNext) {
                    break;
                }
            } catch (final Exception e) {
                // the plus 1 is meant to correct the increment missing because of the exception
                throw new RuntimeException(
                        MessageFormat.format("Line at {0} is invalid, because of <{1}>. Line is ignored.",
                                parser.getLineNumber() + 1, e.getMessage()));
            }

            final CSVRecord csvRecord = csvRecords.next();

            // only take lines whose field count matches the header
            if (csvRecord.isConsistent()) {
                // guard against data exceptions
                try {
                    // do we have an url?
                    if (csvRecord.get(CSVBasedURLAction.URL) != null) {
                        // take it
                        csvBasedActions.add(new CSVBasedURLAction(csvRecord, interpreter));
                    } else {
                        XltLogger.runTimeLogger.error(MessageFormat.format(
                                "Line at {0} does not contain any URL. Line is ignored: {1}",
                                parser.getLineNumber(), csvRecord));
                    }
                } catch (final Exception e) {
                    throw new RuntimeException(MessageFormat.format(
                            "Line at {0} is invalid, because of <{2}>. Line is ignored: {1}",
                            parser.getLineNumber(), csvRecord, e.getMessage()));
                }
            } else {
                // inconsistent record: log it and remember so we can abort after the loop
                XltLogger.runTimeLogger.error(MessageFormat.format(
                        "Line at {0} has not been correctly formatted. Line is ignored: {1}",
                        parser.getLineNumber(), csvRecord));
                incorrectLines = true;
            }
        }
    } finally {
        // NOTE(review): only the reader is closed; the parser itself is never closed,
        // which is tolerable here because closing br releases the underlying stream.
        IOUtils.closeQuietly(br);
    }

    // stop if anything is incorrect; avoid half-running test cases
    if (incorrectLines) {
        throw new RuntimeException("Found incorrectly formatted lines. Stopping here.");
    }
}