Example usage for org.apache.commons.csv CSVRecord get

List of usage examples for org.apache.commons.csv CSVRecord get

Introduction

On this page you can find example usage for org.apache.commons.csv CSVRecord get.

Prototype

public String get(final String name) 

Source Link

Document

Returns a value by name.

Usage

From source file:org.apache.phoenix.util.csv.CsvUpsertExecutor.java

/**
 * Upserts the values of a single CSV record through the prepared statement.
 *
 * <p>Errors are reported to the {@code upsertListener} rather than propagated.
 *
 * @param csvRecord CSV record containing the data to be upserted
 */
void execute(CSVRecord csvRecord) {
    try {
        int expectedFields = conversionFunctions.size();
        if (csvRecord.size() < expectedFields) {
            throw new IllegalArgumentException(
                    String.format("CSV record does not have enough values (has %d, but needs %d)",
                            csvRecord.size(), expectedFields));
        }
        // Bind each converted field; JDBC parameters are 1-based.
        for (int i = 0; i < expectedFields; i++) {
            Object sqlValue = conversionFunctions.get(i).apply(csvRecord.get(i));
            if (sqlValue == null) {
                preparedStatement.setNull(i + 1, dataTypes.get(i).getSqlType());
            } else {
                preparedStatement.setObject(i + 1, sqlValue);
            }
        }
        preparedStatement.execute();
        upsertListener.upsertDone(++upsertCount);
    } catch (Exception e) {
        // Debug-level on purpose: the listener is notified below and can do its
        // own logging if needed.
        if (LOG.isDebugEnabled()) {
            LOG.debug("Error on CSVRecord " + csvRecord, e);
        }
        upsertListener.errorOnRecord(csvRecord, e);
    }
}

From source file:org.apache.ranger.unixusersync.process.FileSourceUserGroupBuilder.java

/**
 * Reads a delimiter-separated user/group file and maps each user to its groups.
 *
 * <p>The first field of each record is the user name; every remaining non-empty
 * field is a group name. A single leading and/or trailing double quote is
 * stripped from users and groups.
 *
 * @param textFile delimited text file to read
 * @return map of user name to (possibly empty) list of group names
 * @throws Exception if the file cannot be opened or parsed
 */
public Map<String, List<String>> readTextFile(File textFile) throws Exception {

    Map<String, List<String>> ret = new HashMap<String, List<String>>();

    String delimiter = config.getUserSyncFileSourceDelimiter();

    CSVFormat csvFormat = CSVFormat.newFormat(delimiter.charAt(0));

    // Fixed resource leak: close() was previously only reached on the happy
    // path, so any parse error leaked the parser and its FileReader.
    try (CSVParser csvParser = new CSVParser(new BufferedReader(new FileReader(textFile)), csvFormat)) {

        List<CSVRecord> csvRecordList = csvParser.getRecords();

        if (csvRecordList != null) {
            for (CSVRecord csvRecord : csvRecordList) {
                List<String> groups = new ArrayList<String>();
                String user = csvRecord.get(0);

                // Strip surrounding double quotes, if present.
                user = user.replaceAll("^\"|\"$", "");

                int i = csvRecord.size();

                for (int j = 1; j < i; j++) {
                    String group = csvRecord.get(j);
                    if (group != null && !group.isEmpty()) {
                        group = group.replaceAll("^\"|\"$", "");
                        groups.add(group);
                    }
                }
                ret.put(user, groups);
            }
        }
    }

    return ret;
}

From source file:org.apache.storm.sql.runtime.serde.csv.CsvScheme.java

/**
 * Deserializes one UTF-8, RFC 4180 encoded CSV line into its field values.
 *
 * @param ser buffer holding a single CSV-encoded record
 * @return the record's field values, in column order
 * @throws RuntimeException wrapping any IOException raised by the CSV parser
 */
@Override
public List<Object> deserialize(ByteBuffer ser) {
    String data = new String(Utils.toByteArray(ser), StandardCharsets.UTF_8);
    // Fixed resource leak: the parser was previously never closed.
    try (CSVParser parser = CSVParser.parse(data, CSVFormat.RFC4180)) {
        CSVRecord record = parser.getRecords().get(0);
        Preconditions.checkArgument(record.size() == fieldNames.size(), "Invalid schema");

        ArrayList<Object> list = new ArrayList<>(fieldNames.size());
        for (int i = 0; i < record.size(); i++) {
            list.add(record.get(i));
        }
        return list;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file:org.apache.tika.parser.isatab.ISATabUtils.java

/**
 * Renders an ISA-Tab study file (tab-delimited) as an XHTML table: the first
 * row becomes the header, all remaining rows the body.
 *
 * @param stream   raw study file content
 * @param xhtml    handler receiving the generated markup
 * @param metadata metadata used by the charset auto-detection
 * @param context  parse context that may supply a TikaConfig
 * @throws IOException   on read failure
 * @throws TikaException on charset detection failure
 * @throws SAXException  if the content handler fails
 */
public static void parseStudy(InputStream stream, XHTMLContentHandler xhtml, Metadata metadata,
        ParseContext context) throws IOException, TikaException, SAXException {
    TikaInputStream tis = TikaInputStream.get(stream);
    // Automatically detect the character encoding with the configured detector.
    TikaConfig tikaConfig = context.get(TikaConfig.class);
    if (tikaConfig == null) {
        tikaConfig = TikaConfig.getDefaultConfig();
    }
    try (AutoDetectReader reader = new AutoDetectReader(new CloseShieldInputStream(tis), metadata,
            tikaConfig.getEncodingDetector()); CSVParser csvParser = new CSVParser(reader, CSVFormat.TDF)) {
        Iterator<CSVRecord> rows = csvParser.iterator();

        xhtml.startElement("table");

        // First record -> header cells.
        xhtml.startElement("thead");
        if (rows.hasNext()) {
            CSVRecord headerRow = rows.next();
            for (int col = 0; col < headerRow.size(); col++) {
                xhtml.startElement("th");
                xhtml.characters(headerRow.get(col));
                xhtml.endElement("th");
            }
        }
        xhtml.endElement("thead");

        // Remaining records -> body rows.
        xhtml.startElement("tbody");
        while (rows.hasNext()) {
            CSVRecord bodyRow = rows.next();
            xhtml.startElement("tr");
            for (int col = 0; col < bodyRow.size(); col++) {
                xhtml.startElement("td");
                xhtml.characters(bodyRow.get(col));
                xhtml.endElement("td");
            }
            xhtml.endElement("tr");
        }
        xhtml.endElement("tbody");

        xhtml.endElement("table");
    }
}

From source file:org.apache.tika.parser.isatab.ISATabUtils.java

/**
 * Renders an ISA-Tab assay file (tab-delimited) as an XHTML table: the first
 * row becomes the header, all remaining rows the body.
 *
 * @param stream   raw assay file content
 * @param xhtml    handler receiving the generated markup
 * @param metadata metadata used by the charset auto-detection
 * @param context  parse context that may supply a TikaConfig
 * @throws IOException   on read failure
 * @throws TikaException on charset detection failure
 * @throws SAXException  if the content handler fails
 */
public static void parseAssay(InputStream stream, XHTMLContentHandler xhtml, Metadata metadata,
        ParseContext context) throws IOException, TikaException, SAXException {
    TikaInputStream tis = TikaInputStream.get(stream);

    // Automatically detect the character encoding with the configured detector.
    TikaConfig tikaConfig = context.get(TikaConfig.class);
    if (tikaConfig == null) {
        tikaConfig = TikaConfig.getDefaultConfig();
    }
    try (AutoDetectReader reader = new AutoDetectReader(new CloseShieldInputStream(tis), metadata,
            tikaConfig.getEncodingDetector()); CSVParser csvParser = new CSVParser(reader, CSVFormat.TDF)) {
        xhtml.startElement("table");

        Iterator<CSVRecord> rows = csvParser.iterator();

        // First record -> header cells.
        xhtml.startElement("thead");
        if (rows.hasNext()) {
            CSVRecord headerRow = rows.next();
            for (int col = 0; col < headerRow.size(); col++) {
                xhtml.startElement("th");
                xhtml.characters(headerRow.get(col));
                xhtml.endElement("th");
            }
        }
        xhtml.endElement("thead");

        // Remaining records -> body rows.
        xhtml.startElement("tbody");
        while (rows.hasNext()) {
            CSVRecord bodyRow = rows.next();
            xhtml.startElement("tr");
            for (int col = 0; col < bodyRow.size(); col++) {
                xhtml.startElement("td");
                xhtml.characters(bodyRow.get(col));
                xhtml.endElement("td");
            }
            xhtml.endElement("tr");
        }
        xhtml.endElement("tbody");

        xhtml.endElement("table");
    }
}

From source file:org.apache.tika.parser.isatab.ISATabUtils.java

/**
 * Extracts investigation- and study-level metadata from an ISA-Tab
 * investigation file.
 *
 * <p>A record with a single all-uppercase field marks a section header. Fields
 * inside investigation sections are added to {@code metadata} directly; fields
 * of the study section are buffered in {@code map} and flushed once the record
 * naming {@code studyFileName} is found, after which subsequent study fields go
 * straight to {@code metadata} until the next section header ends the scan.
 *
 * @param reader        reader over the tab-delimited investigation file
 * @param metadata      destination for the extracted metadata
 * @param studyFileName study file whose section should be extracted, or null
 * @throws IOException on read failure
 */
private static void extractMetadata(Reader reader, Metadata metadata, String studyFileName) throws IOException {
    boolean investigationSection = false;
    boolean studySection = false;
    boolean studyTarget = false;

    Map<String, String> map = new HashMap<String, String>();

    // Fixed: dropped a pointless "catch (IOException ioe) { throw ioe; }"
    // that merely re-threw the exception unchanged.
    try (CSVParser csvParser = new CSVParser(reader, CSVFormat.TDF)) {
        Iterator<CSVRecord> iterator = csvParser.iterator();

        while (iterator.hasNext()) {
            CSVRecord record = iterator.next();
            String field = record.get(0);
            if ((field.toUpperCase(Locale.ENGLISH).equals(field)) && (record.size() == 1)) {
                // Section header row: a single all-uppercase field.
                investigationSection = Arrays.asList(sections).contains(field);
                studySection = (studyFileName != null) && (field.equals(studySectionField));
            } else {
                if (investigationSection) {
                    addMetadata(field, record, metadata);
                } else if (studySection) {
                    if (studyTarget) {
                        break;
                    }
                    // NOTE(review): assumes every study-section row has at least
                    // two fields; a one-field row here would throw -- confirm
                    // against the ISA-Tab format upstream.
                    String value = record.get(1);
                    map.put(field, value);
                    studyTarget = (field.equals(studyFileNameField)) && (value.equals(studyFileName));
                    if (studyTarget) {
                        mapStudyToMetadata(map, metadata);
                        studySection = false;
                    }
                } else if (studyTarget) {
                    addMetadata(field, record, metadata);
                }
            }
        }
    }
}

From source file:org.apache.tika.parser.isatab.ISATabUtils.java

/**
 * Adds every value of {@code record} after the first as metadata under
 * {@code field}. Does nothing for a null or single-field record.
 *
 * @param field    metadata key (the record's first field)
 * @param record   CSV record supplying the values
 * @param metadata destination metadata collection
 */
private static void addMetadata(String field, CSVRecord record, Metadata metadata) {
    if (record == null || record.size() <= 1) {
        return;
    }
    int fieldCount = record.size();
    for (int idx = 1; idx < fieldCount; idx++) {
        metadata.add(field, record.get(idx));
    }
}

From source file:org.asoem.greyfish.utils.space.cluster.DBSCANTest.java

@Test
public void testCluster() throws Exception {
    // given: the 150 iris petal lengths projected onto one dimension
    final Function<CSVRecord, ImmutablePoint1D> toPoint = new Function<CSVRecord, ImmutablePoint1D>() {
        @Nullable
        @Override
        public ImmutablePoint1D apply(final CSVRecord input) {
            return ImmutablePoint1D.at(Double.parseDouble(input.get("Petal.Length")));
        }
    };
    final ImmutableList<ImmutablePoint1D> objects =
            ImmutableList.copyOf(Iterables.transform(csvRecords, toPoint));
    assert objects.size() == 150;
    final double eps = 0.2;
    final int minPts = 5;
    final DBSCAN<ImmutablePoint1D> dbscan = DBSCAN.create(eps, minPts, Points.euclideanDistance());

    // when
    final DBSCANResult result = dbscan.apply(objects);

    // then: the petal lengths form exactly two density clusters
    assertThat((Collection<Object>) result.cluster(), hasSize(2));
}

From source file:org.asoem.greyfish.utils.space.cluster.DBSCANTest.java

@Test
public void testAlternativeNeighborSearchAlgorithm() throws Exception {
    // given: the iris petal lengths and a mocked neighbor-search strategy
    final Function<CSVRecord, ImmutablePoint1D> toPoint = new Function<CSVRecord, ImmutablePoint1D>() {
        @Nullable
        @Override
        public ImmutablePoint1D apply(final CSVRecord input) {
            return ImmutablePoint1D.at(Double.parseDouble(input.get("Petal.Length")));
        }
    };
    final ImmutableList<ImmutablePoint1D> objects =
            ImmutableList.copyOf(Iterables.transform(csvRecords, toPoint));
    assert objects.size() == 150;
    final double eps = 0.2;
    final int minPts = 5;
    final NeighborSearch<ImmutablePoint1D> neighborSearch = mock(NeighborSearch.class);
    final DBSCAN<ImmutablePoint1D> dbscan = DBSCAN.create(eps, minPts, neighborSearch);

    // when
    dbscan.apply(objects);

    // then: the injected strategy is actually consulted for neighbor lookups
    verify(neighborSearch, atLeastOnce()).filterNeighbors(eq(objects), any(ImmutablePoint1D.class), eq(eps));
}

From source file:org.asqatasun.referential.creator.CodeGeneratorMojo.java

/**
 * Writes the theme, criterion and rule labels of one CSV record to the i18n
 * files for the given language.
 *
 * <p>When the referential has no criterion column, the rule's label and code
 * stand in for the criterion. A blank theme, criterion label or criterion code
 * is treated as a malformed CSV file.
 *
 * @param fg     generator that writes the i18n files
 * @param record CSV record holding the per-language label columns
 * @param lang   language code suffix of the label columns
 * @throws IOException               on write failure
 * @throws InvalidParameterException when a required column is blank
 */
private void writeToI18NFile(FileGenerator fg, CSVRecord record, String lang)
        throws IOException, InvalidParameterException {
    Integer themeIndex = Integer.valueOf(record.get(THEME_CODE_COLUMN_NAME));
    String theme = record.get(THEME_LABEL_COLUMN_NAME + lang);
    String critere;
    String critereCode;
    String test = record.get(TEST_LABEL_COLUMN_NAME + lang);
    String testCode = record.get(TEST_CODE_COLUMN_NAME);
    if (isCriterionPresent) {
        critere = record.get(CRITERION_LABEL_COLUMN_NAME + lang);
        critereCode = record.get(CRITERION_CODE_COLUMN_NAME);
    } else {
        // No criterion column: the rule doubles as its own criterion.
        critere = test;
        critereCode = testCode;
    }
    if (StringUtils.isBlank(theme) || StringUtils.isBlank(critere) || StringUtils.isBlank(critereCode)) {
        throw new InvalidParameterException("Your csv file has an empty column");
    }
    // NOTE(review): raw Map kept because writei18NFile's signature is not
    // visible here -- consider parameterizing both ends.
    Map themeMap = Collections.singletonMap(themeIndex, theme);
    Map critereMap = Collections.singletonMap(critereCode, critere);
    Map testMap = Collections.singletonMap(testCode, test);
    // themeIndex comes from Integer.valueOf, so String.valueOf(themeIndex) can
    // never be blank; the old isNotBlank check on it was redundant.
    if (StringUtils.isNotBlank(theme)) {
        fg.writei18NFile(themeMap, lang, langSet.first(), "theme");
    }
    if (StringUtils.isNotBlank(critere) && StringUtils.isNotBlank(critereCode)) {
        fg.writei18NFile(critereMap, lang, langSet.first(), "criterion");
    }
    if (StringUtils.isNotBlank(test) && StringUtils.isNotBlank(testCode)) {
        fg.writei18NFile(testMap, lang, langSet.first(), "rule");
    }
    // Fixed: "== false" replaced with idiomatic negation.
    if (!IS_I18N_REFERENTIAL_CREATED) {
        fg.writei18NFile(null, lang, langSet.first(), "referential");
    }
}