Example usage for org.apache.commons.csv CSVFormat TDF

List of usage examples for org.apache.commons.csv CSVFormat TDF

Introduction

In this page you can find the example usage for org.apache.commons.csv CSVFormat TDF.

Prototype

CSVFormat TDF

To view the source code for org.apache.commons.csv CSVFormat TDF, click the Source Link below.

Document

Tab-delimited format.

Usage

From source file:org.ohdsi.whiteRabbit.WhiteRabbitMain.java

/**
 * Builds a {@link DbSettings} describing the export target from the values currently
 * entered in the "target" panel of the UI.
 * <p>
 * For the "Delimited text files" target the selected CSV flavor is mapped to a
 * commons-csv {@link CSVFormat}; for database targets the connection fields and the
 * database type are filled in.
 *
 * @return the populated settings, or {@code null} when validation fails (an error
 *         dialog has already been shown in that case)
 */
private DbSettings getTargetDbSettings() {
    DbSettings dbSettings = new DbSettings();
    if (targetType.getSelectedItem().equals("Delimited text files")) {
        dbSettings.dataType = DbSettings.CSVFILES;

        switch ((String) targetCSVFormat.getSelectedItem()) {
        case "Default (comma, CRLF)":
            dbSettings.csvFormat = CSVFormat.DEFAULT;
            break;
        case "RFC4180":
            dbSettings.csvFormat = CSVFormat.RFC4180;
            break;
        case "Excel CSV":
            dbSettings.csvFormat = CSVFormat.EXCEL;
            break;
        case "TDF (tab, CRLF)":
            dbSettings.csvFormat = CSVFormat.TDF;
            break;
        case "MySQL (tab, LF)":
            dbSettings.csvFormat = CSVFormat.MYSQL;
            break;
        default:
            // Unknown selection: fall back to the RFC 4180 standard format.
            dbSettings.csvFormat = CSVFormat.RFC4180;
        }

    } else {
        dbSettings.dataType = DbSettings.DATABASE;
        dbSettings.user = targetUserField.getText();
        dbSettings.password = targetPasswordField.getText();
        dbSettings.server = targetServerField.getText();
        dbSettings.database = targetDatabaseField.getText();
        // FIX: the PostgreSQL / SQL Server / PDW branches previously read the *source*
        // widgets (sourceType, sourceUserField) in this *target* settings method — an
        // apparent copy-paste error. All branches now consistently use the target widgets.
        String selectedType = targetType.getSelectedItem().toString();
        if (selectedType.equals("MySQL"))
            dbSettings.dbType = DbType.MYSQL;
        else if (selectedType.equals("Oracle"))
            dbSettings.dbType = DbType.ORACLE;
        else if (selectedType.equals("PostgreSQL"))
            dbSettings.dbType = DbType.POSTGRESQL;
        else if (selectedType.equals("SQL Server")) {
            dbSettings.dbType = DbType.MSSQL;
            if (targetUserField.getText().length() != 0) { // Not using windows authentication
                // A user of the form "domain/user" carries the domain before the slash.
                String[] parts = targetUserField.getText().split("/");
                if (parts.length == 2) {
                    dbSettings.user = parts[1];
                    dbSettings.domain = parts[0];
                }
            }
        } else if (selectedType.equals("PDW")) {
            dbSettings.dbType = DbType.PDW;
            if (targetUserField.getText().length() != 0) { // Not using windows authentication
                String[] parts = targetUserField.getText().split("/");
                if (parts.length == 2) {
                    dbSettings.user = parts[1];
                    dbSettings.domain = parts[0];
                }
            }
        }

        if (dbSettings.database.trim().length() == 0) {
            String message = "Please specify a name for the target database";
            JOptionPane.showMessageDialog(frame, StringUtilities.wordWrap(message, 80), "Database error",
                    JOptionPane.ERROR_MESSAGE);
            return null;
        }
    }
    return dbSettings;
}

From source file:org.phenotips.oo.OmimSourceParser.java

/**
 * Downloads the OMIM-HPO annotation file and attaches each symptom to the matching
 * OMIM term already present in {@code this.data}.
 *
 * @param positive {@code true} to load positive annotations into the
 *            {@code actual_symptom} field, {@code false} for negative annotations
 *            into {@code actual_not_symptom}
 */
private void loadSymptoms(boolean positive) {
    String annotationsUrl = positive ? POSITIVE_ANNOTATIONS_URL : NEGATIVE_ANNOTATIONS_URL;
    String targetField = positive ? "actual_symptom" : "actual_not_symptom";
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(
            new URL(annotationsUrl).openConnection().getInputStream(), ENCODING))) {
        for (CSVRecord record : CSVFormat.TDF.parse(reader)) {
            // Only rows from the OMIM database are of interest.
            if (!"OMIM".equals(record.get(0))) {
                continue;
            }
            SolrInputDocument term = this.data.get(record.get(1));
            if (term != null) {
                term.addField(targetField, record.get(4));
            }
        }
    } catch (IOException ex) {
        this.logger.error("Failed to load OMIM-HPO links: {}", ex.getMessage(), ex);
    }
}

From source file:org.phenotips.oo.OmimSourceParser.java

/**
 * Downloads the OMIM gene annotation file and records the approved gene symbol and
 * Ensembl gene identifier on each matching OMIM term in {@code this.data}.
 * Values equal to {@code "-"} are treated as missing and skipped.
 */
private void loadGenes() {
    final String missing = "-";
    try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(new URL(GENE_ANNOTATIONS_URL).openConnection().getInputStream(), ENCODING))) {
        for (CSVRecord record : CSVFormat.TDF.withHeader().parse(reader)) {
            // Only rows describing genes are relevant.
            if (!record.get("Type").contains("gene")) {
                continue;
            }
            SolrInputDocument term = this.data.get(record.get(2));
            if (term == null) {
                continue;
            }
            String symbol = record.get("Approved Gene Symbol");
            if (!missing.equals(symbol)) {
                term.addField(GENE_FIELD, symbol);
            }
            String ensemblId = record.get("Ensembl Gene ID");
            if (!missing.equals(ensemblId)) {
                term.addField(GENE_FIELD, ensemblId);
            }
        }
    } catch (IOException ex) {
        this.logger.error("Failed to load OMIM-Gene links: {}", ex.getMessage(), ex);
    }
}

From source file:org.phenotips.oo.OmimSourceParser.java

/**
 * Downloads the OMIM-GeneReviews mapping file and stores a GeneReviews book link
 * on each matching OMIM term in {@code this.data}.
 */
private void loadGeneReviews() {
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(
            new URL(GENEREVIEWS_MAPPING_URL).openConnection().getInputStream(), ENCODING))) {
        for (CSVRecord record : CSVFormat.TDF.withHeader().parse(reader)) {
            SolrInputDocument term = this.data.get(record.get(2));
            if (term == null) {
                continue;
            }
            String link = "https://www.ncbi.nlm.nih.gov/books/" + record.get(0);
            term.addField("gene_reviews_link", link);
        }
    } catch (IOException ex) {
        this.logger.error("Failed to load OMIM-GeneReviews links: {}", ex.getMessage(), ex);
    }
}

From source file:org.phenotips.variantstore.input.exomiser6.tsv.Exomiser6TSVIterator.java

/**
 * Create a new TSV iterator for files output by Exomiser.
 *
 * @param path          the path to the file
 * @param variantHeader the header with file meta-information
 * @throws IllegalStateException if the file cannot be opened or parsed
 */
public Exomiser6TSVIterator(Path path, VariantHeader variantHeader) {
    super(path, variantHeader);

    Reader reader = null;
    try {
        reader = new FileReader(this.path.toString());
        this.tsvParser = CSVFormat.TDF.parse(reader);
    } catch (IOException e) {
        logger.error(String.format("Error when opening file %s, this should NOT be happening", this.path), e);
        // FIX: previously the exception was only logged, leaving tsvParser null and
        // guaranteeing an uninformative NPE at tsvParser.iterator() below, while also
        // leaking the reader when parse() failed. Close the reader and fail fast.
        if (reader != null) {
            try {
                reader.close();
            } catch (IOException ignored) {
                // Best-effort cleanup; the original error was already logged above.
            }
        }
        throw new IllegalStateException("Failed to open TSV file " + this.path, e);
    }

    this.tsvRecordIterator = tsvParser.iterator();
    // Skip the header row.
    if (this.hasNext()) {
        tsvRecordIterator.next();
    }
}

From source file:org.phenotips.variantstore.input.tsv.AbstractTSVIterator.java

/**
 * Create a new TSV iterator for files, such as those outputted by Exomiser.
 *
 * @param path          the path to the file
 * @param variantHeader the header with file meta-information
 * @throws IllegalStateException if the file cannot be opened or parsed
 */
public AbstractTSVIterator(Path path, VariantHeader variantHeader) {
    super(path, variantHeader);

    Reader reader = null;
    try {
        reader = new FileReader(this.path.toString());
        this.tsvParser = CSVFormat.TDF.parse(reader);
    } catch (IOException e) {
        logger.error(String.format("Error when opening file %s, this should NOT be happening", this.path), e);
        // FIX: previously the exception was only logged, leaving tsvParser null and
        // guaranteeing an uninformative NPE at tsvParser.iterator() below, while also
        // leaking the reader when parse() failed. Close the reader and fail fast.
        if (reader != null) {
            try {
                reader.close();
            } catch (IOException ignored) {
                // Best-effort cleanup; the original error was already logged above.
            }
        }
        throw new IllegalStateException("Failed to open TSV file " + this.path, e);
    }

    this.tsvRecordIterator = tsvParser.iterator();
    // Read column names from the first row, stripping any leading '#' markers.
    if (this.hasNext()) {
        this.columns = new ArrayList<String>();
        for (String field : tsvRecordIterator.next()) {
            // Remove leading hashes
            columns.add(field.replaceAll("^#+", ""));
        }
    }
}

From source file:org.phenotips.variantstore.input.tsv.ExomiserTSVIterator.java

/**
 * Create a new TSV iterator for files output by Exomiser.
 *
 * @param path          the path to the file
 * @param variantHeader the header with file meta-information
 * @throws IllegalStateException if the file cannot be opened or parsed
 */
public ExomiserTSVIterator(Path path, VariantHeader variantHeader) {
    super(path, variantHeader);

    Reader reader = null;
    try {
        reader = new FileReader(this.path.toString());
        this.tsvParser = CSVFormat.TDF.parse(reader);
    } catch (IOException e) {
        logger.error(String.format("Error when opening file %s, this should NOT be happening", this.path), e);
        // FIX: previously the exception was only logged, leaving tsvParser null and
        // guaranteeing an uninformative NPE at tsvParser.iterator() below, while also
        // leaking the reader when parse() failed. Close the reader and fail fast.
        if (reader != null) {
            try {
                reader.close();
            } catch (IOException ignored) {
                // Best-effort cleanup; the original error was already logged above.
            }
        }
        throw new IllegalStateException("Failed to open TSV file " + this.path, e);
    }

    this.tsvRecordIterator = tsvParser.iterator();
    // Skip the header row.
    if (this.hasNext()) {
        tsvRecordIterator.next();
    }
}

From source file:org.phenotips.vocabulary.internal.GeneNomenclature.java

/**
 * Loads the HGNC gene nomenclature from the given URL into Solr input documents.
 * <p>
 * Each tab-separated row becomes one document; the {@code hgnc_id} column is mapped
 * to the identifier field, every other non-blank column is split on {@code |} into a
 * multi-valued field.
 *
 * @param url the location of the tab-separated HGNC dump
 * @return the parsed documents, or {@code null} if reading or parsing failed
 */
@Override
protected Collection<SolrInputDocument> load(URL url) {
    // FIX: the reader was never closed, leaking the connection's input stream;
    // try-with-resources guarantees cleanup on both success and failure.
    try (Reader in = new InputStreamReader(url.openConnection().getInputStream(), Charset.forName("UTF-8"))) {
        Collection<SolrInputDocument> solrDocuments = new HashSet<>();

        for (CSVRecord row : CSVFormat.TDF.withHeader().parse(in)) {
            SolrInputDocument crtTerm = new SolrInputDocument();
            for (Map.Entry<String, String> item : row.toMap().entrySet()) {
                if ("hgnc_id".equals(item.getKey())) {
                    crtTerm.addField(ID_FIELD_NAME, item.getValue());
                } else if (StringUtils.isNotBlank(item.getValue())) {
                    // Multi-valued columns use '|' as an internal separator.
                    crtTerm.addField(item.getKey(), StringUtils.split(item.getValue(), "|"));
                }
            }
            solrDocuments.add(crtTerm);
        }
        addMetaInfo(solrDocuments);
        return solrDocuments;
    } catch (IOException ex) {
        this.logger.warn("Failed to read/parse the HGNC source: {}", ex.getMessage());
    }
    return null;
}

From source file:org.phenotips.vocabulary.internal.hpoannotations.AbstractPhenotypeForDiseaseAnnotationsExtension.java

/**
 * {@inheritDoc}
 * <p>
 * The annotation files are tab-separated, so the plain TDF format suffices.
 */
@Override
protected CSVFormat setupCSVParser(Vocabulary vocabulary) {
    // The vocabulary argument is not needed to choose the format here.
    final CSVFormat tabSeparated = CSVFormat.TDF;
    return tabSeparated;
}

From source file:org.phenotips.vocabulary.internal.hpoannotations.GeneForPhenotypesAnnotationsExtension.java

/**
 * {@inheritDoc}
 * <p>
 * Configures a tab-separated format with named headers and {@code #} comment lines.
 */
@Override
protected CSVFormat setupCSVParser(Vocabulary vocabulary) {
    // Bug in commons-csv: although duplicate null headers are allowed in CSVParser, CSVFormat#validate doesn't
    // allow more than one null header
    final CSVFormat format = CSVFormat.TDF
            .withCommentMarker('#')
            .withAllowMissingColumnNames()
            .withHeader("id", null, "", "associated_genes");
    return format;
}