Usage examples for the `org.apache.commons.csv.CSVRecord#get` method.
Signature: `public String get(final String name)` (examples below also use the `get(int index)` overload).
From source file:org.phenotips.vocabulary.AbstractCSVAnnotationsExtension.java
/** * Helper method that gets the cell on the specified column, as string, if it exists, without throwing exceptions. * * @param row the {@link CSVRecord row} currently being processed * @param colNumber the number of the column of interest * @return the value on the target column, if such value exists, {@code null} otherwise *///from w ww.j av a2s . c om protected String getRowItem(@Nonnull final CSVRecord row, final int colNumber) { if (colNumber < row.size()) { return row.get(colNumber); } return null; }
From source file:org.phenotips.vocabulary.internal.solr.MendelianInheritanceInMan.java
private void parseOmimData(URL sourceUrl) { try {// w ww . ja va2 s. com Reader in = new InputStreamReader(sourceUrl.openConnection().getInputStream(), Charset.forName(ENCODING)); for (CSVRecord row : CSVFormat.TDF.withCommentMarker('#').parse(in)) { // Ignore moved or removed entries if ("Caret".equals(row.get(0))) { continue; } SolrInputDocument crtTerm = new SolrInputDocument(); // set id addFieldValue(ID_FIELD, row.get(1), crtTerm); // set symbol addFieldValue(SYMBOL_FIELD, SYMBOLS.get(row.get(0)), crtTerm); // set type (multivalued) for (String type : TYPES.get(row.get(0))) { addFieldValue(TYPE_FIELD, type, crtTerm); } // set name String name = StringUtils.substringBefore(row.get(2), TITLE_SEPARATOR).trim(); addFieldValue(NAME_FIELD, name, crtTerm); // set short name String shortNameString = StringUtils.substringAfter(row.get(2), TITLE_SEPARATOR).trim(); String[] shortNames = StringUtils.split(shortNameString, TITLE_SEPARATOR); for (String shortName : shortNames) { addFieldValue(SHORT_NAME_FIELD, shortName.trim(), crtTerm); } // set synonyms setListFieldValue(SYNONYM_FIELD, row.get(3), crtTerm); // set included name setListFieldValue(INCLUDED_NAME_FIELD, row.get(4), crtTerm); this.data.put(String.valueOf(crtTerm.get(ID_FIELD).getFirstValue()), crtTerm); } } catch (IOException ex) { this.logger.warn("Failed to read/parse the OMIM source: {}", ex.getMessage()); } }
From source file:org.phenotips.vocabulary.internal.solr.MendelianInheritanceInMan.java
/**
 * Loads the OMIM-to-gene annotations and attaches the gene identifiers found on each row to
 * the matching, already-loaded OMIM term.
 */
private void loadGenes()
{
    try (BufferedReader in = new BufferedReader(new InputStreamReader(
        new URL(GENE_ANNOTATIONS_URL).openConnection().getInputStream(), ENCODING))) {
        for (CSVRecord row : CSVFormat.TDF.withCommentMarker('#').parse(in)) {
            SolrInputDocument term = this.data.get(row.get(0).trim());
            if (term == null) {
                // Annotation for an entry that wasn't loaded, skip it
                continue;
            }
            String geneSymbol = row.get(3).trim();
            if (StringUtils.isNotBlank(geneSymbol)) {
                term.addField(GENE_FIELD, geneSymbol);
            }
            String idList = row.get(4).trim();
            if (StringUtils.isNotBlank(idList)) {
                // The column holds a comma-separated list of additional gene identifiers
                for (String id : StringUtils.split(idList, ",")) {
                    term.addField(GENE_FIELD, id.trim());
                }
            }
        }
    } catch (IOException ex) {
        this.logger.error("Failed to load OMIM-Gene links: {}", ex.getMessage(), ex);
    }
}
From source file:org.phenotips.vocabulary.internal.solr.MendelianInheritanceInMan.java
/**
 * Loads the OMIM-to-GeneReviews mapping and sets a GeneReviews book link on each matching,
 * already-loaded OMIM term.
 */
private void loadGeneReviews()
{
    try (BufferedReader in = new BufferedReader(new InputStreamReader(
        new URL(GENEREVIEWS_MAPPING_URL).openConnection().getInputStream(), ENCODING))) {
        for (CSVRecord row : CSVFormat.TDF.withHeader().parse(in)) {
            SolrInputDocument term = this.data.get(row.get(2));
            if (term == null) {
                // Mapping for an entry that wasn't loaded, skip it
                continue;
            }
            term.setField("gene_reviews_link", "https://www.ncbi.nlm.nih.gov/books/" + row.get(0));
        }
    } catch (IOException ex) {
        this.logger.error("Failed to load OMIM-GeneReviews links: {}", ex.getMessage(), ex);
    }
}
From source file:org.phenotips.vocabulary.internal.solr.OmimSourceParser.java
/**
 * Loads the OMIM-HPO symptom annotations (either the positive or the negative/"NOT" set) into
 * the already-loaded OMIM terms, and accumulates the HPO ancestors of each disorder's symptoms.
 *
 * @param positive {@code true} to load the positive annotations, {@code false} for the
 *            negative annotations
 */
private void loadSymptoms(boolean positive) {
    String omimId = "";
    String previousOmimId = null;
    Set<String> ancestors = new HashSet<>();
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(new URL(positive ? POSITIVE_ANNOTATIONS_URL : NEGATIVE_ANNOTATIONS_URL)
            .openConnection().getInputStream(), ENCODING))) {
        for (CSVRecord row : CSVFormat.TDF.parse(in)) {
            // Only process rows whose first column marks an OMIM disorder
            if ("OMIM".equals(row.get(0))) {
                omimId = row.get(1);
                // NOTE(review): presumably flushes the ancestors accumulated so far when the
                // disorder id changes from previousOmimId to omimId — confirm against addAncestors
                addAncestors(previousOmimId, omimId, ancestors, positive);
                previousOmimId = omimId;
                SolrInputDocument term = this.data.get(omimId);
                if (term != null) {
                    // Column 4 holds the annotated HPO term id
                    term.addField(positive ? "actual_symptom" : "actual_not_symptom", row.get(4));
                }
                // Accumulate the ancestors (and self) of each annotated HPO term
                VocabularyTerm vterm = this.hpo.getTerm(row.get(4));
                if (vterm != null) {
                    for (VocabularyTerm ancestor : vterm.getAncestorsAndSelf()) {
                        ancestors.add(ancestor.getId());
                    }
                }
            }
        }
        // Flush the ancestors accumulated for the last disorder in the file
        addAncestors(omimId, null, ancestors, positive);
    } catch (IOException ex) {
        this.logger.error("Failed to load OMIM-HPO links: {}", ex.getMessage(), ex);
    }
}
From source file:org.seasr.meandre.components.transform.text.CSVTextToTokenCounts.java
@Override public void executeCallBack(ComponentContext cc) throws Exception { Hashtable<String, Integer> htCounts = new Hashtable<String, Integer>(); for (String text : DataTypeParser.parseAsString(cc.getDataComponentFromInput(IN_TEXT))) { // boolean skippedHeader = false; //String[][] data = ... .getAllValues(); // CSVParser parser = new CSVParser(new StringReader(text), strategy); // CSVParser parser = new CSVParser(new StringReader(text), format); // String[] tokens = uninitialisedLine; // while (tokens != null) { console.finer("received text:\n" + text + "\n"); for (CSVRecord tokens : format.parse(new StringReader(text))) { // tokens = parser.getLine(); // if (tokens == null) break; // if (bHeader && !skippedHeader) { // skippedHeader = true; // continue; // } // String token = tokens[tokenPos]; console.fine("processing row " + tokens.toString()); if (tokens.size() <= tokenPos || tokens.size() <= countPos) { console.warning(//from w w w . j a va2s . co m String.format("csv row %d too short (%d) for count pos %d or token pos %d - discarding", tokens.getRecordNumber(), tokens.size(), countPos, tokenPos)); continue; } String token = tokens.get(tokenPos); int count = 0; try { count = Integer.parseInt(tokens.get(countPos)); } catch (NumberFormatException e) { console.warning(String.format("Token '%s' had malformed count '%s' - assigning zero!", token, tokens.get(countPos))); } if (htCounts.containsKey(token)) console.warning(String.format( "Token '%s' occurs more than once in the dataset - replacing previous count %d with %d...", token, htCounts.get(token), count)); htCounts.put(token, count); } } cc.pushDataComponentToOutput(OUT_TOKEN_COUNTS, BasicDataTypesTools.mapToIntegerMap(htCounts, bOrdered)); }
From source file:org.shareok.data.documentProcessor.CsvHandler.java
/** * Reads out the data in an excel file and stores data in a hashmap * <p>Also sets the total record number and file heading</p> * /*from w w w . ja va2s.c om*/ * @throws Exception */ @Override public void readData() { FileReader fileReader = null; CSVParser csvFileParser = null; String[] headMapping = null; //CSVFormat csvFileFormat = CSVFormat.DEFAULT.withHeader(FILE_HEADER_MAPPING); try { //initialize FileReader object fileReader = new FileReader(fileName); //initialize CSVParser object if (null == csvFormat) { csvFormat = CSVFormat.DEFAULT; } csvFileParser = new CSVParser(fileReader, csvFormat); //Get a list of CSV file records List csvRecords = csvFileParser.getRecords(); int size = csvRecords.size(); setRecordCount(size); data = new HashMap(); //Read the CSV file records starting from the second record to skip the header for (int i = 0; i < size; i++) { CSVRecord record = (CSVRecord) csvRecords.get(i); if (null != record) { if (i == 0) { List headMappingList = new ArrayList(); Iterator it = record.iterator(); while (it.hasNext()) { String value = (String) it.next(); headMappingList.add(value); } headMapping = new String[headMappingList.size()]; headMapping = (String[]) headMappingList.toArray(headMapping); setFileHeadMapping(headMapping); } else { for (int j = 0; j < fileHeadMapping.length; j++) { String colName = fileHeadMapping[j].trim(); String key = colName + "-" + i; data.put(key, record.get(j)); } } } } } catch (Exception e) { System.out.println("Error in CsvFileReader !!!"); e.printStackTrace(); } finally { try { fileReader.close(); csvFileParser.close(); } catch (IOException e) { System.out.println("Error while closing fileReader/csvFileParser !!!"); e.printStackTrace(); } } }
From source file:org.softinica.maven.jmeter.report.parser.CSVReportParser.java
/**
 * Parses a JMeter CSV result file into an {@link Input}: each row yields one sample whose
 * timestamp, label, scaled value and success flag are taken from fixed column positions.
 *
 * @param definition describes the input file and the scale factor to apply to values
 * @return the parsed input, one sample per CSV row
 */
@Override
public Input parseInput(InputDefinition definition) {
    Input result = new Input();
    CSVParser csvParser = null;
    try {
        Reader source = new InputStreamReader(new FileInputStream(definition.getInputFile()));
        csvParser = new CSVParser(source, CSVFormat.DEFAULT);
        for (CSVRecord record : csvParser) {
            Sample sample = new Sample();
            sample.setTimestamp(Long.valueOf(record.get(0)));
            sample.setLabel(record.get(2));
            sample.setValue(Double.valueOf(record.get(4)) * definition.getScale());
            sample.setSuccess(Boolean.parseBoolean(record.get(7)));
            result.getSamples().add(sample);
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        Utils.close(csvParser);
    }
    return result;
}
From source file:org.softinica.maven.jmeter.report.parser.SimpleCSVParser.java
/**
 * Parses a CSV file whose first row is a header: every following row becomes a sample mapping
 * each header name to the value in the corresponding column.
 *
 * @param definition describes the input file to read
 * @return the parsed input, one sample per non-header CSV row
 */
@Override
public Input parseInput(InputDefinition definition) {
    CSVParser csvParser = null;
    List<String> headers = new LinkedList<String>();
    Input result = new Input();
    try {
        Reader source = new InputStreamReader(new FileInputStream(definition.getInputFile()));
        csvParser = new CSVParser(source, CSVFormat.DEFAULT);
        boolean headerRead = false;
        for (CSVRecord record : csvParser) {
            if (!headerRead) {
                // The first row supplies the column names for all following rows
                for (String columnName : record) {
                    headers.add(columnName);
                }
                headerRead = true;
                continue;
            }
            Sample sample = new Sample();
            for (int col = 0; col < record.size(); col++) {
                sample.put(headers.get(col), record.get(col));
            }
            result.getSamples().add(sample);
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        Utils.close(csvParser);
    }
    return result;
}
From source file:org.sonar.db.version.v51.FeedFileSourcesBinaryData.java
/**
 * Converts the CSV-encoded FILE_SOURCES.DATA value of one file into its binary protobuf form.
 * Each 16-column CSV row describes one line of source; rows with any other column count are
 * skipped (but still advance the line counter, as before).
 *
 * @param fileSourceId the ID of the FILE_SOURCES row, used only in error messages
 * @param data the CSV-encoded source data, possibly null
 * @return the serialized {@code DbFileSources.Data} message
 * @throws IllegalStateException if the data cannot be parsed or contains invalid values
 */
private byte[] toBinary(Long fileSourceId, @Nullable String data) {
    DbFileSources.Data.Builder dataBuilder = DbFileSources.Data.newBuilder();
    CSVParser parser = null;
    try {
        if (data != null) {
            parser = CSVParser.parse(data, CSVFormat.DEFAULT);
            Iterator<CSVRecord> rows = parser.iterator();
            int line = 1;
            while (rows.hasNext()) {
                CSVRecord row = rows.next();
                if (row.size() == 16) {
                    DbFileSources.Line.Builder lineBuilder = dataBuilder.addLinesBuilder();
                    lineBuilder.setLine(line);
                    populateScmColumns(lineBuilder, row);
                    populateCoverageColumns(lineBuilder, row);
                    populateMetadataColumns(lineBuilder, row);
                }
                line++;
            }
        }
        return FileSourceDto.encodeSourceData(dataBuilder.build());
    } catch (Exception e) {
        throw new IllegalStateException(
            "Invalid FILE_SOURCES.DATA on row with ID " + fileSourceId + ": " + data, e);
    } finally {
        IOUtils.closeQuietly(parser);
    }
}

/** Columns 0-2: SCM revision, author and date; empty cells leave the field unset. */
private void populateScmColumns(DbFileSources.Line.Builder lineBuilder, CSVRecord row) {
    String s = row.get(0);
    if (StringUtils.isNotEmpty(s)) {
        lineBuilder.setScmRevision(s);
    }
    s = row.get(1);
    if (StringUtils.isNotEmpty(s)) {
        lineBuilder.setScmAuthor(s);
    }
    Date scmDate = DateUtils.parseDateTimeQuietly(row.get(2));
    if (scmDate != null) {
        lineBuilder.setScmDate(scmDate.getTime());
    }
}

/** Columns 3-11: unit test, integration test and overall coverage counters. */
private void populateCoverageColumns(DbFileSources.Line.Builder lineBuilder, CSVRecord row) {
    String s = row.get(3);
    if (StringUtils.isNotEmpty(s)) {
        lineBuilder.setUtLineHits(Integer.parseInt(s));
    }
    s = row.get(4);
    if (StringUtils.isNotEmpty(s)) {
        lineBuilder.setUtConditions(Integer.parseInt(s));
    }
    s = row.get(5);
    if (StringUtils.isNotEmpty(s)) {
        lineBuilder.setUtCoveredConditions(Integer.parseInt(s));
    }
    s = row.get(6);
    if (StringUtils.isNotEmpty(s)) {
        lineBuilder.setItLineHits(Integer.parseInt(s));
    }
    s = row.get(7);
    if (StringUtils.isNotEmpty(s)) {
        lineBuilder.setItConditions(Integer.parseInt(s));
    }
    s = row.get(8);
    if (StringUtils.isNotEmpty(s)) {
        lineBuilder.setItCoveredConditions(Integer.parseInt(s));
    }
    s = row.get(9);
    if (StringUtils.isNotEmpty(s)) {
        lineBuilder.setOverallLineHits(Integer.parseInt(s));
    }
    s = row.get(10);
    if (StringUtils.isNotEmpty(s)) {
        lineBuilder.setOverallConditions(Integer.parseInt(s));
    }
    s = row.get(11);
    if (StringUtils.isNotEmpty(s)) {
        lineBuilder.setOverallCoveredConditions(Integer.parseInt(s));
    }
}

/** Columns 12-15: highlighting, symbols, duplications and the source line itself. */
private void populateMetadataColumns(DbFileSources.Line.Builder lineBuilder, CSVRecord row) {
    String s = row.get(12);
    if (StringUtils.isNotEmpty(s)) {
        lineBuilder.setHighlighting(s);
    }
    s = row.get(13);
    if (StringUtils.isNotEmpty(s)) {
        lineBuilder.setSymbols(s);
    }
    s = row.get(14);
    if (StringUtils.isNotEmpty(s)) {
        lineBuilder.addAllDuplication(splitIntegers(s));
    }
    // The source column is set even when empty; only null is skipped, as before
    s = row.get(15);
    if (s != null) {
        lineBuilder.setSource(s);
    }
}