List of usage examples for java.util.TreeSet.add
public boolean add(E e)
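Before the source excerpts, a minimal self-contained sketch of the add contract itself: add returns true only when the element was not already present (membership is decided by compareTo, not equals), and the set keeps its elements in sorted order.

import java.util.TreeSet;

public class TreeSetAddDemo {
    public static void main(String[] args) {
        TreeSet<String> names = new TreeSet<String>();
        System.out.println(names.add("beta"));   // true: newly inserted
        System.out.println(names.add("alpha"));  // true: newly inserted
        System.out.println(names.add("beta"));   // false: duplicate, set unchanged
        System.out.println(names);               // [alpha, beta] -- sorted order
    }
}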
From source file:org.ala.dao.TaxonConceptSHDaoImpl.java
/**
 * Adds a scientific name to the Lucene index in multiple forms to increase
 * the chances of matches.
 *
 * @param doc
 * @param scientificName
 * @param taxonRank
 */
public void addScientificNameToIndex(SolrInputDocument doc, String scientificName, String taxonRank,
        Integer taxonRankId) {

    NameParser nameParser = new NameParser();
    Integer rankId = -1;

    if (taxonRank != null) {
        Rank rank = Rank.getForField(taxonRank.toLowerCase());
        if (rank != null) {
            rankId = rank.getId();
        } else {
            // logger.warn("Unknown rank string: " + taxonRank);
        }
    }

    // remove the subgenus
    String normalized = "";
    if (scientificName != null) {
        normalized = scientificName.replaceFirst("\\([A-Za-z]{1,}\\) ", "");
    }

    ParsedName parsedName = null;
    try {
        parsedName = nameParser.parse(normalized);
    } catch (UnparsableException e) {
    }

    // store scientific name values in a set before adding to Lucene so we
    // don't get duplicates
    TreeSet<String> sciNames = new TreeSet<String>();

    if (parsedName != null) {
        if (parsedName.isBinomial() && parsedName.authorsParsed && !parsedName.isIndetermined()) {
            // add multiple versions
            sciNames.add(parsedName.canonicalName().toLowerCase());
            sciNames.add(parsedName
                    .buildName(true, false, true, false, true, true, false, false, false, false, false)
                    .toLowerCase());
        }

        // add lowercased version
        // NameParser not working correctly; the solr exact_text/scientificName fields are incorrect.
        // added condition check (e.g. olearia)
        if ((taxonRankId == null || !isIncorrectRank(parsedName, taxonRankId)) && parsedName.authorsParsed
                && !parsedName.isIndetermined()) {
            sciNames.add(parsedName.canonicalName().toLowerCase());
        } else {
            // add the supplied scientific name
            sciNames.add(scientificName.toLowerCase());
        }

        // add to Lucene
        for (String sciName : sciNames) {
            doc.addField(SCI_NAME, sciName);
        }

        Float boost = 0.8f;
        if (rankId != null) {
            // boost the major taxon classifications:
            // genus = 3f; kingdom/phylum/class/order/family/species = 2f;
            // subspecies, section, etc. = null
            if (RankType.getForId(rankId) != null && RankType.getForId(rankId).getBoost() != null) {
                boost = RankType.getForId(rankId).getBoost();
            }
        }

        doc.addField(SCI_NAME_TEXT, StringUtils.join(sciNames, " "), boost);
    } else {
        // add lowercased version if the name parser failed
        doc.addField(SCI_NAME, normalized.toLowerCase(), 0.8f);
        doc.addField(SCI_NAME_TEXT, normalized.toLowerCase(), 0.8f);
    }

    if (scientificName != null) {
        doc.addField(SCI_NAME_RAW, scientificName, 0.8f);
    }
}
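The TreeSet in the excerpt above does double duty: it drops duplicate spellings produced by the different canonicalName/buildName variants, and it yields them in a stable sorted order for the joined index field. The same pattern in isolation (the name strings here are made up for illustration):

import java.util.TreeSet;

public class NameVariantDemo {
    public static void main(String[] args) {
        TreeSet<String> sciNames = new TreeSet<String>();
        sciNames.add("aus bus");          // canonical form
        sciNames.add("aus bus (smith)");  // variant with authorship
        sciNames.add("aus bus");          // duplicate: add returns false, set unchanged
        // sorted, de-duplicated output, ready to join for the index
        System.out.println(String.join(" ", sciNames)); // "aus bus aus bus (smith)"
    }
}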
From source file:edu.umass.cs.gigapaxos.SQLPaxosLogger.java
private static Set<Filename> getAllButLatest(File[] files, int keep) {
    TreeSet<Filename> allFiles = new TreeSet<Filename>();
    TreeSet<Filename> oldFiles = new TreeSet<Filename>();
    for (File file : files)
        allFiles.add(new Filename(file));
    if (allFiles.size() <= keep)
        return oldFiles;
    Iterator<Filename> iter = allFiles.iterator();
    for (int i = 0; i < allFiles.size() - keep; i++)
        oldFiles.add(iter.next());
    return oldFiles;
}
From source file:edu.umass.cs.gigapaxos.SQLPaxosLogger.java
private static SortedSet<Filename> getLatest(File[] files, int numLatest) {
    TreeSet<Filename> allFiles = new TreeSet<Filename>();
    // note: despite its name, oldFiles collects the numLatest *newest* files here
    TreeSet<Filename> oldFiles = new TreeSet<Filename>();
    for (File file : files)
        allFiles.add(new Filename(file));
    if (allFiles.size() <= numLatest)
        return allFiles;
    Iterator<Filename> iter = allFiles.descendingIterator();
    for (int i = 0; i < numLatest; i++)
        oldFiles.add(iter.next());
    return oldFiles;
}
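These two methods split the same sorted set from opposite ends by copying elements one at a time. A hypothetical alternative (a sketch only, with String standing in for the Filename type) could locate the boundary element and return a headSet view instead:

import java.util.Iterator;
import java.util.SortedSet;
import java.util.TreeSet;

public class JournalSplitDemo {
    // Mirrors getAllButLatest above, but returns a headSet view rather than a copy.
    static SortedSet<String> allButLatest(TreeSet<String> all, int keep) {
        if (all.size() <= keep)
            return new TreeSet<String>();
        // walk 'keep' steps from the newest end to find the boundary element
        Iterator<String> desc = all.descendingIterator();
        String boundary = null;
        for (int i = 0; i < keep; i++)
            boundary = desc.next();
        // headSet(boundary) excludes the boundary, i.e. drops the 'keep' newest
        return all.headSet(boundary);
    }

    public static void main(String[] args) {
        TreeSet<String> files = new TreeSet<String>();
        files.add("log_001");
        files.add("log_002");
        files.add("log_003");
        files.add("log_004");
        System.out.println(allButLatest(files, 2)); // [log_001, log_002]
    }
}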
From source file:io.github.msdk.features.ransacaligner.RansacAlignerMethod.java
private Hashtable<FeatureTableRow, FeatureTableRow> getAlignmentMap(FeatureTable featureTable) {

    // Create a table of mappings for best scores
    Hashtable<FeatureTableRow, FeatureTableRow> alignmentMapping =
            new Hashtable<FeatureTableRow, FeatureTableRow>();

    // Create a sorted set of matching scores
    TreeSet<RowVsRowScore> scoreSet = new TreeSet<RowVsRowScore>();

    // RANSAC algorithm
    List<AlignStructMol> list = ransacPeakLists(result, featureTable);
    PolynomialFunction function = this.getPolynomialFunction(list);

    List<FeatureTableRow> allRows = featureTable.getRows();
    for (FeatureTableRow row : allRows) {
        // Calculate limits for a row with which the row can be aligned
        Range<Double> mzRange = mzTolerance.getToleranceRange(row.getMz());
        double rt;
        try {
            rt = function.value(row.getChromatographyInfo().getRetentionTime());
        } catch (NullPointerException e) {
            rt = row.getChromatographyInfo().getRetentionTime();
        }
        if (Double.isNaN(rt) || rt == -1) {
            rt = row.getChromatographyInfo().getRetentionTime();
        }
        Range<Double> rtRange = rtToleranceAfterCorrection.getToleranceRange(rt);

        // Get all rows of the aligned feature table within the m/z and RT limits
        List<FeatureTableRow> candidateRows = result.getRowsInsideRange(rtRange, mzRange);

        for (FeatureTableRow candidateRow : candidateRows) {
            RowVsRowScore score;

            // Check charge
            if (requireSameCharge) {
                FeatureTableColumn<Integer> chargeColumn1 = featureTable.getColumn(ColumnName.CHARGE, null);
                FeatureTableColumn<Integer> chargeColumn2 = result.getColumn(ColumnName.CHARGE, null);
                Integer charge1 = row.getData(chargeColumn1);
                Integer charge2 = candidateRow.getData(chargeColumn2);
                if (!charge1.equals(charge2))
                    continue;
            }

            // Check ion annotation
            if (requireSameAnnotation) {
                FeatureTableColumn<List<IonAnnotation>> ionAnnotationColumn1 =
                        featureTable.getColumn(ColumnName.IONANNOTATION, null);
                FeatureTableColumn<List<IonAnnotation>> ionAnnotationColumn2 =
                        result.getColumn(ColumnName.IONANNOTATION, null);
                List<IonAnnotation> ionAnnotations1 = row.getData(ionAnnotationColumn1);
                List<IonAnnotation> ionAnnotations2 = candidateRow.getData(ionAnnotationColumn2);

                // Check that all ion annotations in the first row are in the candidate row
                boolean equalIons = false;
                if (ionAnnotations1 != null && ionAnnotations2 != null) {
                    for (IonAnnotation ionAnnotation : ionAnnotations1) {
                        for (IonAnnotation targetIonAnnotation : ionAnnotations2) {
                            if (targetIonAnnotation.compareTo(ionAnnotation) == 0)
                                equalIons = true;
                        }
                    }
                }
                if (!equalIons)
                    continue;
            }

            try {
                double mzLength = mzRange.upperEndpoint() - mzRange.lowerEndpoint();
                double rtLength = rtRange.upperEndpoint() - rtRange.lowerEndpoint();
                score = new RowVsRowScore(row, candidateRow, mzLength, rtLength, new Float(rt));
                scoreSet.add(score);
            } catch (Exception e) {
                return null;
            }
        }
    }

    // Iterate scores by descending order
    Iterator<RowVsRowScore> scoreIterator = scoreSet.iterator();
    while (scoreIterator.hasNext()) {
        RowVsRowScore score = scoreIterator.next();

        // Check if the row is already mapped
        if (alignmentMapping.containsKey(score.getFeatureTableRow()))
            continue;

        // Check if the aligned row is already filled
        if (alignmentMapping.containsValue(score.getAlignedRow()))
            continue;

        alignmentMapping.put(score.getFeatureTableRow(), score.getAlignedRow());
    }

    return alignmentMapping;
}
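One TreeSet.add subtlety worth flagging for scoring code like the above: TreeSet deduplicates by compareTo, not equals, so if two distinct row pairs ever produce exactly equal scores, the second add is silently ignored and a candidate pairing is lost. A minimal sketch of the pitfall, with a hypothetical Score class standing in for RowVsRowScore:

import java.util.TreeSet;

public class TiedScoreDemo {
    // Hypothetical stand-in for a score wrapper whose compareTo looks only at the score.
    static class Score implements Comparable<Score> {
        final String label;
        final double value;
        Score(String label, double value) { this.label = label; this.value = value; }
        public int compareTo(Score other) { return Double.compare(this.value, other.value); }
        public String toString() { return label + "=" + value; }
    }

    public static void main(String[] args) {
        TreeSet<Score> scores = new TreeSet<Score>();
        System.out.println(scores.add(new Score("a-vs-x", 0.9))); // true
        System.out.println(scores.add(new Score("b-vs-y", 0.9))); // false: tie on compareTo, dropped
        System.out.println(scores); // [a-vs-x=0.9] -- only one survives
    }
}

Breaking ties on a secondary key (for example a row id) in compareTo avoids the collision.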
From source file:ec.gob.ceaaces.controller.MallasController.java
private void cargarDatosReporte(MallaCurricularDTO mallaReporte) throws Exception {
    mallaReporte.setAsignaturasDTO(registroServicio.obtenerAsignaturasPorMalla(mallaReporte.getId()));
    mallaReporte.setOrganizacionesCurricularDTO(
            registroServicio.obtenerOrganizacionCurricular(mallaReporte.getId()));

    for (OrganizacionCurricularDTO organizacion : mallaReporte.getOrganizacionesCurricularDTO()) {
        organizacion.setAsignaturasDTO(
                registroServicio.obtenerAsignaturas(mallaReporte.getId(), organizacion.getId()));

        TreeSet<Integer> niveles = new TreeSet<>();
        List<AsignaturasPorNivelDTO> asignaturasPorNivelDTOLista = new ArrayList<>();

        for (AsignaturaDTO asignatura : organizacion.getAsignaturasDTO()) {
            // keep only the active co-requisites
            List<RequisitoAsignaturaDTO> correquisitos = new ArrayList<>();
            for (RequisitoAsignaturaDTO co : asignatura.getCorRequisitoMalla()) {
                if (co.getActivo()) {
                    correquisitos.add(co);
                }
            }
            asignatura.getCorRequisitoMalla().clear();
            asignatura.getCorRequisitoMalla().addAll(correquisitos);

            // keep only the active prerequisites
            List<RequisitoAsignaturaDTO> prerrequisitos = new ArrayList<>();
            for (RequisitoAsignaturaDTO pre : asignatura.getPreRequisitoMalla()) {
                if (pre.getActivo()) {
                    prerrequisitos.add(pre);
                }
            }
            asignatura.getPreRequisitoMalla().clear();
            asignatura.getPreRequisitoMalla().addAll(prerrequisitos);

            AsignaturasPorNivelDTO asignaturasPorNivelDTO = new AsignaturasPorNivelDTO();
            if (TipoAsignaturaEnum.ASIGNATURA_COMPUESTA.getValue().equals(asignatura.getTipoAsignatura())) {
                asignatura.setAsignaturasHijas(registroServicio.obtenerAsignaturaHijas(asignatura.getId()));
            }

            if (asignatura.getIdAsignaturaPadre() == null && asignatura.getNivelMateria() != null) {
                // niveles.add(...) returns true only the first time a level is seen
                if (niveles.add(asignatura.getNivelMateria())) {
                    List<AsignaturaDTO> asignaturas = new ArrayList<>();
                    asignaturas.add(asignatura);
                    asignaturasPorNivelDTO.setNivelMateria(asignatura.getNivelMateria());
                    asignaturasPorNivelDTO.setAsignaturasDTO(asignaturas);
                    asignaturasPorNivelDTOLista.add(asignaturasPorNivelDTO);
                } else {
                    for (AsignaturasPorNivelDTO asignaturaNivel : asignaturasPorNivelDTOLista) {
                        if (asignaturaNivel.getNivelMateria().equals(asignatura.getNivelMateria())) {
                            asignaturaNivel.getAsignaturasDTO().add(asignatura);
                            break;
                        }
                    }
                }
            }

            if (asignatura.getNivelMateria() == null) {
                // level 9999 acts as a bucket for subjects without a level
                if (niveles.add(9999)) {
                    List<AsignaturaDTO> asignaturas = new ArrayList<>();
                    asignaturas.add(asignatura);
                    asignaturasPorNivelDTO.setNivelMateria(9999);
                    asignaturasPorNivelDTO.setAsignaturasDTO(asignaturas);
                    asignaturasPorNivelDTOLista.add(asignaturasPorNivelDTO);
                } else {
                    for (AsignaturasPorNivelDTO asignaturaNivel : asignaturasPorNivelDTOLista) {
                        if (Integer.valueOf(9999).equals(asignaturaNivel.getNivelMateria())) {
                            asignaturaNivel.getAsignaturasDTO().add(asignatura);
                            break;
                        }
                    }
                }
            }
        }
        organizacion.setAsignaturasPorNivel(asignaturasPorNivelDTOLista);
    }
}
From source file:com.hichinaschool.flashcards.anki.DeckPicker.java
private void updateDecksList(TreeSet<Object[]> decks, int eta, int count) {
    if (decks == null) {
        Log.e(AnkiDroidApp.TAG, "updateDecksList: empty decks list");
        return;
    }

    ArrayList<String> deckTitles = new ArrayList<String>();
    String currentName = null;
    Object[] defaultDeck = null;
    for (Object[] d : decks) {
        currentName = readableDeckName(((String[]) d[0]));
        if (!currentName.equals("Default"))
            deckTitles.add(currentName);
        else
            defaultDeck = d;
    }
    decks.remove(defaultDeck);

    // Note: adding Object[] entries to a TreeSet only works because the set was
    // created elsewhere with an explicit Comparator; Object[] is not Comparable.
    if (!deckTitles.contains("HSK 1 Vocabulary")) {
        String[] strings = { "HSK 1 Vocabulary" };
        decks.add(new Object[] { strings, Long.valueOf(-10), 0, 0, 0, false,
                "https://s3.amazonaws.com/s3.hichinaschool.com.br/HSK+1+Vocabulary.apkg" });
    }
    if (!deckTitles.contains("HSK 2 Vocabulary")) {
        String[] strings = { "HSK 2 Vocabulary" };
        decks.add(new Object[] { strings, Long.valueOf(-20), 0, 0, 0, false,
                "https://s3.amazonaws.com/s3.hichinaschool.com.br/HSK+2+Vocabulary.apkg" });
    }
    if (!deckTitles.contains("HSK 3 Vocabulary")) {
        String[] strings = { "HSK 3 Vocabulary" };
        decks.add(new Object[] { strings, Long.valueOf(-30), 0, 0, 0, false,
                "https://s3.amazonaws.com/s3.hichinaschool.com.br/HSK+3+Vocabulary.apkg" });
    }
    if (!deckTitles.contains("HSK 4 Vocabulary")) {
        String[] strings = { "HSK 4 Vocabulary" };
        decks.add(new Object[] { strings, Long.valueOf(-40), 0, 0, 0, false,
                "https://s3.amazonaws.com/s3.hichinaschool.com.br/HSK+4+Vocabulary.apkg" });
    }
    if (!deckTitles.contains("HSK 5 Vocabulary")) {
        String[] strings = { "HSK 5 Vocabulary" };
        decks.add(new Object[] { strings, Long.valueOf(-50), 0, 0, 0, false,
                "https://s3.amazonaws.com/s3.hichinaschool.com.br/HSK+5+Vocabulary.apkg" });
    }

    mDeckList.clear();
    int due = 0;
    for (Object[] d : decks) {
        HashMap<String, String> m = new HashMap<String, String>();
        String[] name = ((String[]) d[0]);
        m.put("name", readableDeckName(name));
        m.put("did", ((Long) d[1]).toString());
        m.put("new", ((Integer) d[2]).toString());
        m.put("lrn", ((Integer) d[3]).toString());
        m.put("rev", ((Integer) d[4]).toString());
        m.put("dyn", ((Boolean) d[5]) ? "d1" : "d0");
        if (d.length > 6)
            m.put("url", ((String) d[6].toString()));

        if (name.length == 1) {
            due += Integer.parseInt(m.get("new")) + Integer.parseInt(m.get("lrn"))
                    + Integer.parseInt(m.get("rev"));
            // top position
            m.put("sep", "top");
            // correct previous deck
            if (mDeckList.size() > 0) {
                HashMap<String, String> map = mDeckList.get(mDeckList.size() - 1);
                if (map.get("sep").equals("top")) {
                    map.put("sep", "ful");
                } else {
                    map.put("sep", "bot");
                }
            }
        } else {
            // center position
            m.put("sep", "cen");
        }
        if (mDeckList.size() > 0 && mDeckList.size() == decks.size() - 1) {
            // bottom position
            if (name.length == 1) {
                m.put("sep", "ful");
            } else {
                m.put("sep", "bot");
            }
        }
        mDeckList.add(m);
    }
    mDeckListAdapter.notifyDataSetChanged();

    // set title
    Resources res = getResources();
    setTitle(res.getString(R.string.app_name));

    // update widget
    WidgetStatus.update(this, decks);
}
From source file:com.actelion.research.table.view.JVisualization.java
protected TreeMap<byte[], VisualizationPoint> createReferenceMap(int referencingColumn, int referencedColumn) {
    // create list of referencing keys
    TreeSet<byte[]> set = new TreeSet<byte[]>(new ByteArrayComparator());
    for (VisualizationPoint vp : mPoint) {
        byte[] data = (byte[]) vp.record.getData(referencingColumn);
        if (data != null)
            for (String ref : mTableModel.separateEntries(new String(data)))
                set.add(ref.getBytes());
    }

    // create map of existing and referenced VisualizationPoints
    TreeMap<byte[], VisualizationPoint> map =
            new TreeMap<byte[], VisualizationPoint>(new ByteArrayComparator());
    for (VisualizationPoint vp : mPoint) {
        byte[] key = (byte[]) vp.record.getData(referencedColumn);
        if (set.contains(key))
            map.put(key, vp);
    }
    return map;
}
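This excerpt works only because a ByteArrayComparator is supplied: byte[] has no natural ordering (and identity-based equals), so without a comparator the first add would throw ClassCastException. A sketch with a hand-rolled lexicographic comparator (the real ByteArrayComparator is assumed to behave similarly):

import java.util.Comparator;
import java.util.TreeSet;

public class ByteKeyDemo {
    public static void main(String[] args) {
        // compare unsigned byte by byte, shorter array first on a common prefix
        Comparator<byte[]> cmp = new Comparator<byte[]>() {
            public int compare(byte[] a, byte[] b) {
                int n = Math.min(a.length, b.length);
                for (int i = 0; i < n; i++) {
                    int d = (a[i] & 0xff) - (b[i] & 0xff);
                    if (d != 0) return d;
                }
                return a.length - b.length;
            }
        };
        TreeSet<byte[]> set = new TreeSet<byte[]>(cmp);
        set.add("ref1".getBytes());
        System.out.println(set.contains("ref1".getBytes())); // true: compared by content
        System.out.println(set.add("ref1".getBytes()));      // false: duplicate by content
    }
}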
From source file:net.sourceforge.fenixedu.domain.student.Registration.java
public PrecedentDegreeInformation getLatestPrecedentDegreeInformation() {
    TreeSet<PrecedentDegreeInformation> degreeInformations = new TreeSet<PrecedentDegreeInformation>(
            Collections.reverseOrder(PrecedentDegreeInformation.COMPARATOR_BY_EXECUTION_YEAR));
    ExecutionYear currentExecutionYear = ExecutionYear.readCurrentExecutionYear();
    for (PrecedentDegreeInformation pdi : getPrecedentDegreesInformationsSet()) {
        if (!pdi.getExecutionYear().isAfter(currentExecutionYear)) {
            degreeInformations.add(pdi);
        }
    }
    if (degreeInformations.isEmpty()) {
        return null;
    }
    return degreeInformations.iterator().next();
}
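The reverse-order trick above in isolation: wrapping the comparator with Collections.reverseOrder(...) makes the set iterate newest-first, so iterator().next() after the filtering loop is the latest qualifying entry. A minimal sketch with plain Integers:

import java.util.Collections;
import java.util.TreeSet;

public class LatestFirstDemo {
    public static void main(String[] args) {
        // reversing the ordering makes iterator().next() (or first()) the newest entry
        TreeSet<Integer> years = new TreeSet<Integer>(Collections.<Integer>reverseOrder());
        years.add(2010);
        years.add(2014);
        years.add(2012);
        System.out.println(years.iterator().next()); // 2014 -- largest element comes first
    }
}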
From source file:org.mwc.debrief.track_shift.views.StackedDotHelper.java
/**
 * sort out the data of interest
 */
public static TreeSet<Doublet> getDoublets(final TrackWrapper sensorHost, final ISecondaryTrack targetTrack,
        final boolean onlyVis, final boolean needBearing, final boolean needFrequency) {
    final TreeSet<Doublet> res = new TreeSet<Doublet>();

    // friendly fix-wrapper to save us repeatedly creating it
    final FixWrapper index = new FixWrapper(new Fix(null, new WorldLocation(0, 0, 0), 0.0, 0.0));

    // loop through our sensor data
    final Enumeration<Editable> sensors = sensorHost.getSensors().elements();
    if (sensors != null) {
        while (sensors.hasMoreElements()) {
            final SensorWrapper wrapper = (SensorWrapper) sensors.nextElement();
            if (!onlyVis || wrapper.getVisible()) {
                final Enumeration<Editable> cuts = wrapper.elements();
                while (cuts.hasMoreElements()) {
                    final SensorContactWrapper scw = (SensorContactWrapper) cuts.nextElement();
                    if (!onlyVis || scw.getVisible()) {
                        // is this cut suitable for what we're looking for?
                        if (needBearing && !scw.getHasBearing())
                            continue;

                        // aaah, but does it meet the frequency requirement?
                        if (needFrequency && !scw.getHasFrequency())
                            continue;

                        FixWrapper targetFix = null;
                        TrackSegment targetParent = null;

                        if (targetTrack != null) {
                            // right, get the track segment and fix nearest to this DTG
                            final Enumeration<Editable> trkData = targetTrack.segments();
                            final Vector<TrackSegment> _theSegments = new Vector<TrackSegment>();
                            while (trkData.hasMoreElements()) {
                                final Editable thisI = trkData.nextElement();
                                if (thisI instanceof SegmentList) {
                                    final SegmentList thisList = (SegmentList) thisI;
                                    final Enumeration<Editable> theElements = thisList.elements();
                                    while (theElements.hasMoreElements()) {
                                        final TrackSegment ts = (TrackSegment) theElements.nextElement();
                                        _theSegments.add(ts);
                                    }
                                }
                                if (thisI instanceof TrackSegment) {
                                    final TrackSegment ts = (TrackSegment) thisI;
                                    _theSegments.add(ts);
                                }
                            }
                            if (_theSegments.size() > 0) {
                                final Iterator<TrackSegment> iter = _theSegments.iterator();
                                while (iter.hasNext()) {
                                    final TrackSegment ts = iter.next();
                                    final TimePeriod validPeriod =
                                            new TimePeriod.BaseTimePeriod(ts.startDTG(), ts.endDTG());
                                    if (validPeriod.contains(scw.getDTG())) {
                                        // sorted. here we go
                                        targetParent = ts;
                                        // create an object with the right time
                                        index.getFix().setTime(scw.getDTG());
                                        // and find any matching items
                                        final SortedSet<Editable> items = ts.tailSet(index);
                                        if (items.size() > 0) {
                                            targetFix = (FixWrapper) items.first();
                                        }
                                    }
                                }
                            }
                        }

                        final Watchable[] matches = sensorHost.getNearestTo(scw.getDTG());
                        if ((matches != null) && (matches.length > 0) && (targetFix != null)) {
                            final FixWrapper hostFix = (FixWrapper) matches[0];
                            final Doublet thisDub = new Doublet(scw, targetFix, targetParent, hostFix);
                            // store our data; targetFix is known to be non-null here,
                            // so this covers both the with-target and no-target cases
                            res.add(thisDub);
                        } // if there are any matching items
                    } // if cut is visible
                } // loop through cuts
            } // if sensor is visible
        } // loop through sensors
    } // if there are sensors
    return res;
}
From source file:edu.umass.cs.gigapaxos.SQLPaxosLogger.java
private void compactLogfiles() {
    File[] logfiles = this.getJournalFiles();
    TreeSet<Filename> sortedLogfiles = new TreeSet<Filename>();
    for (File f : logfiles)
        sortedLogfiles.add(new Filename(f));

    int empties = 0;
    File prevFile = null;
    for (Filename filename : sortedLogfiles) {
        File logfile = filename.file;
        // never try to compact the current log file
        if (logfile.toString().equals(this.journaler.curLogfile))
            break;

        log.log(Level.FINE, "{0} attempting to compact logfile {1}", new Object[] { this, logfile });
        try {
            compactLogfile(logfile, this.getPacketizer(), this.messageLog, this.journaler.fidMap);
            if (!logfile.exists()) {
                if (++empties > JOURNAL_GC_FREQUENCY * COMPACTION_FREQUENCY)
                    return;
            } else if (System.currentTimeMillis() - logfile.lastModified() < LOGFILE_AGE_THRESHOLD * 1000)
                continue;

            // we allow merging to double the file size limit
            if (prevFile != null && prevFile.exists() && logfile.exists()
                    && (prevFile.length() + logfile.length() <= 2 * MAX_LOG_FILE_SIZE))
                mergeLogfiles(prevFile, logfile, this.getPacketizer(), this.messageLog,
                        this.journaler.fidMap);
        } catch (IOException | JSONException e) {
            /* IOExceptions here are not necessarily bad and can happen
             * because files being compacted or merged can be deleted by a
             * parallel thread garbage collecting journal files. We could
             * use something like stringLocker to efficiently synchronize
             * between the two threads, but it is simpler to just incur the
             * exception and move on. */
            if (logfile.exists() && (prevFile == null || prevFile.exists()))
                log.severe(this + " incurred IOException " + e.getMessage());
            e.printStackTrace();
        }

        if (logfile.exists())
            prevFile = logfile;

        if (logfile.length() < 3 * MAX_LOG_FILE_SIZE / 4)
            continue;
        /* The break in the else clause below assumes that once we have
         * reached a point where logfiles can not be compacted by more than
         * 25%, we might as well stop instead of trying to compact the
         * remaining files. But it is still possible with some workloads for
         * more recent files to be compactable even though older files are
         * not compactable. For example, a recent flurry of requests all to
         * the same or a small number of paxos groups could result in all or
         * most of the logfile being unnecessary. To aggressively try to
         * compact anyway, LAZY_COMPACTION should be disabled; that will
         * also increase the compaction overhead even for less "adversarial"
         * workloads. */
        else if (LAZY_COMPACTION)
            break;
    }
}