List of usage examples for java.util TreeMap values
public Collection<V> values()
From source file:org.openvpms.archetype.rules.patient.PatientRules.java
/** * Returns the active identity with the specified short name. * If there are multiple identities, these will be ordered with the highest id first. * * @param patient the patient/*from w ww .j a v a2 s . co m*/ * @param shortName the identity archetype short name * @return the identities */ @SuppressWarnings("unchecked") private Collection<EntityIdentity> getIdentities(Party patient, String shortName) { TreeMap<Long, EntityIdentity> result = new TreeMap<Long, EntityIdentity>( ComparatorUtils.reversedComparator(ComparatorUtils.NATURAL_COMPARATOR)); for (EntityIdentity identity : patient.getIdentities()) { if (identity.isActive() && TypeHelper.isA(identity, shortName)) { result.put(identity.getId(), identity); } } return result.values(); }
From source file:br.gov.lexml.oaicat.LexMLOAICatalog.java
/**
 * <b>LEXML ready</b>
 *
 * <p>Builds the list of sets of the lexml-db base, preserving the order
 * returned by the DAO.</p>
 *
 * <p>Fix: the previous version keyed the TreeMap with the strings
 * "Sets.1", "Sets.2", ... which sort lexicographically ("Sets.10" before
 * "Sets.2"), scrambling the output order once there were ten or more
 * entries. Keying by the numeric index keeps the original DAO order.</p>
 *
 * @return lista dos sets da base lexml-db, in DAO order (never null)
 */
private ArrayList getSets() {
    // Integer keys sort numerically, so values() yields DAO insertion order.
    TreeMap<Integer, Object> treeMap = new TreeMap<Integer, Object>();
    List<ConjuntoItem> lista = m_ci_dao.list();
    if (null != lista) {
        int i = 0;
        for (ConjuntoItem item : lista) {
            i++;
            treeMap.put(Integer.valueOf(i), helper.ConjuntoItem2Sets(item));
        }
    }
    return new ArrayList(treeMap.values());
}
From source file:com.serphacker.serposcope.db.google.GoogleSerpRescanDB.java
/**
 * Rescans stored SERP data (non-bulk) for the given targets and searches,
 * recomputing ranks, per-run target summaries and "best rank" records.
 *
 * @param specificRunId the run to rescan; may be null. Passed to
 *                      serpDB.stream as both bounds — NOTE(review): presumably
 *                      this limits the scan to that single run; confirm
 *                      against serpDB.stream's contract.
 * @param targets       the targets whose ranks are recomputed
 * @param searches      the searches to rescan for each target
 * @param updateSummary if true, per-run GoogleTargetSummary records are
 *                      rebuilt and re-inserted
 */
public void rescanNonBulk(Integer specificRunId, Collection<GoogleTarget> targets,
        Collection<GoogleSearch> searches, boolean updateSummary) {
    LOG.debug("SERP rescan (non-bulk) : starting");
    long _start = System.currentTimeMillis();

    // Summaries of the run preceding specificRunId, indexed by target id.
    Run specPrevRun = null;
    Map<Integer, GoogleTargetSummary> specPrevRunSummaryByTarget = new HashMap<>();
    if (specificRunId != null) {
        specPrevRun = runDB.findPrevious(specificRunId);
        if (specPrevRun != null) {
            specPrevRunSummaryByTarget = targetSummaryDB.list(specPrevRun.getId()).stream()
                    .collect(Collectors.toMap(GoogleTargetSummary::getTargetId, Function.identity()));
        }
    }

    for (GoogleTarget target : targets) {
        // Summaries rebuilt during this rescan, indexed by run id.
        Map<Integer, GoogleTargetSummary> summaryByRunId = new HashMap<>();
        GoogleTargetSummary specificPreviousSummary = specPrevRunSummaryByTarget.get(target.getId());
        if (specificPreviousSummary != null) {
            summaryByRunId.put(specPrevRun.getId(), specificPreviousSummary);
        }
        for (GoogleSearch search : searches) {
            // Mutable holders so the stream callback below can update them.
            // NOTE(review): previousRunId is written but never read in this method.
            final MutableInt previousRunId = new MutableInt(0);
            final MutableInt previousRank = new MutableInt(GoogleRank.UNRANKED);
            GoogleBest searchBest = new GoogleBest(target.getGroupId(), target.getId(), search.getId(),
                    GoogleRank.UNRANKED, null, null);
            // Seed previous rank and best rank from the run before specificRunId.
            if (specPrevRun != null) {
                previousRunId.setValue(specPrevRun.getId());
                previousRank.setValue(
                        rankDB.get(specPrevRun.getId(), target.getGroupId(), target.getId(), search.getId()));
                GoogleBest specificBest = rankDB.getBest(target.getGroupId(), target.getId(), search.getId());
                if (specificBest != null) {
                    searchBest = specificBest;
                }
            }
            final GoogleBest best = searchBest;
            serpDB.stream(specificRunId, specificRunId, search.getId(), (GoogleSerp res) -> {
                // Find the first result entry matching the target; rank is 1-based.
                int rank = GoogleRank.UNRANKED;
                String rankedUrl = null;
                for (int i = 0; i < res.getEntries().size(); i++) {
                    if (target.match(res.getEntries().get(i).getUrl())) {
                        rankedUrl = res.getEntries().get(i).getUrl();
                        rank = i + 1;
                        break;
                    }
                }

                // only update last run
                GoogleRank gRank = new GoogleRank(res.getRunId(), target.getGroupId(), target.getId(),
                        search.getId(), rank, previousRank.shortValue(), rankedUrl);
                rankDB.insert(gRank);

                if (updateSummary) {
                    // Lazily create the summary for this run, then record the rank.
                    GoogleTargetSummary summary = summaryByRunId.get(res.getRunId());
                    if (summary == null) {
                        summaryByRunId.put(res.getRunId(), summary = new GoogleTargetSummary(target.getGroupId(),
                                target.getId(), res.getRunId(), 0));
                    }
                    summary.addRankCandidat(gRank);
                }

                // Track the best (numerically lowest) rank seen so far.
                if (rank != GoogleRank.UNRANKED && rank <= best.getRank()) {
                    best.setRank((short) rank);
                    best.setUrl(rankedUrl);
                    best.setRunDay(res.getRunDay());
                }

                previousRunId.setValue(res.getRunId());
                previousRank.setValue(rank);
            });
            if (best.getRank() != GoogleRank.UNRANKED) {
                rankDB.insertBest(best);
            }
        }

        // fill previous summary score
        if (updateSummary) {
            // Walk summaries in ascending run order, carrying each run's score
            // into the next run's "previous score" field.
            TreeMap<Integer, GoogleTargetSummary> summaries = new TreeMap<>(summaryByRunId);
            GoogleTargetSummary previousSummary = null;
            for (Map.Entry<Integer, GoogleTargetSummary> entry : summaries.entrySet()) {
                if (previousSummary != null) {
                    entry.getValue().setPreviousScoreBP(previousSummary.getScoreBP());
                }
                previousSummary = entry.getValue();
            }
            // The previous run's summary was only loaded for chaining; do not re-insert it.
            if (specPrevRun != null) {
                summaries.remove(specPrevRun.getId());
            }
            if (!summaries.isEmpty()) {
                targetSummaryDB.insert(summaries.values());
            }
        }
    }
    LOG.debug("SERP rescan : done, duration = {}",
            DurationFormatUtils.formatDurationHMS(System.currentTimeMillis() - _start));
}
From source file:ai.susi.mind.SusiMind.java
/** * This is the core principle of creativity: being able to match a given input * with problem-solving knowledge.//from w w w . ja v a 2s .c o m * This method finds ideas (with a query instantiated skills) for a given query. * The skills are selected using a scoring system and pattern matching with the query. * Not only the most recent user query is considered for skill selection but also * previously requested queries and their answers to be able to set new skill selections * in the context of the previous conversation. * @param query the user input * @param previous_argument the latest conversation with the same user * @param maxcount the maximum number of ideas to return * @return an ordered list of ideas, first idea should be considered first. */ public List<SusiIdea> creativity(String query, SusiThought latest_thought, int maxcount) { // tokenize query to have hint for idea collection final List<SusiIdea> ideas = new ArrayList<>(); this.reader.tokenizeSentence(query).forEach(token -> { Set<SusiSkill> skill_for_category = this.skilltrigger.get(token.categorized); Set<SusiSkill> skill_for_original = token.original.equals(token.categorized) ? 
null : this.skilltrigger.get(token.original); Set<SusiSkill> r = new HashSet<>(); if (skill_for_category != null) r.addAll(skill_for_category); if (skill_for_original != null) r.addAll(skill_for_original); r.forEach(skill -> ideas.add(new SusiIdea(skill).setIntent(token))); }); for (SusiIdea idea : ideas) DAO.log("idea.phrase-1: score=" + idea.getSkill().getScore().score + " : " + idea.getSkill().getPhrases().toString() + " " + idea.getSkill().getActionsClone()); // add catchall skills always (those are the 'bad ideas') Collection<SusiSkill> ca = this.skilltrigger.get(SusiSkill.CATCHALL_KEY); if (ca != null) ca.forEach(skill -> ideas.add(new SusiIdea(skill))); // create list of all ideas that might apply TreeMap<Long, List<SusiIdea>> scored = new TreeMap<>(); AtomicLong count = new AtomicLong(0); ideas.forEach(idea -> { int score = idea.getSkill().getScore().score; long orderkey = Long.MAX_VALUE - ((long) score) * 1000L + count.incrementAndGet(); List<SusiIdea> r = scored.get(orderkey); if (r == null) { r = new ArrayList<>(); scored.put(orderkey, r); } r.add(idea); }); // make a sorted list of all ideas ideas.clear(); scored.values().forEach(r -> ideas.addAll(r)); for (SusiIdea idea : ideas) DAO.log("idea.phrase-2: score=" + idea.getSkill().getScore().score + " : " + idea.getSkill().getPhrases().toString() + " " + idea.getSkill().getActionsClone()); // test ideas and collect those which match up to maxcount List<SusiIdea> plausibleIdeas = new ArrayList<>(Math.min(10, maxcount)); for (SusiIdea idea : ideas) { SusiSkill skill = idea.getSkill(); Collection<Matcher> m = skill.matcher(query); if (m.isEmpty()) continue; // TODO: evaluate leading SEE flow commands right here as well plausibleIdeas.add(idea); if (plausibleIdeas.size() >= maxcount) break; } for (SusiIdea idea : plausibleIdeas) { DAO.log("idea.phrase-3: score=" + idea.getSkill().getScore().score + " : " + idea.getSkill().getPhrases().toString() + " " + idea.getSkill().getActionsClone()); 
DAO.log("idea.phrase-3: log=" + idea.getSkill().getScore().log); } return plausibleIdeas; }
From source file:com.google.gwt.benchmarks.viewer.server.ReportImageServer.java
private JFreeChart createChart(String testName, Result result, String title, List<Result> comparativeResults) { // Find the maximum values across both axes for all of the results // (this chart's own results, plus all comparative results). ////from ww w .j a v a 2 s . co m // This is a stop-gap solution that helps us compare different charts for // the same benchmark method (usually with different user agents) until we // get real comparative functionality in version two. double maxTime = 0; for (Result r : comparativeResults) { for (Trial t : r.getTrials()) { maxTime = Math.max(maxTime, t.getRunTimeMillis()); } } // Determine the number of variables in this benchmark method List<Trial> trials = result.getTrials(); Trial firstTrial = new Trial(); int numVariables = 0; if (trials.size() > 0) { firstTrial = trials.get(0); numVariables = firstTrial.getVariables().size(); } // Display the trial data. // // First, pick the domain and series variables for our graph. // Right now we only handle up to two "user" variables. // We set the domain variable to the be the one containing the most unique // values. // This might be easier if the results had meta information telling us // how many total variables there are, what types they are of, etc.... String domainVariable = null; String seriesVariable = null; Map<String, Set<String>> variableValues = null; if (numVariables == 1) { domainVariable = firstTrial.getVariables().keySet().iterator().next(); } else { // TODO(tobyr): Do something smarter, like allow the user to specify which // variables are domain and series, along with the variables which are // held constant. 
variableValues = new HashMap<String, Set<String>>(); for (int i = 0; i < trials.size(); ++i) { Trial trial = trials.get(i); Map<String, String> variables = trial.getVariables(); for (Map.Entry<String, String> entry : variables.entrySet()) { String variable = entry.getKey(); Set<String> set = variableValues.get(variable); if (set == null) { set = new TreeSet<String>(); variableValues.put(variable, set); } set.add(entry.getValue()); } } TreeMap<Integer, List<String>> numValuesMap = new TreeMap<Integer, List<String>>(); for (Map.Entry<String, Set<String>> entry : variableValues.entrySet()) { Integer numValues = new Integer(entry.getValue().size()); List<String> variables = numValuesMap.get(numValues); if (variables == null) { variables = new ArrayList<String>(); numValuesMap.put(numValues, variables); } variables.add(entry.getKey()); } if (numValuesMap.values().size() > 0) { domainVariable = numValuesMap.get(numValuesMap.lastKey()).get(0); seriesVariable = numValuesMap.get(numValuesMap.firstKey()).get(0); } } String valueTitle = "time (ms)"; // This axis is time across all charts. if (numVariables == 0) { // Show a bar graph, with a single centered simple bar // 0 variables means there is only 1 trial DefaultCategoryDataset data = new DefaultCategoryDataset(); data.addValue(firstTrial.getRunTimeMillis(), "result", "result"); JFreeChart chart = ChartFactory.createBarChart(title, testName, valueTitle, data, PlotOrientation.VERTICAL, false, false, false); CategoryPlot p = chart.getCategoryPlot(); ValueAxis axis = p.getRangeAxis(); axis.setUpperBound(maxTime + maxTime * 0.1); return chart; } else if (numVariables == 1) { // Show a line graph with only 1 series // Or.... choose between a line graph and a bar graph depending upon // whether the type of the domain is numeric. 
XYSeriesCollection data = new XYSeriesCollection(); XYSeries series = new XYSeries(domainVariable); for (Trial trial : trials) { double time = trial.getRunTimeMillis(); String domainValue = trial.getVariables().get(domainVariable); series.add(Double.parseDouble(domainValue), time); } data.addSeries(series); JFreeChart chart = ChartFactory.createXYLineChart(title, domainVariable, valueTitle, data, PlotOrientation.VERTICAL, false, false, false); XYPlot plot = chart.getXYPlot(); plot.getRangeAxis().setUpperBound(maxTime + maxTime * 0.1); double maxDomainValue = getMaxValue(comparativeResults, domainVariable); plot.getDomainAxis().setUpperBound(maxDomainValue + maxDomainValue * 0.1); return chart; } else if (numVariables == 2) { // Show a line graph with two series XYSeriesCollection data = new XYSeriesCollection(); Set<String> seriesValues = variableValues.get(seriesVariable); for (String seriesValue : seriesValues) { XYSeries series = new XYSeries(seriesValue); for (Trial trial : trials) { Map<String, String> variables = trial.getVariables(); if (variables.get(seriesVariable).equals(seriesValue)) { double time = trial.getRunTimeMillis(); String domainValue = trial.getVariables().get(domainVariable); series.add(Double.parseDouble(domainValue), time); } } data.addSeries(series); } // TODO(tobyr) - Handle graphs above 2 variables JFreeChart chart = ChartFactory.createXYLineChart(title, domainVariable, valueTitle, data, PlotOrientation.VERTICAL, true, true, false); XYPlot plot = chart.getXYPlot(); plot.getRangeAxis().setUpperBound(maxTime + maxTime * 0.1); double maxDomainValue = getMaxValue(comparativeResults, domainVariable); plot.getDomainAxis().setUpperBound(maxDomainValue + maxDomainValue * 0.1); return chart; } throw new RuntimeException("The ReportImageServer is not yet able to " + "create charts for benchmarks with more than two variables."); // Sample JFreeChart code for creating certain charts: // Leaving this around until we can handle multivariate charts in 
dimensions // greater than two. // Code for creating a category data set - probably better with a bar chart // instead of line chart /* * DefaultCategoryDataset data = new DefaultCategoryDataset(); String series * = domainVariable; * * for ( Iterator it = trials.iterator(); it.hasNext(); ) { Trial trial = * (Trial) it.next(); double time = trial.getRunTimeMillis(); String * domainValue = (String) trial.getVariables().get( domainVariable ); * data.addValue( time, series, domainValue ); } * * String title = ""; String categoryTitle = domainVariable; PlotOrientation * orientation = PlotOrientation.VERTICAL; * * chart = ChartFactory.createLineChart( title, categoryTitle, valueTitle, * data, orientation, true, true, false ); */ /* * DefaultCategoryDataset data = new DefaultCategoryDataset(); String * series1 = "firefox"; String series2 = "ie"; * * data.addValue( 1.0, series1, "1024"); data.addValue( 2.0, series1, * "2048"); data.addValue( 4.0, series1, "4096"); data.addValue( 8.0, * series1, "8192"); * * data.addValue( 2.0, series2, "1024"); data.addValue( 4.0, series2, * "2048"); data.addValue( 8.0, series2, "4096"); data.addValue( 16.0, * series2,"8192"); * * String title = ""; String categoryTitle = "size"; PlotOrientation * orientation = PlotOrientation.VERTICAL; * * chart = ChartFactory.createLineChart( title, categoryTitle, valueTitle, * data, orientation, true, true, false ); */ }
From source file:org.apache.hadoop.hbase.regionserver.TestSplit.java
/**
 * Splits a region, verifies both daughters serve gets and scans, then loads
 * more data, splits each daughter again, and verifies the resulting four
 * regions. The first split's daughters are closed in the finally block.
 *
 * @param region the region to split
 * @throws Exception on any assertion failure or HBase error
 */
private void basicSplit(final HRegion region) throws Exception {
    LOG.info("" + addContent(region, COLFAMILY_NAME3));
    region.flushcache();
    byte[] splitRow = region.compactStores();
    assertNotNull(splitRow);
    LOG.info("SplitRow: " + Bytes.toString(splitRow));
    HRegion[] regions = split(region, splitRow);
    try {
        // Need to open the regions.
        // TODO: Add an 'open' to HRegion... don't do open by constructing
        // instance.
        for (int i = 0; i < regions.length; i++) {
            regions[i] = openClosedRegion(regions[i]);
        }
        // Assert can get rows out of new regions. Should be able to get first
        // row from first region and the midkey from second region.
        assertGet(regions[0], COLFAMILY_NAME3, Bytes.toBytes(START_KEY));
        assertGet(regions[1], COLFAMILY_NAME3, splitRow);
        // Test I can get scanner and that it starts at right place.
        assertScan(regions[0], COLFAMILY_NAME3, Bytes.toBytes(START_KEY));
        assertScan(regions[1], COLFAMILY_NAME3, splitRow);
        // Now prove can't split regions that have references.
        for (int i = 0; i < regions.length; i++) {
            // Add so much data to this region, we create a store file that is >
            // than one of our unsplitable references. it will.
            for (int j = 0; j < 2; j++) {
                addContent(regions[i], COLFAMILY_NAME3);
            }
            addContent(regions[i], COLFAMILY_NAME2);
            addContent(regions[i], COLFAMILY_NAME1);
            regions[i].flushcache();
        }
        byte[][] midkeys = new byte[regions.length][];
        // To make regions splitable force compaction.
        for (int i = 0; i < regions.length; i++) {
            midkeys[i] = regions[i].compactStores();
        }
        // Keyed by region name so the verification loop below visits the four
        // regions in name order.
        TreeMap<String, HRegion> sortedMap = new TreeMap<String, HRegion>();
        // Split these two daughter regions so then I'll have 4 regions. Will
        // split because added data above.
        for (int i = 0; i < regions.length; i++) {
            HRegion[] rs = null;
            if (midkeys[i] != null) {
                rs = split(regions[i], midkeys[i]);
                for (int j = 0; j < rs.length; j++) {
                    sortedMap.put(Bytes.toString(rs[j].getRegionName()), openClosedRegion(rs[j]));
                }
            }
        }
        LOG.info("Made 4 regions");
        // The splits should have been even. Test I can get some arbitrary row out
        // of each.
        int interval = (LAST_CHAR - FIRST_CHAR) / 3;
        byte[] b = Bytes.toBytes(START_KEY);
        for (HRegion r : sortedMap.values()) {
            assertGet(r, COLFAMILY_NAME3, b);
            // Advance the probe row by one interval per region.
            b[0] += interval;
        }
    } finally {
        // Best-effort close of the first split's daughter regions.
        for (int i = 0; i < regions.length; i++) {
            try {
                regions[i].close();
            } catch (IOException e) {
                // Ignore.
            }
        }
    }
}
From source file:org.lockss.config.TdbTitle.java
/** Print a full description of the title and all its AUs */ public void prettyPrint(PrintStream ps, int indent) { ps.println(StringUtil.tab(indent) + "Title: " + name); TreeMap<String, TdbAu> sorted = new TreeMap<String, TdbAu>(CatalogueOrderComparator.SINGLETON); for (TdbAu au : getTdbAus()) { sorted.put(au.getName(), au);//from w w w . jav a 2 s. c om } for (TdbAu au : sorted.values()) { au.prettyPrint(ps, indent + 2); } }
From source file:com.espertech.esper.epl.core.OrderByProcessorImpl.java
/**
 * Sorts the outgoing events by their order keys, using the factory's
 * comparator. Events sharing the same order key retain their relative
 * input order.
 *
 * @param outgoingEvents       the events to sort; returned unchanged when
 *                             null or shorter than two elements
 * @param orderKeys            one sort key per outgoing event, by index
 * @param exprEvaluatorContext evaluation context (unused here)
 * @return the events in sorted order
 */
public EventBean[] sort(EventBean[] outgoingEvents, Object[] orderKeys, ExprEvaluatorContext exprEvaluatorContext) {
    TreeMap<Object, List<EventBean>> buckets = new TreeMap<Object, List<EventBean>>(factory.getComparator());
    if (outgoingEvents == null || outgoingEvents.length < 2) {
        return outgoingEvents;
    }

    // Group events by order key; every key maps to a list, which keeps the
    // original relative order of events with equal keys.
    for (int index = 0; index < outgoingEvents.length; index++) {
        List<EventBean> bucket = buckets.get(orderKeys[index]);
        if (bucket == null) {
            bucket = new ArrayList<EventBean>();
            buckets.put(orderKeys[index], bucket);
        }
        bucket.add(outgoingEvents[index]);
    }

    // Flatten the buckets back into an array, now in key order.
    EventBean[] sorted = new EventBean[outgoingEvents.length];
    int position = 0;
    for (List<EventBean> bucket : buckets.values()) {
        for (EventBean theEvent : bucket) {
            sorted[position++] = theEvent;
        }
    }
    return sorted;
}
From source file:com.ichi2.libanki.Finder.java
/**
 * Returns the given deck id followed by the ids of all its child decks,
 * or null when no deck id is given.
 *
 * @param did the deck id, possibly null
 * @return the deck id plus its children's ids, or null for a null input
 */
private List<Long> dids(Long did) {
    if (did == null) {
        return null;
    }
    TreeMap<String, Long> childDecks = mCol.getDecks().children(did);
    List<Long> deckIds = new ArrayList<Long>(1 + childDecks.size());
    deckIds.add(did);
    for (Long childId : childDecks.values()) {
        deckIds.add(childId);
    }
    return deckIds;
}
From source file:au.edu.ausstage.networks.LookupManager.java
/** * A method to lookup the key collaborators for a contributor * * @param id the unique id of the contributor * @param formatType the required format of the data * @param sortType the required way in which the data is to be sorted * * @return the results of the lookup *///from w ww .j a v a 2 s . c o m public String getKeyCollaborators(String id, String formatType, String sortType) { // check on the parameters if (InputUtils.isValidInt(id) == false || InputUtils.isValid(formatType) == false || InputUtils.isValid(sortType) == false) { throw new IllegalArgumentException("All parameters to this method are required"); } // define a Tree Set to store the results java.util.LinkedList<Collaborator> collaborators = new java.util.LinkedList<Collaborator>(); // define other helper variables QuerySolution row = null; Collaborator collaborator = null; // define the base sparql query String sparqlQuery = "PREFIX foaf: <" + FOAF.NS + ">" + "PREFIX ausestage: <" + AuseStage.NS + "> " + "SELECT ?collaborator ?collabGivenName ?collabFamilyName ?function ?firstDate ?lastDate ?collabCount " + "WHERE { " + " @ a foaf:Person ; " + " ausestage:hasCollaboration ?collaboration. " + " ?collaboration ausestage:collaborator ?collaborator; " + " ausestage:collaborationFirstDate ?firstDate; " + " ausestage:collaborationLastDate ?lastDate; " + " ausestage:collaborationCount ?collabCount. " + " ?collaborator foaf:givenName ?collabGivenName; " + " foaf:familyName ?collabFamilyName; " + " ausestage:function ?function. " + " FILTER (?collaborator != @) " + "}"; // do we need to sort by name? 
if (sortType.equals("count") == true) { sparqlQuery += " ORDER BY DESC(?collabCount)"; } else if (sortType.equals("name") == true) { sparqlQuery += " ORDER BY ?collabFamilyName ?collabGivenName"; } // build a URI from the id id = AusStageURI.getContributorURI(id); // add the contributor URI to the query sparqlQuery = sparqlQuery.replaceAll("@", "<" + id + ">"); // execute the query ResultSet results = rdf.executeSparqlQuery(sparqlQuery); // build the dataset // use a numeric sort order while (results.hasNext()) { // loop though the resulset // get a new row of data row = results.nextSolution(); // instantiate a collaborator object collaborator = new Collaborator(AusStageURI.getId(row.get("collaborator").toString())); // check to see if the list contains this collaborator if (collaborators.indexOf(collaborator) != -1) { // collaborator is already in the list collaborator = collaborators.get(collaborators.indexOf(collaborator)); // update the function collaborator.setFunction(row.get("function").toString()); } else { // collaborator is not on the list // get the name collaborator.setGivenName(row.get("collabGivenName").toString()); collaborator.setFamilyName(row.get("collabFamilyName").toString(), true); // get the dates collaborator.setFirstDate(row.get("firstDate").toString()); collaborator.setLastDate(row.get("lastDate").toString()); // get the collaboration count collaborator.setCollaborations(Integer.toString(row.get("collabCount").asLiteral().getInt())); // add the url collaborator.setUrl(AusStageURI.getURL(row.get("collaborator").toString())); // add the function collaborator.setFunction(row.get("function").toString()); collaborators.add(collaborator); } } // play nice and tidy up rdf.tidyUp(); // sort by the id if (sortType.equals("id") == true) { TreeMap<Integer, Collaborator> collaboratorsToSort = new TreeMap<Integer, Collaborator>(); for (int i = 0; i < collaborators.size(); i++) { collaborator = collaborator = collaborators.get(i); 
collaboratorsToSort.put(Integer.parseInt(collaborator.getId()), collaborator); } // empty the list collaborators.clear(); // add the collaborators back to the list Collection values = collaboratorsToSort.values(); Iterator iterator = values.iterator(); while (iterator.hasNext()) { // get the collaborator collaborator = (Collaborator) iterator.next(); collaborators.add(collaborator); } collaboratorsToSort = null; } // define a variable to store the data String dataString = null; if (formatType.equals("html") == true) { dataString = createHTMLOutput(collaborators); } else if (formatType.equals("xml") == true) { dataString = createXMLOutput(collaborators); } else if (formatType.equals("json") == true) { dataString = createJSONOutput(collaborators); } // return the data return dataString; }