Usage examples for java.util.SortedSet.iterator()
Iterator<E> iterator();
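Per the SortedSet contract, iterator() returns the elements in ascending order, either natural ordering or the order imposed by the set's comparator. Before the real-world examples below, here is a minimal, self-contained sketch using only JDK classes; the class name SortedSetIteratorExample is just illustrative.

import java.util.Iterator;
import java.util.SortedSet;
import java.util.TreeSet;

public class SortedSetIteratorExample {
    public static void main(String[] args) {
        // TreeSet is the standard SortedSet implementation; natural ordering is used here.
        SortedSet<String> names = new TreeSet<String>();
        names.add("charlie");
        names.add("alice");
        names.add("bob");

        // iterator() yields the elements in ascending order: alice, bob, charlie
        Iterator<String> it = names.iterator();
        while (it.hasNext()) {
            System.out.println(it.next());
        }
    }
}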
From source file:com.wakacommerce.common.cache.StatisticsServiceImpl.java
@Override
public MBeanInfo getMBeanInfo() {
    SortedSet<String> names = new TreeSet<String>();
    for (Map.Entry<String, CacheStat> stats : cacheStats.entrySet()) {
        names.add(stats.getKey());
    }

    MBeanAttributeInfo[] attrs = new MBeanAttributeInfo[names.size()];
    Iterator<String> it = names.iterator();
    for (int i = 0; i < attrs.length; i++) {
        String name = it.next();
        attrs[i] = new MBeanAttributeInfo(name, "java.lang.Double", name,
                true,   // isReadable
                false,  // isWritable
                false); // isIs
    }
    attrs = ArrayUtils.add(attrs,
            new MBeanAttributeInfo("LOG_RESOLUTION", "java.lang.Double", "LOG_RESOLUTION",
                    true,    // isReadable
                    true,    // isWritable
                    false)); // isIs

    MBeanOperationInfo[] opers = {
            new MBeanOperationInfo("activate", "Activate statistic logging",
                    null, // no parameters
                    "void", MBeanOperationInfo.ACTION),
            new MBeanOperationInfo("disable", "Disable statistic logging",
                    null, // no parameters
                    "void", MBeanOperationInfo.ACTION) };

    return new MBeanInfo("com.wakacommerce:name=StatisticsService." + appName,
            "Runtime Statistics", attrs,
            null,  // constructors
            opers,
            null); // notifications
}
From source file:org.opencastproject.archive.opencast.solr.SolrIndexManager.java
/**
 * Generates a string with the most important keywords from the text annotation.
 *
 * @param sortedAnnotations
 * @return The keyword string.
 */
static StringBuffer importantKeywordsString(SortedSet<TextAnnotation> sortedAnnotations) {
    // important keyword:
    // - high relevance
    // - high confidence
    // - occur often
    // - more than MAX_CHAR chars

    // calculate keyword occurrences (histogram) and importance
    ArrayList<String> list = new ArrayList<String>();
    Iterator<TextAnnotation> textAnnotations = sortedAnnotations.iterator();
    TextAnnotation textAnnotation = null;
    String keyword = null;

    HashMap<String, Integer> histogram = new HashMap<String, Integer>();
    HashMap<String, Double> importance = new HashMap<String, Double>();
    int occ = 0;
    double imp;
    while (textAnnotations.hasNext()) {
        textAnnotation = textAnnotations.next();
        Iterator<KeywordAnnotation> keywordAnnotations = textAnnotation.keywordAnnotations();
        while (keywordAnnotations.hasNext()) {
            KeywordAnnotation annotation = keywordAnnotations.next();
            keyword = annotation.getKeyword().toLowerCase();
            if (keyword.length() > MAX_CHAR) {
                occ = 0;
                if (histogram.keySet().contains(keyword)) {
                    occ = histogram.get(keyword);
                }
                histogram.put(keyword, occ + 1);

                // here the importance value is calculated
                // from relevance, confidence and frequency of occurrence.
                imp = (RELEVANCE_BOOST * getMaxRelevance(keyword, sortedAnnotations)
                        + getMaxConfidence(keyword, sortedAnnotations)) * (occ + 1);
                importance.put(keyword, imp);
            }
        }
    }

    // get the MAX_IMPORTANT_COUNT most important keywords
    StringBuffer buf = new StringBuffer();

    while (list.size() < MAX_IMPORTANT_COUNT && importance.size() > 0) {
        double max = 0.0;
        String maxKeyword = null;

        // get maximum from importance list
        for (Entry<String, Double> entry : importance.entrySet()) {
            keyword = entry.getKey();
            if (max < entry.getValue()) {
                max = entry.getValue();
                maxKeyword = keyword;
            }
        }

        // pop maximum
        importance.remove(maxKeyword);

        // append keyword to string
        if (buf.length() > 0)
            buf.append(" ");
        buf.append(maxKeyword);
    }

    return buf;
}
From source file:org.apache.hadoop.hive.ql.exec.ACLTask.java
private int showGroup(Hive db, showGroupsDesc sGD) throws HiveException {
    List<String> groups = null;
    if (sGD.getPattern() != null) {
        LOG.info("pattern: " + sGD.getPattern());
        groups = db.getGroups(sGD.getPattern());
        LOG.info("results : " + groups.size());
    } else
        groups = db.getGroups(".*");

    try {
        FileSystem fs = sGD.getResFile().getFileSystem(conf);
        DataOutput outStream = (DataOutput) fs.create(sGD.getResFile());
        SortedSet<String> sortedTbls = new TreeSet<String>(groups);
        Iterator<String> iterTbls = sortedTbls.iterator();

        while (iterTbls.hasNext()) {
            outStream.writeBytes(iterTbls.next());
            outStream.write(terminator);
        }
        ((FSDataOutputStream) outStream).close();
    } catch (FileNotFoundException e) {
        LOG.warn("show groups: " + StringUtils.stringifyException(e));
        if (SessionState.get() != null)
            SessionState.get().ssLog("show groups: " + StringUtils.stringifyException(e));
        return 1;
    } catch (IOException e) {
        LOG.warn("show groups: " + StringUtils.stringifyException(e));
        if (SessionState.get() != null)
            SessionState.get().ssLog("show groups: " + StringUtils.stringifyException(e));
        return 1;
    } catch (Exception e) {
        throw new HiveException(e.toString());
    }
    return 0;
}
From source file:org.apache.hadoop.hive.ql.exec.ACLTask.java
private int showRoles(Hive db, showRolesDesc showRolesD) throws HiveException {
    List<String> roles;
    if (showRolesD.getUser() == null)
        roles = db.showRoles(showRolesD.getWho());
    else
        return 0;

    try {
        FileSystem fs = showRolesD.getTmpFile().getFileSystem(conf);
        DataOutput outStream = (DataOutput) fs.create(showRolesD.getTmpFile());
        LOG.info("show roles tmp file:" + showRolesD.getTmpFile().toString());
        SortedSet<String> sortedRoles = new TreeSet<String>(roles);
        Iterator<String> iterRoles = sortedRoles.iterator();

        outStream.writeBytes("ALL roles in TDW:");
        outStream.write(terminator);

        while (iterRoles.hasNext()) {
            outStream.writeBytes(iterRoles.next());
            outStream.write(terminator);
        }
        ((FSDataOutputStream) outStream).close();
    } catch (FileNotFoundException e) {
        LOG.warn("show roles: " + StringUtils.stringifyException(e));
        return 1;
    } catch (IOException e) {
        LOG.warn("show roles: " + StringUtils.stringifyException(e));
        return 1;
    } catch (Exception e) {
        throw new HiveException(e.toString());
    }
    LOG.info("show roles OK");
    return 0;
}
From source file:edu.utah.further.core.api.collections.SortedSetComparator.java
/**
 * Compare two {@link SortedSet}s element by element. If {@link #sortingOrder} is
 * {@link SortingOrder#ASCENDING}, this is their natural ordering. If
 * {@link #sortingOrder} is {@link SortingOrder#DESCENDING}, the order is reversed.
 *
 * @param o1
 *            left operand
 * @param o2
 *            right operand
 * @return result of comparison of <code>o1</code> and <code>o2</code>
 * @see java.util.Comparator#compare(java.lang.Object, java.lang.Object)
 */
@Override
public int compare(final SortedSet<E> o1, final SortedSet<E> o2) {
    ValidationUtil.validateIsTrue(o1.comparator() == null, "First set must have natural ordering");
    ValidationUtil.validateIsTrue(o2.comparator() == null, "Second set must have natural ordering");

    final CompareToBuilder builder = new CompareToBuilder();

    // Compare the first corresponding min(o1.size(), o2.size()) element pairs
    final Iterator<E> iterator2 = o2.iterator();
    for (final E element1 : o1) {
        if (!iterator2.hasNext()) {
            // o2.size() < o1.size()
            break;
        }
        // Pair exists, add to comparison
        builder.append(element1, iterator2.next());
    }

    // If we're still tied, compare by set sizes
    return builder.append(o1.size(), o2.size()).toComparison();
}
From source file:org.cloudata.core.commitlog.pipe.BufferPool.java
int clearExpiredEntries(int msec) {
    PoolEntry e = new PoolEntry((System.currentTimeMillis() + 10) - msec);
    int sizeOfDeallocated = 0;

    synchronized (bufferMap) {
        Iterator<TreeSet<PoolEntry>> iter = bufferMap.values().iterator();
        while (iter.hasNext()) {
            TreeSet<PoolEntry> entrySet = iter.next();
            SortedSet<PoolEntry> expiredSet = entrySet.headSet(e);

            if (expiredSet.isEmpty() == false) {
                LOG.debug(expiredSet.size() + " pool entries are removed");

                Iterator<PoolEntry> expiredIter = expiredSet.iterator();
                while (expiredIter.hasNext()) {
                    PoolEntry expiredEntry = expiredIter.next();
                    poolMonitor.deallocated(expiredEntry.buffer.capacity());
                    sizeOfDeallocated += expiredEntry.buffer.capacity();
                }
                expiredSet.clear();

                if (entrySet.isEmpty()) {
                    LOG.debug("entry set is removed");
                    iter.remove();
                }
            }
        }
    }

    return sizeOfDeallocated;
}
From source file:nz.co.senanque.workflowui.LaunchWizard.java
public int setup() {
    SortedSet<ProcessDefinitionHolder> options = getVisibleProcesses();
    final Container c = new IndexedContainer();
    if (options != null) {
        for (final Iterator<?> i = options.iterator(); i.hasNext();) {
            c.addItem(i.next());
        }
    }
    select.setContainerDataSource(c);
    select.setRows(Math.min(10, options.size() + 2));
    return options.size();
}
From source file:com.yahoo.pulsar.common.naming.NamespaceBundlesTest.java
@Test
public void testFindBundle() throws Exception {
    SortedSet<Long> partitions = Sets.newTreeSet();
    partitions.add(0l);
    partitions.add(0x40000000l);
    partitions.add(0xa0000000l);
    partitions.add(0xb0000000l);
    partitions.add(0xc0000000l);
    partitions.add(0xffffffffl);
    NamespaceBundles bundles = new NamespaceBundles(new NamespaceName("pulsar/global/ns1"), partitions, factory);

    DestinationName dn = DestinationName.get("persistent://pulsar/global/ns1/topic-1");
    NamespaceBundle bundle = bundles.findBundle(dn);
    assertTrue(bundle.includes(dn));

    dn = DestinationName.get("persistent://pulsar/use/ns2/topic-2");
    try {
        bundles.findBundle(dn);
        fail("Should have failed due to mismatched namespace name");
    } catch (IllegalArgumentException iae) {
        // OK, expected
    }

    Long hashKey = factory.getLongHashCode(dn.toString());
    // The following code guarantees that we have at least two ranges after the hashKey till the end
    SortedSet<Long> tailSet = partitions.tailSet(hashKey);
    tailSet.add(hashKey);
    // Now, remove the first range to ensure the hashKey is not included in <code>newPar</code>
    Iterator<Long> iter = tailSet.iterator();
    iter.next();
    SortedSet<Long> newPar = tailSet.tailSet(iter.next());

    try {
        bundles = new NamespaceBundles(dn.getNamespaceObject(), newPar, factory);
        bundles.findBundle(dn);
        fail("Should have failed due to out-of-range");
    } catch (ArrayIndexOutOfBoundsException iae) {
        // OK, expected
    }
}
From source file:nl.nn.adapterframework.configuration.Configuration.java
public Iterator<IAdapter> getRegisteredAdapterNames() {
    // Copying the key set into a TreeSet yields a sorted snapshot that is
    // detached from adapterTable, so the returned iterator is unaffected by
    // later registrations.
    SortedSet<IAdapter> sortedKeys = new TreeSet(adapterTable.keySet());
    return sortedKeys.iterator();
}
From source file:uk.co.danielrendall.imagetiler.strategies.CircleStrategy.java
public void doAfterInitialise() {
    Point center = new Point(((double) xMin) + ((double) width / 2.0d),
            ((double) yMin) + ((double) height / 2.0d));
    SortedSet<Pixel> pixels = new TreeSet<Pixel>(getRadiusComparator(center));
    GridStrategy strategy = new GridStrategy();
    strategy.initialise(this);
    while (strategy.hasNext()) {
        Pixel next = strategy.next();
        if (filter.shouldInclude(next))
            pixels.add(next);
    }
    log.info("Max pixels " + width * height + " pixels, I have " + pixels.size());
    pixelIterator = pixels.iterator();
}