Example usage for java.util TreeSet descendingSet

Introduction

On this page you can find example usages of java.util.TreeSet.descendingSet(), drawn from open-source projects.

Prototype

public NavigableSet<E> descendingSet() 
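
A minimal, self-contained sketch of the basic behavior (assuming only the JDK): descendingSet() returns a live reverse-order view of the backing set, not a copy.

import java.util.NavigableSet;
import java.util.TreeSet;

public class DescendingSetDemo {
    public static void main(String[] args) {
        TreeSet<Integer> set = new TreeSet<>();
        set.add(3);
        set.add(1);
        set.add(2);

        NavigableSet<Integer> reversed = set.descendingSet();
        System.out.println(reversed); // [3, 2, 1]

        // The view is backed by the original set: changes show up in both.
        set.add(4);
        System.out.println(reversed); // [4, 3, 2, 1]
        reversed.remove(1);
        System.out.println(set);      // [2, 3, 4]
    }
}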

Usage

From source file:com.b2international.index.SortIndexTest.java

@Test
public void sortShortField() throws Exception {
    final PrimitiveIterator.OfInt intIterator = new Random().ints().iterator();
    final TreeSet<Short> orderedItems = newTreeSet();
    final Map<String, Data> documents = newHashMap();

    for (short i = 0; i < NUM_DOCS; i++) {
        short item = 0;
        while (item == 0 || orderedItems.contains(item)) {
            item = (short) intIterator.nextInt();
        }
        orderedItems.add(item);

        final Data data = new Data();
        data.setShortField(item);
        documents.put(Integer.toString(i), data);
    }

    indexDocuments(documents);

    final Query<Data> ascendingQuery = Query.select(Data.class).where(Expressions.matchAll()).limit(NUM_DOCS)
            .sortBy(SortBy.field("shortField", Order.ASC)).build();

    checkDocumentOrder(ascendingQuery, data -> data.getShortField(), orderedItems, Short.class);

    final Query<Data> descendingQuery = Query.select(Data.class).where(Expressions.matchAll()).limit(NUM_DOCS)
            .sortBy(SortBy.field("shortField", Order.DESC)).build();

    checkDocumentOrder(descendingQuery, data -> data.getShortField(), orderedItems.descendingSet(),
            Short.class);
}

From source file:com.b2international.index.SortIndexTest.java

@Test
public void sortFloatField() throws Exception {
    final PrimitiveIterator.OfDouble doubleIterator = new Random().doubles().iterator();
    final TreeSet<Float> orderedItems = newTreeSet();
    final Map<String, Data> documents = newHashMap();

    for (int i = 0; i < NUM_DOCS; i++) {
        float item = 0.0f;
        while (item == 0.0f || orderedItems.contains(item)) {
            item = (float) doubleIterator.nextDouble();
        }
        orderedItems.add(item);

        final Data data = new Data();
        data.setFloatField(item);
        documents.put(Integer.toString(i), data);
    }

    indexDocuments(documents);

    final Query<Data> ascendingQuery = Query.select(Data.class).where(Expressions.matchAll()).limit(NUM_DOCS)
            .sortBy(SortBy.field("floatField", Order.ASC)).build();

    checkDocumentOrder(ascendingQuery, data -> data.getFloatField(), orderedItems, Float.class);

    final Query<Data> descendingQuery = Query.select(Data.class).where(Expressions.matchAll()).limit(NUM_DOCS)
            .sortBy(SortBy.field("floatField", Order.DESC)).build();

    checkDocumentOrder(descendingQuery, data -> data.getFloatField(), orderedItems.descendingSet(),
            Float.class);
}

From source file:io.hops.hopsworks.common.security.CertificateMaterializer.java

private void releaseWriteLocks(TreeSet<ReentrantReadWriteLock> acquiredLocks) {
    Set<ReentrantReadWriteLock> reversedLocks = acquiredLocks.descendingSet();
    reversedLocks.stream().forEach(l -> l.writeLock().unlock());
}
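
The snippet above releases write locks in the reverse of their sorted acquisition order, the usual discipline for deadlock avoidance. A hypothetical sketch of the full acquire-ascending / release-descending pattern (the identity-hash comparator and the helper method are assumptions for illustration, not part of the original source):

import java.util.Comparator;
import java.util.TreeSet;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class OrderedLocking {
    // ReentrantReadWriteLock is not Comparable, so impose an (assumed) total
    // order by identity hash. Hash collisions are ignored here for brevity.
    private static final Comparator<ReentrantReadWriteLock> BY_IDENTITY =
            Comparator.comparingInt(System::identityHashCode);

    public static void withWriteLocks(Iterable<ReentrantReadWriteLock> locks, Runnable action) {
        TreeSet<ReentrantReadWriteLock> ordered = new TreeSet<>(BY_IDENTITY);
        locks.forEach(ordered::add);

        TreeSet<ReentrantReadWriteLock> acquired = new TreeSet<>(BY_IDENTITY);
        try {
            // Acquire in ascending comparator order...
            for (ReentrantReadWriteLock lock : ordered) {
                lock.writeLock().lock();
                acquired.add(lock);
            }
            action.run();
        } finally {
            // ...and release in the reverse order via descendingSet().
            acquired.descendingSet().forEach(l -> l.writeLock().unlock());
        }
    }
}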

From source file:com.b2international.index.SortIndexTest.java

@Test
public void sortAnalyzedField() throws Exception {
    final TreeSet<String> orderedItems = newTreeSet();
    final Map<String, Data> documents = newHashMap();

    for (int i = 0; i < NUM_DOCS; i++) {
        String item = null;
        while (item == null || orderedItems.contains(item)) {
            item = RandomStringUtils.randomAlphabetic(10);
        }
        orderedItems.add(item);

        final Data data = new Data();
        data.setAnalyzedField(item);
        documents.put(Integer.toString(i), data);
    }

    indexDocuments(documents);

    final Query<Data> ascendingQuery = Query.select(Data.class).where(Expressions.matchAll()).limit(NUM_DOCS)
            .sortBy(SortBy.field("analyzedField.exact", Order.ASC)).build();

    checkDocumentOrder(ascendingQuery, data -> data.getAnalyzedField(), orderedItems, String.class);

    final Query<Data> descendingQuery = Query.select(Data.class).where(Expressions.matchAll()).limit(NUM_DOCS)
            .sortBy(SortBy.field("analyzedField.exact", Order.DESC)).build();

    checkDocumentOrder(descendingQuery, data -> data.getAnalyzedField(), orderedItems.descendingSet(),
            String.class);
}

From source file:com.b2international.index.SortIndexTest.java

@Test
public void sortBigDecimalField() throws Exception {
    final PrimitiveIterator.OfDouble doubleIterator = new Random().doubles().iterator();
    final TreeSet<BigDecimal> orderedItems = newTreeSet();
    final Map<String, Data> documents = newHashMap();

    for (int i = 0; i < NUM_DOCS; i++) {
        BigDecimal item = null;
        while (item == null || orderedItems.contains(item)) {
            item = BigDecimal.valueOf(doubleIterator.nextDouble());
        }
        orderedItems.add(item);

        final Data data = new Data();
        data.setBigDecimalField(item);
        documents.put(Integer.toString(i), data);
    }

    indexDocuments(documents);

    final Query<Data> ascendingQuery = Query.select(Data.class).where(Expressions.matchAll()).limit(NUM_DOCS)
            .sortBy(SortBy.field("bigDecimalField", Order.ASC)).build();

    checkDocumentOrder(ascendingQuery, data -> data.getBigDecimalField(), orderedItems, BigDecimal.class);

    final Query<Data> descendingQuery = Query.select(Data.class).where(Expressions.matchAll()).limit(NUM_DOCS)
            .sortBy(SortBy.field("bigDecimalField", Order.DESC)).build();

    checkDocumentOrder(descendingQuery, data -> data.getBigDecimalField(), orderedItems.descendingSet(),
            BigDecimal.class);
}

From source file:com.turn.ttorrent.client.Client.java

/**
 * Unchoke connected peers.
 *
 * <p>
 * This is one of the "clever" places of the BitTorrent client. Every
 * OPTIMISTIC_UNCHOKING_FREQUENCY seconds, we decide which peers should be
 * unchoked and authorized to grab pieces from us.
 * </p>
 *
 * <p>
 * Reciprocation (tit-for-tat) and upload capping are implemented here by
 * carefully choosing which peers we unchoke, and which peers we choke.
 * </p>
 *
 * <p>
 * The four peers with the best download rate that are interested in us get
 * unchoked. This maximizes our download rate, as we'll be able to get data
 * from these four "best" peers quickly, while allowing these peers to
 * download from us and thus reciprocate their generosity.
 * </p>
 *
 * <p>
 * Peers that have a better download rate than these four downloaders but
 * are not interested get unchoked too; we want to be able to download from
 * them to get more data more quickly. If one becomes interested, it takes
 * a downloader's place as one of the four top downloaders (i.e. we choke
 * the downloader with the worst upload rate).
 * </p>
 *
 * @param optimistic Whether to perform an optimistic unchoke as well.
 */
private synchronized void unchokePeers(boolean optimistic) {
    // Build a set of all connected peers, we don't care about peers we're
    // not connected to.
    TreeSet<SharingPeer> bound = new TreeSet<SharingPeer>(this.getPeerRateComparator());
    bound.addAll(this.connected.values());

    if (bound.size() == 0) {
        logger.trace("No connected peers, skipping unchoking.");
        return;
    } else {
        logger.trace("Running unchokePeers() on {} connected peers.", bound.size());
    }

    int downloaders = 0;
    Set<SharingPeer> choked = new HashSet<SharingPeer>();

    // We're interested in the top downloaders first, so use a descending
    // set.
    for (SharingPeer peer : bound.descendingSet()) {
        if (downloaders < Client.MAX_DOWNLOADERS_UNCHOKE) {
            // Unchoke up to MAX_DOWNLOADERS_UNCHOKE interested peers
            if (peer.isChoking()) {
                if (peer.isInterested()) {
                    downloaders++;
                }

                peer.unchoke();
            }
        } else {
            // Choke everybody else
            choked.add(peer);
        }
    }

    // Actually choke all chosen peers (if any), except the eventual
    // optimistic unchoke.
    if (choked.size() > 0) {
        SharingPeer randomPeer = choked.toArray(new SharingPeer[0])[this.random.nextInt(choked.size())];

        for (SharingPeer peer : choked) {
            if (optimistic && peer == randomPeer) {
                logger.debug("Optimistic unchoke of {}.", peer);
                continue;
            }

            peer.choke();
        }
    }
}
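
Here bound.descendingSet() walks peers from the best download rate downwards because the TreeSet is built with the rate comparator supplied by getPeerRateComparator(), which is not shown in the snippet. A minimal, self-contained sketch of the idea (the Peer record is a stand-in, not the ttorrent SharingPeer API):

import java.util.Comparator;
import java.util.TreeSet;

public class RateOrderingDemo {
    record Peer(String id, double downloadRate) {}

    public static void main(String[] args) {
        // Ascending by rate, with a tie-breaker so that equal-rate peers are
        // not treated as duplicates and silently dropped by the TreeSet.
        Comparator<Peer> byRate = Comparator
                .comparingDouble(Peer::downloadRate)
                .thenComparing(Peer::id);

        TreeSet<Peer> bound = new TreeSet<>(byRate);
        bound.add(new Peer("a", 5.0));
        bound.add(new Peer("b", 12.5));
        bound.add(new Peer("c", 12.5));

        // descendingSet() then iterates from the best-rated peer downwards.
        for (Peer p : bound.descendingSet()) {
            System.out.println(p.id() + " @ " + p.downloadRate());
        }
    }
}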

From source file:br.bireme.ngrams.NGrams.java

private static Set<String> results2json(final Parameters parameters, final Set<Result> results) {
    assert parameters != null;
    assert results != null;

    String name;
    String doc;
    final StringBuilder builder = new StringBuilder();
    final TreeSet<String> ret = new TreeSet<>();

    for (Result result : results) {
        builder.setLength(0);
        builder.append("{");
        name = parameters.db.name;
        doc = result.doc.get(name).replace('\"', '\'');
        builder.append(" \"").append(name).append("\":\"").append(doc).append("\",");
        name = parameters.id.name;
        doc = result.doc.get(name).replace('\"', '\'');
        builder.append(" \"").append(name).append("\":\"").append(doc).append("\",");
        name = parameters.indexed.name;
        doc = result.doc.get(name).replace('\"', '\'');
        builder.append(" \"").append(name).append("\":\"").append(doc).append("\"");
        for (ExactField exact : parameters.exacts) {
            name = exact.name;
            doc = result.doc.get(name).replace('\"', '\'');
            builder.append(", \"").append(name).append("\":\"").append(doc).append("\"");
        }
        for (NGramField ngrams : parameters.ngrams) {
            name = ngrams.name;
            doc = result.doc.get(name).replace('\"', '\'');
            builder.append(", \"").append(name).append("\":\"").append(doc).append("\"");
        }
        for (RegExpField regexps : parameters.regexps) {
            name = regexps.name;
            doc = result.doc.get(name).replace('\"', '\'');
            builder.append(", \"").append(name).append("\":\"").append(doc).append("\"");
        }
        for (NoCompareField nocompare : parameters.nocompare) {
            name = nocompare.name;
            doc = result.doc.get(name).replace('\"', '\'');
            builder.append(", \"").append(name).append("\":\"").append(doc).append("\"");
        }
        builder.append(", \"score\":\"").append(result.score).append("\"");
        builder.append(" }");
        ret.add(builder.toString());
    }

    return ret.descendingSet();
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.FifoCandidatesSelector.java

@Override
public Map<ApplicationAttemptId, Set<RMContainer>> selectCandidates(
        Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates, Resource clusterResource,
        Resource totalPreemptionAllowed) {
    // Calculate how much resources we need to preempt
    preemptableAmountCalculator.computeIdealAllocation(clusterResource, totalPreemptionAllowed);

    // Previous selectors (with higher priority) could have already
    // selected containers. We need to deduct preemptable resources
    // based on already selected candidates.
    CapacitySchedulerPreemptionUtils.deductPreemptableResourcesBasedSelectedCandidates(preemptionContext,
            selectedCandidates);

    List<RMContainer> skippedAMContainerlist = new ArrayList<>();

    // Loop all leaf queues
    for (String queueName : preemptionContext.getLeafQueueNames()) {
        // check if preemption disabled for the queue
        if (preemptionContext.getQueueByPartition(queueName, RMNodeLabelsManager.NO_LABEL).preemptionDisabled) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("skipping from queue=" + queueName + " because it's a non-preemptable queue");
            }
            continue;
        }

        // compute resToObtainByPartition, considering inter-queue preemption
        LeafQueue leafQueue = preemptionContext.getQueueByPartition(queueName,
                RMNodeLabelsManager.NO_LABEL).leafQueue;

        Map<String, Resource> resToObtainByPartition = CapacitySchedulerPreemptionUtils
                .getResToObtainByPartitionForLeafQueue(preemptionContext, queueName, clusterResource);

        synchronized (leafQueue) {
            // Go through all ignore-partition-exclusivity containers first, to
            // make sure such containers are selected as preemption candidates first.
            Map<String, TreeSet<RMContainer>> ignorePartitionExclusivityContainers = leafQueue
                    .getIgnoreExclusivityRMContainers();
            for (String partition : resToObtainByPartition.keySet()) {
                if (ignorePartitionExclusivityContainers.containsKey(partition)) {
                    TreeSet<RMContainer> rmContainers = ignorePartitionExclusivityContainers.get(partition);
                    // We check containers in reverse order, so later-submitted
                    // applications' containers are selected as preemption candidates first.
                    for (RMContainer c : rmContainers.descendingSet()) {
                        if (CapacitySchedulerPreemptionUtils.isContainerAlreadySelected(c,
                                selectedCandidates)) {
                            // Skip already selected containers
                            continue;
                        }
                        boolean preempted = CapacitySchedulerPreemptionUtils
                                .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext,
                                        resToObtainByPartition, c, clusterResource, selectedCandidates,
                                        totalPreemptionAllowed);
                        if (!preempted) {
                            continue;
                        }
                    }
                }
            }

            // preempt other containers
            Resource skippedAMSize = Resource.newInstance(0, 0);
            Iterator<FiCaSchedulerApp> desc = leafQueue.getOrderingPolicy().getPreemptionIterator();
            while (desc.hasNext()) {
                FiCaSchedulerApp fc = desc.next();
                // Once we finish preempting from a partition we remove it from
                // resToObtainByPartition, so when the map becomes empty we know
                // no more preemption is needed.
                if (resToObtainByPartition.isEmpty()) {
                    break;
                }

                preemptFrom(fc, clusterResource, resToObtainByPartition, skippedAMContainerlist, skippedAMSize,
                        selectedCandidates, totalPreemptionAllowed);
            }

            // We can also try preempting AM containers (still keeping at most
            // maxAMCapacityForThisQueue of AM resources) if more resources are
            // required to be preempted from this queue.
            Resource maxAMCapacityForThisQueue = Resources.multiply(
                    Resources.multiply(clusterResource, leafQueue.getAbsoluteCapacity()),
                    leafQueue.getMaxAMResourcePerQueuePercent());

            preemptAMContainers(clusterResource, selectedCandidates, skippedAMContainerlist,
                    resToObtainByPartition, skippedAMSize, maxAMCapacityForThisQueue, totalPreemptionAllowed);
        }
    }

    return selectedCandidates;
}

From source file:ORG.oclc.os.ipUseThrottleFilter.ipUseThrottleFilter.java

@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
        throws IOException, ServletException {
    String longAddr = null, shortAddr, s, transactionKey = null;
    int count;
    boolean ignorable = false;

    synchronized (simultaneousRequestsByShortIPAddr) {
        if (totalSimultaneousRequests >= maxTotalSimultaneousRequests) {
            log.error("This system has exceeded the maxTotalSimultaneousRequests limit of "
                    + maxTotalSimultaneousRequests);
            log.error(simultaneousRequestsByShortIPAddr);
            for (String str : simultaneousRequests)
                log.error(str);
            ((HttpServletResponse) response).setStatus(HttpURLConnection.HTTP_UNAVAILABLE);
            response.setContentType("text/html");
            PrintWriter writer = response.getWriter();
            writer.println("<html><body><h1>Service Temporarily Unavailable</h1>");
            writer.println(
                    "The system is experiencing a severe load and is temporarily unable to accept new requests");
            if (contactInfo != null)
                writer.println("<p>Contact " + contactInfo + " for more information</p>");
            writer.println("</body></html>");
            writer.close();
            return;
        }
        if (addressInHeader != null) {
            @SuppressWarnings("unchecked")
            Enumeration<String> addrs = ((HttpServletRequest) request).getHeaders(addressInHeader);
            while (addrs.hasMoreElements()) {
                longAddr = addrs.nextElement();
                if (longAddr == null) {
                    if (++addressInHeaderErrorCount < 10)
                        log.error("Expected a " + addressInHeader + " header but got null");
                    continue;
                }
                if (longAddr.lastIndexOf('.') >= 0)
                    break;
            }
        }
        if (longAddr == null)
            longAddr = request.getRemoteAddr();
        int i = longAddr.lastIndexOf('.');
        if (i < 0) {
            log.error("bogus IP address: '" + longAddr + "'");
            longAddr = "0.0.0.0";
        }
        shortAddr = longAddr.substring(0, i); // trim off 4th number group
        // that lets us spot requests from clusters
        s = equivalentAddresses.get(shortAddr); // map one short addr to another?
        if (s != null)
            shortAddr = s;
        if (ignorableAddresses.contains(shortAddr)) {
            ignorable = true;
        } else {
            Integer icount = simultaneousRequestsByShortIPAddr.get(shortAddr);
            if (icount != null)
                count = icount;
            else
                count = 0;

            int maxSimultaneousRequests = (maxTotalSimultaneousRequests - totalSimultaneousRequests) / 4;
            if (maxSimultaneousRequests == 0)
                maxSimultaneousRequests = 1;
            if (count >= maxSimultaneousRequests) {
                log.error("IP addr " + shortAddr + ".* has exceeded " + maxSimultaneousRequests
                        + " simultaneous requests!");
                log.error("maxTotalSimultaneousRequests=" + maxTotalSimultaneousRequests);
                log.error("totalSimultaneousRequests=" + totalSimultaneousRequests);
                for (String str : simultaneousRequests)
                    log.error(str);
                //                ((HttpServletResponse)response).setStatus(HttpURLConnection.HTTP_TOO_MANY_REQUESTS); // someday
                ((HttpServletResponse) response).setStatus(429); // too many requests
                response.setContentType("text/html");
                PrintWriter writer = response.getWriter();
                writer.println(
                        "<html><head><title>Too Many Requests</title></head><body><h1>Too Many Requests</h1>");
                writer.println("You have exceeded the maximum simultaneous request value of "
                        + maxSimultaneousRequests);
                writer.println("<p>This message and your IP address have been logged and reported</p>");
                if (contactInfo != null)
                    writer.println("<p>Contact " + contactInfo + " for more information</p>");
                writer.println("</body></html>");
                writer.close();
                return;
            }
            simultaneousRequestsByShortIPAddr.put(shortAddr, count + 1);
            icount = totalRequests.get(shortAddr);
            if (icount != null)
                count = icount;
            else
                count = 0;
            totalRequests.put(shortAddr, count + 1);
            totalSimultaneousRequests++;
            transactionKey = new StringBuilder((new Date(System.currentTimeMillis())).toString()).append('|')
                    .append(shortAddr).append('|').append(((HttpServletRequest) request).getQueryString())
                    .toString();
            simultaneousRequests.add(transactionKey);
        }
    }

    try {
        HttpServletResponseWrapper wrapper = new HttpServletResponseWrapper((HttpServletResponse) response);
        chain.doFilter(request, wrapper);
    } finally {
        if (!ignorable)
            synchronized (simultaneousRequestsByShortIPAddr) {
                totalSimultaneousRequests--;
                simultaneousRequests.remove(transactionKey);
                count = simultaneousRequestsByShortIPAddr.get(shortAddr);
                if (count == 1) // prune them from the table
                    simultaneousRequestsByShortIPAddr.remove(shortAddr);
                else
                    simultaneousRequestsByShortIPAddr.put(shortAddr, count - 1);
            }
    }

    Calendar c = new GregorianCalendar();
    int hour = c.get(Calendar.HOUR_OF_DAY);
    if (hour == 0 && nextReportingHour == 24) { // new day!
        // you could reset your daily limits table here
        nextReportingHour = 0;
    }

    if (hour >= nextReportingHour) { // generate the hourly report
        // you could reset your hourly limits table here
        nextReportingHour = hour + 1;

        if (log.isInfoEnabled()) {
            HashMap<String, Integer> map = new LinkedHashMap<String, Integer>();
            List<String> yourMapKeys = new ArrayList<String>(totalRequests.keySet());
            List<Integer> yourMapValues = new ArrayList<Integer>(totalRequests.values());
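            // Note: a TreeSet de-duplicates equal values, so IP addresses with
            // identical request counts collapse to a single entry in this report.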
            TreeSet<Integer> sortedSet = new TreeSet<Integer>(yourMapValues);
            Integer[] sortedArray = sortedSet.descendingSet().toArray(new Integer[0]);
            int size = sortedArray.length;

            for (int i = 0; i < size; i++)
                map.put(yourMapKeys.get(yourMapValues.indexOf(sortedArray[i])), sortedArray[i]);
            Iterator<String> it = map.keySet().iterator();
            String key;
            StringBuilder sb = new StringBuilder("Top 10 users in the last hour");
            for (int i = 0; i < 10 && it.hasNext(); i++) {
                key = it.next();
                sb.append("\n    ").append(key).append(" : ").append(map.get(key));
            }
            log.info(sb);
        }
        totalRequests.clear();
    }
}

From source file:org.starnub.starnubserver.pluggable.PluggableManager.java

public NavigableSet<UnloadedPluggable> pluggableScan(String directory,
        ConcurrentHashMap<String, Pluggable> loadedPluggables, boolean updating) {
    TreeSet<UnloadedPluggable> unloadedPluggables = getFiles(directory, "jar", "py");
    /* Remove unloaded pluggables that are older than the currently loaded ones */
    Iterator<UnloadedPluggable> iterator = unloadedPluggables.iterator();
    while (iterator.hasNext()) {
        UnloadedPluggable up = iterator.next();
        String upName = up.getDetails().getNAME().toLowerCase();
        Pluggable p = loadedPluggables.get(upName);
        if (p != null) {
            boolean canUpdate = canUpdate(up, p, updating);
            if (!canUpdate) {
                iterator.remove();
            }
        }
    }
    return unloadedPluggables.descendingSet();
}