Example usage for java.util TreeSet last

List of usage examples for java.util TreeSet last

Introduction

On this page you can find example usage for java.util.TreeSet.last().

Prototype

public E last() 
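
A quick illustration before the project examples below: last() returns the greatest element currently in the set according to its ordering and throws NoSuchElementException when the set is empty. This standalone sketch is not taken from any of the listed projects:

import java.util.NoSuchElementException;
import java.util.TreeSet;

public class TreeSetLastDemo {
    public static void main(String[] args) {
        TreeSet<Integer> numbers = new TreeSet<>();
        numbers.add(42);
        numbers.add(7);
        numbers.add(19);

        // last() returns the highest element without removing it
        System.out.println(numbers.last()); // prints 42
        System.out.println(numbers.size()); // still 3

        // last() on an empty TreeSet throws NoSuchElementException
        try {
            new TreeSet<String>().last();
        } catch (NoSuchElementException e) {
            System.out.println("Empty set: " + e.getClass().getSimpleName());
        }
    }
}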

Usage

From source file:com.appeligo.search.actions.SearchResults.java

public List<SearchResult> getSearchResults(int startIndex) {

    initializeStatics();

    hasMoreResults = false;
    try {
        IndexSearcher searcher = null;

        try {
            searcher = newIndexSearcher();
            IndexReader reader = searcher.getIndexReader();

            Query luceneQuery = generateLuceneQuery(searcher);
            luceneQuery = luceneQuery.rewrite(reader);
            Hits hits = searcher.search(luceneQuery);

            usingSuggestedQuery = false;
            suggestedQuery = null;
            if ((didYouMeanParser != null)
                    && ((hits.length() < minimumHits) || (calcScore(searcher, getQuery()) < minimumScore))) {
                if (log.isDebugEnabled()) {
                    log.debug("Need to suggest because either num hits " + hits.length() + " < " + minimumHits
                            + "\n or top hit score " + (hits.length() > 0 ? hits.score(0) : "[NO HITS]") + " < "
                            + minimumScore);
                }
                IndexSearcher compositeSearcher = new IndexSearcher(compositeIndexLocation);
                try {
                    log.debug("calling suggest() with query=" + getQuery() + " and composite index from "
                            + compositeIndexLocation);
                    //Query didYouMean = didYouMeanParser.suggest(getQuery(), compositeSearcher.getIndexReader());
                    Query suggestedQueries[] = didYouMeanParser.getSuggestions(getQuery(),
                            compositeSearcher.getIndexReader());
                    TreeSet<Suggestion> suggestions = new TreeSet<Suggestion>();

                    if (suggestedQueries != null) {
                        for (int i = 0; i < suggestedQueries.length; i++) {
                            log.debug("trying suggested query: " + suggestedQueries[i].toString(defaultField));
                            String suggestedQueryString = suggestedQueries[i].toString(defaultField);
                            String constrainedQueryString = suggestedQueryString;
                            if (constrainedQueryString.indexOf('"') < 0
                                    && constrainedQueryString.indexOf('\'') < 0) {
                                constrainedQueryString = "\"" + constrainedQueryString + "\"~5"; // proximity/distance query (within 5 words of each other)
                            }
                            Query suggestedLuceneQuery = generateLuceneQuery(constrainedQueryString, searcher);
                            suggestedLuceneQuery = suggestedLuceneQuery.rewrite(reader);
                            Hits suggestedHits = searcher.search(suggestedLuceneQuery);

                            float score = calcScore(suggestedQueryString, suggestedHits);

                            log.debug("=========================================");
                            log.debug("SCORE = " + score);
                            log.debug("=========================================");

                            suggestions.add(
                                    new Suggestion(suggestedQueryString, suggestedLuceneQuery, suggestedHits,
                                            score, ((i == 0) ? didYouMeanParser.includesOriginal() : false)));
                            log.debug("hits=" + suggestedHits.length() + ", score=" + score);
                        }
                    }

                    Suggestion best = null;
                    if (suggestions.size() > 0) {
                        best = suggestions.last();
                    }

                    if (best != null && !best.isOriginal()) {
                        suggestedQuery = best.getQueryString();
                        if (suggestedQuery != null && suggestedQuery.indexOf('+') >= 0
                                && getQuery().indexOf('+') < 0) {
                            suggestedQuery = suggestedQuery.replaceAll("\\+", "");
                        }
                        if (hits.length() == 0) {
                            if (best.getHits().length() > 0) {
                                // Requery probably required because we added proximity before
                                String suggestedQueryString = best.getQueryString();
                                luceneQuery = generateLuceneQuery(suggestedQueryString, searcher);
                                luceneQuery = luceneQuery.rewrite(reader);
                                hits = searcher.search(luceneQuery);
                                //hits = best.getHits();
                                //luceneQuery = best.getLuceneQuery();
                                usingSuggestedQuery = true;
                            }
                        }
                        log.debug("DidYouMeanParser suggested " + suggestedQuery);
                    } else {
                        if (best != null && best.isOriginal()) {
                            log.debug("The suggestion was the original query after all");
                        }
                        log.debug("DidYouMeanParser did not suggest anything");
                    }
                } finally {
                    compositeSearcher.close();
                }
            }
            /*
            if (hits.length() == 0 && suggestedQuery != null) {
            // If we didn't find anything at all, go ahead and show them what the suggested query
            // will give them
            Query suggestedLuceneQuery = generateLuceneQuery(suggestedQuery, searcher);
            suggestedLuceneQuery = suggestedLuceneQuery.rewrite(reader);
               Hits suggestedHits = searcher.search(suggestedLuceneQuery);
               if (suggestedHits.length() > 0) {
             hits = suggestedHits;
             luceneQuery = suggestedLuceneQuery;
             usingSuggestedQuery = true;
               }
            }
               */
            totalHits = hits.length();
            //Get the genre matches:
            try {
                BitSetFacetHitCounter facetHitCounter = new BitSetFacetHitCounter();
                facetHitCounter.setSearcher(searcher);
                String baseQueryString = (isUsingSuggestedQuery() ? suggestedQuery : query);
                String quotedQueryString = baseQueryString;
                if (quotedQueryString.indexOf('"') == -1 && quotedQueryString.indexOf(' ') > -1) {
                    quotedQueryString = "\"" + quotedQueryString + "\"";
                }
                facetHitCounter.setBaseQuery(luceneQuery, baseQueryString);

                List<HitCount> subQueries = new ArrayList<HitCount>();
                for (Map.Entry<String, Query> entry : genreQueries.entrySet()) {
                    subQueries.add(
                            new HitCount(entry.getKey(), entry.getValue(), entry.getValue().toString(), 0));
                }
                facetHitCounter.setSubQueries(subQueries);
                genreCounts = facetHitCounter.getFacetHitCounts(true);

                whatMatchedCounts = new ArrayList<HitCount>();
                whatMatchedCounts
                        .add(new HitCount("Title", getFieldQuery(baseQueryString, "programTitle", searcher),
                                "programTitle:" + quotedQueryString, 0));
                whatMatchedCounts.add(
                        new HitCount("Episode Title", getFieldQuery(baseQueryString, "episodeTitle", searcher),
                                "episodeTitle:" + quotedQueryString, 0));
                whatMatchedCounts.add(
                        new HitCount("Description", getFieldQuery(baseQueryString, "description", searcher),
                                "description:" + quotedQueryString, 0));
                whatMatchedCounts.add(new HitCount("Content", getFieldQuery(baseQueryString, "text", searcher),
                        "text:" + quotedQueryString, 0));
                whatMatchedCounts
                        .add(new HitCount("Credits", getFieldQuery(baseQueryString, "credits", searcher),
                                "credits:" + quotedQueryString, 0));
                facetHitCounter.setSubQueries(whatMatchedCounts);
                whatMatchedCounts = facetHitCounter.getFacetHitCounts(true);

                //Program Count  -- Not sure if there is a better way to do this.
                HashSet<String> programTitles = new HashSet<String>();
                programCounts = new ArrayList<HitCount>();
                for (int i = 0; i < hits.length() && programCounts.size() < 5; i++) {
                    String title = hits.doc(i).get("programTitle");
                    if (!programTitles.contains(title)) {
                        String queryTitle = QueryParser.escape(title);
                        if (queryTitle.indexOf('"') > -1) {
                            // String.replace returns a new string; assign the result so the escaping takes effect
                            queryTitle = queryTitle.replace("\"", "\\\"");
                        }
                        if (queryTitle.indexOf(' ') > -1) {
                            queryTitle = "\"" + queryTitle + "\"";
                        }

                        programCounts
                                .add(new HitCount(title, getFieldQuery(queryTitle, "programTitle", searcher),
                                        "programTitle:" + queryTitle, 0));
                        programTitles.add(title);
                    }
                }
                facetHitCounter.setSubQueries(programCounts);
                programCounts = facetHitCounter.getFacetHitCounts(false);
            } catch (Exception e) {
                e.printStackTrace();
            }

            results = new ArrayList<SearchResult>();
            programToSearchResult.clear();
            Query userQuery = getContentQuery(query, searcher);
            userQuery = userQuery.rewrite(reader);
            Highlighter highlighter = new Highlighter(new TermFormatter(), new QueryScorer(userQuery, "text"));

            log.debug("#hits=" + hits.length());

            EPGProvider epgProvider = DefaultEpg.getInstance();

            boolean missingWebPaths = false; // We added this to the index midstream, so some do and some don't.
            // Next index rebuild, and they'll all have it.
            for (int i = 0; i < pageSize && i + startIndex < hits.length(); i++) {
                if (hits.doc(i + startIndex).get("webPath") == null) {
                    missingWebPaths = true;
                    break;
                }
            }
            Program[] programs = null;
            if (missingWebPaths) {
                List<String> programIds = new ArrayList<String>(pageSize);
                for (int i = 0; i < pageSize && i + startIndex < hits.length(); i++) {
                    programIds.add(hits.doc(i + startIndex).get("programID"));
                }
                programs = DefaultEpg.getInstance().getProgramList(programIds);
            }
            for (int i = 0; i < pageSize && i + startIndex < hits.length(); i++) {
                addDocument(hits.doc(i + startIndex), hits.score(i + startIndex), epgProvider, highlighter,
                        analyzer, null, null, (programs == null ? null : programs[i]));
            }
            if (results.size() + startIndex < hits.length()) {
                hasMoreResults = true;
            }
        } finally {
            if (searcher != null) {
                searcher.close();
            }
        }
    } catch (IOException e) {
        log.error("Error searching index", e);
    } catch (ParseException e) {
        log.error("Error searching index", e);
    }
    return results;
}

From source file:net.sourceforge.fenixedu.domain.StudentCurricularPlan.java

final public Enrolment getLatestDissertationEnrolment() {
    final TreeSet<Enrolment> result = new TreeSet<Enrolment>(Enrolment.COMPARATOR_BY_EXECUTION_PERIOD_AND_ID);
    result.addAll(getDissertationEnrolments());
    return result.isEmpty() ? null : result.last();
}
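
This is the common "latest element by comparator" idiom: because the TreeSet is built with COMPARATOR_BY_EXECUTION_PERIOD_AND_ID, last() yields the enrolment that sorts highest under that comparator, and the isEmpty() guard avoids the NoSuchElementException an empty set would throw. A generic sketch of the same pattern, with made-up data for illustration:

import java.util.Comparator;
import java.util.TreeSet;

public class LatestByComparator {
    public static void main(String[] args) {
        // Order build labels by their numeric suffix; last() then gives the "latest" one
        Comparator<String> byNumericSuffix =
                Comparator.comparingInt(s -> Integer.parseInt(s.substring(s.indexOf('-') + 1)));
        TreeSet<String> builds = new TreeSet<>(byNumericSuffix);
        builds.add("build-7");
        builds.add("build-42");
        builds.add("build-19");

        // Mirror the source pattern: guard against an empty set before calling last()
        String latest = builds.isEmpty() ? null : builds.last();
        System.out.println(latest); // build-42
    }
}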

From source file:com.datatorrent.stram.StreamingContainerManager.java

/**
 * Compute checkpoints required for a given operator instance to be recovered.
 * This is done by looking at checkpoints available for downstream dependencies first,
 * and then selecting the most recent available checkpoint that is smaller than downstream.
 *
 * @param operator Operator instance for which to find recovery checkpoint
 * @param ctx      Context into which to collect traversal info
 */
public void updateRecoveryCheckpoints(PTOperator operator, UpdateCheckpointsContext ctx) {
    if (operator.getRecoveryCheckpoint().windowId < ctx.committedWindowId.longValue()) {
        ctx.committedWindowId.setValue(operator.getRecoveryCheckpoint().windowId);
    }

    if (operator.getState() == PTOperator.State.ACTIVE && (ctx.currentTms
            - operator.stats.lastWindowIdChangeTms) > operator.stats.windowProcessingTimeoutMillis) {
        // if the checkpoint is ahead, then it is not blocked but waiting for activation (state-less recovery, at-most-once)
        if (ctx.committedWindowId.longValue() >= operator.getRecoveryCheckpoint().windowId) {
            LOG.debug("Marking operator {} blocked committed window {}, recovery window {}", operator,
                    Codec.getStringWindowId(ctx.committedWindowId.longValue()),
                    Codec.getStringWindowId(operator.getRecoveryCheckpoint().windowId));
            ctx.blocked.add(operator);
        }
    }

    // the most recent checkpoint eligible for recovery based on downstream state
    Checkpoint maxCheckpoint = Checkpoint.INITIAL_CHECKPOINT;

    Set<OperatorMeta> checkpointGroup = ctx.checkpointGroups.get(operator.getOperatorMeta());
    if (checkpointGroup == null) {
        checkpointGroup = Collections.singleton(operator.getOperatorMeta());
    }
    // find intersection of checkpoints that group can collectively move to
    TreeSet<Checkpoint> commonCheckpoints = new TreeSet<>(new Checkpoint.CheckpointComparator());
    synchronized (operator.checkpoints) {
        commonCheckpoints.addAll(operator.checkpoints);
    }
    Set<PTOperator> groupOpers = new HashSet<>(checkpointGroup.size());
    boolean pendingDeploy = operator.getState() == PTOperator.State.PENDING_DEPLOY;
    if (checkpointGroup.size() > 1) {
        for (OperatorMeta om : checkpointGroup) {
            Collection<PTOperator> operators = plan.getAllOperators(om);
            for (PTOperator groupOper : operators) {
                synchronized (groupOper.checkpoints) {
                    commonCheckpoints.retainAll(groupOper.checkpoints);
                }
                // visit all downstream operators of the group
                ctx.visited.add(groupOper);
                groupOpers.add(groupOper);
                pendingDeploy |= groupOper.getState() == PTOperator.State.PENDING_DEPLOY;
            }
        }
        // highest common checkpoint
        if (!commonCheckpoints.isEmpty()) {
            maxCheckpoint = commonCheckpoints.last();
        }
    } else {
        // without logical grouping, treat partitions as independent
        // this is especially important for parallel partitioning
        ctx.visited.add(operator);
        groupOpers.add(operator);
        maxCheckpoint = operator.getRecentCheckpoint();
        if (ctx.recovery && maxCheckpoint.windowId == Stateless.WINDOW_ID && operator.isOperatorStateLess()) {
            long currentWindowId = WindowGenerator.getWindowId(ctx.currentTms, this.vars.windowStartMillis,
                    this.getLogicalPlan().getValue(LogicalPlan.STREAMING_WINDOW_SIZE_MILLIS));
            maxCheckpoint = new Checkpoint(currentWindowId, 0, 0);
        }
    }

    // DFS downstream operators
    for (PTOperator groupOper : groupOpers) {
        for (PTOperator.PTOutput out : groupOper.getOutputs()) {
            for (PTOperator.PTInput sink : out.sinks) {
                PTOperator sinkOperator = sink.target;
                if (groupOpers.contains(sinkOperator)) {
                    continue; // downstream operator within group
                }
                if (!ctx.visited.contains(sinkOperator)) {
                    // downstream traversal
                    updateRecoveryCheckpoints(sinkOperator, ctx);
                }
                // recovery window id cannot move backwards
                // when dynamically adding new operators
                if (sinkOperator.getRecoveryCheckpoint().windowId >= operator
                        .getRecoveryCheckpoint().windowId) {
                    maxCheckpoint = Checkpoint.min(maxCheckpoint, sinkOperator.getRecoveryCheckpoint());
                }

                if (ctx.blocked.contains(sinkOperator)) {
                    if (sinkOperator.stats.getCurrentWindowId() == operator.stats.getCurrentWindowId()) {
                        // downstream operator is blocked by this operator
                        ctx.blocked.remove(sinkOperator);
                    }
                }
            }
        }
    }

    // find the common checkpoint that is <= downstream recovery checkpoint
    if (!commonCheckpoints.contains(maxCheckpoint)) {
        if (!commonCheckpoints.isEmpty()) {
            maxCheckpoint = Objects.firstNonNull(commonCheckpoints.floor(maxCheckpoint), maxCheckpoint);
        }
    }

    for (PTOperator groupOper : groupOpers) {
        // checkpoint frozen during deployment
        if (!pendingDeploy || ctx.recovery) {
            // remove previous checkpoints
            Checkpoint c1 = Checkpoint.INITIAL_CHECKPOINT;
            LinkedList<Checkpoint> checkpoints = groupOper.checkpoints;
            synchronized (checkpoints) {
                if (!checkpoints.isEmpty() && (checkpoints.getFirst()).windowId <= maxCheckpoint.windowId) {
                    c1 = checkpoints.getFirst();
                    Checkpoint c2;
                    while (checkpoints.size() > 1
                            && ((c2 = checkpoints.get(1)).windowId) <= maxCheckpoint.windowId) {
                        checkpoints.removeFirst();
                        //LOG.debug("Checkpoint to delete: operator={} windowId={}", operator.getName(), c1);
                        this.purgeCheckpoints.add(new Pair<PTOperator, Long>(groupOper, c1.windowId));
                        c1 = c2;
                    }
                } else {
                    if (ctx.recovery && checkpoints.isEmpty() && groupOper.isOperatorStateLess()) {
                        LOG.debug("Adding checkpoint for stateless operator {} {}", groupOper,
                                Codec.getStringWindowId(maxCheckpoint.windowId));
                        c1 = groupOper.addCheckpoint(maxCheckpoint.windowId, this.vars.windowStartMillis);
                    }
                }
            }
            //LOG.debug("Operator {} checkpoints: commit {} recent {}", new Object[] {operator.getName(), c1, operator.checkpoints});
            groupOper.setRecoveryCheckpoint(c1);
        } else {
            LOG.debug("Skipping checkpoint update {} during {}", groupOper, groupOper.getState());
        }
    }

}

From source file:org.alfresco.repo.lock.JobLockServiceImpl.java

/**
 * {@inheritDoc}
 */
@Override
public void getTransactionalLock(QName lockQName, long timeToLive, long retryWait, int retryCount) {
    // Check that transaction is present
    final String txnId = AlfrescoTransactionSupport.getTransactionId();
    if (txnId == null) {
        throw new IllegalStateException("Locking requires an active transaction");
    }
    // Get the set of currently-held locks
    TreeSet<QName> heldLocks = TransactionalResourceHelper.getTreeSet(KEY_RESOURCE_LOCKS);
    // We don't want the lock registered as being held if something goes wrong
    TreeSet<QName> heldLocksTemp = new TreeSet<QName>(heldLocks);
    boolean added = heldLocksTemp.add(lockQName);
    if (!added) {
        // It's a refresh.  Ordering is not important here as we already hold the lock.
        refreshLock(txnId, lockQName, timeToLive);
    } else {
        QName lastLock = heldLocksTemp.last();
        if (lastLock.equals(lockQName)) {
            if (logger.isDebugEnabled()) {
                logger.debug("Attempting to acquire ordered lock: \n" + "   Lock:     " + lockQName + "\n"
                        + "   TTL:      " + timeToLive + "\n" + "   Txn:      " + txnId);
            }
            // If it was last in the set, then the order is correct and we use the
            // full retry behaviour.
            getLockImpl(txnId, lockQName, timeToLive, retryWait, retryCount);
        } else {
            if (logger.isDebugEnabled()) {
                logger.debug("Attempting to acquire UNORDERED lock: \n" + "   Lock:     " + lockQName + "\n"
                        + "   TTL:      " + timeToLive + "\n" + "   Txn:      " + txnId);
            }
            // The lock request is made out of natural order.
            // Unordered locks do not get any retry behaviour
            getLockImpl(txnId, lockQName, timeToLive, retryWait, 1);
        }
    }
    // It went in, so add it to the transactionally-stored set
    heldLocks.add(lockQName);
    // Done
}
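
The ordering check above boils down to: add the new key to a copy of the held set and test whether it sorts last; only an in-order request gets the full retry behaviour. A condensed sketch of that idiom (the lock names are illustrative, not Alfresco QNames):

import java.util.TreeSet;

public class OrderedAcquisitionCheck {
    public static void main(String[] args) {
        TreeSet<String> heldLocks = new TreeSet<>();
        heldLocks.add("lock.alpha");
        heldLocks.add("lock.bravo");

        System.out.println(isOrderedRequest(heldLocks, "lock.charlie"));  // true: sorts after everything held
        System.out.println(isOrderedRequest(heldLocks, "lock.aardvark")); // false: out of natural order
        System.out.println(isOrderedRequest(heldLocks, "lock.bravo"));    // true: already held, i.e. a refresh
    }

    static boolean isOrderedRequest(TreeSet<String> heldLocks, String candidate) {
        TreeSet<String> withCandidate = new TreeSet<>(heldLocks);
        boolean added = withCandidate.add(candidate);
        // A refresh (already held) or a candidate that sorts last keeps the acquisition order intact
        return !added || withCandidate.last().equals(candidate);
    }
}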

From source file:org.apache.ambari.server.serveraction.upgrades.AutoSkipFailedSummaryAction.java

/**
 * {@inheritDoc}
 */
@Override
public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
        throws AmbariException, InterruptedException {

    HostRoleCommand hostRoleCommand = getHostRoleCommand();
    long requestId = hostRoleCommand.getRequestId();
    long stageId = hostRoleCommand.getStageId();

    // use the host role command to get to the parent upgrade group
    UpgradeItemEntity upgradeItem = m_upgradeDAO.findUpgradeItemByRequestAndStage(requestId, stageId);
    UpgradeGroupEntity upgradeGroup = upgradeItem.getGroupEntity();

    // find all of the stages in this group
    long upgradeGroupId = upgradeGroup.getId();
    UpgradeGroupEntity upgradeGroupEntity = m_upgradeDAO.findUpgradeGroup(upgradeGroupId);
    List<UpgradeItemEntity> groupUpgradeItems = upgradeGroupEntity.getItems();
    TreeSet<Long> stageIds = new TreeSet<>();
    for (UpgradeItemEntity groupUpgradeItem : groupUpgradeItems) {
        stageIds.add(groupUpgradeItem.getStageId());
    }

    // for every stage, find all tasks that have been SKIPPED_FAILED - we use a
    // bit of trickery here since within any given request, the stage IDs are
    // always sequential. This allows us to make a simple query instead of some
    // overly complex IN or NESTED SELECT query
    long minStageId = stageIds.first();
    long maxStageId = stageIds.last();

    List<HostRoleCommandEntity> skippedTasks = m_hostRoleCommandDAO.findByStatusBetweenStages(
            hostRoleCommand.getRequestId(), HostRoleStatus.SKIPPED_FAILED, minStageId, maxStageId);

    if (skippedTasks.isEmpty()) {
        return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", "There were no skipped failures", null);
    }

    StringBuilder buffer = new StringBuilder("The following steps failed and were automatically skipped:\n");

    for (HostRoleCommandEntity skippedTask : skippedTasks) {
        try {
            ServiceComponentHostEventWrapper eventWrapper = new ServiceComponentHostEventWrapper(
                    skippedTask.getEvent());

            ServiceComponentHostEvent event = eventWrapper.getEvent();

            String hostName = skippedTask.getHostName();
            if (null != hostName) {
                Map<String, Object> failures = m_structuredFailures.get(hostName);
                if (null == failures) {
                    failures = new HashMap<>();
                    m_structuredFailures.put(hostName, failures);
                }

                failures.put("id", skippedTask.getTaskId());
                failures.put("exit_code", skippedTask.getExitcode());
                failures.put("output_log", skippedTask.getOutputLog());
                failures.put("error_log", skippedTask.getErrorLog());

                String stdOut = StringUtils.abbreviateMiddle(new String(skippedTask.getStdOut()),
                        MIDDLE_ELLIPSIZE_MARKER, 1000);

                String stderr = StringUtils.abbreviateMiddle(new String(skippedTask.getStdError()),
                        MIDDLE_ELLIPSIZE_MARKER, 1000);

                failures.put("stdout", stdOut);
                failures.put("stderr", stderr);
            }

            buffer.append(event.getServiceComponentName());
            if (null != event.getHostName()) {
                buffer.append(" on ");
                buffer.append(event.getHostName());
            }

            buffer.append(": ");
            buffer.append(skippedTask.getCommandDetail());
            buffer.append("\n");
        } catch (Exception exception) {
            LOG.warn("Unable to extract failure information for {}", skippedTask);
            buffer.append(": ");
            buffer.append(skippedTask);
        }
    }

    String structuredOutput = m_gson.toJson(m_structuredFailures);
    String standardOutput = MessageFormat.format(FAILURE_STD_OUT_TEMPLATE, skippedTasks.size());
    String standardError = buffer.toString();

    return createCommandReport(0, HostRoleStatus.HOLDING, structuredOutput, standardOutput, standardError);
}

From source file:org.apache.bookkeeper.stream.storage.impl.sc.DefaultStorageContainerController.java

@Override
public ClusterAssignmentData computeIdealState(ClusterMetadata clusterMetadata,
        ClusterAssignmentData currentState, Set<BookieSocketAddress> currentCluster) {

    if (currentCluster.isEmpty()) {
        log.info("Current cluster is empty. No alive server is found.");
        return currentState;
    }

    // 1. get current server assignments
    Map<BookieSocketAddress, Set<Long>> currentServerAssignments;
    try {
        currentServerAssignments = currentState.getServersMap().entrySet().stream()
                .collect(Collectors.toMap(e1 -> {
                    try {
                        return new BookieSocketAddress(e1.getKey());
                    } catch (UnknownHostException uhe) {
                        log.error("Invalid cluster ");
                        throw new UncheckedExecutionException(
                                "Invalid server found in current assignment map" + e1.getKey(), uhe);
                    }
                }, e2 -> e2.getValue().getContainersList().stream().collect(Collectors.toSet())));
    } catch (UncheckedExecutionException uee) {
        log.warn("Invalid cluster assignment data is found : {} - {}. Recompute assignment from empty state",
                currentState, uee.getCause().getMessage());
        currentServerAssignments = Maps.newHashMap();
    }
    Set<BookieSocketAddress> currentServersAssigned = currentServerAssignments.keySet();

    // 2. if no servers are assigned, initialize the ideal state
    if (currentServersAssigned.isEmpty()) {
        return initializeIdealState(clusterMetadata, currentCluster);
    }

    // 3. get the cluster diffs
    Set<BookieSocketAddress> serversAdded = Sets.difference(currentCluster, currentServersAssigned)
            .immutableCopy();
    Set<BookieSocketAddress> serversRemoved = Sets.difference(currentServersAssigned, currentCluster)
            .immutableCopy();

    if (serversAdded.isEmpty() && serversRemoved.isEmpty()) {
        // cluster is unchanged, assuming the current state is ideal, no re-assignment is required.
        return currentState;
    }

    log.info(
            "Storage container controller detects cluster changed:\n"
                    + "\t {} servers added: {}\n\t {} servers removed: {}",
            serversAdded.size(), serversAdded, serversRemoved.size(), serversRemoved);

    // 4. compute the containers owned by the removed servers; these containers need to be reassigned.
    Set<Long> containersToReassign = currentServerAssignments.entrySet().stream()
            .filter(serverEntry -> !currentCluster.contains(serverEntry.getKey()))
            .flatMap(serverEntry -> serverEntry.getValue().stream()).collect(Collectors.toSet());

    // 5. use an ordered set as priority deque to sort the servers by the number of assigned containers
    TreeSet<Pair<BookieSocketAddress, LinkedList<Long>>> assignmentQueue = new TreeSet<>(
            new ServerAssignmentDataComparator());
    for (Map.Entry<BookieSocketAddress, Set<Long>> entry : currentServerAssignments.entrySet()) {
        BookieSocketAddress host = entry.getKey();

        if (!currentCluster.contains(host)) {
            if (log.isTraceEnabled()) {
                log.trace("Host {} is not in current cluster anymore", host);
            }
            continue;
        } else {
            if (log.isTraceEnabled()) {
                log.trace("Adding host {} to assignment queue", host);
            }
            assignmentQueue.add(Pair.of(host, Lists.newLinkedList(entry.getValue())));
        }
    }

    // 6. add new servers
    for (BookieSocketAddress server : serversAdded) {
        assignmentQueue.add(Pair.of(server, Lists.newLinkedList()));
    }

    // 7. assign the containers that need to be reassigned.
    for (Long containerId : containersToReassign) {
        Pair<BookieSocketAddress, LinkedList<Long>> leastLoadedServer = assignmentQueue.pollFirst();
        leastLoadedServer.getValue().add(containerId);
        assignmentQueue.add(leastLoadedServer);
    }

    // 8. rebalance the containers if needed
    int diffAllowed;
    if (assignmentQueue.size() > clusterMetadata.getNumStorageContainers()) {
        diffAllowed = 1;
    } else {
        diffAllowed = clusterMetadata.getNumStorageContainers() % assignmentQueue.size() == 0 ? 0 : 1;
    }

    Pair<BookieSocketAddress, LinkedList<Long>> leastLoaded = assignmentQueue.first();
    Pair<BookieSocketAddress, LinkedList<Long>> mostLoaded = assignmentQueue.last();
    while (mostLoaded.getValue().size() - leastLoaded.getValue().size() > diffAllowed) {
        leastLoaded = assignmentQueue.pollFirst();
        mostLoaded = assignmentQueue.pollLast();

        // move container from mostLoaded to leastLoaded
        Long containerId = mostLoaded.getValue().removeFirst();
        // add the container to the end to avoid balancing this container again.
        leastLoaded.getValue().addLast(containerId);

        assignmentQueue.add(leastLoaded);
        assignmentQueue.add(mostLoaded);

        leastLoaded = assignmentQueue.first();
        mostLoaded = assignmentQueue.last();
    }

    // 9. the new ideal state is computed, finalize it
    Map<String, ServerAssignmentData> newAssignmentMap = Maps.newHashMap();
    assignmentQueue.forEach(assignment -> newAssignmentMap.put(assignment.getKey().toString(),
            ServerAssignmentData.newBuilder().addAllContainers(assignment.getValue()).build()));
    return ClusterAssignmentData.newBuilder().putAllServers(newAssignmentMap).build();
}
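
This controller mixes the peek-style endpoint reads first()/last() with the removing variants pollFirst()/pollLast() to rebalance a priority queue kept in a TreeSet. A minimal sketch of the peek-versus-poll distinction on its own (the host names are made up):

import java.util.TreeSet;

public class PeekVersusPoll {
    public static void main(String[] args) {
        TreeSet<String> servers = new TreeSet<>();
        servers.add("host-a");
        servers.add("host-b");
        servers.add("host-c");

        // first()/last() only look at the endpoints; the set is unchanged
        System.out.println(servers.first() + " .. " + servers.last()); // host-a .. host-c
        System.out.println(servers.size()); // 3

        // pollFirst()/pollLast() remove and return the endpoints
        String least = servers.pollFirst(); // "host-a"
        String most = servers.pollLast();   // "host-c"
        System.out.println(least + ", " + most + ", remaining=" + servers); // host-a, host-c, remaining=[host-b]
    }
}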

From source file:org.apache.cassandra.concurrent.LongSharedExecutorPoolTest.java

private void testPromptnessOfExecution(long intervalNanos, float loadIncrement)
        throws InterruptedException, ExecutionException {
    final int executorCount = 4;
    int threadCount = 8;
    int maxQueued = 1024;
    final WeibullDistribution workTime = new WeibullDistribution(3, 200000);
    final long minWorkTime = TimeUnit.MICROSECONDS.toNanos(1);
    final long maxWorkTime = TimeUnit.MILLISECONDS.toNanos(1);

    final int[] threadCounts = new int[executorCount];
    final WeibullDistribution[] workCount = new WeibullDistribution[executorCount];
    final ExecutorService[] executors = new ExecutorService[executorCount];
    for (int i = 0; i < executors.length; i++) {
        executors[i] = SharedExecutorPool.SHARED.newExecutor(threadCount, maxQueued, "test" + i, "test" + i);
        threadCounts[i] = threadCount;
        workCount[i] = new WeibullDistribution(2, maxQueued);
        threadCount *= 2;
        maxQueued *= 2;
    }

    long runs = 0;
    long events = 0;
    final TreeSet<Batch> pending = new TreeSet<>();
    final BitSet executorsWithWork = new BitSet(executorCount);
    long until = 0;
    // basic idea is to go through different levels of load on the executor service; initially all small batches
    // (mostly within max queue size) of very short operations, moving to progressively larger batches
    // (beyond max queued size), and longer operations
    for (float multiplier = 0f; multiplier < 2.01f;) {
        if (System.nanoTime() > until) {
            System.out.println(String.format("Completed %.0fK batches with %.1fM events", runs * 0.001f,
                    events * 0.000001f));
            events = 0;
            until = System.nanoTime() + intervalNanos;
            multiplier += loadIncrement;
            System.out.println(String.format("Running for %ds with load multiplier %.1f",
                    TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
        }

        // wait a random amount of time so we submit new tasks in various stages of progress
        long timeout;
        if (pending.isEmpty())
            timeout = 0;
        else if (Math.random() > 0.98)
            timeout = Long.MAX_VALUE;
        else if (pending.size() == executorCount)
            timeout = pending.first().timeout;
        else
            timeout = (long) (Math.random() * pending.last().timeout);

        while (!pending.isEmpty() && timeout > System.nanoTime()) {
            Batch first = pending.first();
            boolean complete = false;
            try {
                for (Result result : first.results.descendingSet())
                    result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
                complete = true;
            } catch (TimeoutException e) {
            }
            if (!complete && System.nanoTime() > first.timeout) {
                for (Result result : first.results)
                    if (!result.future.isDone())
                        throw new AssertionError();
                complete = true;
            }
            if (complete) {
                pending.pollFirst();
                executorsWithWork.clear(first.executorIndex);
            }
        }

        // if we've emptied the executors, give all our threads an opportunity to spin down
        if (timeout == Long.MAX_VALUE)
            Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);

        // submit a random batch to the first free executor service
        int executorIndex = executorsWithWork.nextClearBit(0);
        if (executorIndex >= executorCount)
            continue;
        executorsWithWork.set(executorIndex);
        ExecutorService executor = executors[executorIndex];
        TreeSet<Result> results = new TreeSet<>();
        int count = (int) (workCount[executorIndex].sample() * multiplier);
        long targetTotalElapsed = 0;
        long start = System.nanoTime();
        long baseTime;
        if (Math.random() > 0.5)
            baseTime = 2 * (long) (workTime.sample() * multiplier);
        else
            baseTime = 0;
        for (int j = 0; j < count; j++) {
            long time;
            if (baseTime == 0)
                time = (long) (workTime.sample() * multiplier);
            else
                time = (long) (baseTime * Math.random());
            if (time < minWorkTime)
                time = minWorkTime;
            if (time > maxWorkTime)
                time = maxWorkTime;
            targetTotalElapsed += time;
            Future<?> future = executor.submit(new WaitTask(time));
            results.add(new Result(future, System.nanoTime() + time));
        }
        long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex])
                + TimeUnit.MILLISECONDS.toNanos(100L);
        long now = System.nanoTime();
        if (runs++ > executorCount && now > end)
            throw new AssertionError();
        events += results.size();
        pending.add(new Batch(results, end, executorIndex));
        //            System.out.println(String.format("Submitted batch to executor %d with %d items and %d permitted millis", executorIndex, count, TimeUnit.NANOSECONDS.toMillis(end - start)));
    }
}

From source file:org.artifactory.maven.MavenMetadataCalculator.java

private void createVersionsMetadata(RepoPath repoPath, List<ItemNode> versionNodes) {
    // get artifact info from the first pom

    RepoPath samplePomRepoPath = getFirstPom(versionNodes);
    if (samplePomRepoPath == null) {
        //Should never really be null, we've checked the list of version nodes for poms before passing it into here
        return;
    }
    MavenArtifactInfo artifactInfo = MavenArtifactInfo.fromRepoPath(samplePomRepoPath);
    if (!artifactInfo.isValid()) {
        return;
    }
    Metadata metadata = new Metadata();
    metadata.setGroupId(artifactInfo.getGroupId());
    metadata.setArtifactId(artifactInfo.getArtifactId());
    metadata.setVersion(artifactInfo.getVersion());
    Versioning versioning = new Versioning();
    metadata.setVersioning(versioning);
    versioning.setLastUpdatedTimestamp(new Date());

    MavenMetadataVersionComparator comparator = createVersionComparator();
    TreeSet<ItemNode> sortedVersions = Sets.newTreeSet(comparator);
    sortedVersions.addAll(versionNodes);

    // add the versions to the versioning section
    for (ItemNode sortedVersion : sortedVersions) {
        versioning.addVersion(sortedVersion.getName());
    }

    // latest is simply the last (be it snapshot or release version)
    String latestVersion = sortedVersions.last().getName();
    versioning.setLatest(latestVersion);

    // release is the latest non snapshot version
    for (ItemNode sortedVersion : sortedVersions) {
        String versionNodeName = sortedVersion.getName();
        if (!MavenNaming.isSnapshot(versionNodeName)) {
            versioning.setRelease(versionNodeName);
        }
    }

    saveMetadata(repoPath, metadata);
}

From source file:org.commoncrawl.service.listcrawler.CacheManager.java

/** check cache via fingerprint - this call blocks and should not be used in an async context
 *
 * @param urlFingerprint the fingerprint to look up
 * @param returnDate if true, return the cached document's date instead of a simple hit flag
 * @return 0 if no document with a matching fingerprint exists in the cache; otherwise 1, or the document's date when returnDate is true
 */
public long checkCacheForFingerprint(long urlFingerprint, boolean returnDate) {

    synchronized (this) {
        for (CacheItem item : _writeQueue) {
            if (item.getUrlFingerprint() == urlFingerprint) {
                if (returnDate) {
                    long dateOut = dateFromCacheItem(item);
                    // if no date found, use current date as an approximate...
                    return (dateOut != 0) ? dateOut : System.currentTimeMillis();
                } else
                    return 1;
            }
        }
    }

    synchronized (this) {
        if (_fingerprintToLocalLogPos.get(urlFingerprint).size() != 0) {
            // assume recent date as an approximate 
            return System.currentTimeMillis();
        }
    }

    // now check hdfs indexes 
    ImmutableList<HDFSFileIndex> indexList = null;

    synchronized (CacheManager.this) {
        indexList = ImmutableList.copyOf(_hdfsIndexList);
    }

    long timeStart = System.currentTimeMillis();

    // first check local item cache ...
    TreeSet<Long> cachedItems = new TreeSet<Long>();

    for (HDFSFileIndex index : Lists.reverse(indexList)) {
        try {
            CacheItem itemFound = index.findItem(urlFingerprint, !returnDate);
            if (itemFound != null) {
                if (returnDate) {
                    // get item date from headers .
                    long itemDate = dateFromCacheItem(itemFound);
                    if (itemDate == 0) {
                        itemDate = index.getIndexTimestamp();
                        // if item date still 0, this is BAD !!!
                        if (itemDate == 0) {
                            LOG.error("!!!!!!UNABLE TO PARSE INDEX TIMESTAMP:" + index.getIndexDataPath());
                            itemDate = 1L;
                        }
                    }
                    // ok add it to the map ... 
                    cachedItems.add(itemDate);
                } else {
                    return 1;
                }
            }
        } catch (IOException e) {
            LOG.error(CCStringUtils.stringifyException(e));
        }
    }
    if (returnDate && cachedItems.size() != 0) {
        return cachedItems.last();
    }
    return 0;
}

From source file:org.commoncrawl.service.listcrawler.CrawlHistoryManager.java

private void iterateHDFSCrawlHistoryLog(long listId, long timestamp, TreeSet<URLFP> criteria,
        ItemUpdater targetList) throws IOException {

    // ok copy stuff locally if possible ...
    File localIndexPath = new File(getLocalDataDir(), CRAWL_HISTORY_HDFS_LOGFILE_PREFIX + timestamp + ".index");
    File localDataPath = new File(getLocalDataDir(), CRAWL_HISTORY_HDFS_LOGFILE_PREFIX + timestamp + ".data");
    File localBloomFilterPath = new File(getLocalDataDir(),
            CRAWL_HISTORY_HDFS_LOGFILE_PREFIX + timestamp + ".bloom");

    SequenceFile.Reader reader = null;
    Path mapFilePath = new Path(_remoteDataDirectory, CRAWL_HISTORY_HDFS_LOGFILE_PREFIX + timestamp);
    Path indexFilePath = new Path(mapFilePath, "index");
    Path dataFilePath = new Path(mapFilePath, "data");
    Path bloomFilePath = new Path(_remoteDataDirectory, CRAWL_HISTORY_HDFS_BLOOMFILTER_PREFIX + timestamp);

    // ok copy local first
    if (!localIndexPath.exists()) {
        LOG.info("LIST:" + listId + " Copying Index File:" + indexFilePath + " to Local:"
                + localIndexPath.getAbsolutePath());
        try {
            _remoteFileSystem.copyToLocalFile(indexFilePath, new Path(localIndexPath.getAbsolutePath()));
        } catch (IOException e) {
            LOG.error(CCStringUtils.stringifyException(e));
            localIndexPath.delete();
            throw e;
        }
    }
    if (!localDataPath.exists()) {
        LOG.info("LIST:" + listId + " Copying Data File:" + dataFilePath + " to Local:"
                + localDataPath.getAbsolutePath());
        try {
            _remoteFileSystem.copyToLocalFile(dataFilePath, new Path(localDataPath.getAbsolutePath()));
        } catch (IOException e) {
            LOG.error(CCStringUtils.stringifyException(e));
            localDataPath.delete();
            throw e;
        }

    }
    if (!localBloomFilterPath.exists()) {
        LOG.info("LIST:" + listId + " Copying Bloom File:" + bloomFilePath + " to Local:"
                + localBloomFilterPath.getAbsolutePath());
        try {
            _remoteFileSystem.copyToLocalFile(bloomFilePath, new Path(localBloomFilterPath.getAbsolutePath()));
        } catch (IOException e) {
            LOG.error(CCStringUtils.stringifyException(e));
            localBloomFilterPath.delete();
            throw e;
        }

    }

    // ok open local
    FileSystem localFileSystem = FileSystem.getLocal(CrawlEnvironment.getHadoopConfig());

    SequenceFile.Reader indexReader = new SequenceFile.Reader(localFileSystem,
            new Path(localIndexPath.getAbsolutePath()), CrawlEnvironment.getHadoopConfig());

    try {
        URLFP firstIndexKey = null;
        URLFP lastIndexKey = new URLFP();
        LongWritable position = new LongWritable();
        while (indexReader.next(lastIndexKey, position)) {
            if (firstIndexKey == null) {
                try {
                    firstIndexKey = (URLFP) lastIndexKey.clone();
                } catch (CloneNotSupportedException e) {
                    // TODO Auto-generated catch block
                    e.printStackTrace();
                }
            }
        }

        LOG.info("LIST:" + listId + " ### Index First Domain:" + firstIndexKey.getDomainHash() + " URLHash:"
                + firstIndexKey.getUrlHash() + " Last Domain:" + lastIndexKey.getDomainHash() + " URLHash:"
                + lastIndexKey.getUrlHash());

        URLFP criteriaFirstKey = criteria.first();
        URLFP criteriaLastKey = criteria.last();

        if (firstIndexKey.compareTo(criteriaLastKey) > 0 || lastIndexKey.compareTo(criteriaFirstKey) < 0) {
            LOG.info("LIST:" + listId + " Entire Index is Out of Range. Skipping!");
            LOG.info("LIST:" + listId + " ### Criteria First Domain:" + criteriaFirstKey.getDomainHash()
                    + " URLHash:" + criteriaFirstKey.getUrlHash() + " Last Domain:"
                    + criteriaLastKey.getDomainHash() + " URLHash:" + criteriaLastKey.getUrlHash());
            return;
        }
    } finally {
        indexReader.close();
    }

    LOG.info("LIST:" + listId + " ### Index:" + timestamp + " Passed Test. Doing Full Scan");
    // load bloom filter
    FSDataInputStream bloomFilterStream = localFileSystem
            .open(new Path(localBloomFilterPath.getAbsolutePath()));

    int hitCount = 0;

    try {
        URLFPBloomFilter filter = URLFPBloomFilter.load(bloomFilterStream);

        URLFP fpOut = new URLFP();
        ProxyCrawlHistoryItem itemOut = new ProxyCrawlHistoryItem();
        DataOutputBuffer valueBytesUncompressed = new DataOutputBuffer();
        ValueBytes valueBytes = null;
        DataInputBuffer valueReader = new DataInputBuffer();
        DataOutputBuffer keyBytes = new DataOutputBuffer();
        DataInputBuffer keyReader = new DataInputBuffer();

        URLFP lastFP = null;

        outerLoop:
        // now iterate each item in the criteria
        for (URLFP targetFP : criteria) {
            // if fingerprint is present in filter ...
            if (filter.isPresent(targetFP)) {
                // check to see if reader is initialized ...
                if (reader == null) {
                    LOG.info("LIST:" + listId + " BloomFilter First Hit. Initializing Reader for file at:"
                            + localDataPath.getAbsolutePath());
                    reader = new SequenceFile.Reader(localFileSystem, new Path(localDataPath.getAbsolutePath()),
                            CrawlEnvironment.getHadoopConfig());
                    LOG.info("LIST:" + listId + " BloomFilter First Hit. Initialized Reader for file at:"
                            + localDataPath.getAbsolutePath());
                    valueBytes = reader.createValueBytes();
                }

                // if last read fingerprint was not null ...
                if (lastFP != null) {
                    // does it match the current item
                    if (lastFP.compareTo(targetFP) == 0) {
                        // decompress value bytes ...
                        valueBytesUncompressed.reset();
                        valueBytes.writeUncompressedBytes(valueBytesUncompressed);
                        // init valueReader
                        valueReader.reset(valueBytesUncompressed.getData(), valueBytesUncompressed.getLength());
                        itemOut.readFields(valueReader);
                        LOG.info("LIST:" + listId + " GOT HISTORY ITEM HIT. URL:" + +lastFP.getUrlHash()
                                + " File:" + dataFilePath);
                        // if so, null out last fp
                        lastFP = null;
                        // and update item state ...
                        targetList.updateItemState(targetFP, itemOut);

                        hitCount++;

                        continue;
                    }
                }

                // ok at this point .. read the next item in the list ...
                lastFP = null;

                while (reader.nextRaw(keyBytes, valueBytes) != -1) {
                    // init reader ...
                    keyReader.reset(keyBytes.getData(), keyBytes.getLength());
                    // read key
                    fpOut.readFields(keyReader);
                    // reset output buffer
                    keyBytes.reset();

                    // LOG.info("LIST:" + listId +" nextRaw Returned DH:" +
                    // fpOut.getDomainHash() + " UH:" + fpOut.getUrlHash() + " TDH:" +
                    // targetFP.getDomainHash() + " TUH:" + targetFP.getUrlHash());
                    // compare it to target ...
                    int result = fpOut.compareTo(targetFP);
                    // ok does it match .. ?
                    if (result == 0) {
                        // decompress value bytes ...
                        valueBytesUncompressed.reset();
                        valueBytes.writeUncompressedBytes(valueBytesUncompressed);
                        // init valueReader
                        valueReader.reset(valueBytesUncompressed.getData(), valueBytesUncompressed.getLength());
                        itemOut.readFields(valueReader);

                        LOG.info("LIST:" + listId + " GOT HISTORY ITEM HIT. URL:" + fpOut.getUrlHash()
                                + " File:" + dataFilePath);
                        // update item state ...
                        targetList.updateItemState(targetFP, itemOut);

                        hitCount++;
                        // and break to outer loop
                        continue outerLoop;
                    } else if (result == 1) {
                        // LOG.info("LIST:" + listId +
                        // " FP Comparison Returned 1. Going to OuterLoop");
                        // update last FP
                        lastFP = fpOut;
                        // continue outer loop
                        continue outerLoop;
                    } else {
                        // otherwise skip
                    }
                }
                // ok if we got here .. we are done reading the sequence file and did
                // not find a trailing match
                LOG.warn("LIST:" + listId
                        + " ### Reached End Of File Searching for item in MapFile while BloomFilter returned positivie result (DomainHash:"
                        + targetFP.getDomainHash() + "FP:" + targetFP.getUrlHash() + ")");
                // break out of outer loop

                break;
            }
        }
    } finally {
        bloomFilterStream.close();

        if (reader != null) {
            reader.close();
        }

        LOG.info("LIST:" + listId + " File:" + dataFilePath + " DONE. HitCount:" + hitCount);
    }
}