Example usage for java.util PriorityQueue PriorityQueue

Introduction

On this page you can find example usage for the java.util.PriorityQueue constructor PriorityQueue(int initialCapacity, Comparator<? super E> comparator).

Prototype

public PriorityQueue(int initialCapacity, Comparator<? super E> comparator) 

Document

Creates a PriorityQueue with the specified initial capacity that orders its elements according to the specified comparator.
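
A minimal, self-contained sketch of calling this constructor (class name and values below are illustrative): the initial capacity must be at least 1, and the comparator decides which element poll() returns first.

import java.util.Comparator;
import java.util.PriorityQueue;

public class PriorityQueueConstructorExample {
    public static void main(String[] args) {
        // Order strings by length: the shortest string becomes the head of the queue.
        Comparator<String> byLength = Comparator.comparingInt(String::length);
        PriorityQueue<String> queue = new PriorityQueue<>(16, byLength);
        queue.add("banana");
        queue.add("fig");
        queue.add("apple");

        // poll() drains the queue in comparator order: fig, apple, banana.
        while (!queue.isEmpty()) {
            System.out.println(queue.poll());
        }

        // An initial capacity below 1 is rejected with IllegalArgumentException,
        // which is why several of the examples below clamp the capacity to at least 1.
    }
}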

Usage

From source file:org.apache.hadoop.corona.PoolSchedulable.java

/**
 * Get the queue of sessions in the pool sorted by comparator
 * @param comparator the comparator to use when sorting sessions
 * @return the queue of the sessions sorted by a given comparator
 */
public Queue<SessionSchedulable> createSessionQueue(ScheduleComparator comparator) {
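    // PriorityQueue rejects an initial capacity below 1, hence the fallback to 1 when there are no snapshot sessions.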
    int initCapacity = snapshotSessions.size() == 0 ? 1 : snapshotSessions.size();
    Queue<SessionSchedulable> sessionQueue = new PriorityQueue<SessionSchedulable>(initCapacity, comparator);
    sessionQueue.addAll(snapshotSessions);
    return sessionQueue;
}

From source file:org.apache.hadoop.hdfs.server.namenode.JournalSet.java

/**
 * In this function, we get a bunch of streams from all of our JournalManager
 * objects.  Then we add these to the collection one by one.
 *
 * @param streams          The collection to add the streams to.  It may or 
 *                         may not be sorted-- this is up to the caller.
 * @param fromTxId         The transaction ID to start looking for streams at
 * @param inProgressOk     Should we consider unfinalized streams?
 */
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams, long fromTxId, boolean inProgressOk)
        throws IOException {
    final PriorityQueue<EditLogInputStream> allStreams = new PriorityQueue<EditLogInputStream>(64,
            EDIT_LOG_INPUT_STREAM_COMPARATOR);
    for (JournalAndStream jas : journals) {
        if (jas.isDisabled()) {
            LOG.info("Skipping jas " + jas + " since it's disabled");
            continue;
        }
        try {
            jas.getManager().selectInputStreams(allStreams, fromTxId, inProgressOk);
        } catch (IOException ioe) {
            LOG.warn("Unable to determine input streams from " + jas.getManager() + ". Skipping.", ioe);
        }
    }
    chainAndMakeRedundantStreams(streams, allStreams, fromTxId);
}

From source file:$.HyperGraphBuilder$.java

@Override
    public V_GenericGraph makeGraphResponse(final V_GraphQuery graphQuery) throws Exception {
        nodeList = new HashMap<String, V_GenericNode>();
        // edgeMap = new HashMap<String, V_GenericEdge>();
        edgeList = new HashMap<String, V_GenericEdge>();
        scannedQueries = new HashSet<String>();

        final PriorityQueue<G_EntityQuery> queriesToRun = new PriorityQueue<G_EntityQuery>(10,
                new ScoreComparator());
        Map<String, V_GenericNode> nodesFromPreviousDegree = new HashMap<String, V_GenericNode>();
        Map<String, V_GenericEdge> edgesFromPreviousDegree = new HashMap<String, V_GenericEdge>();

        if (graphQuery.getMaxHops() <= 0) {
            return new V_GenericGraph();
        } else {
            logger.debug("Attempting a graph for query " + graphQuery.toString());
        }

        int intStatus = 0;
        String strStatus = "Graph Loaded";

        final G_PropertyMatchDescriptor identifierList = G_PropertyMatchDescriptor.newBuilder().setKey("_all")
                .setListRange(new ListRangeHelper(G_PropertyType.STRING, graphQuery.getSearchIds()))
                .setConstraint(G_Constraint.EQUALS).build();
        final QueryHelper qh = new QueryHelper(identifierList);
        qh.setTargetSchema(index);
        queriesToRun.add(qh);

        int currentDegree = 0;
        for (currentDegree = 0; (currentDegree < graphQuery.getMaxHops())
                && (nodeList.size() < graphQuery.getMaxNodes()); currentDegree++) {
            G_EntityQuery eq = null;
            logger.debug("${symbol_dollar}${symbol_dollar}${symbol_dollar}${symbol_dollar}There are "
                    + queriesToRun.size() + " queries to run in the current degree.");
            while ((queriesToRun.size() > 0) && ((eq = queriesToRun.poll()) != null)
                    && (nodeList.size() < graphQuery.getMaxNodes())) {

                if (ValidationUtils.isValid(eq.getPropertyMatchDescriptors())) {
                    nodesFromPreviousDegree = new HashMap<String, V_GenericNode>(nodeList);
                    edgesFromPreviousDegree = new HashMap<String, V_GenericEdge>(edgeList);
                    logger.debug("Processing degree " + currentDegree);

                    /**
                     * This will end up building nodes and edges, and creating
                     * new queries for the queue
                     */
                    logger.debug("1111=====Running query " + eq.toString());
                    getDAO().performCallback(0, eq.getMaxResult(), this, eq);
                    logger.debug("3333====After running " + eq.toString() + ", there are "
                            + queriesToRunNextDegree.size() + " queries to run in the next degree.");
                }
            } // end while loop

            // very important!!
            // unscannedNodeList.clear();
            // ////////////////////////////////////////////////
            logger.debug("4444==== At the end of degree " + currentDegree + ", there are " + nodeList.size()
                    + " nodes and " + edgeList.size() + " edges");

            logger.debug(
                    "5555====There are " + queriesToRunNextDegree.size() + " queries to run in the next degree.");
            queriesToRun.addAll(queriesToRunNextDegree);
            queriesToRunNextDegree.clear();
        }

        // All hops have been done
        // Check to see if we have too many nodes.
        if (nodeList.size() > graphQuery.getMaxNodes()) {
            nodeList = nodesFromPreviousDegree;
            edgeList = edgesFromPreviousDegree;
            intStatus = 1; // will trigger the message.
            strStatus = "Returning only " + currentDegree
                    + " hops, as maximum nodes you requested would be exceeded";
        } else {
            intStatus = 1; // will trigger the message.
            strStatus = "Returning " + nodeList.size() + " nodes and " + edgeList.size() + " edges.";
        }

        // NOW finally add in all those unique edges.

        performPostProcess(graphQuery);
        final V_GenericGraph g = new V_GenericGraph(nodeList, edgeList);
        g.setIntStatus(intStatus);
        g.setStrStatus(strStatus);
        logger.debug("Graph status: " + g.getStrStatus());
        for (final V_LegendItem li : legendItems) {
            g.addLegendItem(li);
        }

        return g;
    }

From source file:com.anhth12.lambda.app.serving.als.model.ALSServingModel.java

public List<Pair<String, Double>> topN(final ToDoubleFunction<float[]> scoreFn,
        final ObjDoubleToDoubleFunction<String> rescoreFn, final int howMany,
        final Predicate<String> allowedPredicate) {

    List<Callable<Iterable<Pair<String, Double>>>> tasks = new ArrayList<>(Y.length);
    for (int partition = 0; partition < Y.length; partition++) {
        final int thePartition = partition;
        tasks.add(new LoggingCallable<Iterable<Pair<String, Double>>>() {

            @Override
            public Iterable<Pair<String, Double>> doCall() throws Exception {
                Queue<Pair<String, Double>> topN = new PriorityQueue<>(howMany + 1,
                        PairComparators.<Double>bySecond());
                TopNConsumer topNPoc = new TopNConsumer(topN, howMany, scoreFn, rescoreFn, allowedPredicate);
                try (AutoLock al = new AutoLock(yLocks[thePartition].readLock())) {
                    Y[thePartition].forEach(topNPoc);
                }

                return topN;
            }
        });
    }

    List<Iterable<Pair<String, Double>>> iterables = new ArrayList<>();

    if (Y.length >= 2) {
        try {
            for (Future<Iterable<Pair<String, Double>>> future : executor.invokeAll(tasks)) {
                iterables.add(future.get());
            }
        } catch (InterruptedException | ExecutionException e) {
            throw new IllegalStateException(e);
        }
    } else {
        try {
            iterables.add(tasks.get(0).call());
        } catch (Exception e) {
            throw new IllegalStateException(e);
        }
    }

    return Ordering.from(PairComparators.<Double>bySecond()).greatestOf(Iterables.concat(iterables), howMany);

}

From source file:com.linkedin.pinot.routing.builder.KafkaLowLevelConsumerRoutingTableBuilder.java

@Override
public List<ServerToSegmentSetMap> computeRoutingTableFromExternalView(String tableName,
        ExternalView externalView, List<InstanceConfig> instanceConfigList) {
    // We build the routing table based on the external view here. What we want to do is to make sure that we uphold
    // the guarantees clients expect (no duplicate records, eventual consistency) and spread the load as equally as
    // possible between the servers.
    //
    // Each Kafka partition contains a fraction of the data, so we need to make sure that we query all partitions.
    // Because in certain unlikely degenerate scenarios, we can consume overlapping data until segments are flushed (at
    // which point the overlapping data is discarded during the reconciliation process with the controller), we need to
    // ensure that the query that is sent has only one partition in CONSUMING state in order to avoid duplicate records.
    //
    // Because we also want to spread the load as equally as possible between servers, we use a weighted random
    // replica selection that favors picking replicas with fewer segments assigned to them, thus having an approximately
    // equal distribution of load between servers.
    //
    // For example, given three replicas with 1, 2 and 3 segments assigned to each, the replica with one segment should
    // have a weight of 2, which is the maximum segment count minus the segment count for that replica. Thus, each
    // replica other than the replica(s) with the maximum segment count should have a chance of getting a segment
    // assigned to it. This corresponds to alternative three below:
    //
    // Alternative 1 (weight is sum of segment counts - segment count in that replica):
    // (6 - 1) = 5 -> P(0.4166)
    // (6 - 2) = 4 -> P(0.3333)
    // (6 - 3) = 3 -> P(0.2500)
    //
    // Alternative 2 (weight is max of segment counts - segment count in that replica + 1):
    // (3 - 1) + 1 = 3 -> P(0.5000)
    // (3 - 2) + 1 = 2 -> P(0.3333)
    // (3 - 3) + 1 = 1 -> P(0.1666)
    //
    // Alternative 3 (weight is max of segment counts - segment count in that replica):
    // (3 - 1) = 2 -> P(0.6666)
    // (3 - 2) = 1 -> P(0.3333)
    // (3 - 3) = 0 -> P(0.0000)
    //
    // Of those three weighting alternatives, the third one has the smallest standard deviation of the number of
    // segments assigned per replica, so it corresponds to the weighting strategy used for segment assignment. Empirical
    // testing shows that for 20 segments and three replicas, the standard deviation of each alternative is respectively
    // 2.112, 1.496 and 0.853.
    //
    // This algorithm works as follows:
    // 1. Gather all segments and group them by Kafka partition, sorted by sequence number
    // 2. Ensure that for each Kafka partition, we have at most one Helix partition (Pinot segment) in consuming state
    // 3. Sort all the segments to be used during assignment in ascending order of replicas
    // 4. For each segment to be used during assignment, pick a random replica, weighted by the number of segments
    //    assigned to each replica.

    // 1. Gather all segments and group them by Kafka partition, sorted by sequence number
    Map<String, SortedSet<SegmentName>> sortedSegmentsByKafkaPartition = new HashMap<String, SortedSet<SegmentName>>();
    for (String helixPartitionName : externalView.getPartitionSet()) {
        // Ignore segments that are not low level consumer segments
        if (!SegmentNameBuilder.Realtime.isRealtimeV2Name(helixPartitionName)) {
            continue;
        }

        final LLCSegmentName segmentName = new LLCSegmentName(helixPartitionName);
        String kafkaPartitionName = segmentName.getPartitionRange();
        SortedSet<SegmentName> segmentsForPartition = sortedSegmentsByKafkaPartition.get(kafkaPartitionName);

        // Create sorted set if necessary
        if (segmentsForPartition == null) {
            segmentsForPartition = new TreeSet<SegmentName>();

            sortedSegmentsByKafkaPartition.put(kafkaPartitionName, segmentsForPartition);
        }

        segmentsForPartition.add(segmentName);
    }

    // 2. Ensure that for each Kafka partition, we have at most one Helix partition (Pinot segment) in consuming state
    Map<String, SegmentName> allowedSegmentInConsumingStateByKafkaPartition = new HashMap<String, SegmentName>();
    for (String kafkaPartition : sortedSegmentsByKafkaPartition.keySet()) {
        SortedSet<SegmentName> sortedSegmentsForKafkaPartition = sortedSegmentsByKafkaPartition
                .get(kafkaPartition);
        SegmentName lastAllowedSegmentInConsumingState = null;

        for (SegmentName segmentName : sortedSegmentsForKafkaPartition) {
            Map<String, String> helixPartitionState = externalView.getStateMap(segmentName.getSegmentName());
            boolean allInConsumingState = true;
            int replicasInConsumingState = 0;

            // Only keep the segment if all replicas have it in CONSUMING state
            for (String externalViewState : helixPartitionState.values()) {
                // Ignore ERROR state
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ERROR)) {
                    continue;
                }

                // Not all replicas have this segment in CONSUMING state, therefore don't consider this segment
                // assignable to CONSUMING replicas
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ONLINE)) {
                    allInConsumingState = false;
                    break;
                }

                // Otherwise count the replica as being in CONSUMING state
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.CONSUMING)) {
                    replicasInConsumingState++;
                }
            }

            // If all replicas have this segment in consuming state (and not all of them are in ERROR state), then pick this
            // segment to be the last allowed segment to be in CONSUMING state
            if (allInConsumingState && 0 < replicasInConsumingState) {
                lastAllowedSegmentInConsumingState = segmentName;
                break;
            }
        }

        if (lastAllowedSegmentInConsumingState != null) {
            allowedSegmentInConsumingStateByKafkaPartition.put(kafkaPartition,
                    lastAllowedSegmentInConsumingState);
        }
    }

    // 3. Sort all the segments to be used during assignment in ascending order of replicas

    // PriorityQueue throws IllegalArgumentException when given a size of zero
    int segmentCount = Math.max(externalView.getPartitionSet().size(), 1);
    PriorityQueue<Pair<String, Set<String>>> segmentToReplicaSetQueue = new PriorityQueue<Pair<String, Set<String>>>(
            segmentCount, new Comparator<Pair<String, Set<String>>>() {
                @Override
                public int compare(Pair<String, Set<String>> firstPair, Pair<String, Set<String>> secondPair) {
                    return Integer.compare(firstPair.getRight().size(), secondPair.getRight().size());
                }
            });
    RoutingTableInstancePruner instancePruner = new RoutingTableInstancePruner(instanceConfigList);

    for (Map.Entry<String, SortedSet<SegmentName>> entry : sortedSegmentsByKafkaPartition.entrySet()) {
        String kafkaPartition = entry.getKey();
        SortedSet<SegmentName> segmentNames = entry.getValue();

        // The only segment name which is allowed to be in CONSUMING state, or null if there is none
        SegmentName validConsumingSegment = allowedSegmentInConsumingStateByKafkaPartition.get(kafkaPartition);

        for (SegmentName segmentName : segmentNames) {
            Set<String> validReplicas = new HashSet<String>();
            Map<String, String> externalViewState = externalView.getStateMap(segmentName.getSegmentName());

            for (Map.Entry<String, String> instanceAndStateEntry : externalViewState.entrySet()) {
                String instance = instanceAndStateEntry.getKey();
                String state = instanceAndStateEntry.getValue();

                // Skip pruned replicas (shutting down or otherwise disabled)
                if (instancePruner.isInactive(instance)) {
                    continue;
                }

                // Replicas in ONLINE state are always allowed
                if (state.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ONLINE)) {
                    validReplicas.add(instance);
                    continue;
                }

                // Replicas in CONSUMING state are only allowed on the last segment
                if (state.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.CONSUMING)
                        && segmentName.equals(validConsumingSegment)) {
                    validReplicas.add(instance);
                }
            }

            segmentToReplicaSetQueue
                    .add(new ImmutablePair<String, Set<String>>(segmentName.getSegmentName(), validReplicas));

            // If this segment is the segment allowed in CONSUMING state, don't process segments after it in that Kafka
            // partition
            if (segmentName.equals(validConsumingSegment)) {
                break;
            }
        }
    }

    // 4. For each segment to be used during assignment, pick a random replica, weighted by the number of segments
    //    assigned to each replica.
    List<ServerToSegmentSetMap> routingTables = new ArrayList<ServerToSegmentSetMap>(routingTableCount);
    for (int i = 0; i < routingTableCount; ++i) {
        Map<String, Set<String>> instanceToSegmentSetMap = new HashMap<String, Set<String>>();

        PriorityQueue<Pair<String, Set<String>>> segmentToReplicaSetQueueCopy = new PriorityQueue<Pair<String, Set<String>>>(
                segmentToReplicaSetQueue);

        while (!segmentToReplicaSetQueueCopy.isEmpty()) {
            Pair<String, Set<String>> segmentAndValidReplicaSet = segmentToReplicaSetQueueCopy.poll();
            String segment = segmentAndValidReplicaSet.getKey();
            Set<String> validReplicaSet = segmentAndValidReplicaSet.getValue();

            String replica = pickWeightedRandomReplica(validReplicaSet, instanceToSegmentSetMap);
            if (replica != null) {
                Set<String> segmentsForInstance = instanceToSegmentSetMap.get(replica);

                if (segmentsForInstance == null) {
                    segmentsForInstance = new HashSet<String>();
                    instanceToSegmentSetMap.put(replica, segmentsForInstance);
                }

                segmentsForInstance.add(segment);
            }
        }

        routingTables.add(new ServerToSegmentSetMap(instanceToSegmentSetMap));
    }

    return routingTables;
}
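
The pickWeightedRandomReplica helper is not shown in this snippet. A minimal sketch of the Alternative 3 weighting described in the comments above (each candidate replica weighted by the maximum segment count minus its own segment count), using only standard types and a hypothetical class name, might look like this:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.Set;

// Hypothetical sketch, not the Pinot implementation of pickWeightedRandomReplica.
public final class WeightedReplicaPickSketch {

    public static String pickWeightedRandomReplica(Set<String> validReplicas,
            Map<String, Set<String>> instanceToSegmentSetMap, Random random) {
        if (validReplicas.isEmpty()) {
            return null;
        }

        // Find the largest segment count among the candidate replicas.
        int maxSegmentCount = 0;
        for (String replica : validReplicas) {
            Set<String> segments = instanceToSegmentSetMap.get(replica);
            maxSegmentCount = Math.max(maxSegmentCount, segments == null ? 0 : segments.size());
        }

        // Weight each replica by how far below the maximum it is (Alternative 3 above).
        Map<String, Integer> weights = new HashMap<String, Integer>();
        int totalWeight = 0;
        for (String replica : validReplicas) {
            Set<String> segments = instanceToSegmentSetMap.get(replica);
            int weight = maxSegmentCount - (segments == null ? 0 : segments.size());
            weights.put(replica, weight);
            totalWeight += weight;
        }

        // If every candidate already carries the maximum count, fall back to a uniform pick.
        if (totalWeight == 0) {
            return new ArrayList<String>(validReplicas).get(random.nextInt(validReplicas.size()));
        }

        // Pick a replica with probability proportional to its weight.
        int target = random.nextInt(totalWeight);
        for (Map.Entry<String, Integer> entry : weights.entrySet()) {
            target -= entry.getValue();
            if (target < 0) {
                return entry.getKey();
            }
        }
        return null; // not reached
    }
}

For the example given in the comments (replicas carrying 1, 2 and 3 segments), the weights come out as 2, 1 and 0, giving pick probabilities of roughly 0.6666, 0.3333 and 0.0000, matching Alternative 3.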

From source file:at.illecker.hama.hybrid.examples.onlinecf.OnlineCF.java

public List<KeyValuePair<Long, Double>> getMostSimilarUsers(long user, int count) {

    Comparator<KeyValuePair<Long, Double>> similarityComparator = new Comparator<KeyValuePair<Long, Double>>() {

        @Override
        public int compare(KeyValuePair<Long, Double> arg0, KeyValuePair<Long, Double> arg1) {
            double difference = arg0.getValue().doubleValue() - arg1.getValue().doubleValue();
            return (int) (100000 * difference);
        }
    };

    PriorityQueue<KeyValuePair<Long, Double>> queue = new PriorityQueue<KeyValuePair<Long, Double>>(count,
            similarityComparator);

    LinkedList<KeyValuePair<Long, Double>> results = new LinkedList<KeyValuePair<Long, Double>>();
    for (Long candidateUser : m_modelUserFactorizedValues.keySet()) {
        double similarity = calculateUserSimilarity(user, candidateUser);
        KeyValuePair<Long, Double> targetUser = new KeyValuePair<Long, Double>(candidateUser, similarity);
        queue.add(targetUser);
    }
    results.addAll(queue);
    return results;
}
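
Note that PriorityQueue's iterator (used implicitly by addAll above) does not traverse elements in comparator order, and the queue is never trimmed to count entries, so the returned list is neither sorted nor limited to count users. A minimal bounded top-N sketch using Double.compare and standard types only (the candidate iterable and similarity function are hypothetical stand-ins for m_modelUserFactorizedValues.keySet() and calculateUserSimilarity) might look like this:

import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.function.ToDoubleFunction;

// Hypothetical bounded top-N sketch; the similarity function is supplied by the caller.
public final class TopNSimilarUsersSketch {

    public static List<Map.Entry<Long, Double>> topNSimilarUsers(Iterable<Long> candidateUsers,
            ToDoubleFunction<Long> similarityFn, int count) {
        // Min-heap on similarity: the weakest candidate kept so far sits at the head.
        PriorityQueue<Map.Entry<Long, Double>> queue = new PriorityQueue<>(count + 1,
                (a, b) -> Double.compare(a.getValue(), b.getValue()));

        for (Long candidate : candidateUsers) {
            queue.add(new AbstractMap.SimpleEntry<>(candidate, similarityFn.applyAsDouble(candidate)));
            if (queue.size() > count) {
                queue.poll(); // evict the least similar candidate
            }
        }

        // The heap's iterator is unordered, so poll and reverse to get descending similarity.
        List<Map.Entry<Long, Double>> results = new ArrayList<>(queue.size());
        while (!queue.isEmpty()) {
            results.add(queue.poll());
        }
        Collections.reverse(results);
        return results;
    }
}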

From source file:org.springframework.cloud.stream.app.pose.estimation.processor.PoseEstimationTensorflowOutputConverter.java

/**
 *
 * The Part Affinity Field (PAF) is a 2D vector field for each limb. For each pixel in the area belonging to a
 * particular limb, a 2D vector encodes the direction that points from one part of the limb to the other.
 * Each type of limb has a corresponding affinity field joining its two associated body parts.
 *
 * @param limbType Limb Type to find limb candidates for.
 * @param fromParts
 * @param toParts
 * @param outputTensor
 * @return Returns the Limb candidates in a queue ordered by their total PAF score in descending order.
 */
private PriorityQueue<Limb> findLimbCandidates(Model.LimbType limbType, List<Part> fromParts,
        List<Part> toParts, float[][][] outputTensor) {

    // Use a priority queue to keep the limb instance candidates in descending order.
    int initialSize = (fromParts.size() * toParts.size()) / 2 + 1;
    PriorityQueue<Limb> limbCandidatesQueue = new PriorityQueue<>(initialSize, (limb1, limb2) -> {
        if (limb1.getPafScore() == limb2.getPafScore())
            return 0;
        return (limb1.getPafScore() > limb2.getPafScore()) ? -1 : 1;
    });

    // For every {from -> to} pair compute a line integral over the Limb-PAF vector field along the line
    // connecting both Parts. Computed value is used as a Limb candidate score. The higher the value the
    // higher the chance for connection between those Parts.
    for (Part fromPart : fromParts) {
        for (Part toPart : toParts) {

            float deltaX = toPart.getY() - fromPart.getY();
            float deltaY = toPart.getX() - fromPart.getX();
            float norm = (float) Math.sqrt(deltaX * deltaX + deltaY * deltaY);

            // Skip self-pointing edges (e.g. fromPartInstance == toPartInstance)
            if (norm > 1e-12) {

                float dx = deltaX / norm;
                float dy = deltaY / norm;

                int STEP_PAF = 10;
                float[] pafScores = new float[STEP_PAF];
                int stepPafScoreCount = 0;
                float totalPafScore = 0.0f;
                for (int t = 0; t < STEP_PAF; t++) {
                    int tx = (int) ((float) fromPart.getY() + (t * deltaX / STEP_PAF) + 0.5);
                    int ty = (int) ((float) fromPart.getX() + (t * deltaY / STEP_PAF) + 0.5);

                    float pafScoreX = outputTensor[tx][ty][limbType.getPafIndexX()];
                    float pafScoreY = outputTensor[tx][ty][limbType.getPafIndexY()];

                    pafScores[t] = (dy * pafScoreX) + (dx * pafScoreY);

                    totalPafScore += pafScores[t];

                    // Filter out the step PAF scores below a given, pre-defined stepPafScoreThreshold
                    if (pafScores[t] > poseProperties.getStepPafScoreThreshold()) {
                        stepPafScoreCount++;
                    }
                }

                if (totalPafScore > poseProperties.getTotalPafScoreThreshold()
                        && stepPafScoreCount >= poseProperties.getPafCountThreshold()) {
                    limbCandidatesQueue.add(new Limb(limbType, totalPafScore, fromPart, toPart));
                }
            }
        }
    }

    return limbCandidatesQueue;
}

From source file:com.cloudera.oryx.ml.serving.als.model.ALSServingModel.java

public List<Pair<String, Double>> topN(final DoubleFunction<float[]> scoreFn, final int howMany,
        final Predicate<String> allowedPredicate) {

    List<Callable<Iterable<Pair<String, Double>>>> tasks = new ArrayList<>(Y.length);
    for (int partition = 0; partition < Y.length; partition++) {
        final int thePartition = partition;
        tasks.add(new LoggingCallable<Iterable<Pair<String, Double>>>() {
            @Override
            public Iterable<Pair<String, Double>> doCall() {
                Queue<Pair<String, Double>> topN = new PriorityQueue<>(howMany + 1,
                        PairComparators.<Double>bySecond());
                TopNConsumer topNProc = new TopNConsumer(topN, howMany, scoreFn, allowedPredicate);

                Lock lock = yLocks[thePartition].readLock();
                lock.lock();
                try {
                    Y[thePartition].forEach(topNProc);
                } finally {
                    lock.unlock();
                }
                // Ordering and excess items don't matter; will be merged and finally sorted later
                return topN;
            }
        });
    }

    List<Iterable<Pair<String, Double>>> iterables = new ArrayList<>();
    if (Y.length >= 2) {
        try {
            for (Future<Iterable<Pair<String, Double>>> future : executor.invokeAll(tasks)) {
                iterables.add(future.get());
            }
        } catch (InterruptedException e) {
            throw new IllegalStateException(e);
        } catch (ExecutionException e) {
            throw new IllegalStateException(e.getCause());
        }
    } else {
        try {
            iterables.add(tasks.get(0).call());
        } catch (Exception e) {
            throw new IllegalStateException(e);
        }
    }

    return Ordering.from(PairComparators.<Double>bySecond()).greatestOf(Iterables.concat(iterables), howMany);
}

From source file:pt.webdetails.cda.cache.CacheScheduleManager.java

private void initQueue() throws PluginHibernateException {
    Session s = getHibernateSession();
    s.beginTransaction();

    List l = s.createQuery("from CachedQuery").list();
    this.queue = new PriorityQueue<CachedQuery>(20, new SortByTimeDue());
    for (Object o : l) {
        CachedQuery cq = (CachedQuery) o;
        if (cq.getLastExecuted() == null) {
            cq.setLastExecuted(new Date(0L));
        }
        Date nextExecution;
        try {
            nextExecution = new CronExpression(cq.getCronString()).getNextValidTimeAfter(new Date());
        } catch (ParseException ex) {
            nextExecution = new Date(0);
            logger.error("Failed to schedule " + cq.toString());
        }
        cq.setNextExecution(nextExecution);
        this.queue.add(cq);

        s.save(cq);
    }

    s.flush();
    s.getTransaction().commit();
    s.close();
}

From source file:com.microsoft.azure.vmagent.AzureVMAgentCleanUpTask.java

public void cleanLeakedResources(final String resourceGroup, final ServicePrincipal servicePrincipal,
        final String cloudName, final DeploymentRegistrar deploymentRegistrar) {
    try {
        final List<String> validVMs = getValidVMs(cloudName);
        final Azure azureClient = TokenCache.getInstance(servicePrincipal).getAzureClient();
        //can't use listByTag because for some reason that method strips all the tags from the outputted resources (https://github.com/Azure/azure-sdk-for-java/issues/1436)
        final PagedList<GenericResource> resources = azureClient.genericResources().listByGroup(resourceGroup);

        if (resources == null || resources.isEmpty()) {
            return;
        }

        final PriorityQueue<GenericResource> resourcesMarkedForDeletion = new PriorityQueue<>(resources.size(),
                new Comparator<GenericResource>() {
                    @Override
                    public int compare(GenericResource o1, GenericResource o2) {
                        int o1Priority = getPriority(o1);
                        int o2Priority = getPriority(o2);
                        if (o1Priority == o2Priority) {
                            return 0;
                        }
                        return (o1Priority < o2Priority) ? -1 : 1;
                    }

                    private int getPriority(final GenericResource resource) {
                        final String type = resource.type();
                        if (StringUtils.containsIgnoreCase(type, "virtualMachine")) {
                            return 1;
                        }
                        if (StringUtils.containsIgnoreCase(type, "networkInterface")) {
                            return 2;
                        }
                        if (StringUtils.containsIgnoreCase(type, "IPAddress")) {
                            return 3;
                        }
                        return 4;
                    }
                });

        for (GenericResource resource : resources) {
            final Map<String, String> tags = resource.tags();
            if (!tags.containsKey(Constants.AZURE_RESOURCES_TAG_NAME) || !deploymentRegistrar.getDeploymentTag()
                    .matches(new AzureUtil.DeploymentTag(tags.get(Constants.AZURE_RESOURCES_TAG_NAME)))) {
                continue;
            }
            boolean shouldSkipDeletion = false;
            for (String validVM : validVMs) {
                if (resource.name().contains(validVM)) {
                    shouldSkipDeletion = true;
                    break;
                }
            }
            // we're not removing storage accounts or virtual networks - someone else might be using them
            if (shouldSkipDeletion || StringUtils.containsIgnoreCase(resource.type(), "StorageAccounts")
                    || StringUtils.containsIgnoreCase(resource.type(), "virtualNetworks")) {
                continue;
            }
            resourcesMarkedForDeletion.add(resource);
        }

        while (!resourcesMarkedForDeletion.isEmpty()) {
            try {
                final GenericResource resource = resourcesMarkedForDeletion.poll();
                if (resource == null)
                    continue;

                URI osDiskURI = null;
                if (StringUtils.containsIgnoreCase(resource.type(), "virtualMachine")) {
                    osDiskURI = new URI(
                            azureClient.virtualMachines().getById(resource.id()).osUnmanagedDiskVhdUri());
                }

                LOGGER.log(Level.INFO, "cleanLeakedResources: deleting {0} from resource group {1}",
                        new Object[] { resource.name(), resourceGroup });
                azureClient.genericResources().deleteById(resource.id());
                if (osDiskURI != null) {
                    AzureVMManagementServiceDelegate.removeStorageBlob(azureClient, osDiskURI, resourceGroup);
                }
            } catch (Exception e) {
                LOGGER.log(Level.INFO,
                        "AzureVMAgentCleanUpTask: cleanLeakedResources: failed to clean resource ", e);
            }
        }
    } catch (Exception e) {
        // No need to throw exception back, just log and move on. 
        LOGGER.log(Level.INFO,
                "AzureVMAgentCleanUpTask: cleanLeakedResources: failed to clean leaked resources ", e);
    }
}