Example usage for java.util PriorityQueue PriorityQueue

Introduction

On this page you can find example usages of the java.util.PriorityQueue(int, Comparator) constructor.

Prototype

public PriorityQueue(int initialCapacity, Comparator<? super E> comparator) 

Document

Creates a PriorityQueue with the specified initial capacity that orders its elements according to the specified comparator.
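
A minimal, self-contained sketch of this constructor in action (the class and values below are illustrative):

import java.util.Comparator;
import java.util.PriorityQueue;

public class PriorityQueueDemo {
    public static void main(String[] args) {
        // Order strings shortest-first. The initial capacity (16) is only a
        // sizing hint; the queue grows as needed, but a value below 1 throws
        // IllegalArgumentException.
        PriorityQueue<String> byLength = new PriorityQueue<String>(16, new Comparator<String>() {
            @Override
            public int compare(String a, String b) {
                return Integer.compare(a.length(), b.length());
            }
        });
        byLength.add("pear");
        byLength.add("fig");
        byLength.add("banana");
        System.out.println(byLength.poll()); // fig
        System.out.println(byLength.poll()); // pear
        System.out.println(byLength.poll()); // banana
    }
}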

Usage

From source file:beast.evolution.tree.ConstrainedClusterTree.java

/**
 * Perform clustering using a link method
 * This implementation uses a priority queue, resulting in an O(n^2 log(n)) algorithm
 *
 * @param nClusters    number of clusters
 * @param nClusterID
 * @param clusterNodes
 */
void doLinkClustering(int nClusters, final List<Integer>[] nClusterID, final NodeX[] clusterNodes) {
    Log.warning.print("Calculating distance");
    final int nInstances = taxaNames.size();
    final PriorityQueue<Tuple> queue = new PriorityQueue<Tuple>(nClusters * nClusters / 2,
            new TupleComparator());
    final double[][] fDistance0 = new double[nClusters][nClusters];
    for (int i = 0; i < nClusters; i++) {
        fDistance0[i][i] = 0;
        for (int j = i + 1; j < nClusters; j++) {
            fDistance0[i][j] = getDistance0(nClusterID[i], nClusterID[j]);
            fDistance0[j][i] = fDistance0[i][j];
            if (isCompatible(i, j, nClusterID)) {
                queue.add(new Tuple(fDistance0[i][j], i, j, 1, 1));
            }
        }
        // feedback on progress
        if ((i + 1) % 100 == 0) {
            if ((i + 1) % 1000 == 0) {
                Log.warning.print('|');
            } else {
                Log.warning.print('.');
            }
        }
    }
    Log.warning.print("\nClustering: ");
    while (nClusters > 1) {
        int iMin1 = -1;
        int iMin2 = -1;
        // use priority queue to find next best pair to cluster
        Tuple t;
        do {
            t = queue.poll();
        } while (t != null && (nClusterID[t.m_iCluster1].size() != t.m_nClusterSize1
                || nClusterID[t.m_iCluster2].size() != t.m_nClusterSize2));
        iMin1 = t.m_iCluster1;
        iMin2 = t.m_iCluster2;
        merge(iMin1, iMin2, t.m_fDist / 2.0, t.m_fDist / 2.0, nClusterID, clusterNodes);
        updateConstraints(nClusterID[iMin1]);

        // update distances & queue
        for (int i = 0; i < nInstances; i++) {
            if (i != iMin1 && nClusterID[i].size() != 0) {
                final int i1 = Math.min(iMin1, i);
                final int i2 = Math.max(iMin1, i);
                if (isCompatible(i1, i2, nClusterID)) {
                    final double fDistance = getDistance(fDistance0, nClusterID[i1], nClusterID[i2]);
                    queue.add(new Tuple(fDistance, i1, i2, nClusterID[i1].size(), nClusterID[i2].size()));
                }
            }
        }

        nClusters--;

        // feedback on progress
        if (nClusters % 100 == 0) {
            if (nClusters % 1000 == 0) {
                Log.warning.print('|');
            } else {
                Log.warning.print('.');
            }
        }
    }
    Log.warning.println(" done.");
}
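
Note the polling loop above: stale pairs are never removed from the queue (removal from a PriorityQueue is linear time); instead they are discarded when polled, by checking that the cluster sizes recorded in the tuple still match the live sizes. A stripped-down, hypothetical sketch of this lazy-deletion idiom:

import java.util.Comparator;
import java.util.PriorityQueue;

public class LazyDeletionDemo {
    // Hypothetical entry: a priority plus a version stamp taken at insertion time.
    static class Entry {
        final double priority;
        final int version;
        Entry(double priority, int version) {
            this.priority = priority;
            this.version = version;
        }
    }

    public static void main(String[] args) {
        int currentVersion = 2; // pretend the backing structure has changed twice
        PriorityQueue<Entry> queue = new PriorityQueue<Entry>(16, new Comparator<Entry>() {
            @Override
            public int compare(Entry o1, Entry o2) {
                return Double.compare(o1.priority, o2.priority);
            }
        });
        queue.add(new Entry(1.0, 1)); // stale: enqueued before the changes
        queue.add(new Entry(2.0, 2)); // still valid
        Entry e;
        do {
            e = queue.poll();
        } while (e != null && e.version != currentVersion); // discard stale entries on poll
        System.out.println(e == null ? "no valid entry" : "picked priority " + e.priority); // picked priority 2.0
    }
}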

From source file:org.apache.flink.streaming.runtime.operators.windowing.WindowOperator.java

private void restoreFromLegacyAlignedWindowOperator(DataInputViewStreamWrapper in) throws IOException {
    Preconditions.checkArgument(legacyWindowOperatorType != LegacyWindowOperatorType.NONE);

    final long nextEvaluationTime = in.readLong();
    final long nextSlideTime = in.readLong();

    validateMagicNumber(BEGIN_OF_STATE_MAGIC_NUMBER, in.readInt());

    restoredFromLegacyAlignedOpRecords = new PriorityQueue<>(42, new Comparator<StreamRecord<IN>>() {
        @Override
        public int compare(StreamRecord<IN> o1, StreamRecord<IN> o2) {
            return Long.compare(o1.getTimestamp(), o2.getTimestamp());
        }
    });

    switch (legacyWindowOperatorType) {
    case FAST_ACCUMULATING:
        restoreElementsFromLegacyAccumulatingAlignedWindowOperator(in, nextSlideTime);
        break;
    case FAST_AGGREGATING:
        restoreElementsFromLegacyAggregatingAlignedWindowOperator(in, nextSlideTime);
        break;
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("{} (taskIdx={}) restored {} events from legacy {}.", getClass().getSimpleName(),
                getRuntimeContext().getIndexOfThisSubtask(), restoredFromLegacyAlignedOpRecords.size(),
                legacyWindowOperatorType);
    }
}

From source file:org.jiemamy.utils.collection.CollectionsUtil.java

/**
 * Creates a new {@link PriorityQueue} with the specified initial capacity that
 * orders its elements according to the specified comparator.
 * 
 * @param <E> the element type of the {@link PriorityQueue}
 * @param initialCapacity the initial capacity for this priority queue
 * @param comparator the comparator used to order this priority queue
 * @return a new {@link PriorityQueue}
 * @throws IllegalArgumentException if <tt>initialCapacity</tt> is less than 1
 * @see PriorityQueue#PriorityQueue(int, Comparator)
 */
public static <E> PriorityQueue<E> newPriorityQueue(int initialCapacity, Comparator<? super E> comparator) {
    return new PriorityQueue<E>(initialCapacity, comparator);
}
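
A hypothetical call site for this factory; before Java 7's diamond operator, it saved repeating the element type on the right-hand side:

import java.util.Collections;
import java.util.PriorityQueue;

import org.jiemamy.utils.collection.CollectionsUtil;

public class NewPriorityQueueDemo {
    public static void main(String[] args) {
        // Max-heap of integers via a reversed natural ordering.
        PriorityQueue<Integer> maxHeap = CollectionsUtil.newPriorityQueue(16, Collections.<Integer>reverseOrder());
        maxHeap.add(3);
        maxHeap.add(7);
        System.out.println(maxHeap.peek()); // 7
    }
}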

From source file:org.rascalmpl.library.Prelude.java

public IValue shortestPathPair(ISet G, IValue From, IValue To) {
    buildAdjacencyListAndDistance(G);
    distance.put(From, new Distance(0));

    pred = new HashMap<IValue, IValue>();
    settled = new HashSet<IValue>();
    Q = new PriorityQueue<IValue>(G.size(), new NodeComparator(distance));
    Q.add(From);

    while (!Q.isEmpty()) {
        IValue u = Q.remove();
        if (u.isEqual(To))
            return extractPath(From, u);
        settled.add(u);
        relaxNeighbours(u);
    }
    return values.list();
}
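
This is Dijkstra's algorithm with the frontier held in a PriorityQueue ordered by tentative distance. java.util.PriorityQueue has no decrease-key operation, so implementations typically either compare through a shared distance map (as the NodeComparator here appears to) or re-add a vertex with its improved distance and skip outdated entries on poll. A compact, self-contained sketch of the re-insertion variant, with an illustrative graph:

import java.util.Arrays;
import java.util.Comparator;
import java.util.PriorityQueue;

public class DijkstraDemo {
    public static void main(String[] args) {
        // adjacency list: edges[u] is an array of {v, weight} pairs
        int[][][] edges = {
                { { 1, 4 }, { 2, 1 } }, // from node 0
                { { 3, 1 } },           // from node 1
                { { 1, 2 }, { 3, 5 } }, // from node 2
                {}                      // from node 3
        };
        int n = 4, source = 0;
        int[] dist = new int[n];
        Arrays.fill(dist, Integer.MAX_VALUE);
        dist[source] = 0;
        // queue entries are {vertex, distance-at-insertion-time}
        PriorityQueue<int[]> pq = new PriorityQueue<int[]>(n, Comparator.comparingInt((int[] e) -> e[1]));
        pq.add(new int[] { source, 0 });
        while (!pq.isEmpty()) {
            int[] top = pq.poll();
            int u = top[0];
            if (top[1] > dist[u]) {
                continue; // outdated entry left over from an earlier relaxation; skip it
            }
            for (int[] edge : edges[u]) {
                int v = edge[0], w = edge[1];
                if (dist[u] + w < dist[v]) {
                    dist[v] = dist[u] + w; // relax and re-add instead of decrease-key
                    pq.add(new int[] { v, dist[v] });
                }
            }
        }
        System.out.println(Arrays.toString(dist)); // [0, 3, 1, 4]
    }
}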

From source file:ml.shifu.shifu.core.dtrain.dt.DTMaster.java

@Override
public void init(MasterContext<DTMasterParams, DTWorkerParams> context) {
    Properties props = context.getProps();

    // init model config and column config list at first
    SourceType sourceType;
    try {
        sourceType = SourceType
                .valueOf(props.getProperty(CommonConstants.MODELSET_SOURCE_TYPE, SourceType.HDFS.toString()));
        this.modelConfig = CommonUtils.loadModelConfig(props.getProperty(CommonConstants.SHIFU_MODEL_CONFIG),
                sourceType);
        this.columnConfigList = CommonUtils
                .loadColumnConfigList(props.getProperty(CommonConstants.SHIFU_COLUMN_CONFIG), sourceType);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    // worker number is used to estimate nodes per iteration for stats
    this.workerNumber = NumberFormatUtils.getInt(props.getProperty(GuaguaConstants.GUAGUA_WORKER_NUMBER), true);

    // check if variables are set final selected
    int[] inputOutputIndex = DTrainUtils.getNumericAndCategoricalInputAndOutputCounts(this.columnConfigList);
    this.inputNum = inputOutputIndex[0] + inputOutputIndex[1];
    this.isAfterVarSelect = (inputOutputIndex[3] == 1);
    // cache all feature list for sampling features
    this.allFeatures = this.getAllFeatureList(columnConfigList, isAfterVarSelect);

    int trainerId = Integer.valueOf(context.getProps().getProperty(CommonConstants.SHIFU_TRAINER_ID, "0"));
    // If grid search is enabled, use the params selected for this trainer; otherwise use the params from ModelConfig.json
    GridSearch gs = new GridSearch(modelConfig.getTrain().getParams(),
            modelConfig.getTrain().getGridConfigFileContent());
    Map<String, Object> validParams = this.modelConfig.getTrain().getParams();
    if (gs.hasHyperParam()) {
        validParams = gs.getParams(trainerId);
        LOG.info("Start grid search master with params: {}", validParams);
    }

    Object vtObj = validParams.get("ValidationTolerance");
    if (vtObj != null) {
        try {
            validationTolerance = Double.parseDouble(vtObj.toString());
            LOG.warn("Validation by tolerance is enabled with value {}.", validationTolerance);
        } catch (NumberFormatException ee) {
            validationTolerance = 0d;
            LOG.warn(
                    "Validation by tolerance isn't enabled because of non numerical value of ValidationTolerance: {}.",
                    vtObj);
        }
    } else {
        LOG.warn("Validation by tolerance isn't enabled.");
    }

    // tree related parameters initialization
    Object fssObj = validParams.get("FeatureSubsetStrategy");
    if (fssObj != null) {
        try {
            this.featureSubsetRate = Double.parseDouble(fssObj.toString());
            // no need to validate that featureSubsetRate is in (0,1]; it is already validated in ModelInspector
            this.featureSubsetStrategy = null;
        } catch (NumberFormatException ee) {
            this.featureSubsetStrategy = FeatureSubsetStrategy.of(fssObj.toString());
        }
    } else {
        LOG.warn("FeatureSubsetStrategy is not set, set to TWOTHRIDS by default in DTMaster.");
        this.featureSubsetStrategy = FeatureSubsetStrategy.TWOTHIRDS;
        this.featureSubsetRate = 0;
    }

    // max depth
    Object maxDepthObj = validParams.get("MaxDepth");
    if (maxDepthObj != null) {
        this.maxDepth = Integer.valueOf(maxDepthObj.toString());
    } else {
        this.maxDepth = 10;
    }

    // max leaves, used for leaf-wise tree building; TODO add more benchmarks
    Object maxLeavesObj = validParams.get("MaxLeaves");
    if (maxLeavesObj != null) {
        this.maxLeaves = Integer.valueOf(maxLeavesObj.toString());
    } else {
        this.maxLeaves = -1;
    }

    // enable leaf wise tree building once maxLeaves is configured
    if (this.maxLeaves > 0) {
        this.isLeafWise = true;
    }

    // maxBatchSplitSize is the maximum number of nodes to split in one batch
    Object maxBatchSplitSizeObj = validParams.get("MaxBatchSplitSize");
    if (maxBatchSplitSizeObj != null) {
        this.maxBatchSplitSize = Integer.valueOf(maxBatchSplitSizeObj.toString());
    } else {
        // by default, split at most 32 nodes in one batch
        this.maxBatchSplitSize = 32;
    }

    assert this.maxDepth > 0 && this.maxDepth <= 20;

    // hidden parameter; bounds per-iteration stats memory to avoid OOM issues
    Object maxStatsMemoryMB = validParams.get("MaxStatsMemoryMB");
    if (maxStatsMemoryMB != null) {
        this.maxStatsMemory = Long.valueOf(validParams.get("MaxStatsMemoryMB").toString()) * 1024 * 1024;
        if (this.maxStatsMemory > ((2L * Runtime.getRuntime().maxMemory()) / 3)) {
            // if >= 2/3 max memory, take 2/3 max memory to avoid OOM
            this.maxStatsMemory = ((2L * Runtime.getRuntime().maxMemory()) / 3);
        }
    } else {
        // by default it is 1/2 of heap, about 1.5G setting in current Shifu
        this.maxStatsMemory = Runtime.getRuntime().maxMemory() / 2L;
    }

    // assert this.maxStatsMemory <= Math.min(Runtime.getRuntime().maxMemory() * 0.6, 800 * 1024 * 1024L);
    this.treeNum = Integer.valueOf(validParams.get("TreeNum").toString());
    this.isRF = ALGORITHM.RF.toString().equalsIgnoreCase(modelConfig.getAlgorithm());
    this.isGBDT = ALGORITHM.GBT.toString().equalsIgnoreCase(modelConfig.getAlgorithm());
    if (this.isGBDT) {
        // learning rate is only effective in GBDT
        this.learningRate = Double.valueOf(validParams.get(CommonConstants.LEARNING_RATE).toString());
    }

    // initialize impurity type according to regression or classification
    String imStr = validParams.get("Impurity").toString();
    int numClasses = 2;
    if (this.modelConfig.isClassification()) {
        numClasses = this.modelConfig.getTags().size();
    }
    // these two parameters control when tree growth stops
    int minInstancesPerNode = Integer.valueOf(validParams.get("MinInstancesPerNode").toString());
    double minInfoGain = Double.valueOf(validParams.get("MinInfoGain").toString());
    if (imStr.equalsIgnoreCase("entropy")) {
        impurity = new Entropy(numClasses, minInstancesPerNode, minInfoGain);
    } else if (imStr.equalsIgnoreCase("gini")) {
        impurity = new Gini(numClasses, minInstancesPerNode, minInfoGain);
    } else {
        impurity = new Variance(minInstancesPerNode, minInfoGain);
    }

    // checkpoint folder and interval (every # iterations to do checkpoint)
    this.checkpointInterval = NumberFormatUtils
            .getInt(context.getProps().getProperty(CommonConstants.SHIFU_DT_MASTER_CHECKPOINT_INTERVAL, "20"));
    this.checkpointOutput = new Path(context.getProps()
            .getProperty(CommonConstants.SHIFU_DT_MASTER_CHECKPOINT_FOLDER, "tmp/cp_" + context.getAppId()));

    // cache conf to avoid constructing a new Configuration each time
    this.conf = new Configuration();

    // if continuous model training is enabled
    this.isContinuousEnabled = Boolean.TRUE.toString()
            .equalsIgnoreCase(context.getProps().getProperty(CommonConstants.CONTINUOUS_TRAINING));

    this.dtEarlyStopDecider = new DTEarlyStopDecider(this.maxDepth);
    if (validParams.containsKey("EnableEarlyStop")
            && Boolean.valueOf(validParams.get("EnableEarlyStop").toString().toLowerCase())) {
        this.enableEarlyStop = true;
    }

    LOG.info(
            "Master init params: isAfterVarSel={}, featureSubsetStrategy={}, featureSubsetRate={} maxDepth={}, maxStatsMemory={}, "
                    + "treeNum={}, impurity={}, workerNumber={}, minInstancesPerNode={}, minInfoGain={}, isRF={}, "
                    + "isGBDT={}, isContinuousEnabled={}, enableEarlyStop={}.",
            isAfterVarSelect, featureSubsetStrategy, this.featureSubsetRate, maxDepth, maxStatsMemory, treeNum,
            imStr, this.workerNumber, minInstancesPerNode, minInfoGain, this.isRF, this.isGBDT,
            this.isContinuousEnabled, this.enableEarlyStop);

    this.toDoQueue = new LinkedList<TreeNode>();

    if (this.isLeafWise) {
        this.toSplitQueue = new PriorityQueue<TreeNode>(64, new Comparator<TreeNode>() {
            @Override
            public int compare(TreeNode o1, TreeNode o2) {
                return Double.compare(o2.getNode().getWgtCntRatio() * o2.getNode().getGain(),
                        o1.getNode().getWgtCntRatio() * o1.getNode().getGain());
            }
        });
    }
    // initialize trees
    if (context.isFirstIteration()) {
        if (this.isRF) {
            // for random forest, trees are trained in parallel
            this.trees = new CopyOnWriteArrayList<TreeNode>();
            for (int i = 0; i < treeNum; i++) {
                this.trees.add(new TreeNode(i, new Node(Node.ROOT_INDEX), 1d));
            }
        }
        if (this.isGBDT) {
            if (isContinuousEnabled) {
                TreeModel existingModel;
                try {
                    Path modelPath = new Path(context.getProps().getProperty(CommonConstants.GUAGUA_OUTPUT));
                    existingModel = (TreeModel) ModelSpecLoaderUtils.loadModel(modelConfig, modelPath,
                            ShifuFileUtils
                                    .getFileSystemBySourceType(this.modelConfig.getDataSet().getSource()));
                    if (existingModel == null) {
                        // null means no existing model file or model file is in wrong format
                        this.trees = new CopyOnWriteArrayList<TreeNode>();
                        this.trees.add(new TreeNode(0, new Node(Node.ROOT_INDEX), 1d));// learning rate is 1 for the 1st tree
                        LOG.info("Starting to train model from scratch and existing model is empty.");
                    } else {
                        this.trees = existingModel.getTrees();
                        this.existingTreeSize = this.trees.size();
                        // starting from existing models, first tree learning rate is current learning rate
                        this.trees.add(new TreeNode(this.existingTreeSize, new Node(Node.ROOT_INDEX),
                                this.existingTreeSize == 0 ? 1d : this.learningRate));
                        LOG.info("Starting to train model from existing model {} with existing trees {}.",
                                modelPath, existingTreeSize);
                    }
                } catch (IOException e) {
                    throw new GuaguaRuntimeException(e);
                }
            } else {
                this.trees = new CopyOnWriteArrayList<TreeNode>();
                // for GBDT, initialize the first tree; trees are trained sequentially, and the first tree's learning rate is 1
                this.trees.add(new TreeNode(0, new Node(Node.ROOT_INDEX), 1.0d));
            }
        }
    } else {
        // recover all state once the master fails over
        LOG.info("Recover master status from checkpoint file {}", this.checkpointOutput);
        recoverMasterStatus(sourceType);
    }
}

From source file:com.uber.stream.kafka.mirrormaker.manager.core.ControllerHelixManager.java

public void scaleCurrentCluster() throws Exception {
    int oldTotalNumWorker = 0;
    int newTotalNumWorker = 0;
    Map<String, Integer> _routeWorkerOverrides = getRouteWorkerOverride();
    for (String pipeline : _pipelineToInstanceMap.keySet()) {
        LOGGER.info("Start rescale pipeline: {}", pipeline);
        PriorityQueue<InstanceTopicPartitionHolder> newItphQueue = new PriorityQueue<>(1,
                InstanceTopicPartitionHolder.totalWorkloadComparator(_pipelineWorkloadMap));
        // TODO: what if routeId is not continuous
        int nextRouteId = _pipelineToInstanceMap.get(pipeline).size();
        for (InstanceTopicPartitionHolder itph : _pipelineToInstanceMap.get(pipeline)) {
            if (itph.getTotalNumPartitions() > _maxNumPartitionsPerRoute) {
                LOGGER.info(
                        "Checking route {} with controller {} and topics {} since it exceeds maxNumPartitionsPerRoute {}",
                        itph.getRouteString(), itph.getInstanceName(), itph.getServingTopicPartitionSet(),
                        _maxNumPartitionsPerRoute);
                while (itph.getTotalNumPartitions() > _maxNumPartitionsPerRoute) {
                    // Only one topic left, do nothing
                    if (itph.getNumServingTopicPartitions() == 1) {
                        LOGGER.info("Only one topic {} in route {}, do nothing",
                                itph.getServingTopicPartitionSet().iterator().next(), itph.getRouteString());
                        break;
                    }

                    // Get the topic with the largest number of partitions
                    TopicPartition tpToMove = new TopicPartition("tmp", -1);
                    for (TopicPartition tp : itph.getServingTopicPartitionSet()) {
                        if (tp.getPartition() > tpToMove.getPartition()) {
                            tpToMove = tp;
                        }
                    }

                    // If the existing lightest route cannot fit the largest topic to move, create a new route
                    if (newItphQueue.isEmpty() || newItphQueue.peek().getTotalNumPartitions()
                            + tpToMove.getPartition() > _initMaxNumPartitionsPerRoute) {
                        try {
                            InstanceTopicPartitionHolder newHolder = createNewRoute(pipeline, nextRouteId);

                            _helixAdmin.setResourceIdealState(_helixClusterName, tpToMove.getTopic(),
                                    IdealStateBuilder.resetCustomIdealStateFor(
                                            _helixAdmin.getResourceIdealState(_helixClusterName,
                                                    tpToMove.getTopic()),
                                            tpToMove.getTopic(), itph.getRouteString(),
                                            newHolder.getRouteString(), newHolder.getInstanceName()));

                            itph.removeTopicPartition(tpToMove);
                            newHolder.addTopicPartition(tpToMove);
                            newItphQueue.add(newHolder);
                            nextRouteId++;

                        } catch (Exception e) {
                            LOGGER.error("Got exception when create a new route when rebalancing, abandon!", e);
                            throw new Exception(
                                    "Got exception when create a new route when rebalancing, abandon!", e);
                        }
                    } else {
                        InstanceTopicPartitionHolder newHolder = newItphQueue.poll();

                        _helixAdmin.setResourceIdealState(_helixClusterName, tpToMove.getTopic(),
                                IdealStateBuilder.resetCustomIdealStateFor(
                                        _helixAdmin.getResourceIdealState(_helixClusterName,
                                                tpToMove.getTopic()),
                                        tpToMove.getTopic(), itph.getRouteString(), newHolder.getRouteString(),
                                        newHolder.getInstanceName()));
                        itph.removeTopicPartition(tpToMove);
                        newHolder.addTopicPartition(tpToMove);
                        newItphQueue.add(newHolder);
                    }
                }
            }
            newItphQueue.add(itph);
        }

        // After moving topics, scale workers based on workload
        int rescaleFailedCount = 0;
        for (InstanceTopicPartitionHolder itph : newItphQueue) {
            oldTotalNumWorker += itph.getWorkerSet().size();
            String routeString = itph.getRouteString();
            int initWorkerCount = _initMaxNumWorkersPerRoute;
            if (_routeWorkerOverrides.containsKey(routeString)
                    && _routeWorkerOverrides.get(routeString) > initWorkerCount) {
                initWorkerCount = _routeWorkerOverrides.get(routeString);
            }

            String hostname = getHostname(itph.getInstanceName());
            try {
                String result = HttpClientUtils.getData(_httpClient, _requestConfig, hostname, _controllerPort,
                        "/admin/workloadinfo");
                ControllerWorkloadInfo workloadInfo = JSONObject.parseObject(result,
                        ControllerWorkloadInfo.class);

                if (workloadInfo != null && workloadInfo.getNumOfExpectedWorkers() != 0) {
                    // dereference workloadInfo only after the null check above
                    TopicWorkload totalWorkload = workloadInfo.getTopicWorkload();
                    _pipelineWorkloadMap.put(itph.getRouteString(), totalWorkload);
                    int expectedNumWorkers = workloadInfo.getNumOfExpectedWorkers();
                    LOGGER.info("Current {} workers in route {}, expect {} workers", itph.getWorkerSet().size(),
                            itph.getRouteString(), expectedNumWorkers);
                    int actualExpectedNumWorkers = getActualExpectedNumWorkers(expectedNumWorkers,
                            initWorkerCount);
                    LOGGER.info("Current {} workers in route {}, actual expect {} workers",
                            itph.getWorkerSet().size(), itph.getRouteString(), actualExpectedNumWorkers);

                    if (actualExpectedNumWorkers > itph.getWorkerSet().size()) {
                        LOGGER.info("Current {} workers in route {}, actual expect {} workers, add {} workers",
                                itph.getWorkerSet().size(), itph.getRouteString(), actualExpectedNumWorkers,
                                actualExpectedNumWorkers - itph.getWorkerSet().size());
                        // TODO: handle exception
                        _workerHelixManager.addWorkersToMirrorMaker(itph, itph.getRoute().getTopic(),
                                itph.getRoute().getPartition(),
                                actualExpectedNumWorkers - itph.getWorkerSet().size());
                    }

                    if (actualExpectedNumWorkers < itph.getWorkerSet().size()) {
                        LOGGER.info(
                                "Current {} workers in route {}, actual expect {} workers, remove {} workers",
                                itph.getWorkerSet().size(), itph.getRouteString(), actualExpectedNumWorkers,
                                itph.getWorkerSet().size() - actualExpectedNumWorkers);
                        // TODO: handle exception
                        _workerHelixManager.removeWorkersToMirrorMaker(itph, itph.getRoute().getTopic(),
                                itph.getRoute().getPartition(),
                                itph.getWorkerSet().size() - actualExpectedNumWorkers);
                    }
                    newTotalNumWorker += actualExpectedNumWorkers;
                } else {
                    LOGGER.warn("Get workload on {} for route: {} returns 0. No change on number of workers",
                            hostname, itph.getRouteString());
                    newTotalNumWorker += itph.getWorkerSet().size();
                    rescaleFailedCount++;
                }
            } catch (Exception e) {
                rescaleFailedCount++;
                LOGGER.error(String.format(
                        "Get workload error when connecting to %s for route %s. No change on number of workers",
                        hostname, itph.getRouteString()), e);
                newTotalNumWorker += itph.getWorkerSet().size();
            }
        }
        _pipelineToInstanceMap.put(pipeline, newItphQueue);
        _rescaleFailedCount.inc(rescaleFailedCount - _rescaleFailedCount.getCount());
    }
    LOGGER.info("oldTotalNumWorker: {}, newTotalNumWorker: {}", oldTotalNumWorker, newTotalNumWorker);
}

From source file:org.apache.tez.dag.app.rm.DagAwareYarnTaskScheduler.java

@GuardedBy("this")
@Nullable
private Collection<ContainerId> maybePreempt(Resource freeResources) {
    if (preemptionPercentage == 0
            || numHeartbeats - lastPreemptionHeartbeat < numHeartbeatsBetweenPreemptions) {
        return null;
    }
    if (!requestTracker.isPreemptionDeadlineExpired()
            && requestTracker.fitsHighestPriorityRequest(freeResources)) {
        if (numHeartbeats % 50 == 1) {
            LOG.info("Highest priority request fits in free resources {}", freeResources);
        }
        return null;
    }

    int numIdleContainers = idleTracker.getNumContainers();
    if (numIdleContainers > 0) {
        if (numHeartbeats % 50 == 1) {
            LOG.info("Avoiding preemption since there are {} idle containers", numIdleContainers);
        }
        return null;
    }

    BitSet blocked = requestTracker.createVertexBlockedSet();
    if (!blocked.intersects(assignedVertices)) {
        if (numHeartbeats % 50 == 1) {
            LOG.info(
                    "Avoiding preemption since there are no descendants of the highest priority requests running");
        }
        return null;
    }

    Resource preemptLeft = requestTracker.getAmountToPreempt(preemptionPercentage);
    if (!resourceCalculator.anyAvailable(preemptLeft)) {
        if (numHeartbeats % 50 == 1) {
            LOG.info("Avoiding preemption since amount to preempt is {}", preemptLeft);
        }
        return null;
    }

    PriorityQueue<HeldContainer> candidates = new PriorityQueue<>(11, PREEMPT_ORDER_COMPARATOR);
    blocked.and(assignedVertices);
    for (int i = blocked.nextSetBit(0); i >= 0; i = blocked.nextSetBit(i + 1)) {
        Collection<HeldContainer> containers = vertexAssignments.get(i);
        if (containers != null) {
            candidates.addAll(containers);
        } else {
            LOG.error("Vertex {} in assignedVertices but no assignments?", i);
        }
    }

    ArrayList<ContainerId> preemptedContainers = new ArrayList<>();
    HeldContainer hc;
    while ((hc = candidates.poll()) != null) {
        LOG.info("Preempting container {} currently allocated to task {}", hc.getId(), hc.getAssignedTask());
        preemptedContainers.add(hc.getId());
        resourceCalculator.deductFrom(preemptLeft, hc.getCapability());
        if (!resourceCalculator.anyAvailable(preemptLeft)) {
            break;
        }
    }

    return preemptedContainers;
}
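
The preemption loop above is a budgeted-selection idiom: candidates are collected into a comparator-ordered queue and polled until the amount left to preempt is exhausted. A stripped-down, hypothetical version of the same pattern:

import java.util.Comparator;
import java.util.PriorityQueue;

public class BudgetedSelectDemo {
    public static void main(String[] args) {
        int budget = 5; // e.g. the amount of resources still to preempt
        int[] costs = { 3, 1, 4, 1, 5 };
        // poll the cheapest candidates first (stand-in for PREEMPT_ORDER_COMPARATOR)
        PriorityQueue<Integer> candidates = new PriorityQueue<Integer>(11, Comparator.<Integer>naturalOrder());
        for (int c : costs) {
            candidates.add(c);
        }
        Integer c;
        while (budget > 0 && (c = candidates.poll()) != null) {
            System.out.println("selected candidate with cost " + c);
            budget -= c; // stop once the budget is covered
        }
    }
}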

From source file:org.apache.accumulo.examples.wikisearch.iterator.BooleanLogicIterator.java

private void splitLeaves(BooleanLogicTreeNode node) {
    if (log.isDebugEnabled()) {
        log.debug("BoolLogic: splitLeaves()");
    }
    positives = new PriorityQueue<BooleanLogicTreeNode>(10, new BooleanLogicTreeNodeComparator());
    // positives = new ArrayList<BooleanLogicTreeNodeJexl>();
    negatives.clear();

    Enumeration<?> dfe = node.depthFirstEnumeration();
    while (dfe.hasMoreElements()) {
        BooleanLogicTreeNode elem = (BooleanLogicTreeNode) dfe.nextElement();

        if (elem.isLeaf()) {
            if (elem.isNegated()) {
                negatives.add(elem);
            } else {
                positives.add(elem);
            }
        }
    }
}

From source file:org.calrissian.accumulorecipes.commons.iterators.BooleanLogicIterator.java

/**
 * Extracts positive and negative leaves
 * @param node
 */
private void splitLeaves(BooleanLogicTreeNode node) {
    if (log.isDebugEnabled()) {
        log.debug("BoolLogic: splitLeaves()");
    }
    positives = new PriorityQueue<BooleanLogicTreeNode>(10, new BooleanLogicTreeNodeComparator());
    // positives = new ArrayList<BooleanLogicTreeNodeJexl>();
    negatives.clear();

    Enumeration<?> dfe = node.depthFirstEnumeration();
    while (dfe.hasMoreElements()) {
        BooleanLogicTreeNode elem = (BooleanLogicTreeNode) dfe.nextElement();

        if (elem.isLeaf()) {
            if (elem.isNegated()) {
                negatives.add(elem);
            } else {
                positives.add(elem);
            }
        }
    }
}

From source file:com.uber.stream.kafka.mirrormaker.manager.core.ControllerHelixManager.java

public InstanceTopicPartitionHolder createNewRoute(String pipeline, int routeId) throws Exception {
    if (_availableControllerList.isEmpty()) {
        LOGGER.info("No available controller!");
        throw new Exception("No available controller!");
    }

    if (_workerHelixManager.getAvailableWorkerList().isEmpty()) {
        LOGGER.info("No available worker!");
        throw new Exception("No available worker!");
    }

    String instanceName = _availableControllerList.get(0);
    InstanceTopicPartitionHolder instance = new InstanceTopicPartitionHolder(instanceName,
            new TopicPartition(pipeline, routeId));
    if (!isPipelineExisted(pipeline)) {
        setEmptyResourceConfig(pipeline);
        _helixAdmin.addResource(_helixClusterName, pipeline,
                IdealStateBuilder.buildCustomIdealStateFor(pipeline, String.valueOf(routeId), instance));
    } else {
        LOGGER.info("Expanding pipeline {} new partition {} to instance {}", pipeline, routeId, instance);
        _helixAdmin.setResourceIdealState(_helixClusterName, pipeline,
                IdealStateBuilder.expandCustomIdealStateFor(
                        _helixAdmin.getResourceIdealState(_helixClusterName, pipeline), pipeline,
                        String.valueOf(routeId), instance));
        LOGGER.info("New IdealState: {}", _helixAdmin.getResourceIdealState(_helixClusterName, pipeline));
    }

    String[] srcDst = pipeline.split(SEPARATOR);
    String controllerWokerHelixClusterName = "controller-worker-" + srcDst[1] + "-" + srcDst[2] + "-" + routeId;
    HelixManager spectator = HelixManagerFactory.getZKHelixManager(controllerWokerHelixClusterName, _instanceId,
            InstanceType.SPECTATOR, _helixZkURL);

    long ts1 = System.currentTimeMillis();
    while (true) {
        try {
            spectator.connect();
            break;
        } catch (Exception e) {
            // ignore and retry until the timeout below
        }

        if (System.currentTimeMillis() - ts1 > 60000) {
            throw new Exception(String.format("Controller %s failed to set up new route cluster %s!",
                    instanceName, controllerWokerHelixClusterName));
        }
        Thread.sleep(1000);
    }

    _availableControllerList.remove(instanceName);
    _pipelineToInstanceMap.put(pipeline,
            new PriorityQueue<>(1, InstanceTopicPartitionHolder.totalWorkloadComparator(_pipelineWorkloadMap)));
    _pipelineToInstanceMap.get(pipeline).add(instance);
    _assignedControllerCount.inc();
    _workerHelixManager.addTopicToMirrorMaker(instance, pipeline, routeId);

    // register metrics
    String routeString = srcDst[1] + "-" + srcDst[2] + "-" + routeId;
    maybeRegisterMetrics(routeString);

    spectator.disconnect();
    return instance;
}