Example usage for java.util.concurrent ConcurrentLinkedQueue add

List of usage examples for java.util.concurrent ConcurrentLinkedQueue add

Introduction

On this page you can find example usage for java.util.concurrent ConcurrentLinkedQueue.add.

Prototype

public boolean add(E e) 

Document

Inserts the specified element at the tail of this queue.
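
Before the full-size examples below, here is a minimal self-contained sketch of the call (the class name and element values are illustrative):

import java.util.concurrent.ConcurrentLinkedQueue;

public class AddExample {
    public static void main(String[] args) {
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
        // The queue is unbounded, so add(e) always returns true and,
        // unlike capacity-bounded queues, never throws IllegalStateException.
        boolean added = queue.add("first");
        queue.add("second");
        System.out.println(added);        // true
        System.out.println(queue.peek()); // first (the head of the queue)
    }
}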

Usage

From source file:org.restcomm.app.qoslib.Services.Events.EventUploader.java

/**
 * Loads event requests from storage and adds them to the queue.
 */
protected void loadEventsQueue() {

    ConcurrentLinkedQueue<EventDataEnvelope> eventQueue = owner.getEventManager().getEventQueue();
    if (eventQueue == null) {
        eventQueue = new ConcurrentLinkedQueue<EventDataEnvelope>();
        owner.getEventManager().setEventQueue(eventQueue);
    } else
        return;

    Gson gson = new Gson();
    SharedPreferences secureSettings = MainService.getSecurePreferences(owner);
    if (secureSettings.contains(PreferenceKeys.Miscellaneous.EVENTS_QUEUE)) {
        try {
            String strQueue = secureSettings.getString(PreferenceKeys.Miscellaneous.EVENTS_QUEUE, "");
            //LoggerUtil.logToFile(LoggerUtil.Level.DEBUG, TAG, "loadQueue", strQueue);
            if (strQueue.length() < 100)
                return;
            JSONArray jsonqueue = new JSONArray(strQueue);
            for (int i = 0; i < jsonqueue.length(); i++) {
                JSONObject jsonRequest = jsonqueue.getJSONObject(i);
                //if(jsonRequest.getString("type").equals(EventDataEnvelope.TAG)) 
                {
                    EventDataEnvelope request = gson.fromJson(jsonRequest.toString(), EventDataEnvelope.class);
                    //EventDataEnvelope request = new EventDataEnvelope(jsonRequest);
                    eventQueue.add(request);
                }
            }
            // remove the oldest events until the queue size is below 300
            while (eventQueue.size() > 300)
                eventQueue.poll();
        } catch (JSONException e) {
            LoggerUtil.logToFile(LoggerUtil.Level.ERROR, TAG, "loadEventsQueue",
                    "JSONException loading events from storage", e);
        } catch (Exception e) {
            LoggerUtil.logToFile(LoggerUtil.Level.ERROR, TAG, "loadEventsQueue",
                    "Exception loading events from storage", e);
        }
    }

}
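
The example above also shows a common companion idiom: ConcurrentLinkedQueue has no capacity bound, so the caller trims it by hand, polling old entries from the head after loading. A minimal sketch of that pattern, assuming a simple String payload (all names are illustrative; the cap of 300 matches the code above):

import java.util.concurrent.ConcurrentLinkedQueue;

public class CappedEventQueue {
    private static final int MAX_EVENTS = 300;

    private final ConcurrentLinkedQueue<String> events = new ConcurrentLinkedQueue<>();

    void record(String event) {
        events.add(event); // enqueue at the tail
        // Drop the oldest entries from the head until we are back under the cap.
        // size() is O(n) for this class, so this is a best-effort, approximate
        // bound rather than a hard capacity limit.
        while (events.size() > MAX_EVENTS) {
            events.poll();
        }
    }
}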

From source file:io.hops.util.DBUtility.java

public static RMNode processHopRMNodeCompsForScheduler(RMNodeComps hopRMNodeComps, RMContext rmContext)
        throws InvalidProtocolBufferException {
    org.apache.hadoop.yarn.api.records.NodeId nodeId;
    RMNode rmNode = null;
    if (hopRMNodeComps != null) {
        nodeId = ConverterUtils.toNodeId(hopRMNodeComps.getRMNodeId());
        rmNode = rmContext.getRMNodes().get(nodeId);

        // The first time we receive the RMNode; this happens when the node registers
        if (rmNode == null) {
            // Retrieve heartbeat
            boolean nextHeartbeat = true;

            // Create Resource
            Resource resource = null;
            if (hopRMNodeComps.getHopResource() != null) {
                resource = Resource.newInstance(hopRMNodeComps.getHopResource().getMemory(),
                        hopRMNodeComps.getHopResource().getVirtualCores());
            } else {
                LOG.error("ResourceOption should not be null");
                resource = Resource.newInstance(0, 0);
            }
            /*rmNode = new RMNodeImplDist(nodeId, rmContext, hopRMNodeComps.getHopRMNode().getHostName(),
                    hopRMNodeComps.getHopRMNode().getCommandPort(),
                    hopRMNodeComps.getHopRMNode().getHttpPort(),
                    ResourceTrackerService.resolve(hopRMNodeComps.getHopRMNode().getHostName()),
                    resourceOption,
                    hopRMNodeComps.getHopRMNode().getNodemanagerVersion(),
                    hopRMNodeComps.getHopRMNode().getHealthReport(),
                    hopRMNodeComps.getHopRMNode().getLastHealthReportTime(),
                    nextHeartbeat);*/

            rmNode = new RMNodeImplDist(nodeId, rmContext, hopRMNodeComps.getHopRMNode().getHostName(),
                    hopRMNodeComps.getHopRMNode().getCommandPort(), hopRMNodeComps.getHopRMNode().getHttpPort(),
                    ResourceTrackerService.resolve(hopRMNodeComps.getHopRMNode().getHostName()), resource,
                    hopRMNodeComps.getHopRMNode().getNodemanagerVersion());

            // Force Java to put the host in cache
            NetUtils.createSocketAddrForHost(nodeId.getHost(), nodeId.getPort());
        }

        // Update the RMNode
        if (hopRMNodeComps.getHopRMNode() != null) {
            ((RMNodeImplDist) rmNode).setState(hopRMNodeComps.getHopRMNode().getCurrentState());
        }
        if (hopRMNodeComps.getHopUpdatedContainerInfo() != null) {
            List<io.hops.metadata.yarn.entity.UpdatedContainerInfo> hopUpdatedContainerInfoList = hopRMNodeComps
                    .getHopUpdatedContainerInfo();

            if (hopUpdatedContainerInfoList != null && !hopUpdatedContainerInfoList.isEmpty()) {
                ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo> updatedContainerInfoQueue = new ConcurrentLinkedQueue<>();

                Map<Integer, org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo> ucis = new HashMap<>();
                LOG.debug(hopRMNodeComps.getRMNodeId() + " getting ucis " + hopUpdatedContainerInfoList.size()
                        + " pending event " + hopRMNodeComps.getPendingEvent().getId().getEventId());

                for (io.hops.metadata.yarn.entity.UpdatedContainerInfo hopUCI : hopUpdatedContainerInfoList) {
                    if (!ucis.containsKey(hopUCI.getUpdatedContainerInfoId())) {
                        ucis.put(hopUCI.getUpdatedContainerInfoId(),
                                new org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo(
                                        new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>(),
                                        new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>(),
                                        hopUCI.getUpdatedContainerInfoId()));
                    }

                    ContainerId cid = ConverterUtils.toContainerId(hopUCI.getContainerId());
                    io.hops.metadata.yarn.entity.ContainerStatus hopContainerStatus = hopRMNodeComps
                            .getHopContainersStatusMap().get(hopUCI.getContainerId());

                    org.apache.hadoop.yarn.api.records.ContainerStatus conStatus = org.apache.hadoop.yarn.api.records.ContainerStatus
                            .newInstance(cid, ContainerState.valueOf(hopContainerStatus.getState()),
                                    hopContainerStatus.getDiagnostics(), hopContainerStatus.getExitstatus());

                    // Check ContainerStatus state to add it in the appropriate list
                    if (conStatus != null) {
                        LOG.debug("add uci for container " + conStatus.getContainerId() + " status "
                                + conStatus.getState());
                        if (conStatus.getState().equals(ContainerState.RUNNING)) {
                            ucis.get(hopUCI.getUpdatedContainerInfoId()).getNewlyLaunchedContainers()
                                    .add(conStatus);
                        } else if (conStatus.getState().equals(ContainerState.COMPLETE)) {
                            ucis.get(hopUCI.getUpdatedContainerInfoId()).getCompletedContainers()
                                    .add(conStatus);
                        }
                    }
                }

                for (org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo uci : ucis
                        .values()) {
                    updatedContainerInfoQueue.add(uci);
                }

                ((RMNodeImplDist) rmNode).setUpdatedContainerInfo(updatedContainerInfoQueue);
            } else {
                LOG.debug(hopRMNodeComps.getRMNodeId()
                        + " hopUpdatedContainerInfoList = null || hopUpdatedContainerInfoList.isEmpty() "
                        + hopRMNodeComps.getPendingEvent().getId().getEventId());
            }
        } else {
            LOG.debug(hopRMNodeComps.getRMNodeId() + " hopRMNodeFull.getHopUpdatedContainerInfo()=null "
                    + hopRMNodeComps.getPendingEvent().getId().getEventId());
        }
    }

    return rmNode;
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.PendingEventRetrieval.java

protected RMNode convertHopToRMNode(RMNodeComps hopRMNodeFull) throws InvalidProtocolBufferException {
    RMNode rmNode = null;
    if (hopRMNodeFull != null) {
        NodeId nodeId = ConverterUtils.toNodeId(hopRMNodeFull.getHopRMNode().getNodeId());
        //Retrieve and Initialize NodeBase for RMNode
        Node node = null;
        if (hopRMNodeFull.getHopRMNode().getNodeId() != null) {
            node = new NodeBase(hopRMNodeFull.getHopNode().getName(), hopRMNodeFull.getHopNode().getLocation());
            if (hopRMNodeFull.getHopNode().getParent() != null) {
                node.setParent(new NodeBase(hopRMNodeFull.getHopNode().getParent()));
            }
            node.setLevel(hopRMNodeFull.getHopNode().getLevel());
        }
        //Retrieve nextHeartbeat
        boolean nextHeartbeat = hopRMNodeFull.getHopNextHeartbeat().isNextheartbeat();
        //Create Resource
        ResourceOption resourceOption = null;
        if (hopRMNodeFull.getHopResource() != null) {
            resourceOption = ResourceOption.newInstance(
                    Resource.newInstance(hopRMNodeFull.getHopResource().getMemory(),
                            hopRMNodeFull.getHopResource().getVirtualCores()),
                    hopRMNodeFull.getHopRMNode().getOvercommittimeout());
        }
        //Create RMNode from HopRMNode
        rmNode = new RMNodeImpl(nodeId, rmContext, hopRMNodeFull.getHopRMNode().getHostName(),
                hopRMNodeFull.getHopRMNode().getCommandPort(), hopRMNodeFull.getHopRMNode().getHttpPort(), node,
                resourceOption, hopRMNodeFull.getHopRMNode().getNodemanagerVersion(),
                hopRMNodeFull.getHopRMNode().getHealthReport(),
                hopRMNodeFull.getHopRMNode().getLastHealthReportTime(), nextHeartbeat,
                conf.getBoolean(YarnConfiguration.HOPS_DISTRIBUTED_RT_ENABLED,
                        YarnConfiguration.DEFAULT_HOPS_DISTRIBUTED_RT_ENABLED));

        ((RMNodeImpl) rmNode).setState(hopRMNodeFull.getHopRMNode().getCurrentState());
        // *** Recover maps/lists of RMNode ***
        //1. Recover JustLaunchedContainers
        List<JustLaunchedContainers> hopJlcList = hopRMNodeFull.getHopJustLaunchedContainers();
        if (hopJlcList != null && !hopJlcList.isEmpty()) {
            Map<org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.ContainerStatus> justLaunchedContainers = new HashMap<org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.ContainerStatus>();
            for (JustLaunchedContainers hop : hopJlcList) {
                //Create ContainerId
                org.apache.hadoop.yarn.api.records.ContainerId cid = ConverterUtils
                        .toContainerId(hop.getContainerId());
                //Find and create ContainerStatus
                ContainerStatus hopContainerStatus = hopRMNodeFull.getHopContainersStatus()
                        .get(hop.getContainerId());
                org.apache.hadoop.yarn.api.records.ContainerStatus conStatus = org.apache.hadoop.yarn.api.records.ContainerStatus
                        .newInstance(cid, ContainerState.valueOf(hopContainerStatus.getState()),
                                hopContainerStatus.getDiagnostics(), hopContainerStatus.getExitstatus());
                justLaunchedContainers.put(cid, conStatus);
            }
            ((RMNodeImpl) rmNode).setJustLaunchedContainers(justLaunchedContainers);
        }
        //2. Return ContainerIdToClean
        List<ContainerId> cidToCleanList = hopRMNodeFull.getHopContainerIdsToClean();
        if (cidToCleanList != null && !cidToCleanList.isEmpty()) {
            Set<org.apache.hadoop.yarn.api.records.ContainerId> containersToClean = new TreeSet<org.apache.hadoop.yarn.api.records.ContainerId>();
            for (ContainerId hop : cidToCleanList) {
                //Create ContainerId
                containersToClean.add(ConverterUtils.toContainerId(hop.getContainerId()));
            }
            ((RMNodeImpl) rmNode).setContainersToClean(containersToClean);
        }
        //3. Finished Applications
        List<FinishedApplications> hopFinishedAppsList = hopRMNodeFull.getHopFinishedApplications();
        if (hopFinishedAppsList != null && !hopFinishedAppsList.isEmpty()) {
            List<ApplicationId> finishedApps = new ArrayList<ApplicationId>();
            for (FinishedApplications hop : hopFinishedAppsList) {
                finishedApps.add(ConverterUtils.toApplicationId(hop.getApplicationId()));
            }
            ((RMNodeImpl) rmNode).setFinishedApplications(finishedApps);
        }
        //4. UpdadedContainerInfo
        //Retrieve all UpdatedContainerInfo entries for this particular RMNode
        Map<Integer, List<UpdatedContainerInfo>> hopUpdatedContainerInfoMap = hopRMNodeFull
                .getHopUpdatedContainerInfo();
        if (hopUpdatedContainerInfoMap != null && !hopUpdatedContainerInfoMap.isEmpty()) {
            ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo> updatedContainerInfoQueue = new ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo>();
            for (int uciId : hopUpdatedContainerInfoMap.keySet()) {
                for (UpdatedContainerInfo hopUCI : hopUpdatedContainerInfoMap.get(uciId)) {
                    List<org.apache.hadoop.yarn.api.records.ContainerStatus> newlyAllocated = new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>();
                    List<org.apache.hadoop.yarn.api.records.ContainerStatus> completed = new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>();
                    //Retrieve containerstatus entries for the particular updatedcontainerinfo
                    org.apache.hadoop.yarn.api.records.ContainerId cid = ConverterUtils
                            .toContainerId(hopUCI.getContainerId());
                    ContainerStatus hopContainerStatus = hopRMNodeFull.getHopContainersStatus()
                            .get(hopUCI.getContainerId());

                    org.apache.hadoop.yarn.api.records.ContainerStatus conStatus = org.apache.hadoop.yarn.api.records.ContainerStatus
                            .newInstance(cid, ContainerState.valueOf(hopContainerStatus.getState()),
                                    hopContainerStatus.getDiagnostics(), hopContainerStatus.getExitstatus());
                    //Check ContainerStatus state to add it to appropriate list
                    if (conStatus != null) {
                        if (conStatus.getState().toString()
                                .equals(TablesDef.ContainerStatusTableDef.STATE_RUNNING)) {
                            newlyAllocated.add(conStatus);
                        } else if (conStatus.getState().toString()
                                .equals(TablesDef.ContainerStatusTableDef.STATE_COMPLETED)) {
                            completed.add(conStatus);
                        }
                    }
                    org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo uci = new org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo(
                            newlyAllocated, completed, hopUCI.getUpdatedContainerInfoId());
                    updatedContainerInfoQueue.add(uci);
                    ((RMNodeImpl) rmNode).setUpdatedContainerInfo(updatedContainerInfoQueue);
                    //Update uci counter
                    ((RMNodeImpl) rmNode).setUpdatedContainerInfoId(hopRMNodeFull.getHopRMNode().getUciId());
                }
            }
        }
        //5. Retrieve latestNodeHeartBeatResponse
        NodeHBResponse hopHB = hopRMNodeFull.getHopNodeHBResponse();
        if (hopHB != null && hopHB.getResponse() != null) {
            NodeHeartbeatResponse hb = new NodeHeartbeatResponsePBImpl(
                    YarnServerCommonServiceProtos.NodeHeartbeatResponseProto.parseFrom(hopHB.getResponse()));
            ((RMNodeImpl) rmNode).setLatestNodeHBResponse(hb);
        }
    }
    return rmNode;
}

From source file:dendroscope.autumn.hybridnumber.ComputeHybridNumber.java

/**
 * recursively computes the hybrid number
 *
 * @param root1
 * @param root2
 * @param isReduced
 * @param previousHybrid
 * @param retry
 * @param topLevel
 * @param scoreAbove
 * @param additionalAbove
 * @return hybrid number
 */
private int computeHybridNumberRec(final Root root1, final Root root2, boolean isReduced,
        Integer previousHybrid, BitSet retry, final boolean topLevel, final int scoreAbove,
        final ValuesList additionalAbove) throws IOException, CanceledException {
    if (System.currentTimeMillis() > nextTime) {
        synchronized (progressListener) {
            nextTime += waitTime;
            waitTime *= 1.5;
            progressListener.incrementProgress();
        }
    } else
        progressListener.checkForCancel();

    // System.err.println("computeHybridNumberRec: tree1=" + Basic.toString(root1.getTaxa()) + " tree2=" + Basic.toString(root2.getTaxa()));
    // root1.reorderSubTree();
    //  root2.reorderSubTree();
    if (checking) {
        root1.checkTree();
        root2.checkTree();
    }

    BitSet taxa = root1.getTaxa();

    String key = root1.toStringTreeSparse() + root2.toStringTreeSparse();
    // System.err.println("Key: "+key);
    Integer value;
    synchronized (lookupTable) {
        value = (Integer) lookupTable.get(key);
        if (value != null)
            return value;
    }

    if (!root2.getTaxa().equals(taxa))
        throw new RuntimeException("Unequal taxon sets: X=" + Basic.toString(root1.getTaxa()) + " vs "
                + Basic.toString(root2.getTaxa()));
    if (!isReduced) {
        switch (SubtreeReduction.apply(root1, root2, null)) {
        case ISOMORPHIC:
            synchronized (lookupTable) {
                lookupTable.put(key, 0);
            }
            if (topLevel) {
                bestScore.lowerTo(0);
                progressListener.setSubtask("Best score: " + bestScore);
            }
            return 0; // two trees are isomorphic, no hybrid node needed
        case REDUCED: // a reduction was performed, cannot maintain lexicographical ordering in removal loop below
            previousHybrid = null;
            break;
        case IRREDUCIBLE:
            break;
        }

        Single<Integer> placeHolderTaxa = new Single<Integer>();
        final Pair<Root, Root> clusterTrees = ClusterReduction.apply(root1, root2, placeHolderTaxa);
        final boolean retryTop = false && (previousHybrid != null && placeHolderTaxa.get() < previousHybrid);
        // if the taxa involved in the cluster reduction come before the previously removed hybrid, do full retry
        // retryTop doesn't work
        final BitSet fRetry = retry;

        if (clusterTrees != null) // will perform cluster-reduction
        {
            final Value score1 = new Value(0);
            final Value score2 = new Value(1); // because the cluster could not be reduced using a subtree reduction, we can assume that we will need one reticulation for this

            final boolean verbose = ProgramProperties.get("verbose-HL-parallel", false);
            if (verbose)
                System.err.println("Starting parallel loop");

            final CountDownLatch countDownLatch = new CountDownLatch(2);
            final Integer fPrevious = previousHybrid;

            // setup task:
            final Task task1 = new Task(); // first of two cluster-reduction tasks
            task1.setRunnable(new Runnable() {
                public void run() {
                    try {
                        if (verbose) {
                            System.err.println("Launching thread on cluster-reduction");
                            System.err
                                    .println("Active threads " + scheduledThreadPoolExecutor.getActiveCount());
                        }
                        final ValuesList additionalAbove1 = additionalAbove.copyWithAdditionalElement(score2);
                        if (scoreAbove + additionalAbove1.sum() < bestScore.get()) {
                            int h = computeHybridNumberRec(root1, root2, false, fPrevious, fRetry, false,
                                    scoreAbove, additionalAbove1);
                            score1.set(h);
                        } else {
                            score1.set(LARGE);
                        }
                        additionalAbove1.clear();
                    } catch (Exception ex) {
                        while (countDownLatch.getCount() > 0)
                            countDownLatch.countDown();
                    }
                    countDownLatch.countDown();
                }
            });

            final Task task2 = new Task(); // second of two cluster-reduction tasks
            task2.setRunnable(new Runnable() {
                public void run() {
                    try {
                        if (verbose) {
                            System.err.println("Launching thread on cluster-reduction");
                            System.err
                                    .println("Active threads " + scheduledThreadPoolExecutor.getActiveCount());
                        }
                        final ValuesList additionalAbove2 = additionalAbove.copyWithAdditionalElement(score1);
                        if (scoreAbove + additionalAbove2.sum() < bestScore.get()) {
                            int h = computeHybridNumberRec(clusterTrees.getFirst(), clusterTrees.getSecond(),
                                    true, fPrevious, fRetry, false, scoreAbove, additionalAbove2);
                            score2.set(h);
                        } else {
                            score2.set(LARGE);
                        }
                        additionalAbove2.clear();
                    } catch (Exception ex) {
                        while (countDownLatch.getCount() > 0)
                            countDownLatch.countDown();
                    }
                    countDownLatch.countDown();
                }
            });

            // submit task1 to the executor and run task2 in the current thread
            scheduledThreadPoolExecutor.execute(task1);
            task2.run();
            task1.run(); // try to run task1 in the current thread if it hasn't started yet; if it is already running or has completed, this simply returns

            try {
                if (verbose)
                    System.err.println("waiting...");
                // wait until all tasks have completed
                countDownLatch.await();
                if (verbose)
                    System.err.println("done");
            } catch (InterruptedException e) {
                Basic.caught(e);
            }

            clusterTrees.getFirst().deleteSubTree();
            clusterTrees.getSecond().deleteSubTree();

            int total = scoreAbove + additionalAbove.sum() + score1.get() + score2.get();

            if (topLevel && (total < bestScore.get())) // score above will be zero, but put this here anyway to avoid confusion
            {
                bestScore.lowerTo(total);
                progressListener.setSubtask("Current best score: " + bestScore);
            }

            synchronized (lookupTable) {
                Integer old = (Integer) lookupTable.get(key);
                if (old == null || total < old)
                    lookupTable.put(key, total);
            }
            return score1.get() + score2.get();
        }
    }

    List<Root> leaves1 = root1.getAllLeaves();

    if (leaves1.size() <= 2) // try 2 rather than one...
    {
        return 0;
    }

    final boolean verbose = ProgramProperties.get("verbose-HL-parallel", false);
    if (verbose)
        System.err.println("Starting parallel loop");

    final CountDownLatch countDownLatch = new CountDownLatch(leaves1.size());

    final Value bestSubH = new Value(LARGE);

    // schedule all tasks to be performed
    final ConcurrentLinkedQueue<Task> queue = new ConcurrentLinkedQueue<Task>();

    for (Node leaf2remove : leaves1) {
        final BitSet taxa2remove = ((Root) leaf2remove).getTaxa();

        if (previousHybrid == null || previousHybrid < taxa2remove.nextSetBit(0)) {

            if (scoreAbove + additionalAbove.sum() + 1 >= bestScore.get())
                return LARGE; // other thread has found a better result, abort

            // setup task:
            final Task task = new Task();
            task.setRunnable(new Runnable() {
                public void run() {
                    try {
                        if (verbose) {
                            System.err.println("Launching thread on " + Basic.toString(taxa2remove));
                            System.err
                                    .println("Active threads " + scheduledThreadPoolExecutor.getActiveCount());
                        }
                        queue.remove(task);
                        if (scoreAbove + additionalAbove.sum() + 1 < bestScore.get()) {
                            Root tree1X = CopyWithTaxaRemoved.apply(root1, taxa2remove);
                            Root tree2X = CopyWithTaxaRemoved.apply(root2, taxa2remove);

                            Refine.apply(tree1X, tree2X);

                            int scoreBelow = computeHybridNumberRec(tree1X, tree2X, false,
                                    taxa2remove.nextSetBit(0), null, false, scoreAbove + 1, additionalAbove)
                                    + 1;

                            if (topLevel && scoreBelow < bestScore.get()) {
                                bestScore.lowerTo(scoreBelow);
                                progressListener.setSubtask("Current best score: " + bestScore);
                            }

                            synchronized (bestSubH) {
                                if (scoreBelow < bestSubH.get())
                                    bestSubH.set(scoreBelow);
                            }

                            tree1X.deleteSubTree();
                            tree2X.deleteSubTree();
                        }
                    } catch (Exception ex) {
                        while (countDownLatch.getCount() > 0)
                            countDownLatch.countDown();
                    }
                    countDownLatch.countDown();
                }
            });
            queue.add(task);
        } else // no task for this item, count down
        {
            countDownLatch.countDown();
            progressListener.checkForCancel();
        }
    }
    // grab one task for the current thread:
    Task taskForCurrentThread = queue.size() > 0 ? queue.poll() : null;
    // launch all others in the executor
    for (Task task : queue)
        scheduledThreadPoolExecutor.execute(task);

    // start a task in this thread
    if (taskForCurrentThread != null)
        taskForCurrentThread.run();

    // try to run other tasks from the queue. Note that any task that is already running will return immediately
    while (queue.size() > 0) {
        Task task = queue.poll();
        if (task != null)
            task.run();
    }
    try {
        if (verbose)
            System.err.println("waiting...");
        // wait until all tasks have completed
        countDownLatch.await();

        if (verbose)
            System.err.println("done");
    } catch (InterruptedException e) {
        Basic.caught(e);
        return LARGE;
    }
    // return the best value
    synchronized (lookupTable) {
        Integer old = (Integer) lookupTable.get(key);
        if (old == null || old > bestSubH.get())
            lookupTable.put(key, bestSubH.get());
    }
    return bestSubH.get();
}
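
The example above stages its work in a ConcurrentLinkedQueue before running it: each task is add()ed at the tail, one is polled off for the current thread, the rest go to an executor, and a CountDownLatch waits for everything to finish. A minimal sketch of that staging pattern (pool size and task bodies are illustrative):

import java.util.concurrent.*;

public class StageAndRun {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        int n = 8;
        final CountDownLatch done = new CountDownLatch(n);
        ConcurrentLinkedQueue<Runnable> queue = new ConcurrentLinkedQueue<>();

        for (int i = 0; i < n; i++) {
            final int id = i;
            queue.add(() -> {            // stage the task at the tail
                System.out.println("task " + id + " on " + Thread.currentThread().getName());
                done.countDown();
            });
        }

        Runnable mine = queue.poll();    // keep one task for the current thread
        for (Runnable task : queue) {    // hand the rest to the executor
            executor.execute(task);
        }
        if (mine != null) {
            mine.run();
        }
        done.await();                    // wait until every task has finished
        executor.shutdown();
    }
}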

From source file:io.hops.metadata.util.RMUtilities.java

public static org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode getRMNode(final String id,
        final RMContext context, final Configuration conf) throws IOException {
    LightWeightRequestHandler getRMNodeHandler = new LightWeightRequestHandler(YARNOperationType.TEST) {
        @Override
        public Object performTask() throws IOException {
            connector.beginTransaction();
            connector.readLock();
            org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode rmNode = null;
            RMNodeDataAccess rmnodeDA = (RMNodeDataAccess) RMStorageFactory
                    .getDataAccess(RMNodeDataAccess.class);
            RMNode hopRMNode = (RMNode) rmnodeDA.findByNodeId(id);
            if (hopRMNode != null) {
                ResourceDataAccess resDA = (ResourceDataAccess) RMStorageFactory
                        .getDataAccess(ResourceDataAccess.class);
                NodeDataAccess nodeDA = (NodeDataAccess) RMStorageFactory.getDataAccess(NodeDataAccess.class);
                //Retrieve resource of RMNode
                Resource res = (Resource) resDA.findEntry(hopRMNode.getNodeId(), Resource.TOTAL_CAPABILITY,
                        Resource.RMNODE);

                NodeId nodeId = ConverterUtils.toNodeId(id);
                //Retrieve and Initialize NodeBase for RMNode
                org.apache.hadoop.net.Node node = null;
                if (hopRMNode.getNodeId() != null) {
                    Node hopNode = (Node) nodeDA.findById(hopRMNode.getNodeId());
                    node = new NodeBase(hopNode.getName(), hopNode.getLocation());
                    if (hopNode.getParent() != null) {
                        node.setParent(new NodeBase(hopNode.getParent()));
                    }
                    node.setLevel(hopNode.getLevel());
                }
                //Retrieve nextHeartbeat
                NextHeartbeatDataAccess nextHBDA = (NextHeartbeatDataAccess) RMStorageFactory
                        .getDataAccess(NextHeartbeatDataAccess.class);
                boolean nextHeartbeat = nextHBDA.findEntry(id);
                //Create Resource
                ResourceOption resourceOption = null;
                if (res != null) {
                    resourceOption = ResourceOption.newInstance(org.apache.hadoop.yarn.api.records.Resource
                            .newInstance(res.getMemory(), res.getVirtualCores()),
                            hopRMNode.getOvercommittimeout());
                }
                rmNode = new RMNodeImpl(nodeId, context, hopRMNode.getHostName(), hopRMNode.getCommandPort(),
                        hopRMNode.getHttpPort(), node, resourceOption, hopRMNode.getNodemanagerVersion(),
                        hopRMNode.getHealthReport(), hopRMNode.getLastHealthReportTime(), nextHeartbeat,
                        conf.getBoolean(YarnConfiguration.HOPS_DISTRIBUTED_RT_ENABLED,
                                YarnConfiguration.DEFAULT_HOPS_DISTRIBUTED_RT_ENABLED));

                ((RMNodeImpl) rmNode).setState(hopRMNode.getCurrentState());
                // *** Recover maps/lists of RMNode ***
                //Use a cache for retrieved ContainerStatus
                Map<String, ContainerStatus> hopContainerStatuses = new HashMap<String, ContainerStatus>();
                //1. Recover JustLaunchedContainers
                JustLaunchedContainersDataAccess jlcDA = (JustLaunchedContainersDataAccess) RMStorageFactory
                        .getDataAccess(JustLaunchedContainersDataAccess.class);
                ContainerStatusDataAccess containerStatusDA = (ContainerStatusDataAccess) RMStorageFactory
                        .getDataAccess(ContainerStatusDataAccess.class);
                List<JustLaunchedContainers> hopJlcList = jlcDA.findByRMNode(id);
                if (hopJlcList != null && !hopJlcList.isEmpty()) {
                    Map<org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.ContainerStatus> justLaunchedContainers = new HashMap<org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.ContainerStatus>();
                    for (JustLaunchedContainers hop : hopJlcList) {
                        //Create ContainerId
                        org.apache.hadoop.yarn.api.records.ContainerId cid = ConverterUtils
                                .toContainerId(hop.getContainerId());
                        //Find and create ContainerStatus
                        if (!hopContainerStatuses.containsKey(hop.getContainerId())) {
                            hopContainerStatuses.put(hop.getContainerId(),
                                    (ContainerStatus) containerStatusDA.findEntry(hop.getContainerId(), id));
                        }
                        org.apache.hadoop.yarn.api.records.ContainerStatus conStatus = org.apache.hadoop.yarn.api.records.ContainerStatus
                                .newInstance(cid,
                                        ContainerState.valueOf(
                                                hopContainerStatuses.get(hop.getContainerId()).getState()),
                                        hopContainerStatuses.get(hop.getContainerId()).getDiagnostics(),
                                        hopContainerStatuses.get(hop.getContainerId()).getExitstatus());
                        justLaunchedContainers.put(cid, conStatus);
                    }
                    ((RMNodeImpl) rmNode).setJustLaunchedContainers(justLaunchedContainers);
                }
                //2. Return ContainerIdToClean
                ContainerIdToCleanDataAccess cidToCleanDA = (ContainerIdToCleanDataAccess) RMStorageFactory
                        .getDataAccess(ContainerIdToCleanDataAccess.class);
                List<ContainerId> cidToCleanList = cidToCleanDA.findByRMNode(id);
                if (cidToCleanList != null && !cidToCleanList.isEmpty()) {
                    Set<org.apache.hadoop.yarn.api.records.ContainerId> containersToClean = new TreeSet<org.apache.hadoop.yarn.api.records.ContainerId>();
                    for (ContainerId hop : cidToCleanList) {
                        //Create ContainerId
                        containersToClean.add(ConverterUtils.toContainerId(hop.getContainerId()));
                    }
                    ((RMNodeImpl) rmNode).setContainersToClean(containersToClean);
                }
                //3. Finished Applications
                FinishedApplicationsDataAccess finishedAppsDA = (FinishedApplicationsDataAccess) RMStorageFactory
                        .getDataAccess(FinishedApplicationsDataAccess.class);
                List<FinishedApplications> hopFinishedAppsList = finishedAppsDA.findByRMNode(id);
                if (hopFinishedAppsList != null && !hopFinishedAppsList.isEmpty()) {
                    List<ApplicationId> finishedApps = new ArrayList<ApplicationId>();
                    for (FinishedApplications hop : hopFinishedAppsList) {
                        finishedApps.add(ConverterUtils.toApplicationId(hop.getApplicationId()));
                    }
                    ((RMNodeImpl) rmNode).setFinishedApplications(finishedApps);
                }

                //4. UpdadedContainerInfo
                UpdatedContainerInfoDataAccess uciDA = (UpdatedContainerInfoDataAccess) RMStorageFactory
                        .getDataAccess(UpdatedContainerInfoDataAccess.class);
                //Retrieve all UpdatedContainerInfo entries for this particular RMNode
                Map<Integer, List<UpdatedContainerInfo>> hopUpdatedContainerInfoMap = uciDA.findByRMNode(id);
                if (hopUpdatedContainerInfoMap != null && !hopUpdatedContainerInfoMap.isEmpty()) {
                    ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo> updatedContainerInfoQueue = new ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo>();
                    for (int uciId : hopUpdatedContainerInfoMap.keySet()) {
                        for (UpdatedContainerInfo hopUCI : hopUpdatedContainerInfoMap.get(uciId)) {
                            List<org.apache.hadoop.yarn.api.records.ContainerStatus> newlyAllocated = new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>();
                            List<org.apache.hadoop.yarn.api.records.ContainerStatus> completed = new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>();
                            //Retrieve containerstatus entries for the particular updatedcontainerinfo
                            org.apache.hadoop.yarn.api.records.ContainerId cid = ConverterUtils
                                    .toContainerId(hopUCI.getContainerId());
                            if (!hopContainerStatuses.containsKey(hopUCI.getContainerId())) {
                                hopContainerStatuses.put(hopUCI.getContainerId(),
                                        (ContainerStatus) containerStatusDA.findEntry(hopUCI.getContainerId(),
                                                id));
                            }
                            org.apache.hadoop.yarn.api.records.ContainerStatus conStatus = org.apache.hadoop.yarn.api.records.ContainerStatus
                                    .newInstance(cid,
                                            ContainerState.valueOf(hopContainerStatuses
                                                    .get(hopUCI.getContainerId()).getState()),
                                            hopContainerStatuses.get(hopUCI.getContainerId()).getDiagnostics(),
                                            hopContainerStatuses.get(hopUCI.getContainerId()).getExitstatus());
                            //Check ContainerStatus state to add it to appropriate list
                            if (conStatus != null) {
                                if (conStatus.getState().toString()
                                        .equals(TablesDef.ContainerStatusTableDef.STATE_RUNNING)) {
                                    newlyAllocated.add(conStatus);
                                } else if (conStatus.getState().toString()
                                        .equals(TablesDef.ContainerStatusTableDef.STATE_COMPLETED)) {
                                    completed.add(conStatus);
                                }
                            }
                            org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo uci = new org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo(
                                    newlyAllocated, completed, hopUCI.getUpdatedContainerInfoId());
                            updatedContainerInfoQueue.add(uci);
                            ((RMNodeImpl) rmNode).setUpdatedContainerInfo(updatedContainerInfoQueue);
                            //Update uci counter
                            ((RMNodeImpl) rmNode).setUpdatedContainerInfoId(hopRMNode.getUciId());
                        }
                    }
                }

                //5. Retrieve latestNodeHeartBeatResponse
                NodeHBResponseDataAccess hbDA = (NodeHBResponseDataAccess) RMStorageFactory
                        .getDataAccess(NodeHBResponseDataAccess.class);
                NodeHBResponse hopHB = (NodeHBResponse) hbDA.findById(id);
                if (hopHB != null) {
                    NodeHeartbeatResponse hb = new NodeHeartbeatResponsePBImpl(
                            YarnServerCommonServiceProtos.NodeHeartbeatResponseProto
                                    .parseFrom(hopHB.getResponse()));
                    ((RMNodeImpl) rmNode).setLatestNodeHBResponse(hb);
                }
            }
            connector.commit();
            return rmNode;
        }
    };
    return (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) getRMNodeHandler.handle();
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<CompactionState> getCompactionState(TableName tableName, CompactType compactType) {
    CompletableFuture<CompactionState> future = new CompletableFuture<>();

    switch (compactType) {
    case MOB:
        addListener(connection.registry.getMasterAddress(), (serverName, err) -> {
            if (err != null) {
                future.completeExceptionally(err);
                return;
            }
            RegionInfo regionInfo = RegionInfo.createMobRegionInfo(tableName);

            addListener(this.<GetRegionInfoResponse>newAdminCaller().serverName(serverName)
                    .action((controller, stub) -> this
                            .<GetRegionInfoRequest, GetRegionInfoResponse, GetRegionInfoResponse>adminCall(
                                    controller, stub,
                                    RequestConverter.buildGetRegionInfoRequest(regionInfo.getRegionName(),
                                            true),
                                    (s, c, req, done) -> s.getRegionInfo(controller, req, done), resp -> resp))
                    .call(), (resp2, err2) -> {
                        if (err2 != null) {
                            future.completeExceptionally(err2);
                        } else {
                            if (resp2.hasCompactionState()) {
                                future.complete(ProtobufUtil.createCompactionState(resp2.getCompactionState()));
                            } else {
                                future.complete(CompactionState.NONE);
                            }
                        }
                    });
        });
        break;
    case NORMAL:
        addListener(getTableHRegionLocations(tableName), (locations, err) -> {
            if (err != null) {
                future.completeExceptionally(err);
                return;
            }
            ConcurrentLinkedQueue<CompactionState> regionStates = new ConcurrentLinkedQueue<>();
            List<CompletableFuture<CompactionState>> futures = new ArrayList<>();
            locations.stream().filter(loc -> loc.getServerName() != null).filter(loc -> loc.getRegion() != null)
                    .filter(loc -> !loc.getRegion().isOffline()).map(loc -> loc.getRegion().getRegionName())
                    .forEach(region -> {
                        futures.add(getCompactionStateForRegion(region).whenComplete((regionState, err2) -> {
                            // If any region compaction state is MAJOR_AND_MINOR
                            // the table compaction state is MAJOR_AND_MINOR, too.
                            if (err2 != null) {
                                future.completeExceptionally(unwrapCompletionException(err2));
                            } else if (regionState == CompactionState.MAJOR_AND_MINOR) {
                                future.complete(regionState);
                            } else {
                                regionStates.add(regionState);
                            }
                        }));
                    });
            addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[futures.size()])),
                    (ret, err3) -> {
                        // If the future is not completed yet, check every region's compaction state
                        if (!future.isCompletedExceptionally() && !future.isDone()) {
                            CompactionState state = CompactionState.NONE;
                            for (CompactionState regionState : regionStates) {
                                switch (regionState) {
                                case MAJOR:
                                    if (state == CompactionState.MINOR) {
                                        future.complete(CompactionState.MAJOR_AND_MINOR);
                                    } else {
                                        state = CompactionState.MAJOR;
                                    }
                                    break;
                                case MINOR:
                                    if (state == CompactionState.MAJOR) {
                                        future.complete(CompactionState.MAJOR_AND_MINOR);
                                    } else {
                                        state = CompactionState.MINOR;
                                    }
                                    break;
                                case NONE:
                                default:
                                }
                            }
                            if (!future.isDone()) {
                                future.complete(state);
                            }
                        }
                    });
        });
        break;
    default:
        throw new IllegalArgumentException("Unknown compactType: " + compactType);
    }

    return future;
}
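
In the NORMAL branch above, add() serves a different purpose: callbacks completing on arbitrary threads accumulate their per-region results into a shared ConcurrentLinkedQueue. A minimal sketch of that accumulation pattern using CompletableFuture (values and futures are illustrative):

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedQueue;

public class CollectResults {
    public static void main(String[] args) {
        // Callbacks may fire on different threads, so results are accumulated
        // in a ConcurrentLinkedQueue instead of a plain ArrayList.
        ConcurrentLinkedQueue<Integer> results = new ConcurrentLinkedQueue<>();
        List<CompletableFuture<Integer>> futures = List.of(
                CompletableFuture.supplyAsync(() -> 1),
                CompletableFuture.supplyAsync(() -> 2),
                CompletableFuture.supplyAsync(() -> 3));
        // whenComplete returns a future that finishes after the callback runs,
        // so waiting on those guarantees the queue is fully populated.
        CompletableFuture.allOf(futures.stream()
                .map(f -> f.whenComplete((value, err) -> {
                    if (err == null) {
                        results.add(value); // thread-safe add from the completing thread
                    }
                }))
                .toArray(CompletableFuture[]::new)).join();
        System.out.println(results);
    }
}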

From source file:spade.storage.Neo4j.java

public static void index(String dbpath, boolean printProgress) {

    int totalThreads = Runtime.getRuntime().availableProcessors();
    final ConcurrentLinkedQueue<Node> nodeTaskQueue = new ConcurrentLinkedQueue<Node>();
    final ConcurrentLinkedQueue<Relationship> edgeTaskQueue = new ConcurrentLinkedQueue<Relationship>();
    final ReentrantReadWriteLock nodeRwlock = new ReentrantReadWriteLock();
    final ReentrantReadWriteLock edgeRwlock = new ReentrantReadWriteLock();
    final Index<Node> vertexIndex;
    final RelationshipIndex edgeIndex;
    System.out.println("Loading database...");
    File databaseFile = new File(dbpath);
    final GraphDatabaseService graphDb = new GraphDatabaseFactory().newEmbeddedDatabaseBuilder(databaseFile)
            .setConfig(GraphDatabaseSettings.pagecache_memory,
                    "" + (Runtime.getRuntime().totalMemory() * 9) / 10)
            // .setConfig(GraphDatabaseSettings.keep_logical_logs, "false")
            .newGraphDatabase();

    System.out.println("Loaded");
    // clear already present indexes
    try (Transaction tx = graphDb.beginTx()) {
        graphDb.index().forNodes(spade.storage.Neo4j.VERTEX_INDEX).delete();
        tx.success();
    }

    try (Transaction tx = graphDb.beginTx()) {
        graphDb.index().forRelationships(spade.storage.Neo4j.EDGE_INDEX).delete();
        tx.success();
    }
    //

    System.out.println("Creating Indexing discriptors...");

    try (Transaction tx = graphDb.beginTx()) {
        vertexIndex = graphDb.index().forNodes(spade.storage.Neo4j.VERTEX_INDEX);
        tx.success();
    }

    try (Transaction tx = graphDb.beginTx()) {
        edgeIndex = graphDb.index().forRelationships(spade.storage.Neo4j.EDGE_INDEX);
        tx.success();
    }

    System.out.println("Created");

    class NodeIndexer implements Runnable {

        public void run() {

            Transaction tx = graphDb.beginTx();
            int counter = 0;
            try {
                while (!Thread.currentThread().isInterrupted()) {

                    if (counter < 10000) {
                        Node node = nodeTaskQueue.poll();
                        if (node == null) {
                            continue;
                        }

                        for (String key : node.getPropertyKeys()) {
                            vertexIndex.add(node, key, (String) node.getProperty(key));
                        }
                        node.setProperty(ID_STRING, node.getId());
                        vertexIndex.add(node, ID_STRING, Long.toString(node.getId()));

                        counter++;
                    }

                    if (counter > 1000 && nodeRwlock.writeLock().tryLock()) {
                        tx.success();
                        tx.close();
                        tx = graphDb.beginTx();
                        nodeRwlock.writeLock().unlock();
                        counter = 0;
                    }

                }

            } finally {
                // tx.success();
                tx.close();
                if (nodeRwlock.writeLock().isHeldByCurrentThread()) {
                    nodeRwlock.writeLock().unlock();
                }
            }
        }
    }

    class RelationshipIndexer implements Runnable {

        public void run() {

            Transaction tx = graphDb.beginTx();
            int counter = 0;
            try {
                while (!Thread.currentThread().isInterrupted()) {

                    if (counter < 10000) {
                        Relationship relationship = edgeTaskQueue.poll();
                        if (relationship == null) {
                            continue;
                        }

                        for (String key : relationship.getPropertyKeys()) {
                            edgeIndex.add(relationship, key, (String) relationship.getProperty(key));
                        }
                        relationship.setProperty(ID_STRING, relationship.getId());
                        edgeIndex.add(relationship, ID_STRING, Long.toString(relationship.getId()));

                        counter++;
                    }

                    if (counter > 1000 && edgeRwlock.writeLock().tryLock()) {
                        // tx.success();
                        tx.close();
                        tx = graphDb.beginTx();
                        edgeRwlock.writeLock().unlock();
                        counter = 0;
                    }

                }

            } finally {
                // tx.success();
                tx.close();
                if (edgeRwlock.writeLock().isHeldByCurrentThread()) {
                    edgeRwlock.writeLock().unlock();
                }
            }

        }
    }

    ArrayList<Thread> nodeWorkers = new ArrayList<Thread>();
    for (int i = 0; i < totalThreads / 2; i++) {
        Thread th = new Thread(new NodeIndexer());
        nodeWorkers.add(th);
        th.start();
    }

    ArrayList<Thread> edgeWorkers = new ArrayList<Thread>();
    for (int i = 0; i < totalThreads / 2; i++) {
        Thread th = new Thread(new RelationshipIndexer());
        edgeWorkers.add(th);
        th.start();
    }

    System.out.println("Counted Nodes and Relationships to index...");
    final long total;

    try (Transaction tx = graphDb.beginTx()) {
        total = Iterators.count(graphDb.getAllNodes().iterator())
                + Iterators.count(graphDb.getAllRelationships().iterator());
        tx.success();
    }
    System.out.println("done.\n");

    long percentageCompleted = 0;
    int count = 0;

    try (Transaction tx = graphDb.beginTx()) {

        // index nodes
        Iterator<Node> nodeIterator = graphDb.getAllNodes().iterator();
        Iterator<Relationship> edgeIterator = graphDb.getAllRelationships().iterator();

        while (edgeIterator.hasNext() || nodeIterator.hasNext()) {

            if (nodeIterator.hasNext() && nodeTaskQueue.size() < 10000) {
                nodeTaskQueue.add(nodeIterator.next());
                count = count + 1;
            }

            if (edgeIterator.hasNext() && edgeTaskQueue.size() < 10000) {
                edgeTaskQueue.add(edgeIterator.next());
                count = count + 1;
            }

            if (printProgress) {

                if (((count * 100) / total) > percentageCompleted) {
                    Runtime rt = Runtime.getRuntime();
                    long totalMemory = rt.totalMemory() / 1024 / 1024;
                    long freeMemory = rt.freeMemory() / 1024 / 1024;
                    long usedMemory = totalMemory - freeMemory;
                    System.out.print("| Cores: " + rt.availableProcessors() + " | Threads: " + totalThreads
                            + " | Heap (MB) - total: " + totalMemory + " , " + (freeMemory * 100) / totalMemory
                            + "% free"
                            // + " | Total Objects (nodes + relationships) to Index: " + total
                            + " | Indexing Object (nodes + relationships): " + count + " / " + total
                            + " | Completed: " + percentageCompleted + " %" + " |\r");
                }

                percentageCompleted = (count * 100) / total;
            }

        }

        tx.success();
    }

    System.out.println("\n\nIndexing completed. Waiting for queues to clear...");

    try {
        while (nodeTaskQueue.size() != 0 || edgeTaskQueue.size() != 0) {
            Thread.sleep(1000);
        }
    } catch (InterruptedException exception) {

    }

    System.out.println("Queues cleared. Threads teardown started...");

    for (int i = 0; i < totalThreads / 2; i++) {
        nodeWorkers.get(i).interrupt();
        try {
            nodeWorkers.get(i).join();
        } catch (InterruptedException exception) {

        }
    }

    for (int i = 0; i < totalThreads / 2; i++) {
        edgeWorkers.get(i).interrupt();
        try {
            edgeWorkers.get(i).join();
        } catch (InterruptedException exception) {

        }
    }

    System.out.println("Database shutdown started...");
    graphDb.shutdown();
}
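
Here ConcurrentLinkedQueue acts as a hand-rolled work queue: the main thread add()s nodes and relationships at the tail while the indexer threads poll() from the head until interrupted. A minimal sketch of that producer/consumer shape (all names and values are illustrative):

import java.util.concurrent.ConcurrentLinkedQueue;

public class PollingWorkers {
    public static void main(String[] args) throws InterruptedException {
        final ConcurrentLinkedQueue<Integer> tasks = new ConcurrentLinkedQueue<>();

        Runnable worker = () -> {
            while (!Thread.currentThread().isInterrupted()) {
                Integer task = tasks.poll(); // non-blocking: returns null when empty
                if (task == null) {
                    continue; // spin, as the indexer threads above do
                }
                System.out.println(Thread.currentThread().getName() + " -> " + task);
            }
        };
        Thread t1 = new Thread(worker);
        Thread t2 = new Thread(worker);
        t1.start();
        t2.start();

        for (int i = 0; i < 10; i++) {
            tasks.add(i); // producer: add() enqueues at the tail
        }
        while (!tasks.isEmpty()) {
            Thread.sleep(10); // wait for the workers to drain the queue
        }
        t1.interrupt();
        t2.interrupt();
        t1.join();
        t2.join();
    }
}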