Example usage for java.util.Queue.poll()

List of usage examples for java.util.Queue.poll()

Introduction

This page lists example usages of java.util.Queue.poll() drawn from open-source projects.

Prototype

E poll();

Document

Retrieves and removes the head of this queue, or returns null if this queue is empty.

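The following minimal sketch is not taken from any of the projects below; it simply illustrates the contract stated above: poll() retrieves and removes the head element and returns null on an empty queue (unlike remove(), which would throw NoSuchElementException). The class and variable names are illustrative only.

import java.util.LinkedList;
import java.util.Queue;

public class QueuePollDemo {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<>();
        queue.offer("first");
        queue.offer("second");

        // Retrieves and removes the head of the queue
        System.out.println(queue.poll()); // prints "first"
        System.out.println(queue.poll()); // prints "second"

        // On an empty queue, poll() returns null instead of throwing
        System.out.println(queue.poll()); // prints "null"

        // A common drain pattern, also used by several of the examples below
        queue.offer("a");
        queue.offer("b");
        String element;
        while ((element = queue.poll()) != null) {
            System.out.println("drained: " + element);
        }
    }
}
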
Usage

From source file: voldemort.tools.KeyVersionFetcherCLI.java

public boolean sampleStore(StoreDefinition storeDefinition) {
    String storeName = storeDefinition.getName();

    String keysFileName = inDir + System.getProperty("file.separator") + storeName + ".keys";
    File keysFile = new File(keysFileName);
    if (!keysFile.exists()) {
        logger.error("Keys file " + keysFileName + " does not exist!");
        return false;
    }

    String kvFileName = outDir + System.getProperty("file.separator") + storeName + ".kvs";
    File kvFile = new File(kvFileName);
    if (kvFile.exists()) {
        logger.info("Key-Version file " + kvFileName + " exists, so will not sample keys from file "
                + keysFileName + ".");
        return true;
    }

    BaseStoreRoutingPlan storeRoutingPlan = new BaseStoreRoutingPlan(cluster, storeDefinition);
    BufferedReader keyReader = null;
    BufferedWriter kvWriter = null;
    try {
        keyReader = new BufferedReader(new FileReader(keysFileName));
        kvWriter = new BufferedWriter(new FileWriter(kvFileName));

        boolean readAllKeys = false;
        while (!readAllKeys) {
            Queue<Future<String>> futureKVs = new LinkedList<Future<String>>();
            for (int numFetchTasks = 0; numFetchTasks < this.outputBatchSize; numFetchTasks++) {
                String keyLine = keyReader.readLine();
                if (keyLine == null) {
                    readAllKeys = true;
                    break;
                }
                byte[] keyInBytes = ByteUtils.fromHexString(keyLine.trim());
                FetchKeyVersionsTask kvFetcher = new FetchKeyVersionsTask(storeRoutingPlan, keyInBytes);
                Future<String> future = kvFetcherService.submit(kvFetcher);
                futureKVs.add(future);
            }

            if (futureKVs.size() > 0) {
                while (!futureKVs.isEmpty()) {
                    Future<String> future = futureKVs.poll();
                    String keyVersions = future.get();
                    kvWriter.append(keyVersions);
                }
            }
        }
        return true;
    } catch (DecoderException de) {
        logger.error("Could not decode key to sample for store " + storeName, de);
        return false;
    } catch (IOException ioe) {
        logger.error("IOException caught while sampling store " + storeName, ioe);
        return false;
    } catch (InterruptedException ie) {
        logger.error("InterruptedException caught while sampling store " + storeName, ie);
        return false;
    } catch (ExecutionException ee) {
        logger.error("Encountered an execution exception while sampling " + storeName, ee);
        ee.printStackTrace();
        return false;
    } finally {
        if (keyReader != null) {
            try {
                keyReader.close();
            } catch (IOException e) {
                logger.error("IOException caught while trying to close keyReader for store " + storeName, e);
                e.printStackTrace();
            }
        }
        if (kvWriter != null) {
            try {
                kvWriter.close();
            } catch (IOException e) {
                logger.error("IOException caught while trying to close kvWriter for store " + storeName, e);
                e.printStackTrace();
            }
        }
    }
}

From source file: com.datatorrent.stram.webapp.TypeGraph.java

private void removeSubGraph(TypeGraphVertex v) {

    // Can't remove recursively because it would cause concurrent modification;
    // use a queue to delete all nodes instead
    Queue<TypeGraphVertex> removingQueue = new LinkedList<>();
    removingQueue.add(v);
    while (!removingQueue.isEmpty()) {
        TypeGraphVertex tgv = removingQueue.poll();
        if (typeGraph.get(tgv.typeName) == null) {
            // skip nodes that have already been removed
            // (reachable again through common descendants)
            continue;
        }
        // put all the descendants to waiting queue
        for (TypeGraphVertex child : tgv.descendants) {
            removingQueue.offer(child);
        }
        // remove from global hashmap
        typeGraph.remove(tgv.typeName);
        // remove from instantiable descendants list of all the (in)direct ancestors
        if (!tgv.allInstantiableDescendants.isEmpty() && !tgv.ancestors.isEmpty()) {
            for (TypeGraphVertex p : tgv.ancestors) {
                removeFromInstantiableDescendants(p, tgv.allInstantiableDescendants);
            }
        }
        // cut links from parent to child
        for (TypeGraphVertex parent : tgv.ancestors) {
            parent.descendants.remove(tgv);
        }
        // cut links from child to parent
        tgv.ancestors.clear();
    }
}

From source file: org.kuali.rice.krad.uif.service.impl.ViewHelperServiceImpl.java

/**
 * {@inheritDoc}
 */
@Override
public void applyDefaultValues(Component component) {
    if (component == null) {
        return;
    }

    View view = ViewLifecycle.getView();
    Object model = ViewLifecycle.getModel();

    @SuppressWarnings("unchecked")
    Queue<LifecycleElement> elementQueue = RecycleUtils.getInstance(LinkedList.class);
    elementQueue.offer(component);
    try {
        while (!elementQueue.isEmpty()) {
            LifecycleElement currentElement = elementQueue.poll();

            // if component is a data field apply default value
            if (currentElement instanceof DataField) {
                DataField dataField = ((DataField) currentElement);

                // need to make sure binding is initialized since this could be on a page we have not initialized yet
                dataField.getBindingInfo().setDefaults(view, dataField.getPropertyName());

                populateDefaultValueForField(model, dataField, dataField.getBindingInfo().getBindingPath());
            }

            elementQueue.addAll(ViewLifecycleUtils.getElementsForLifecycle(currentElement).values());
        }
    } finally {
        elementQueue.clear();
        RecycleUtils.recycle(elementQueue);
    }
}

From source file: com.baifendian.swordfish.common.utils.graph.Graph.java

/**
 * Topological sort implementation.
 *
 * @return key indicates whether the sort covered every vertex (true if the graph is acyclic,
 *         false if a cycle was detected); value is the resulting topological order
 */
private Map.Entry<Boolean, List<VK>> topologicalSortImpl() {
    List<VK> sort = new ArrayList<>();
    Queue<VK> zeroVertex = new LinkedList<>();
    Map<VK, Integer> indegrees = new HashMap<>();

    synchronized (this) {
        // scan all vertices: enqueue those with in-degree 0 and record the in-degree of the rest
        for (Map.Entry<VK, VD> id2Vertex : vertices.entrySet()) {
            VK key = id2Vertex.getKey();
            int inDegree = getIndegree(key);

            if (inDegree == 0) {
                sort.add(key);
                zeroVertex.add(key);
            } else {
                indegrees.put(key, inDegree);
            }
        }

        // build the topological order: remove vertices of in-degree 0 and decrement the in-degree of their successors
        while (!zeroVertex.isEmpty()) {
            VK key = zeroVertex.poll();
            Collection<VK> postNodes = getPostNode(key);

            for (VK postKey : postNodes) {
                int d = indegrees.getOrDefault(postKey, 0);

                if (d <= 1) {
                    sort.add(postKey);
                    indegrees.remove(postKey);
                    zeroVertex.add(postKey);
                } else {
                    indegrees.put(postKey, d - 1);
                }
            }
        }
    }

    // if indegrees is empty, every vertex was sorted, i.e. the graph is acyclic
    return new AbstractMap.SimpleEntry<>(indegrees.isEmpty(), sort);
}

From source file: eu.itesla_project.modules.validation.OfflineValidationTool.java

@Override
public void run(CommandLine line) throws Exception {
    OfflineConfig config = OfflineConfig.load();
    String rulesDbName = line.hasOption("rules-db-name") ? line.getOptionValue("rules-db-name")
            : OfflineConfig.DEFAULT_RULES_DB_NAME;
    String workflowId = line.getOptionValue("workflow");
    Path outputDir = Paths.get(line.getOptionValue("output-dir"));
    double purityThreshold = line.hasOption("purity-threshold")
            ? Double.parseDouble(line.getOptionValue("purity-threshold"))
            : DEFAULT_PURITY_THRESHOLD;
    Set<Country> countries = Arrays.stream(line.getOptionValue("base-case-countries").split(","))
            .map(Country::valueOf).collect(Collectors.toSet());
    Interval histoInterval = Interval.parse(line.getOptionValue("history-interval"));
    boolean mergeOptimized = line.hasOption("merge-optimized");
    CaseType caseType = CaseType.valueOf(line.getOptionValue("case-type"));

    CaseRepositoryFactory caseRepositoryFactory = config.getCaseRepositoryFactoryClass().newInstance();
    RulesDbClientFactory rulesDbClientFactory = config.getRulesDbClientFactoryClass().newInstance();
    ContingenciesAndActionsDatabaseClient contingencyDb = config.getContingencyDbClientFactoryClass()
            .newInstance().create();
    SimulatorFactory simulatorFactory = config.getSimulatorFactoryClass().newInstance();
    LoadFlowFactory loadFlowFactory = config.getLoadFlowFactoryClass().newInstance();
    MergeOptimizerFactory mergeOptimizerFactory = config.getMergeOptimizerFactoryClass().newInstance();

    SimulationParameters simulationParameters = SimulationParameters.load();

    try (ComputationManager computationManager = new LocalComputationManager();
            RulesDbClient rulesDb = rulesDbClientFactory.create(rulesDbName);
            CsvMetricsDb metricsDb = new CsvMetricsDb(outputDir, true, "metrics")) {

        CaseRepository caseRepository = caseRepositoryFactory.create(computationManager);

        Queue<DateTime> dates = Queues.synchronizedDeque(
                new ArrayDeque<>(caseRepository.dataAvailable(caseType, countries, histoInterval)));

        Map<String, Map<RuleId, ValidationStatus>> statusPerRulePerCase = Collections
                .synchronizedMap(new TreeMap<>());
        Map<String, Map<RuleId, Map<HistoDbAttributeId, Object>>> valuesPerRulePerCase = Collections
                .synchronizedMap(new TreeMap<>());

        int cores = Runtime.getRuntime().availableProcessors();
        ExecutorService executorService = Executors.newFixedThreadPool(cores);
        try {
            List<Future<?>> tasks = new ArrayList<>(cores);
            for (int i = 0; i < cores; i++) {
                tasks.add(executorService.submit((Runnable) () -> {
                    while (dates.size() > 0) {
                        DateTime date = dates.poll();

                        try {
                            Network network = MergeUtil.merge(caseRepository, date, caseType, countries,
                                    loadFlowFactory, 0, mergeOptimizerFactory, computationManager,
                                    mergeOptimized);

                            System.out.println("case " + network.getId() + " loaded");

                            System.out.println("running simulation on " + network.getId() + "...");

                            network.getStateManager().allowStateMultiThreadAccess(true);
                            String baseStateId = network.getId();
                            network.getStateManager().cloneState(StateManager.INITIAL_STATE_ID, baseStateId);
                            network.getStateManager().setWorkingState(baseStateId);

                            Map<RuleId, ValidationStatus> statusPerRule = new HashMap<>();
                            Map<RuleId, Map<HistoDbAttributeId, Object>> valuesPerRule = new HashMap<>();

                            LoadFlow loadFlow = loadFlowFactory.create(network, computationManager, 0);
                            LoadFlowResult loadFlowResult = loadFlow.run();

                            System.err.println("load flow terminated (" + loadFlowResult.isOk() + ") on "
                                    + network.getId());

                            if (loadFlowResult.isOk()) {
                                Stabilization stabilization = simulatorFactory.createStabilization(network,
                                        computationManager, 0);
                                ImpactAnalysis impactAnalysis = simulatorFactory.createImpactAnalysis(network,
                                        computationManager, 0, contingencyDb);
                                Map<String, Object> context = new HashMap<>();
                                stabilization.init(simulationParameters, context);
                                impactAnalysis.init(simulationParameters, context);
                                StabilizationResult stabilizationResult = stabilization.run();

                                System.err.println("stabilization terminated ("
                                        + stabilizationResult.getStatus() + ") on " + network.getId());

                                metricsDb.store(workflowId, network.getId(), "STABILIZATION",
                                        stabilizationResult.getMetrics());

                                if (stabilizationResult.getStatus() == StabilizationStatus.COMPLETED) {
                                    ImpactAnalysisResult impactAnalysisResult = impactAnalysis
                                            .run(stabilizationResult.getState());

                                    System.err.println("impact analysis terminated on " + network.getId());

                                    metricsDb.store(workflowId, network.getId(), "IMPACT_ANALYSIS",
                                            impactAnalysisResult.getMetrics());

                                    System.out.println("checking rules on " + network.getId() + "...");

                                    for (SecurityIndex securityIndex : impactAnalysisResult
                                            .getSecurityIndexes()) {
                                        for (RuleAttributeSet attributeSet : RuleAttributeSet.values()) {
                                            statusPerRule.put(new RuleId(attributeSet, securityIndex.getId()),
                                                    new ValidationStatus(null, securityIndex.isOk()));
                                        }
                                    }
                                }
                            }

                            Map<HistoDbAttributeId, Object> values = IIDM2DB
                                    .extractCimValues(network, new IIDM2DB.Config(null, false))
                                    .getSingleValueMap();
                            for (RuleAttributeSet attributeSet : RuleAttributeSet.values()) {
                                for (Contingency contingency : contingencyDb.getContingencies(network)) {
                                    List<SecurityRule> securityRules = rulesDb.getRules(workflowId,
                                            attributeSet, contingency.getId(), null);
                                    for (SecurityRule securityRule : securityRules) {
                                        SecurityRuleExpression securityRuleExpression = securityRule
                                                .toExpression(purityThreshold);
                                        SecurityRuleCheckReport checkReport = securityRuleExpression
                                                .check(values);

                                        valuesPerRule.put(securityRule.getId(),
                                                ExpressionAttributeList
                                                        .list(securityRuleExpression.getCondition()).stream()
                                                        .collect(Collectors.toMap(attributeId -> attributeId,
                                                                new Function<HistoDbAttributeId, Object>() {
                                                                    @Override
                                                                    public Object apply(
                                                                            HistoDbAttributeId attributeId) {
                                                                        Object value = values.get(attributeId);
                                                                        return value != null ? value
                                                                                : Float.NaN;
                                                                    }
                                                                })));

                                        ValidationStatus status = statusPerRule.get(securityRule.getId());
                                        if (status == null) {
                                            status = new ValidationStatus(null, null);
                                            statusPerRule.put(securityRule.getId(), status);
                                        }
                                        if (checkReport.getMissingAttributes().isEmpty()) {
                                            status.setRuleOk(checkReport.isSafe());
                                        }
                                    }
                                }
                            }

                            statusPerRulePerCase.put(network.getId(), statusPerRule);
                            valuesPerRulePerCase.put(network.getId(), valuesPerRule);
                        } catch (Exception e) {
                            LOGGER.error(e.toString(), e);
                        }
                    }
                }));
            }
            for (Future<?> task : tasks) {
                task.get();
            }
        } finally {
            executorService.shutdown();
            executorService.awaitTermination(1, TimeUnit.MINUTES);
        }

        writeCsv(statusPerRulePerCase, valuesPerRulePerCase, outputDir);
    }
}

From source file: org.paxle.core.io.temp.impl.CommandTempReleaser.java

private void releaseCommandFiles(final ICommand cmd, final Long id) {
    try {
        File file;
        final ICrawlerDocument cdoc = cmd.getCrawlerDocument();
        if (cdoc != null && (file = cdoc.getContent()) != null) {
            if (tfm.isKnown(file)) {
                try {
                    tfm.releaseTempFile(file);
                } catch (FileNotFoundException e) {
                    this.logger.warn("downloaded crawler-data not available for release");
                }
            } else {
                this.logger.debug(String.format("Crawlerdoc tempfile %s not managed by tempfilemanager",
                        file.toString()));
            }
        }

        final Queue<Map.Entry<String, IParserDocument>> pdocs = new LinkedList<Map.Entry<String, IParserDocument>>();

        IParserDocument pdoc = cmd.getParserDocument();
        Map.Entry<String, IParserDocument> entry = null;
        if (pdoc != null) {
            do {
                if (entry != null) {
                    pdoc = entry.getValue();
                }

                if ((file = pdoc.getTextFile()) != null) {
                    if (tfm.isKnown(file)) {
                        try {
                            tfm.releaseTempFile(file);
                        } catch (FileNotFoundException e) {
                            final String msg = (entry == null) ? "parser-document"
                                    : "sub parser-document '" + entry.getKey() + "'";
                            logger.warn(String.format("data of %s of cmd [%06d] not available for release", msg,
                                    id));
                        }
                    } else {
                        this.logger.debug(String.format("Parserdoc tempfile %s not managed by tempfilemanager",
                                file.toString()));
                    }
                }

                pdocs.addAll(pdoc.getSubDocs().entrySet());
            } while ((entry = pdocs.poll()) != null);
        }

    } catch (Throwable e) {
        this.logger.error(String.format("Unexpected '%s' while releasing temporary files of command '%s'.",
                e.getClass().getName(), cmd.getLocation()), e);
    }
}

From source file: org.jasig.portal.portlet.rendering.PortletEventCoordinatationService.java

@Override
public void resolvePortletEvents(HttpServletRequest request, PortletEventQueue portletEventQueue) {
    final Queue<QueuedEvent> events = portletEventQueue.getUnresolvedEvents();

    //Skip all processing if there are no new events.
    if (events.isEmpty()) {
        return;
    }

    //Get all the portlets the user is subscribed to
    final IUserInstance userInstance = this.userInstanceManager.getUserInstance(request);
    final IUserPreferencesManager preferencesManager = userInstance.getPreferencesManager();
    final IUserLayoutManager userLayoutManager = preferencesManager.getUserLayoutManager();

    //Make a local copy so we can remove data from it
    final Set<String> allLayoutNodeIds = new LinkedHashSet<String>(
            userLayoutManager.getAllSubscribedChannels());

    final Map<String, IPortletEntity> portletEntityCache = new LinkedHashMap<String, IPortletEntity>();

    while (!events.isEmpty()) {
        final QueuedEvent queuedEvent = events.poll();
        if (queuedEvent == null) {
            //no more queued events, done resolving
            return;
        }

        final IPortletWindowId sourceWindowId = queuedEvent.getPortletWindowId();
        final Event event = queuedEvent.getEvent();

        final boolean globalEvent = isGlobalEvent(request, sourceWindowId, event);

        final Set<IPortletDefinition> portletDefinitions = new LinkedHashSet<IPortletDefinition>();
        if (globalEvent) {
            portletDefinitions.addAll(this.portletDefinitionRegistry.getAllPortletDefinitions());
        }

        //Check each subscription to see what events it is registered to see
        for (final Iterator<String> layoutNodeIdItr = allLayoutNodeIds.iterator(); layoutNodeIdItr.hasNext();) {
            final String layoutNodeId = layoutNodeIdItr.next();

            IPortletEntity portletEntity = portletEntityCache.get(layoutNodeId);
            if (portletEntity == null) {
                portletEntity = this.portletEntityRegistry.getOrCreatePortletEntity(request, userInstance,
                        layoutNodeId);

                // if portlet entity registry returned null, then portlet has been deleted - remove it (see UP-3378)
                if (portletEntity == null) {
                    layoutNodeIdItr.remove();
                    continue;
                }

                final IPortletDefinitionId portletDefinitionId = portletEntity.getPortletDefinitionId();
                final PortletDefinition portletDescriptor = this.portletDefinitionRegistry
                        .getParentPortletDescriptor(portletDefinitionId);
                if (portletDescriptor == null) {
                    //Misconfigured portlet, remove it from the list so we don't check it again and ignore it
                    layoutNodeIdItr.remove();
                    continue;
                }

                final List<? extends EventDefinitionReference> supportedProcessingEvents = portletDescriptor
                        .getSupportedProcessingEvents();
                //Skip portlets that don't handle any events and remove them from the set so they are not checked again
                if (supportedProcessingEvents == null || supportedProcessingEvents.size() == 0) {
                    layoutNodeIdItr.remove();
                    continue;
                }

                portletEntityCache.put(layoutNodeId, portletEntity);
            }

            final IPortletDefinition portletDefinition = portletEntity.getPortletDefinition();
            final IPortletDefinitionId portletDefinitionId = portletDefinition.getPortletDefinitionId();
            if (this.supportsEvent(event, portletDefinitionId)) {
                this.logger.debug("{} supports event {}", portletDefinition, event);

                //If this is the default portlet entity remove the definition from the all defs set to avoid duplicate processing
                final IPortletEntity defaultPortletEntity = this.portletEntityRegistry
                        .getOrCreateDefaultPortletEntity(request, portletDefinitionId);
                if (defaultPortletEntity.equals(portletEntity)) {
                    portletDefinitions.remove(portletDefinition);
                }

                final IPortletEntityId portletEntityId = portletEntity.getPortletEntityId();
                final Set<IPortletWindow> portletWindows = this.portletWindowRegistry
                        .getAllPortletWindowsForEntity(request, portletEntityId);

                for (final IPortletWindow portletWindow : portletWindows) {
                    this.logger.debug("{} resolved target {}", event, portletWindow);
                    final IPortletWindowId portletWindowId = portletWindow.getPortletWindowId();
                    final Event unmarshalledEvent = this.unmarshall(portletWindow, event);
                    portletEventQueue.offerEvent(portletWindowId,
                            new QueuedEvent(sourceWindowId, unmarshalledEvent));
                }
            } else {
                portletDefinitions.remove(portletDefinition);
            }
        }

        if (!portletDefinitions.isEmpty()) {
            final IPerson user = userInstance.getPerson();
            final EntityIdentifier ei = user.getEntityIdentifier();
            final IAuthorizationPrincipal ap = AuthorizationService.instance().newPrincipal(ei.getKey(),
                    ei.getType());

            //If the event is global there might still be portlet definitions that need targeting
            for (final IPortletDefinition portletDefinition : portletDefinitions) {
                final IPortletDefinitionId portletDefinitionId = portletDefinition.getPortletDefinitionId();
                //Check if the user can render the portlet definition before doing event tests
                if (ap.canRender(portletDefinitionId.getStringId())) {
                    if (this.supportsEvent(event, portletDefinitionId)) {
                        this.logger.debug("{} supports event {}", portletDefinition, event);

                        final IPortletEntity portletEntity = this.portletEntityRegistry
                                .getOrCreateDefaultPortletEntity(request, portletDefinitionId);
                        final IPortletEntityId portletEntityId = portletEntity.getPortletEntityId();
                        final Set<IPortletWindow> portletWindows = this.portletWindowRegistry
                                .getAllPortletWindowsForEntity(request, portletEntityId);

                        for (final IPortletWindow portletWindow : portletWindows) {
                            this.logger.debug("{} resolved target {}", event, portletWindow);
                            final IPortletWindowId portletWindowId = portletWindow.getPortletWindowId();
                            final Event unmarshalledEvent = this.unmarshall(portletWindow, event);
                            portletEventQueue.offerEvent(portletWindowId,
                                    new QueuedEvent(sourceWindowId, unmarshalledEvent));
                        }
                    }
                }
            }
        }
    }
}

From source file: org.apereo.portal.portlet.rendering.PortletEventCoordinatationService.java

@Override
public void resolvePortletEvents(HttpServletRequest request, PortletEventQueue portletEventQueue) {
    final Queue<QueuedEvent> events = portletEventQueue.getUnresolvedEvents();

    //Skip all processing if there are no new events.
    if (events.isEmpty()) {
        return;
    }

    //Get all the portlets the user is subscribed to
    final IUserInstance userInstance = this.userInstanceManager.getUserInstance(request);
    final IUserPreferencesManager preferencesManager = userInstance.getPreferencesManager();
    final IUserLayoutManager userLayoutManager = preferencesManager.getUserLayoutManager();

    //Make a local copy so we can remove data from it
    final Set<String> allLayoutNodeIds = new LinkedHashSet<String>(
            userLayoutManager.getAllSubscribedChannels());

    final Map<String, IPortletEntity> portletEntityCache = new LinkedHashMap<String, IPortletEntity>();

    while (!events.isEmpty()) {
        final QueuedEvent queuedEvent = events.poll();
        if (queuedEvent == null) {
            //no more queued events, done resolving
            return;
        }

        final IPortletWindowId sourceWindowId = queuedEvent.getPortletWindowId();
        final Event event = queuedEvent.getEvent();

        final boolean globalEvent = isGlobalEvent(request, sourceWindowId, event);

        final Set<IPortletDefinition> portletDefinitions = new LinkedHashSet<IPortletDefinition>();
        if (globalEvent) {
            portletDefinitions.addAll(this.portletDefinitionRegistry.getAllPortletDefinitions());
        }

        //Check each subscription to see what events it is registered to see
        for (final Iterator<String> layoutNodeIdItr = allLayoutNodeIds.iterator(); layoutNodeIdItr.hasNext();) {
            final String layoutNodeId = layoutNodeIdItr.next();

            IPortletEntity portletEntity = portletEntityCache.get(layoutNodeId);
            if (portletEntity == null) {
                portletEntity = this.portletEntityRegistry.getOrCreatePortletEntity(request, userInstance,
                        layoutNodeId);

                // if portlet entity registry returned null, then portlet has been deleted - remove it (see UP-3378)
                if (portletEntity == null) {
                    layoutNodeIdItr.remove();
                    continue;
                }

                final IPortletDefinitionId portletDefinitionId = portletEntity.getPortletDefinitionId();
                final PortletDefinition portletDescriptor = this.portletDefinitionRegistry
                        .getParentPortletDescriptor(portletDefinitionId);
                if (portletDescriptor == null) {
                    //Misconfigured portlet, remove it from the list so we don't check it again and ignore it
                    layoutNodeIdItr.remove();
                    continue;
                }

                final List<? extends EventDefinitionReference> supportedProcessingEvents = portletDescriptor
                        .getSupportedProcessingEvents();
                //Skip portlets that don't handle any events and remove them from the set so they are not checked again
                if (supportedProcessingEvents == null || supportedProcessingEvents.size() == 0) {
                    layoutNodeIdItr.remove();
                    continue;
                }

                portletEntityCache.put(layoutNodeId, portletEntity);
            }

            final IPortletDefinition portletDefinition = portletEntity.getPortletDefinition();
            final IPortletDefinitionId portletDefinitionId = portletDefinition.getPortletDefinitionId();
            if (this.supportsEvent(event, portletDefinitionId)) {
                this.logger.debug("{} supports event {}", portletDefinition, event);

                //If this is the default portlet entity remove the definition from the all defs set to avoid duplicate processing
                final IPortletEntity defaultPortletEntity = this.portletEntityRegistry
                        .getOrCreateDefaultPortletEntity(request, portletDefinitionId);
                if (defaultPortletEntity.equals(portletEntity)) {
                    portletDefinitions.remove(portletDefinition);
                }

                // Is this portlet permitted to receive events?  (Or is it disablePortletEvents=true?)
                IPortletDefinitionParameter disablePortletEvents = portletDefinition
                        .getParameter(PortletExecutionManager.DISABLE_PORTLET_EVENTS_PARAMETER);
                if (disablePortletEvents != null && Boolean.parseBoolean(disablePortletEvents.getValue())) {
                    logger.info("Ignoring portlet events for portlet '{}' because they have been disabled.",
                            portletDefinition.getFName());
                    continue;
                }

                final IPortletEntityId portletEntityId = portletEntity.getPortletEntityId();
                final Set<IPortletWindow> portletWindows = this.portletWindowRegistry
                        .getAllPortletWindowsForEntity(request, portletEntityId);

                for (final IPortletWindow portletWindow : portletWindows) {
                    this.logger.debug("{} resolved target {}", event, portletWindow);
                    final IPortletWindowId portletWindowId = portletWindow.getPortletWindowId();
                    final Event unmarshalledEvent = this.unmarshall(portletWindow, event);
                    portletEventQueue.offerEvent(portletWindowId,
                            new QueuedEvent(sourceWindowId, unmarshalledEvent));
                }
            } else {
                portletDefinitions.remove(portletDefinition);
            }
        }

        if (!portletDefinitions.isEmpty()) {
            final IPerson user = userInstance.getPerson();
            final EntityIdentifier ei = user.getEntityIdentifier();
            final IAuthorizationPrincipal ap = AuthorizationService.instance().newPrincipal(ei.getKey(),
                    ei.getType());

            //If the event is global there might still be portlet definitions that need targeting
            for (final IPortletDefinition portletDefinition : portletDefinitions) {

                // Is this portlet permitted to receive events?  (Or is it disablePortletEvents=true?)
                IPortletDefinitionParameter disablePortletEvents = portletDefinition
                        .getParameter(PortletExecutionManager.DISABLE_PORTLET_EVENTS_PARAMETER);
                if (disablePortletEvents != null && Boolean.parseBoolean(disablePortletEvents.getValue())) {
                    logger.info("Ignoring portlet events for portlet '{}' because they have been disabled.",
                            portletDefinition.getFName());
                    continue;
                }

                final IPortletDefinitionId portletDefinitionId = portletDefinition.getPortletDefinitionId();
                //Check if the user can render the portlet definition before doing event tests
                if (ap.canRender(portletDefinitionId.getStringId())) {
                    if (this.supportsEvent(event, portletDefinitionId)) {
                        this.logger.debug("{} supports event {}", portletDefinition, event);

                        final IPortletEntity portletEntity = this.portletEntityRegistry
                                .getOrCreateDefaultPortletEntity(request, portletDefinitionId);
                        final IPortletEntityId portletEntityId = portletEntity.getPortletEntityId();
                        final Set<IPortletWindow> portletWindows = this.portletWindowRegistry
                                .getAllPortletWindowsForEntity(request, portletEntityId);

                        for (final IPortletWindow portletWindow : portletWindows) {
                            this.logger.debug("{} resolved target {}", event, portletWindow);
                            final IPortletWindowId portletWindowId = portletWindow.getPortletWindowId();
                            final Event unmarshalledEvent = this.unmarshall(portletWindow, event);
                            portletEventQueue.offerEvent(portletWindowId,
                                    new QueuedEvent(sourceWindowId, unmarshalledEvent));
                        }
                    }
                }
            }
        }
    }
}

From source file: edu.umn.cs.spatialHadoop.nasa.StockQuadTree.java

/**
 * Constructs a stock quad tree for the given resolution
 * @param resolution
 */
StockQuadTree(int resolution) {
    this.resolution = resolution;
    this.r = new int[resolution * resolution];
    final int[] z = new int[resolution * resolution];
    // The list of all nodes
    Vector<Node> nodes = new Vector<Node>();

    // Compute the Z-order of all values
    for (int i = 0; i < z.length; i++) {
        short x = (short) (i % resolution);
        short y = (short) (i / resolution);
        int zorder = AggregateQuadTree.computeZOrder(x, y);
        z[i] = zorder;
        r[i] = i;
    }

    // Sort z by Z-order and keep the original position of each element
    // by mirroring all swaps to r
    new QuickSort().sort(new IndexedSortable() {
        @Override
        public void swap(int i, int j) {
            int temp;
            // Swap z-values (which are to be sorted)
            temp = z[i];
            z[i] = z[j];
            z[j] = temp;

            // Swap their relative positions in the other array
            temp = r[i];
            r[i] = r[j];
            r[j] = temp;
        }

        @Override
        public int compare(int i, int j) {
            return z[i] - z[j];
        }
    }, 0, z.length);

    // Construct the structure of the quad tree based on Z-values
    // Maximum number of values per node. Set it to a very small number to
    // construct as many levels as possible. Notice that when quad trees
    // are aggregated, a single value might become 366 values in the same pos.
    final int capacity = 100;
    Node root = new Node();
    root.startPosition = 0;
    root.endPosition = z.length;
    root.id = 1;
    Queue<Node> nodesToCheckForSplit = new ArrayDeque<Node>();
    nodesToCheckForSplit.add(root);
    int numOfSignificantBitsInTree = getNumOfSignificantBits(resolution * resolution - 1);
    if ((numOfSignificantBitsInTree & 1) == 1)
        numOfSignificantBitsInTree++; // Round to next even value
    int maxId = 0;
    while (!nodesToCheckForSplit.isEmpty()) {
        Node nodeToCheckForSplit = nodesToCheckForSplit.poll();
        boolean needsToSplit = nodeToCheckForSplit.getNumOfElements() > capacity;
        if (nodeToCheckForSplit.id > maxId)
            maxId = nodeToCheckForSplit.id;
        nodes.add(nodeToCheckForSplit);
        if (needsToSplit) {
            // Need to split
            // Determine split points based on the Z-order values of the first and
            // last elements in this node
            int depth = nodeToCheckForSplit.id == 0 ? 0
                    : (getNumOfSignificantBits(nodeToCheckForSplit.id - 1) / 2 + 1);
            depth = (getNumOfSignificantBits(nodeToCheckForSplit.id) - 1) / 2;
            int numOfSignificantBitsInNode = numOfSignificantBitsInTree - depth * 2;

            // Create four child nodes under this node
            int zOrderCommonBits = z[nodeToCheckForSplit.startPosition]
                    & (0xffffffff << numOfSignificantBitsInNode);
            int childStartPosition = nodeToCheckForSplit.startPosition;
            for (int iChild = 0; iChild < 4; iChild++) {
                int zOrderUpperBound = zOrderCommonBits + ((iChild + 1) << (numOfSignificantBitsInNode - 2));
                int childEndPosition = Arrays.binarySearch(z, childStartPosition,
                        nodeToCheckForSplit.endPosition, zOrderUpperBound);
                if (childEndPosition < 0)
                    childEndPosition = -(childEndPosition + 1);
                Node child = new Node();
                child.startPosition = childStartPosition;
                child.endPosition = childEndPosition;
                child.id = nodeToCheckForSplit.id * 4 + iChild;
                nodesToCheckForSplit.add(child);
                // Prepare for next iteration
                childStartPosition = childEndPosition;
            }
            if (childStartPosition != nodeToCheckForSplit.endPosition)
                throw new RuntimeException();
        }
    }
    // Convert nodes to column format for memory efficiency
    nodesID = new int[nodes.size()];
    nodesStartPosition = new int[nodes.size()];
    nodesEndPosition = new int[nodes.size()];

    for (int i = 0; i < nodes.size(); i++) {
        Node node = nodes.get(i);
        nodesID[i] = node.id;
        nodesStartPosition[i] = node.startPosition;
        nodesEndPosition[i] = node.endPosition;
    }
}

From source file: org.voltdb.iv2.Cartographer.java

private boolean doPartitionsHaveReplicas(int hid) {
    hostLog.debug("Cartographer: Reloading partition information.");
    List<String> partitionDirs = null;
    try {
        partitionDirs = m_zk.getChildren(VoltZK.leaders_initiators, null);
    } catch (KeeperException | InterruptedException e) {
        return false;
    }

    //Don't fetch the values serially; do it asynchronously
    Queue<ZKUtil.ByteArrayCallback> dataCallbacks = new ArrayDeque<>();
    Queue<ZKUtil.ChildrenCallback> childrenCallbacks = new ArrayDeque<>();
    for (String partitionDir : partitionDirs) {
        String dir = ZKUtil.joinZKPath(VoltZK.leaders_initiators, partitionDir);
        try {
            ZKUtil.ByteArrayCallback callback = new ZKUtil.ByteArrayCallback();
            m_zk.getData(dir, false, callback, null);
            dataCallbacks.offer(callback);
            ZKUtil.ChildrenCallback childrenCallback = new ZKUtil.ChildrenCallback();
            m_zk.getChildren(dir, false, childrenCallback, null);
            childrenCallbacks.offer(childrenCallback);
        } catch (Exception e) {
            return false;
        }
    }
    //Assume that we are k-safe
    for (String partitionDir : partitionDirs) {
        int pid = LeaderElector.getPartitionFromElectionDir(partitionDir);
        try {
            //Don't let anyone die if someone is in INITIALIZING state
            byte[] partitionState = dataCallbacks.poll().getData();
            if (partitionState != null && partitionState.length == 1) {
                if (partitionState[0] == LeaderElector.INITIALIZING) {
                    return false;
                }
            }

            List<String> replicas = childrenCallbacks.poll().getChildren();
            //This is here just so callback is polled.
            if (pid == MpInitiator.MP_INIT_PID) {
                continue;
            }
            //Get Hosts for replicas
            final List<Integer> replicaHost = new ArrayList<>();
            boolean hostHasReplicas = false;
            for (String replica : replicas) {
                final String split[] = replica.split("/");
                final long hsId = Long.valueOf(split[split.length - 1].split("_")[0]);
                final int hostId = CoreUtils.getHostIdFromHSId(hsId);
                if (hostId == hid) {
                    hostHasReplicas = true;
                }
                replicaHost.add(hostId);
            }
            hostLog.debug("Replica Host for Partition " + pid + " " + replicaHost);
            if (hostHasReplicas && replicaHost.size() <= 1) {
                return false;
            }
        } catch (InterruptedException | KeeperException | NumberFormatException e) {
            return false;
        }
    }
    return true;
}