Example usage for com.google.common.collect Multimap size

List of usage examples for com.google.common.collect Multimap size

Introduction

This page shows example usage of com.google.common.collect Multimap size.

Prototype

int size();

Document

Returns the number of key-value pairs in this multimap.
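
Note that size() counts key-value pairs rather than distinct keys, so it can exceed keySet().size(); whether duplicate pairs are counted twice depends on the implementation. A minimal sketch of this contract (class name and values hypothetical):

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class MultimapSizeSketch {
    public static void main(String[] args) {
        Multimap<String, Integer> listBacked = ArrayListMultimap.create();
        listBacked.put("a", 1);
        listBacked.put("a", 2);
        listBacked.put("a", 2); // list-backed multimaps keep duplicate key-value pairs
        listBacked.put("b", 3);
        System.out.println(listBacked.size());          // 4 key-value pairs
        System.out.println(listBacked.keySet().size()); // 2 distinct keys

        Multimap<String, Integer> setBacked = HashMultimap.create();
        setBacked.put("a", 1);
        setBacked.put("a", 1); // set-backed multimaps collapse duplicate pairs
        System.out.println(setBacked.size());           // 1
    }
}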

Usage

From source file:nl.sidn.pcap.PcapReader.java

/**
 * Clear expired cache entries in order to avoid memory problems 
 */
public void clearCache(int tcpFlowCacheTimeout, int fragmentedIPcacheTimeout) {
    //clear tcp flows with expired packets
    List<TCPFlow> expiredList = new ArrayList<>();
    long now = System.currentTimeMillis();
    Multimap<TCPFlow, SequencePayload> flows = tcpDecoder.getFlows();
    for (TCPFlow flow : flows.keySet()) {
        Collection<SequencePayload> payloads = flows.get(flow);
        for (SequencePayload sequencePayload : payloads) {
            if ((sequencePayload.getTime() + tcpFlowCacheTimeout) <= now) {
                expiredList.add(flow);
                break;
            }
        }
    }

    //check IP datagrams
    List<Datagram> dgExpiredList = new ArrayList<>();

    for (Datagram dg : ipDecoder.getDatagrams().keySet()) {
        if ((dg.getTime() + fragmentedIPcacheTimeout) <= now) {
            dgExpiredList.add(dg);
        }
    }

    LOG.info("------------- Cache purge stats --------------");
    LOG.info("TCP flow cache size: " + flows.size());
    LOG.info("IP datagram cache size: " + ipDecoder.getDatagrams().size());
    LOG.info("Expired (to be removed) TCP flows: " + expiredList.size());
    LOG.info("Expired (to be removed) IP datagrams: " + dgExpiredList.size());
    LOG.info("----------------------------------------------------");

    //remove flows with expired packets
    for (TCPFlow tcpFlow : expiredList) {
        flows.removeAll(tcpFlow);
    }

    for (Datagram dg : dgExpiredList) {
        ipDecoder.getDatagrams().removeAll(dg);
    }

}
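
Two details are worth noting here. Expired flows are collected into expiredList during iteration and only removed afterwards with removeAll(...), because removing entries while iterating over keySet() of a hash-based Guava multimap would throw a ConcurrentModificationException. Also, the logged "TCP flow cache size" is flows.size(), the number of flow/payload pairs; the number of distinct flows would be flows.keySet().size().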

From source file:org.gatein.wsrp.producer.handlers.PortletManagementHandler.java

public DestroyPortletsResponse destroyPortlets(DestroyPortlets destroyPortlets)
        throws InconsistentParameters, InvalidRegistration, MissingParameters, ModifyRegistrationRequired,
        OperationFailed, OperationNotSupported, ResourceSuspended {
    WSRP2ExceptionFactory.throwOperationFailedIfValueIsMissing(destroyPortlets, "DestroyPortlets");

    List<String> handles = destroyPortlets.getPortletHandles();
    WSRP2ExceptionFactory.throwMissingParametersIfValueIsMissing(handles, "portlet handles to be destroyed",
            "DestroyPortlets");
    handles = WSRPUtils.replaceByEmptyListIfNeeded(handles);

    Registration registration = producer
            .getRegistrationOrFailIfInvalid(destroyPortlets.getRegistrationContext());

    List<org.gatein.pc.api.PortletContext> portletContexts = new ArrayList<org.gatein.pc.api.PortletContext>(
            handles.size());
    for (String handle : handles) {
        portletContexts.add(org.gatein.pc.api.PortletContext.createPortletContext(handle));
    }

    try {
        RegistrationLocal.setRegistration(registration);
        List<DestroyCloneFailure> failuresList = producer.getPortletInvoker().destroyClones(portletContexts);
        int failuresNumber = failuresList.size();
        List<FailedPortlets> failedPortlets;
        if (failuresNumber > 0) {
            // for each reason of failure, record the associated portlet handles, expecting one portlet handle per message
            Multimap<String, String> reasonToHandles = HashMultimap.create(failuresNumber, 1);
            for (DestroyCloneFailure failure : failuresList) {
                reasonToHandles.put(failure.getMessage(), failure.getPortletId());
            }

            // create one FailedPortlets object per distinct failure reason; size() counts
            // key-value pairs, so it is only an upper bound on the capacity needed here
            failedPortlets = new ArrayList<FailedPortlets>(reasonToHandles.size());
            for (String reason : reasonToHandles.keySet()) { // keySet() yields each reason once; keys() would repeat it per handle
                failedPortlets.add(WSRPTypeFactory.createFailedPortlets(reasonToHandles.get(reason),
                        ErrorCodes.Codes.OPERATIONFAILED, reason));
            }
        } else {
            failedPortlets = null;
        }

        return WSRPTypeFactory.createDestroyPortletsResponse(failedPortlets);
    } catch (PortletInvokerException e) {
        throw WSRP2ExceptionFactory.throwWSException(OperationFailed.class, "Failed to destroy clones", e);
    } finally {
        RegistrationLocal.setRegistration(null);
    }
}
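
This handler sizes the result list with reasonToHandles.size() but iterates reasonToHandles.keySet(); the two views count differently, since keys() is a multiset with one occurrence per key-value pair (so keys().size() == size()), while keySet() holds each distinct key once. A small sketch (class name, reasons, and handles hypothetical):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class ReasonToHandlesSketch {
    public static void main(String[] args) {
        Multimap<String, String> reasonToHandles = HashMultimap.create();
        reasonToHandles.put("timeout", "portlet-1");
        reasonToHandles.put("timeout", "portlet-2");
        reasonToHandles.put("not registered", "portlet-3");

        System.out.println(reasonToHandles.size());          // 3, one per key-value pair
        System.out.println(reasonToHandles.keys().size());   // 3, "timeout" appears twice
        System.out.println(reasonToHandles.keySet().size()); // 2, distinct failure reasons
    }
}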

From source file:org.corpus_tools.peppermodules.annis.Salt2ANNISMapper.java

@Override
public DOCUMENT_STATUS mapSDocument() {

    this.preorderTable = new ConcurrentHashMap<>();
    this.postorderTable = new ConcurrentHashMap<>();
    prePostOrder = 0L;

    numberOfMappedNodes.set(0);

    if (this.getDocument() == null || this.getDocument().getDocumentGraph() == null) {
        throw new PepperModuleException(this, "Cannot map sDocumentGraph, because sDocumentGraph is null.");
    }

    { // start traversal of the document structure

        try {

            if (this.getDocument().getDocumentGraph().getNodes() != null) {
                this.numberOfDocumentNodes = this.getDocument().getDocumentGraph().getNodes().size();
            }

            /**
             * traverse by SpanningRelations: DOCUMENT_STRUCTURE_CR
             * DominanceRelations: DOCUMENT_STRUCTURE_DR PointingRelations:
             * DOCUMENT_STRUCTURE_PR
             *
             * DominanceRelations Subcomponents: DOCUMENT_STRUCTURE_DR_SUB
             * PointingRelations Subcomponents: DOCUMENT_STRUCTURE_PR_SUB
             *
             * Dominance relations may consist of different subcomponents since
             * there are "edge" and "secedge" types
             *
             * Since every root node has its own component, the pre and post order
             * needs to be 0 for the root node. You need to handle this.
             */
            List<? extends SNode> sRelationRoots;
            Multimap<String, SNode> subComponentRoots;
            //        Map<String, List<SNode>> subComponentRoots;

            Map<SToken, Long> token2Index = calculateToken2Index(getDocument().getDocumentGraph());

            // START Step 1: map SOrderRelation
            subComponentRoots = this.getDocument().getDocumentGraph()
                    .getRootsByRelationType(SALT_TYPE.SORDER_RELATION);
            if (subComponentRoots != null) {
                if (subComponentRoots.size() > 0) {
                    for (Entry<String, SNode> entry : subComponentRoots.entries()) {
                        SRelation2ANNISMapper sOrderRelationMapper = new SOrderRelation2ANNISMapper(
                                getIdManager(), getDocument().getDocumentGraph(), token2Index, tw_node,
                                tw_nodeAnno, tw_rank, tw_edgeAnno, tw_component, this);

                        String traversionType = entry.getKey();
                        if (SaltUtil.SALT_NULL_VALUE.equals(traversionType)) {
                            traversionType = "default_seg";
                        }
                        sOrderRelationMapper.setTraversionSType(traversionType);
                        sOrderRelationMapper.mapSRelations2ANNIS(subComponentRoots.get(entry.getKey()),
                                SALT_TYPE.SORDER_RELATION, null);

                    }
                }
            }
            // END Step 1: map SOrderRelation

            // also map the timeline (by creating a virtual tokenization if necessary)
            STimelineRelation2ANNISMapper timelineMapper = new STimelineRelation2ANNISMapper(getIdManager(),
                    getDocument().getDocumentGraph(), token2Index, tw_node, tw_nodeAnno, tw_rank, tw_edgeAnno,
                    tw_component, this, mergeTextsWithTimeline);
            timelineMapper.run();

            // START Step 2: map SText
            if (idManager.hasVirtualTokenization()) {
                Long sDocID;
                Long textId = 0L;
                String sDocumentElementId = this.getDocument().getId();

                if (sDocumentElementId == null) {
                    throw new PepperModuleException(this,
                            "SId Id of the document '" + this.getDocument().getName() + "' is NULL!");
                }
                sDocID = this.idManager.getNewCorpusTabId(sDocumentElementId);
                String textName = "sText0";
                String textContent = Strings.repeat(" ", idManager.getNumberOfVirtualToken());
                ArrayList<String> tuple = new ArrayList<>();
                tuple.add(sDocID.toString());
                tuple.add(textId.toString());
                tuple.add(textName);
                tuple.add(textContent);

                long transactionId = tw_text.beginTA();
                try {
                    tw_text.addTuple(transactionId, tuple);
                    tw_text.commitTA(transactionId);

                } catch (FileNotFoundException e) {
                    tw_text.abortTA(transactionId);
                    throw new PepperModuleException(this,
                            "Could not write to the node.tab, exception was" + e.getMessage());
                }
            } else {
                this.mapSText();
            }

            ExecutorService exec = null;
            if (mapRelationsInParallel) {
                exec = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
            }

            subComponentRoots = getDocument().getDocumentGraph()
                    .getRootsByRelationType(SALT_TYPE.SPOINTING_RELATION);
            if (subComponentRoots != null) {
                //System.out.println("The Pointing relation graphs have "+ subComponentRoots.size() + " STypes.");
                if (subComponentRoots.size() > 0) {

                    for (String key : subComponentRoots.keySet()) {
                        //System.out.println("Count of PR roots for key "+key+" : "+subComponentRoots.get(key).size());
                        //System.out.println("Mapping PointingRelation subcomponents with sType: "+key);
                        SRelation2ANNISMapper sPointingSubRelationMapper = new SPointingRelation2ANNISMapper(
                                getIdManager(), getDocument().getDocumentGraph(), token2Index, tw_node,
                                tw_nodeAnno, tw_rank, tw_edgeAnno, tw_component, this);
                        sPointingSubRelationMapper.mapSRelations2ANNIS(subComponentRoots.get(key),
                                SALT_TYPE.SPOINTING_RELATION, TRAVERSION_TYPE.DOCUMENT_STRUCTURE_PR);
                        sPointingSubRelationMapper.setTraversionSType(key);
                        if (exec != null) {
                            exec.execute(sPointingSubRelationMapper);
                        } else {
                            sPointingSubRelationMapper.run();
                        }
                    }
                } else {
                    //System.out.println("No PointingRelation components found (empty map)");
                }
            } else {
                //System.out.println("No PointingRelation components found (null map)");
            }
            // END Step 2: map SPointingRelations

            // START Step 3: map SDominanceRelations
            sRelationRoots = this.getDocument().getDocumentGraph()
                    .getRootsByRelation(SALT_TYPE.SDOMINANCE_RELATION);
            if (sRelationRoots != null) {
                if (sRelationRoots.size() > 0) {
                    SRelation2ANNISMapper sDominanceRelationMapper = new SDominanceRelation2ANNISMapper(
                            getIdManager(), getDocument().getDocumentGraph(), token2Index, tw_node, tw_nodeAnno,
                            tw_rank, tw_edgeAnno, tw_component, this);
                    sDominanceRelationMapper.mapSRelations2ANNIS(sRelationRoots, SALT_TYPE.SDOMINANCE_RELATION,
                            TRAVERSION_TYPE.DOCUMENT_STRUCTURE_DR);
                    if (exec != null) {
                        exec.execute(sDominanceRelationMapper);
                    } else {
                        sDominanceRelationMapper.run();
                    }
                }
            }
            // END Step 3: map SDominanceRelations

            // START Step 3.1 : map the subComponents of the SDominanceRelations
            subComponentRoots = getDocument().getDocumentGraph()
                    .getRootsByRelationType(SALT_TYPE.SDOMINANCE_RELATION);
            if (subComponentRoots != null) {
                //System.out.println("The Dominance relation graphs have "+ subComponentRoots.size() + " STypes.");
                if (subComponentRoots.size() > 0) {

                    Set<String> domComponentTypeNames = subComponentRoots.keySet();

                    // only output the named relation types if the user has not chosen to
                    // exclude the single dominance type or if there are at least 2 named types
                    if (!((ANNISExporterProperties) this.getProperties()).getExcludeSingleDomType()
                            || domComponentTypeNames.size() >= 2) {
                        for (String key : domComponentTypeNames) {

                            if (!SaltUtil.SALT_NULL_VALUE.equals(key)) {

                                SRelation2ANNISMapper sDominanceSubRelationMapper = new SDominanceRelation2ANNISMapper(
                                        getIdManager(), getDocument().getDocumentGraph(), token2Index, tw_node,
                                        tw_nodeAnno, tw_rank, tw_edgeAnno, tw_component, this);
                                sDominanceSubRelationMapper.setTraversionSType(key);
                                sDominanceSubRelationMapper.mapSRelations2ANNIS(subComponentRoots.get(key),
                                        SALT_TYPE.SDOMINANCE_RELATION, TRAVERSION_TYPE.DOCUMENT_STRUCTURE_DR);
                                if (exec != null) {
                                    exec.execute(sDominanceSubRelationMapper);
                                } else {
                                    sDominanceSubRelationMapper.run();
                                }
                            }
                        }
                    }
                } else {
                    //System.out.println("No DominanceRelation subcomponents found (empty map)");
                }
            } else {
                //System.out.println("No DominanceRelation subcomponents found (null map)");
            }
            // END Step 3.1 : map the subComponents of the SDominanceRelations

            // START Step 4: map SSpanningrelations
            sRelationRoots = this.getDocument().getDocumentGraph()
                    .getRootsByRelation(SALT_TYPE.SSPANNING_RELATION);
            if (sRelationRoots != null) {
                if (sRelationRoots.size() > 0) {
                    SRelation2ANNISMapper spanningRelationMapper = new SSpanningRelation2ANNISMapper(
                            getIdManager(), getDocument().getDocumentGraph(), token2Index, tw_node, tw_nodeAnno,
                            tw_rank, tw_edgeAnno, tw_component, this);
                    spanningRelationMapper.mapSRelations2ANNIS(sRelationRoots, SALT_TYPE.SSPANNING_RELATION,
                            TRAVERSION_TYPE.DOCUMENT_STRUCTURE_CR);
                    if (exec != null) {
                        exec.execute(spanningRelationMapper);
                    } else {
                        spanningRelationMapper.run();
                    }
                }
            }
            // END Step 4: map SSpanningrelations

            // START Step 5: map SMedialRelations
            sRelationRoots = this.getDocument().getDocumentGraph().getTokens();
            if (sRelationRoots != null) {
                if (sRelationRoots.size() > 0) {
                    SRelation2ANNISMapper audioRelationMapper = new Audio2ANNISMapper(getIdManager(),
                            getDocument().getDocumentGraph(), token2Index, tw_node, tw_nodeAnno, tw_rank,
                            tw_edgeAnno, tw_component, this);
                    audioRelationMapper.mapSRelations2ANNIS(sRelationRoots,
                            SALT_TYPE.STIME_OVERLAPPING_RELATION, TRAVERSION_TYPE.DOCUMENT_STRUCTURE_AUDIO);
                    if (exec != null) {
                        exec.execute(audioRelationMapper);
                    } else {
                        audioRelationMapper.run();
                    }
                }
            }
            // END Step 5: map SMedialRelations

            if (exec != null) {
                exec.shutdown();
                while (!exec.awaitTermination(60, TimeUnit.SECONDS)) {
                    // wait to finish
                }
            }

            // START Step 6: map all SToken which were not mapped, yet
            SRelation2ANNISMapper mapper = new SSpanningRelation2ANNISMapper(getIdManager(),
                    getDocument().getDocumentGraph(), token2Index, tw_node, tw_nodeAnno, tw_rank, tw_edgeAnno,
                    tw_component, this);
            mapper.beginTransaction();
            for (SNode node : getDocument().getDocumentGraph().getTokens()) {
                if (this.idManager.getVirtualisedSpanId(node.getId()) == null) {
                    mapper.mapSNode(node);
                }
            }
            mapper.commitTransaction();
            // END Step 6: map all SToken which were not mapped, yet

        } catch (PepperModuleException | InterruptedException e) {
            throw new PepperModuleException(this,
                    "An error occurred while traversing the document structure graph.", e);
        }
    } // end traversal of the document structure

    mergeLocalStatsIntoGlobal();

    setProgress(1.0);
    return DOCUMENT_STATUS.COMPLETED;
}
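
Throughout this mapper, subComponentRoots.size() > 0 guards the mapping loops; with Guava multimaps the equivalent !subComponentRoots.isEmpty() reads more directly, and both checks are constant-time because the standard Guava implementations track their entry count.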

From source file:com.github.cbismuth.fdupes.stream.DuplicatesFinder.java

public void extractDuplicates(final Collection<PathElement> input, final Set<PathElement> uniqueElements,
        final Multimap<PathElement, PathElement> duplicates) {
    Preconditions.checkNotNull(input, "null file metadata collection");

    LOGGER.info("Pass 1/3 - compare file by size ...");
    final Collection<PathElement> duplicatesBySize = duplicateFinderByKey.getDuplicates(input,
            PathElement::size, uniqueElements);
    getMetricRegistry().register(name("duplicates", "by-size", "count"),
            (Gauge<Integer>) duplicatesBySize::size);
    LOGGER.info("Pass 1/3 - compare file by size completed! - {} duplicate(s) found", duplicatesBySize.size());

    LOGGER.info("Pass 2/3 - compare file by MD5 ...");
    final Collection<PathElement> duplicatesByMd5 = duplicateFinderByKey.getDuplicates(duplicatesBySize,
            md5Computer::compute, uniqueElements);
    getMetricRegistry().register(name("duplicates", "by-md5", "count"), (Gauge<Integer>) duplicatesByMd5::size);
    LOGGER.info("Pass 2/3 - compare file by MD5 completed! - {} duplicate(s) found", duplicatesByMd5.size());

    LOGGER.info("Pass 3/3 - compare file byte-by-byte ...");
    final BufferedAnalyzer analyzer = new BufferedAnalyzer(pathComparator, systemPropertyGetter);
    analyzer.analyze(duplicatesByMd5, uniqueElements, duplicates);
    getMetricRegistry().register(name("duplicates", "by-bytes", "count"), (Gauge<Integer>) duplicates::size);
    LOGGER.info("Pass 3/3 - compare file byte-by-byte completed! - {} duplicate(s) found", duplicates.size());
}
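
Each pass registers a Gauge backed by a method reference to size() on the corresponding collection, so the metric reports the live count whenever it is read rather than a snapshot taken at registration time. For the final pass this is duplicates.size(), the total number of original-to-duplicate pairs in the multimap.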

From source file:org.waveprotocol.box.server.search.MemorySearchImpl.java

@Inject
public MemorySearchImpl(final WaveMap waveMap) {
    // Let the view expire if it is not accessed for some time.
    explicitPerUserWaveViews = CacheBuilder.newBuilder()
            .expireAfterAccess(PER_USER_WAVES_VIEW_CACHE_MINUTES, TimeUnit.MINUTES)
            .build(new CacheLoader<ParticipantId, Multimap<WaveId, WaveletId>>() {

                @Override
                public Multimap<WaveId, WaveletId> load(ParticipantId user) throws Exception {
                    Multimap<WaveId, WaveletId> userView = HashMultimap.create();
                    // Create initial per user waves view by looping over all waves
                    // in the waves store.
                    ExceptionalIterator<WaveId, WaveServerException> waveIds = waveMap.getWaveIds();
                    while (waveIds.hasNext()) {
                        WaveId waveId = waveIds.next();
                        ImmutableSet<WaveletId> wavelets = waveMap.getWaveletIds(waveId);
                        for (WaveletId waveletId : wavelets) {
                            LocalWaveletContainer c = waveMap
                                    .getLocalWavelet(WaveletName.of(waveId, waveletId));
                            try {
                                if (!c.hasParticipant(user)) {
                                    continue;
                                }
                                // Add this wave to the user view.
                                userView.put(waveId, waveletId);
                            } catch (WaveletStateException e) {
                                LOG.warning("Failed to access wavelet " + c.getWaveletName(), e);
                            }
                        }
                    }
                    LOG.info("Initalized waves view for user: " + user.getAddress()
                            + ", number of waves in view: " + userView.size());
                    return userView;
                }
            });
}
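
Note that the logged "number of waves in view" is userView.size(), which counts (WaveId, WaveletId) entries, i.e. wavelets across all waves; the number of distinct waves in the view would be userView.keySet().size().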

From source file:org.waveprotocol.box.server.waveserver.MemoryPerUserWaveViewHandlerImpl.java

@Inject
public MemoryPerUserWaveViewHandlerImpl(final WaveMap waveMap) {
    // Let the view expire if it is not accessed for some time.
    explicitPerUserWaveViews = CacheBuilder.newBuilder()
            .expireAfterAccess(PER_USER_WAVES_VIEW_CACHE_MINUTES, TimeUnit.MINUTES)
            .<ParticipantId, Multimap<WaveId, WaveletId>>build(
                    new CacheLoader<ParticipantId, Multimap<WaveId, WaveletId>>() {

                        @Override
                        public Multimap<WaveId, WaveletId> load(final ParticipantId user) {
                            Multimap<WaveId, WaveletId> userView = HashMultimap.create();

                            // Create initial per user waves view by looping over all waves
                            // in the waves store.
                            Map<WaveId, Wave> waves = waveMap.getWaves();
                            for (Map.Entry<WaveId, Wave> entry : waves.entrySet()) {
                                Wave wave = entry.getValue();
                                for (WaveletContainer c : wave) {
                                    WaveletId waveletId = c.getWaveletName().waveletId;
                                    try {
                                        if (!c.hasParticipant(user)) {
                                            continue;
                                        }
                                        // Add this wave to the user view.
                                        userView.put(entry.getKey(), waveletId);
                                    } catch (WaveletStateException e) {
                                        LOG.warning("Failed to access wavelet " + c.getWaveletName(), e);
                                    }
                                }
                            }
                            LOG.info("Initalized waves view for user: " + user.getAddress()
                                    + ", number of waves in view: " + userView.size());
                            return userView;
                        }
                    });
}

From source file:de.hzi.helmholtz.Compare.PathwayComparisonUsingModules.java

public void pathwayComparison() {
    Multimap<Integer, Multimap<Double, String>> forward = pcompare(source, target);
    Multimap<Integer, Multimap<Double, String>> copyOfForward = ArrayListMultimap.create(forward); // make changes to this copy while combining genes
    Multimap<Integer, Multimap<Double, String>> reverse = pcompare(target, source);

    int nearestGene = GENE_MAX_DISTANCE;
    double overallScore = 0;

    String matchingGene;

    for (int i = 0; i < forward.size(); i++) {
        int currentIndex = i + 1; // gene ids are 1-based: the id of the current gene is i + 1
        nearestGene = GENE_MAX_DISTANCE; // re-assign to max distance

        boolean currentTargetGeneAssigned = false;

        String whichGeneInTarget = "-999";

        while (currentTargetGeneAssigned == false) {
            // collect best scores in the forward direction into a string array (each array element looks like 0.5=1+2)
            String[] target_scores = getmax(copyOfForward.get(currentIndex), reverse).toString().split("=");

            if (copyOfForward.get(currentIndex).size() > 0) {

                if (target_scores.length >= 1) {

                    double currentTargetGeneScore = Double.parseDouble(target_scores[0].toString().trim());
                    String currentTargetGeneCombination = target_scores[1].trim();
                    currentTargetGeneCombination = currentTargetGeneCombination + ";"; // append ';' to every candidate gene combination in the target
                    if (currentTargetGeneScore > 0) { // has a non-zero score
                        // collect individual gene combinations in a string array
                        String[] candidateTargetGenes = currentTargetGeneCombination.split(";");
                        for (String candidateTargetGene : candidateTargetGenes) { // are there multiple gene combinations in target having equal scores?
                            candidateTargetGene = candidateTargetGene + "+"; // append '+' to every gene and gene combination in the target

                            String[] genesInTargetCombination = candidateTargetGene.split("\\+");

                            String firstGeneInTargetCombination = genesInTargetCombination[0];
                            if (Integer.parseInt(firstGeneInTargetCombination) == currentIndex) {
                                //  if both are pointing to each other then assign
                                overallScore = currentTargetGeneScore;
                                whichGeneInTarget = candidateTargetGene;
                                currentTargetGeneAssigned = true;
                            }
                        }
                        if (currentTargetGeneAssigned == false) {
                            // no target gene wants to point to the query gene
                        }
                    }
                }
            } else {
                break;
            }

            // add results to finalresults
        }
        if (whichGeneInTarget.equalsIgnoreCase("-999")) {
            whichGeneInTarget = "no";
            overallScore = 0;
        }
    }
}
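
The loop bound forward.size() works here only if pcompare maps each gene id to exactly one inner multimap, making the pair count equal to the key count; if a gene id ever had several values, size() would exceed keySet().size() and currentIndex would run past the valid gene ids.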

From source file:io.prestosql.execution.scheduler.SourcePartitionedScheduler.java

@Override
public synchronized ScheduleResult schedule() {
    dropListenersFromWhenFinishedOrNewLifespansAdded();

    int overallSplitAssignmentCount = 0;
    ImmutableSet.Builder<RemoteTask> overallNewTasks = ImmutableSet.builder();
    List<ListenableFuture<?>> overallBlockedFutures = new ArrayList<>();
    boolean anyBlockedOnPlacements = false;
    boolean anyBlockedOnNextSplitBatch = false;
    boolean anyNotBlocked = false;

    for (Entry<Lifespan, ScheduleGroup> entry : scheduleGroups.entrySet()) {
        Lifespan lifespan = entry.getKey();
        ScheduleGroup scheduleGroup = entry.getValue();
        Set<Split> pendingSplits = scheduleGroup.pendingSplits;

        if (scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS
                || scheduleGroup.state == ScheduleGroupState.DONE) {
            verify(scheduleGroup.nextSplitBatchFuture == null);
        } else if (pendingSplits.isEmpty()) {
            // try to get the next batch
            if (scheduleGroup.nextSplitBatchFuture == null) {
                scheduleGroup.nextSplitBatchFuture = splitSource.getNextBatch(scheduleGroup.partitionHandle,
                        lifespan, splitBatchSize - pendingSplits.size());

                long start = System.nanoTime();
                addSuccessCallback(scheduleGroup.nextSplitBatchFuture, () -> stage.recordGetSplitTime(start));
            }

            if (scheduleGroup.nextSplitBatchFuture.isDone()) {
                SplitBatch nextSplits = getFutureValue(scheduleGroup.nextSplitBatchFuture);
                scheduleGroup.nextSplitBatchFuture = null;
                pendingSplits.addAll(nextSplits.getSplits());
                if (nextSplits.isLastBatch()) {
                    if (scheduleGroup.state == ScheduleGroupState.INITIALIZED && pendingSplits.isEmpty()) {
                        // Add an empty split in case no splits have been produced for the source.
                        // For source operators, they never take input, but they may produce output.
                        // This is well handled by Presto execution engine.
                        // However, there are certain non-source operators that may produce output without any input,
                        // for example, 1) an AggregationOperator, 2) a HashAggregationOperator where one of the grouping sets is ().
                        // Scheduling an empty split kicks off necessary driver instantiation to make this work.
                        pendingSplits
                                .add(new Split(splitSource.getConnectorId(), splitSource.getTransactionHandle(),
                                        new EmptySplit(splitSource.getConnectorId()), lifespan));
                    }
                    scheduleGroup.state = ScheduleGroupState.NO_MORE_SPLITS;
                }
            } else {
                overallBlockedFutures.add(scheduleGroup.nextSplitBatchFuture);
                anyBlockedOnNextSplitBatch = true;
                continue;
            }
        }

        Multimap<Node, Split> splitAssignment = ImmutableMultimap.of();
        if (!pendingSplits.isEmpty()) {
            if (!scheduleGroup.placementFuture.isDone()) {
                anyBlockedOnPlacements = true;
                continue;
            }

            if (scheduleGroup.state == ScheduleGroupState.INITIALIZED) {
                scheduleGroup.state = ScheduleGroupState.SPLITS_ADDED;
            }
            if (state == State.INITIALIZED) {
                state = State.SPLITS_ADDED;
            }

            // calculate placements for splits
            SplitPlacementResult splitPlacementResult = splitPlacementPolicy.computeAssignments(pendingSplits);
            splitAssignment = splitPlacementResult.getAssignments();

            // remove splits with successful placements
            splitAssignment.values().forEach(pendingSplits::remove); // AbstractSet.removeAll performs terribly here.
            overallSplitAssignmentCount += splitAssignment.size();

            // if not completely placed, mark the scheduleGroup as blocked on placement
            if (!pendingSplits.isEmpty()) {
                scheduleGroup.placementFuture = splitPlacementResult.getBlocked();
                overallBlockedFutures.add(scheduleGroup.placementFuture);
                anyBlockedOnPlacements = true;
            }
        }

        // if no new splits will be assigned, update state and attach completion event
        Multimap<Node, Lifespan> noMoreSplitsNotification = ImmutableMultimap.of();
        if (pendingSplits.isEmpty() && scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS) {
            scheduleGroup.state = ScheduleGroupState.DONE;
            if (!lifespan.isTaskWide()) {
                Node node = ((BucketedSplitPlacementPolicy) splitPlacementPolicy)
                        .getNodeForBucket(lifespan.getId());
                noMoreSplitsNotification = ImmutableMultimap.of(node, lifespan);
            }
        }

        // assign the splits with successful placements
        overallNewTasks.addAll(assignSplits(splitAssignment, noMoreSplitsNotification));

        // Assert that "placement future is not done" implies "pendingSplits is not empty".
        // The other way around is not true. One obvious reason is (un)lucky timing, where the placement is unblocked between `computeAssignments` and this line.
        // However, there are other reasons that could lead to this.
        // Note that `computeAssignments` is quite broken:
        // 1. It always returns a completed future when there are no tasks, regardless of whether all nodes are blocked.
        // 2. The returned future will only be completed when a node with an assigned task becomes unblocked. Other nodes don't trigger future completion.
        // As a result, to avoid busy loops caused by 1, we check pendingSplits.isEmpty() instead of placementFuture.isDone() here.
        if (scheduleGroup.nextSplitBatchFuture == null && scheduleGroup.pendingSplits.isEmpty()
                && scheduleGroup.state != ScheduleGroupState.DONE) {
            anyNotBlocked = true;
        }
    }

    // * `splitSource.isFinished` invocation may fail after `splitSource.close` has been invoked.
    //   If state is NO_MORE_SPLITS/FINISHED, splitSource.isFinished has previously returned true, and splitSource is closed now.
    // * Even if `splitSource.isFinished()` return true, it is not necessarily safe to tear down the split source.
    //   * If anyBlockedOnNextSplitBatch is true, it means we have not checked out the recently completed nextSplitBatch futures,
    //     which may contain recently published splits. We must not ignore those.
    //   * If any scheduleGroup is still in DISCOVERING_SPLITS state, it means it hasn't realized that there will be no more splits.
    //     Next time it invokes getNextBatch, it will realize that. However, the invocation will fail if we tear down splitSource now.
    if ((state == State.NO_MORE_SPLITS || state == State.FINISHED)
            || (noMoreScheduleGroups && scheduleGroups.isEmpty() && splitSource.isFinished())) {
        switch (state) {
        case INITIALIZED:
            // We have not scheduled a single split so far.
            // But this shouldn't be possible. See usage of EmptySplit in this method.
            throw new IllegalStateException("At least 1 split should have been scheduled for this plan node");
        case SPLITS_ADDED:
            state = State.NO_MORE_SPLITS;
            splitSource.close();
            // fall through
        case NO_MORE_SPLITS:
            state = State.FINISHED;
            whenFinishedOrNewLifespanAdded.set(null);
            // fall through
        case FINISHED:
            return new ScheduleResult(true, overallNewTasks.build(), overallSplitAssignmentCount);
        default:
            throw new IllegalStateException("Unknown state");
        }
    }

    if (anyNotBlocked) {
        return new ScheduleResult(false, overallNewTasks.build(), overallSplitAssignmentCount);
    }

    if (anyBlockedOnPlacements || groupedExecution) {
        // In a broadcast join, output buffers of the tasks in build source stage have to
        // hold onto all data produced before probe side task scheduling finishes,
        // even if the data is acknowledged by all known consumers. This is because
        // new consumers may be added until the probe side task scheduling finishes.
        //
        // As a result, the following line is necessary to prevent deadlock
        // due to neither build nor probe can make any progress.
        // The build side blocks due to a full output buffer.
        // In the meantime the probe side split cannot be consumed since
        // builder side hash table construction has not finished.
        overallNewTasks.addAll(finalizeTaskCreationIfNecessary());
    }

    ScheduleResult.BlockedReason blockedReason;
    if (anyBlockedOnNextSplitBatch) {
        blockedReason = anyBlockedOnPlacements ? MIXED_SPLIT_QUEUES_FULL_AND_WAITING_FOR_SOURCE
                : WAITING_FOR_SOURCE;
    } else {
        blockedReason = anyBlockedOnPlacements ? SPLIT_QUEUES_FULL : NO_ACTIVE_DRIVER_GROUP;
    }

    overallBlockedFutures.add(whenFinishedOrNewLifespanAdded);
    return new ScheduleResult(false, overallNewTasks.build(),
            nonCancellationPropagating(whenAnyComplete(overallBlockedFutures)), blockedReason,
            overallSplitAssignmentCount);
}
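
In this scheduler, splitAssignment.size() is the total number of assigned splits summed over all nodes (one key-value pair per node-split assignment), which is exactly what overallSplitAssignmentCount accumulates across schedule groups.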

From source file:org.onosproject.p4runtime.ctl.P4RuntimeClientImpl.java

private Collection<PiActionGroup> doDumpGroups(PiActionProfileId piActionProfileId, PiPipeconf pipeconf) {
    log.debug("Dumping groups from action profile {} from {} (pipeconf {})...", piActionProfileId.id(),
            deviceId, pipeconf.id());

    final P4InfoBrowser browser = PipeconfHelper.getP4InfoBrowser(pipeconf);
    if (browser == null) {
        log.warn("Unable to get a P4Info browser for pipeconf {}, aborting dump action profile", pipeconf);
        return Collections.emptySet();
    }

    final int actionProfileId;
    try {
        actionProfileId = browser.actionProfiles().getByName(piActionProfileId.id()).getPreamble().getId();
    } catch (P4InfoBrowser.NotFoundException e) {
        log.warn("Unable to dump groups: {}", e.getMessage());
        return Collections.emptySet();
    }

    // Prepare read request to read all groups from the given action profile.
    final ReadRequest groupRequestMsg = ReadRequest.newBuilder().setDeviceId(p4DeviceId)
            .addEntities(Entity.newBuilder()
                    .setActionProfileGroup(
                            ActionProfileGroup.newBuilder().setActionProfileId(actionProfileId).build())
                    .build())
            .build();

    // Read groups.
    final Iterator<ReadResponse> groupResponses;
    try {
        groupResponses = blockingStub.read(groupRequestMsg);
    } catch (StatusRuntimeException e) {
        log.warn("Unable to dump action profile {} from {}: {}", piActionProfileId, deviceId, e.getMessage());
        return Collections.emptySet();
    }

    final List<ActionProfileGroup> groupMsgs = Tools.stream(() -> groupResponses)
            .map(ReadResponse::getEntitiesList).flatMap(List::stream)
            .filter(entity -> entity.getEntityCase() == ACTION_PROFILE_GROUP).map(Entity::getActionProfileGroup)
            .collect(Collectors.toList());

    log.debug("Retrieved {} groups from action profile {} on {}...", groupMsgs.size(), piActionProfileId.id(),
            deviceId);

    // Returned groups contain only a minimal description of their members.
    // We need to issue a new request to get the full description of each member.

    // Keep a map of all member IDs for each group ID, will need it later.
    final Multimap<Integer, Integer> groupIdToMemberIdsMap = HashMultimap.create();
    groupMsgs.forEach(g -> groupIdToMemberIdsMap.putAll(g.getGroupId(), g.getMembersList().stream()
            .map(ActionProfileGroup.Member::getMemberId).collect(Collectors.toList())));

    // Prepare one big read request to read all members in one shot.
    final Set<Entity> entityMsgs = groupMsgs.stream().flatMap(g -> g.getMembersList().stream())
            .map(ActionProfileGroup.Member::getMemberId)
            // Prevent issuing many read requests for the same member.
            .distinct()
            .map(id -> ActionProfileMember.newBuilder().setActionProfileId(actionProfileId).setMemberId(id)
                    .build())
            .map(m -> Entity.newBuilder().setActionProfileMember(m).build()).collect(Collectors.toSet());
    final ReadRequest memberRequestMsg = ReadRequest.newBuilder().setDeviceId(p4DeviceId)
            .addAllEntities(entityMsgs).build();

    // Read members.
    final Iterator<ReadResponse> memberResponses;
    try {
        memberResponses = blockingStub.read(memberRequestMsg);
    } catch (StatusRuntimeException e) {
        log.warn("Unable to read members of action profile {} from {}: {}", piActionProfileId, deviceId,
                e.getMessage());
        return Collections.emptyList();
    }

    final Multimap<Integer, ActionProfileMember> groupIdToMembersMap = HashMultimap.create();
    Tools.stream(() -> memberResponses).map(ReadResponse::getEntitiesList).flatMap(List::stream)
            .filter(e -> e.getEntityCase() == ACTION_PROFILE_MEMBER).map(Entity::getActionProfileMember)
            .forEach(member -> groupIdToMemberIdsMap.asMap()
                    // Get all group IDs that contain this member.
                    .entrySet().stream().filter(entry -> entry.getValue().contains(member.getMemberId()))
                    .map(Map.Entry::getKey).forEach(gid -> groupIdToMembersMap.put(gid, member)));

    log.debug("Retrieved {} group members from action profile {} on {}...", groupIdToMembersMap.size(),
            piActionProfileId.id(), deviceId);

    return groupMsgs.stream().map(groupMsg -> {
        try {
            return ActionProfileGroupEncoder.decode(groupMsg, groupIdToMembersMap.get(groupMsg.getGroupId()),
                    pipeconf);
        } catch (P4InfoBrowser.NotFoundException | EncodeException e) {
            log.warn("Unable to decode group: {}\n {}", e.getMessage(), groupMsg);
            return null;
        }
    }).filter(Objects::nonNull).collect(Collectors.toList());
}
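
The count logged from groupIdToMembersMap.size() is the number of group-to-member associations, not the number of distinct members: a member shared by k groups contributes k entries to the multimap.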

From source file:io.prestosql.execution.SqlStageExecution.java

public synchronized Set<RemoteTask> scheduleSplits(Node node, Multimap<PlanNodeId, Split> splits,
        Multimap<PlanNodeId, Lifespan> noMoreSplitsNotification) {
    requireNonNull(node, "node is null");
    requireNonNull(splits, "splits is null");

    if (stateMachine.getState().isDone()) {
        return ImmutableSet.of();
    }
    splitsScheduled.set(true);

    checkArgument(stateMachine.getFragment().getPartitionedSources().containsAll(splits.keySet()),
            "Invalid splits");

    ImmutableSet.Builder<RemoteTask> newTasks = ImmutableSet.builder();
    Collection<RemoteTask> tasks = this.tasks.get(node);
    RemoteTask task;
    if (tasks == null) {
        // The output buffer depends on the task id starting from 0 and being sequential, since each
        // task is assigned a private buffer based on task id.
        TaskId taskId = new TaskId(stateMachine.getStageId(), nextTaskId.getAndIncrement());
        task = scheduleTask(node, taskId, splits, OptionalInt.empty());
        newTasks.add(task);
    } else {
        task = tasks.iterator().next();
        task.addSplits(splits);
    }
    if (noMoreSplitsNotification.size() > 1) {
        // The assumption that `noMoreSplitsNotification.size() <= 1` currently holds.
        // If this assumption no longer holds, we should consider calling task.noMoreSplits with multiple entries in one shot.
        // These kind of methods can be expensive since they are grabbing locks and/or sending HTTP requests on change.
        throw new UnsupportedOperationException(
                "This assumption no longer holds: noMoreSplitsNotification.size() < 1");
    }
    for (Entry<PlanNodeId, Lifespan> entry : noMoreSplitsNotification.entries()) {
        task.noMoreSplits(entry.getKey(), entry.getValue());
    }
    return newTasks.build();
}
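
Here noMoreSplitsNotification.size() counts (PlanNodeId, Lifespan) entries, so the guard enforces the documented assumption of at most one notification per call, and the entries() loop above it runs at most once.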