Example usage for com.google.common.collect Multimap isEmpty

Introduction

On this page you can find example usages of com.google.common.collect Multimap isEmpty.

Prototype

boolean isEmpty();

Document

Returns true if this multimap contains no key-value pairs.
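
A minimal sketch of this contract, assuming Guava's HashMultimap as the implementation (any Multimap implementation behaves the same way for isEmpty):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class MultimapIsEmptyDemo {
    public static void main(String[] args) {
        Multimap<String, String> multimap = HashMultimap.create();

        // A freshly created multimap contains no key-value pairs.
        System.out.println(multimap.isEmpty()); // true

        multimap.put("errors", "line 3: unexpected token");
        System.out.println(multimap.isEmpty()); // false

        // Removing all pairs (here via clear()) makes it empty again.
        multimap.clear();
        System.out.println(multimap.isEmpty()); // true
    }
}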

Usage
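
The examples below share a common shape: entries are accumulated into a Multimap and follow-up work is guarded by an isEmpty() check. A stripped-down sketch of that pattern; the validate method, class name, and message text are illustrative only, not taken from any example on this page:

import com.google.common.collect.LinkedListMultimap;
import com.google.common.collect.Multimap;

public class GuardedReportSketch {

    // Hypothetical check: records problems keyed by line number.
    private static void validate(String input, Multimap<Integer, String> problems) {
        if (input.trim().isEmpty()) {
            problems.put(1, "input is blank");
        }
    }

    public static void main(String[] args) {
        Multimap<Integer, String> problems = LinkedListMultimap.create();
        validate("   ", problems);

        // Only report when at least one problem was recorded.
        if (!problems.isEmpty()) {
            throw new IllegalStateException("validation failed: " + problems);
        }
    }
}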

From source file:no.ssb.vtl.test.junit.GrammarRule.java

/**
 * Parse an expression starting from the given <b>ANTLR rule</b>
 * <p>
 * In order to get the Rule, use the {@link #withRule(String)} method.
 *
 * @param expression the expression to parse.
 * @param rule       the rule to start from.
 * @param diagnostic {@link DiagnosticErrorListener} will be used if true.
 * @return the resulting parse tree.
 * @throws Exception if the expression failed to parse.
 */
public ParserRuleContext parse(String expression, Rule rule, boolean diagnostic) throws Exception {
    Multimap<Integer, String> messages = LinkedListMultimap.create();

    LexerInterpreter lexerInterpreter = grammar.createLexerInterpreter(new ANTLRInputStream(expression));
    GrammarParserInterpreter parserInterpreter = grammar
            .createGrammarParserInterpreter(new CommonTokenStream(lexerInterpreter));

    BaseErrorListener errorListener;
    if (diagnostic) {
        errorListener = new DiagnosticErrorListener();
    } else {
        errorListener = new ConsoleErrorListener();
    }

    BaseErrorListener ruleErrorReporter = new BaseErrorListener() {
        @Override
        public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol, int line,
                int charPositionInLine, String msg, org.antlr.v4.runtime.RecognitionException e) {
            int startLine = line, stopLine = line;
            int startColumn = charPositionInLine, stopColumn = charPositionInLine;
            if (offendingSymbol instanceof Token) {
                Token symbol = (Token) offendingSymbol;
                int start = symbol.getStartIndex();
                int stop = symbol.getStopIndex();
                if (start >= 0 && stop >= 0) {
                    stopColumn = startColumn + (stop - start) + 1;
                }
            }

            messages.put(stopLine,
                    String.format("at [%4s:%6s]:\t%s (%s)\n", String.format("%d,%d", startLine, stopLine),
                            String.format("%d,%d", startColumn, stopColumn), msg,
                            Optional.ofNullable(e).map(ex -> ex.getClass().getSimpleName()).orElse("null")));
        }
    };

    parserInterpreter.setErrorHandler(new GrammarParserInterpreter.BailButConsumeErrorStrategy());
    lexerInterpreter.removeErrorListeners();
    parserInterpreter.removeErrorListeners();

    lexerInterpreter.addErrorListener(errorListener);
    parserInterpreter.addErrorListener(errorListener);
    lexerInterpreter.addErrorListener(ruleErrorReporter);
    parserInterpreter.addErrorListener(ruleErrorReporter);

    ParserRuleContext parse = parserInterpreter.parse(rule.index);

    if (!messages.isEmpty()) {

        StringBuilder expressionWithErrors = new StringBuilder();
        LineNumberReader expressionReader = new LineNumberReader(new StringReader(expression));
        String line;
        while ((line = expressionReader.readLine()) != null) {
            int lineNumber = expressionReader.getLineNumber();
            expressionWithErrors.append(String.format("\t%d:%s%n", lineNumber, line));
            if (messages.containsKey(lineNumber)) {
                expressionWithErrors.append(String.format("%n"));
                for (String message : messages.get(lineNumber)) {
                    expressionWithErrors.append(message);
                }
            }
        }
        throw new Exception(
                String.format("errors parsing expression:%n%n%s%n", expressionWithErrors.toString()));
    }

    return parse;
}

From source file:de.dentrassi.pm.aspect.upgrade.UpgradeTaskProvider.java

private List<Task> updateState() {
    try (Handle handle = Profile.start(this, "updateState")) {
        final Map<String, ChannelAspectInformation> infos = ChannelAspectProcessor
                .scanAspectInformations(this.context);

        final List<Task> result = new LinkedList<>();

        final Multimap<String, Channel> missing = HashMultimap.create();

        final Multimap<Channel, String> channels = HashMultimap.create();

        for (final Channel channel : this.service.listChannels()) {
            logger.debug("Checking channel: {}", channel.getId());

            final Map<String, String> states = channel.getAspectStates();
            for (final Map.Entry<String, String> entry : states.entrySet()) {
                logger.debug("\t{}", entry.getKey());

                final ChannelAspectInformation info = infos.get(entry.getKey());
                if (info == null) {
                    missing.put(entry.getKey(), channel);
                } else {
                    logger.debug("\t{} - {} -> {}", info.getFactoryId(), entry.getValue(), info.getVersion());

                    if (!info.getVersion().equals(Version.valueOf(entry.getValue()))) {
                        result.add(makeUpgradeTask(channel, info, entry.getValue()));
                        channels.put(channel, entry.getKey());
                    }
                }
            }
        }

        for (final Map.Entry<Channel, Collection<String>> entry : channels.asMap().entrySet()) {
            final Channel channel = entry.getKey();
            final LinkTarget target = new LinkTarget(
                    String.format("/channel/%s/refreshAllAspects", channel.getId()));
            final String description = "Channel aspects active in this channel have been updated. You can refresh the whole channel.";
            result.add(new BasicTask("Refresh channel: " + makeChannelTitle(channel), 100, description, target,
                    RequestMethod.GET, PERFORM_ALL_BUTTON));
        }

        for (final Map.Entry<String, Collection<Channel>> entry : missing.asMap().entrySet()) {
            final String missingChannels = entry.getValue().stream().map(Channel::getId)
                    .collect(Collectors.joining(", "));
            result.add(new BasicTask(String.format("Fix missing channel aspect: %s", entry.getKey()), 1,
                    String.format(
                            "The channel aspect '%s' is being used but not installed in the system. Channels: %s",
                            entry.getKey(), missingChannels),
                    null));
        }

        if (!channels.isEmpty()) {
            result.add(new BasicTask("Refresh all channels", 1, "Refresh all channels in one big task",
                    new LinkTarget(String.format("/job/%s/create", UpgradeAllChannelsJob.ID)),
                    RequestMethod.POST, PERFORM_ALL_SUPER_BUTTON));
        }

        return result;
    }
}

From source file:com.b2international.snowowl.snomed.validation.detail.SnomedValidationIssueDetailExtension.java

private void extendIssueDetails(BranchContext context, Collection<ValidationIssue> issues) {
    final RevisionSearcher searcher = context.service(RevisionSearcher.class);

    final Multimap<String, ValidationIssue> issuesByComponentId = Multimaps.index(issues,
            issue -> issue.getAffectedComponent().getComponentId());

    final Multimap<ComponentCategory, String> issueComponentIdsByComponentCategory = HashMultimap.create();
    issues.stream().forEach(issue -> {
        final ComponentCategory componentCategory = getComponentCategory(
                issue.getAffectedComponent().getTerminologyComponentId());
        issueComponentIdsByComponentCategory.put(componentCategory,
                issue.getAffectedComponent().getComponentId());
    });

    final Multimap<String, String> issueIdsByConceptIds = HashMultimap.create();
    final Set<String> alreadyFetchedConceptIds = Sets.newHashSet();
    for (ComponentCategory category : issueComponentIdsByComponentCategory.keySet()) {
        final Query<String[]> query = buildQuery(category, issueComponentIdsByComponentCategory.get(category));

        for (Hits<String[]> hits : searcher.scroll(query)) {
            for (String[] hit : hits) {
                String id = hit[0];
                String status = hit[1];
                String moduleId = hit[2];
                issuesByComponentId.get(id).forEach(validationIssue -> {
                    validationIssue.setDetails(COMPONENT_STATUS, status);
                    validationIssue.setDetails(COMPONENT_MODULE_ID, moduleId);
                    if (CONCEPT == category) {
                        validationIssue.setDetails(CONCEPT_STATUS, status);
                        validationIssue.setDetails(SnomedDocument.Fields.EFFECTIVE_TIME,
                                Long.parseLong(hit[3]));
                        alreadyFetchedConceptIds.add(id);
                    } else if (DESCRIPTION == category || RELATIONSHIP == category) {
                        validationIssue.setDetails(SnomedDocument.Fields.EFFECTIVE_TIME,
                                Long.parseLong(hit[3]));
                        final String containerConceptId = hit[4];
                        if (!Strings.isNullOrEmpty(containerConceptId)
                                && (!issueIdsByConceptIds.containsKey(containerConceptId)
                                        || !alreadyFetchedConceptIds.contains(containerConceptId))) {
                            issueIdsByConceptIds.put(containerConceptId, id);
                        }
                        // in case of description just add the already fetched term as label to the issue; concepts and relationships will get their labels later
                        if (DESCRIPTION == category) {
                            validationIssue.setAffectedComponentLabels(Collections.singletonList(hit[5]));
                        }
                    }
                });
            }
        }
    }

    if (!issueIdsByConceptIds.isEmpty()) {
        final Query<String[]> conceptStatusQuery = Query.select(String[].class)
                .from(SnomedConceptDocument.class)
                .fields(SnomedConceptDocument.Fields.ID, SnomedConceptDocument.Fields.ACTIVE)
                .where(SnomedConceptDocument.Expressions.ids(issueIdsByConceptIds.keySet())).limit(SCROLL_SIZE)
                .build();

        for (Hits<String[]> hits : searcher.scroll(conceptStatusQuery)) {
            for (String[] hit : hits) {
                Collection<String> issueIds = issueIdsByConceptIds.get(hit[0]);
                issueIds.stream().forEach(id -> {
                    issuesByComponentId.get(id)
                            .forEach(validationIssue -> validationIssue.setDetails(CONCEPT_STATUS, hit[1]));
                });
            }
        }
    }
}

From source file:org.eclipse.packagedrone.repo.aspect.upgrade.UpgradeTaskProvider.java

private List<Task> updateState() {
    try (Handle handle = Profile.start(this, "updateState")) {
        final Map<String, ChannelAspectInformation> infos = ChannelAspectProcessor
                .scanAspectInformations(this.context);

        final List<Task> result = new LinkedList<>();

        final Multimap<String, ChannelInformation> missing = HashMultimap.create();

        final Multimap<ChannelInformation, String> channels = HashMultimap.create();

        for (final ChannelInformation channel : this.service.list()) {
            logger.debug("Checking channel: {}", channel.getId());

            final Map<String, String> states = channel.getAspectStates();
            for (final Map.Entry<String, String> entry : states.entrySet()) {
                logger.debug("\t{}", entry.getKey());

                final ChannelAspectInformation info = infos.get(entry.getKey());
                if (info == null) {
                    missing.put(entry.getKey(), channel);
                } else {
                    logger.debug("\t{} - {} -> {}", info.getFactoryId(), entry.getValue(), info.getVersion());

                    if (!info.getVersion().equals(Version.valueOf(entry.getValue()))) {
                        result.add(makeUpgradeTask(channel, info, entry.getValue()));
                        channels.put(channel, entry.getKey());
                    }
                }
            }
        }

        for (final Map.Entry<ChannelInformation, Collection<String>> entry : channels.asMap().entrySet()) {
            final ChannelInformation channel = entry.getKey();
            final LinkTarget target = new LinkTarget(String.format("/channel/%s/refreshAllAspects",
                    UrlEscapers.urlPathSegmentEscaper().escape(channel.getId())));
            final String description = "Channel aspects active in this channel have been updated. You can refresh the whole channel.";
            result.add(new BasicTask("Refresh channel: " + channel.makeTitle(), 100, description, target,
                    RequestMethod.GET, PERFORM_ALL_BUTTON));
        }

        for (final Map.Entry<String, Collection<ChannelInformation>> entry : missing.asMap().entrySet()) {
            final String missingChannels = entry.getValue().stream().map(ChannelInformation::getId)
                    .collect(Collectors.joining(", "));
            result.add(new BasicTask(String.format("Fix missing channel aspect: %s", entry.getKey()), 1,
                    String.format(
                            "The channel aspect '%s' is being used but not installed in the system. Channels: %s",
                            entry.getKey(), missingChannels),
                    null));
        }

        if (!channels.isEmpty()) {
            result.add(new BasicTask("Refresh all channels", 1, "Refresh all channels in one big task",
                    new LinkTarget(String.format("/job/%s/create", UpgradeAllChannelsJob.ID)),
                    RequestMethod.POST, PERFORM_ALL_SUPER_BUTTON));
        }

        return result;
    }
}

From source file:org.onosproject.pcep.server.impl.BasicPceccHandler.java

/**
 * Deallocates unused labels to device pools.
 *
 * @param tunnel tunnel between ingress and egress
 */
public void releaseLabel(Tunnel tunnel) {

    checkNotNull(labelRsrcService, LABEL_RESOURCE_SERVICE_NULL);
    checkNotNull(pceStore, PCE_STORE_NULL);

    Multimap<DeviceId, LabelResource> release = ArrayListMultimap.create();
    List<LspLocalLabelInfo> lspLocalLabelInfoList = pceStore.getTunnelInfo(tunnel.tunnelId());
    if ((lspLocalLabelInfoList != null) && (!lspLocalLabelInfoList.isEmpty())) {
        for (Iterator<LspLocalLabelInfo> iterator = lspLocalLabelInfoList.iterator(); iterator.hasNext();) {
            LspLocalLabelInfo lspLocalLabelInfo = iterator.next();
            DeviceId deviceId = lspLocalLabelInfo.deviceId();
            LabelResourceId inLabelId = lspLocalLabelInfo.inLabelId();
            LabelResourceId outLabelId = lspLocalLabelInfo.outLabelId();
            PortNumber inPort = lspLocalLabelInfo.inPort();
            PortNumber outPort = lspLocalLabelInfo.outPort();

            try {
                // Push into device
                if ((outLabelId != null) && (outPort != null)) {
                    pushLocalLabels(deviceId, outLabelId, outPort, tunnel, false,
                            Long.valueOf(LabelType.OUT_LABEL.value), PcepLabelOp.REMOVE);
                }

                if ((inLabelId != null) && (inPort != null)) {
                    pushLocalLabels(deviceId, inLabelId, inPort, tunnel, false,
                            Long.valueOf(LabelType.IN_LABEL.value), PcepLabelOp.REMOVE);
                }
            } catch (PcepParseException e) {
                log.error("Failed to push local label for device {}for tunnel {}.", deviceId.toString(),
                        tunnel.tunnelName().toString());
            }

            // List is stored from egress to ingress. So, using IN label id to release.
            // Only one local label is assigned to device (destination node)
            // and that is used as OUT label for source node.
            // No need to release label for last node in the list from pool because label was not allocated to
            // ingress node (source node).
            if ((iterator.hasNext()) && (inLabelId != null)) {
                LabelResource labelRsc = new DefaultLabelResource(deviceId, inLabelId);
                release.put(deviceId, labelRsc);
            }
        }
    }

    // Release from label pool
    if (!release.isEmpty()) {
        labelRsrcService.releaseToDevicePool(release);
    }

    pceStore.removeTunnelInfo(tunnel.tunnelId());
}

From source file:org.onosproject.pcep.controller.impl.BasicPceccHandler.java

/**
 * Deallocates unused labels to device pools.
 *
 * @param tunnel tunnel between ingress and egress
 */
public void releaseLabel(Tunnel tunnel) {

    checkNotNull(labelRsrcService, LABEL_RESOURCE_SERVICE_NULL);
    checkNotNull(pceStore, PCE_STORE_NULL);

    Multimap<DeviceId, LabelResource> release = ArrayListMultimap.create();
    List<LspLocalLabelInfo> lspLocalLabelInfoList = pceStore.getTunnelInfo(tunnel.tunnelId());
    if ((lspLocalLabelInfoList != null) && (lspLocalLabelInfoList.size() > 0)) {
        for (Iterator<LspLocalLabelInfo> iterator = lspLocalLabelInfoList.iterator(); iterator.hasNext();) {
            LspLocalLabelInfo lspLocalLabelInfo = iterator.next();
            DeviceId deviceId = lspLocalLabelInfo.deviceId();
            LabelResourceId inLabelId = lspLocalLabelInfo.inLabelId();
            LabelResourceId outLabelId = lspLocalLabelInfo.outLabelId();
            PortNumber inPort = lspLocalLabelInfo.inPort();
            PortNumber outPort = lspLocalLabelInfo.outPort();

            try {
                // Push into device
                if ((outLabelId != null) && (outPort != null)) {
                    pushLocalLabels(deviceId, outLabelId, outPort, tunnel, false,
                            Long.valueOf(LabelType.OUT_LABEL.value), PcepLabelOp.REMOVE);
                }

                if ((inLabelId != null) && (inPort != null)) {
                    pushLocalLabels(deviceId, inLabelId, inPort, tunnel, false,
                            Long.valueOf(LabelType.IN_LABEL.value), PcepLabelOp.REMOVE);
                }
            } catch (PcepParseException e) {
                log.error("Failed to push local label for device {}for tunnel {}.", deviceId.toString(),
                        tunnel.tunnelName().toString());
            }

            // List is stored from egress to ingress. So, using IN label id to release.
            // Only one local label is assigned to device (destination node)
            // and that is used as OUT label for source node.
            // No need to release label for last node in the list from pool because label was not allocated to
            // ingress node (source node).
            if ((iterator.hasNext()) && (inLabelId != null)) {
                LabelResource labelRsc = new DefaultLabelResource(deviceId, inLabelId);
                release.put(deviceId, labelRsc);
            }
        }
    }

    // Release from label pool
    if (!release.isEmpty()) {
        labelRsrcService.releaseToDevicePool(release);
    }

    pceStore.removeTunnelInfo(tunnel.tunnelId());
}

From source file:com.github.jsdossier.DocWriter.java

private void getPrototypeData(JsType.Builder jsTypeBuilder, IndexReference indexReference) {
    NominalType nominalType = indexReference.getNominalType();
    Iterable<JSType> assignableTypes;
    if (nominalType.getJsType().isConstructor()) {
        assignableTypes = Iterables.concat(
                Lists.reverse(typeRegistry.getTypeHierarchy(nominalType.getJsType())),
                typeRegistry.getImplementedTypes(nominalType));

    } else if (nominalType.getJsType().isInterface()) {
        assignableTypes = Iterables.concat(ImmutableSet.of(nominalType.getJsType()),
                typeRegistry.getImplementedTypes(nominalType));
    } else {
        return;
    }

    Multimap<String, InstanceProperty> properties = MultimapBuilder.treeKeys().linkedHashSetValues().build();

    for (JSType assignableType : assignableTypes) {
        if (assignableType.isConstructor() || assignableType.isInterface()) {
            assignableType = ((FunctionType) assignableType).getInstanceType();
        }

        ObjectType object = assignableType.toObjectType();
        FunctionType ctor = object.getConstructor();
        Set<String> ownProps = new HashSet<>();

        for (String pname : object.getOwnPropertyNames()) {
            if (!"constructor".equals(pname)) {
                ownProps.add(pname);
                Property property = object.getOwnSlot(pname);
                properties.put(pname, new InstanceProperty(object, property.getName(),
                        getType(object, property), property.getNode(), property.getJSDocInfo()));
            }
        }

        if (ctor == null) {
            continue;
        }

        ObjectType prototype = ObjectType.cast(ctor.getPropertyType("prototype"));
        verify(prototype != null);

        for (String pname : prototype.getOwnPropertyNames()) {
            if (!"constructor".equals(pname) && !ownProps.contains(pname)) {
                Property property = prototype.getOwnSlot(pname);
                properties.put(pname, new InstanceProperty(object, property.getName(),
                        getType(object, property), property.getNode(), property.getJSDocInfo()));
            }
        }
    }

    if (properties.isEmpty()) {
        return;
    }

    JSType docType = nominalType.getJsType();
    if (docType.isConstructor() || docType.isInterface()) {
        docType = ((FunctionType) docType).getInstanceType();
    }

    for (String key : properties.keySet()) {
        LinkedList<InstanceProperty> definitions = new LinkedList<>(properties.get(key));
        InstanceProperty property = definitions.removeFirst();

        Comment definedBy = null;
        if (!docType.equals(property.getDefinedOn())) {
            JSType definedByType = stripTemplateTypeInformation(property.getDefinedOn());
            definedBy = linker.formatTypeExpression(definedByType);
        }
        Comment overrides = findOverriddenType(definitions);
        Iterable<Comment> specifications = findSpecifications(definitions);

        JsDoc jsdoc = JsDoc.from(property.getJSDocInfo());
        if (jsdoc != null && jsdoc.getVisibility() == JSDocInfo.Visibility.PRIVATE) {
            continue;
        }

        // TODO: include inherited properties in the UI without generating lots of redundant info.
        if (definedBy != null) {
            continue;
        }

        JSType propType = property.getType();
        BaseProperty base;
        if (propType.isFunctionType()) {
            com.github.jsdossier.proto.Function data = getFunctionData(property.getName(), propType,
                    property.getNode(), jsdoc, definedBy, overrides, specifications);
            jsTypeBuilder.addMethod(data);
            base = data.getBase();
        } else {
            com.github.jsdossier.proto.Property data = getPropertyData(property.getName(), propType,
                    property.getNode(), jsdoc, definedBy, overrides, specifications);
            jsTypeBuilder.addField(data);
            base = data.getBase();
        }

        // Do not include the property in the search index if the parent type is an alias,
        // the property is inherited from another type, or the property overrides a parent
        // property but does not provide a comment of its own.
        if (!jsTypeBuilder.hasAliasedType() && !base.hasDefinedBy() && (!base.hasOverrides()
                || (base.hasDescription() && base.getDescription().getTokenCount() > 0))) {
            indexReference.addInstanceProperty(base.getName());
        }
    }
}

From source file:org.onosproject.pce.pceservice.BasicPceccHandler.java

/**
 * Deallocates unused labels to device pools.
 *
 * @param tunnel tunnel between ingress and egress
 */
public void releaseLabel(Tunnel tunnel) {
    boolean isLastLabelToPush = false;

    checkNotNull(labelRsrcService, LABEL_RESOURCE_SERVICE_NULL);
    checkNotNull(pceStore, PCE_STORE_NULL);

    Multimap<DeviceId, LabelResource> release = ArrayListMultimap.create();
    PceccTunnelInfo pceccTunnelInfo = pceStore.getTunnelInfo(tunnel.tunnelId());
    if (pceccTunnelInfo != null) {
        List<LspLocalLabelInfo> lspLocalLabelInfoList = pceccTunnelInfo.lspLocalLabelInfoList();
        if ((lspLocalLabelInfoList != null) && (lspLocalLabelInfoList.size() > 0)) {
            for (Iterator<LspLocalLabelInfo> iterator = lspLocalLabelInfoList.iterator(); iterator.hasNext();) {
                LspLocalLabelInfo lspLocalLabelInfo = iterator.next();
                DeviceId deviceId = lspLocalLabelInfo.deviceId();
                LabelResourceId inLabelId = lspLocalLabelInfo.inLabelId();
                LabelResourceId outLabelId = lspLocalLabelInfo.outLabelId();
                PortNumber inPort = lspLocalLabelInfo.inPort();
                PortNumber outPort = lspLocalLabelInfo.outPort();

                // Check whether this is last link label to push
                if (!iterator.hasNext()) {
                    isLastLabelToPush = true;
                }

                // Push into device
                if ((inLabelId != null) && (inPort != null)) {
                    installLocalLabelRule(deviceId, inLabelId, inPort, tunnel.tunnelId(), isLastLabelToPush,
                            Long.valueOf(LabelType.IN_LABEL.value), Objective.Operation.REMOVE);
                }

                if ((outLabelId != null) && (outPort != null)) {
                    installLocalLabelRule(deviceId, outLabelId, outPort, tunnel.tunnelId(), isLastLabelToPush,
                            Long.valueOf(LabelType.OUT_LABEL.value), Objective.Operation.REMOVE);
                }

                // List is stored from egress to ingress. So, using IN label id to release.
                // Only one local label is assigned to device (destination node)
                // and that is used as OUT label for source node.
                // No need to release label for last node in the list from pool because label was not allocated to
                // ingress node (source node).
                if ((iterator.hasNext()) && (inLabelId != null)) {
                    LabelResource labelRsc = new DefaultLabelResource(deviceId, inLabelId);
                    release.put(deviceId, labelRsc);
                }
            }
        }

        // Release from label pool
        if (!release.isEmpty()) {
            labelRsrcService.releaseToDevicePool(release);
        }

        // Remove tunnel info only if tunnel consumer id is not saved.
        // If tunnel consumer id is saved, this tunnel info will be removed during releasing bandwidth.
        if (pceccTunnelInfo.tunnelConsumerId() == null) {
            pceStore.removeTunnelInfo(tunnel.tunnelId());
        }
    } else {
        log.error("Unable to find PCECC tunnel info in store for a tunnel {}.", tunnel.toString());
    }
}

From source file:org.apache.cassandra.service.PendingRangeCalculatorService.java

/**
 * Calculate pending ranges according to bootstrapping and leaving nodes. Reasoning is:
 *
 * (1) When in doubt, it is better to write too much to a node than too little. That is, if
 * there are multiple nodes moving, calculate the biggest ranges a node could have. Cleaning
 * up unneeded data afterwards is better than missing writes during movement.
 * (2) When a node leaves, ranges for other nodes can only grow (a node might get additional
 * ranges, but it will not lose any of its current ranges as a result of a leave). Therefore
 * we will first remove _all_ leaving tokens for the sake of calculation and then check what
 * ranges would go where if all nodes are to leave. This way we get the biggest possible
 * ranges with regard to current leave operations, covering all subsets of possible final range
 * values.
 * (3) When a node bootstraps, ranges of other nodes can only get smaller. Without doing
 * complex calculations to see if multiple bootstraps overlap, we simply base calculations
 * on the same token ring used before (reflecting situation after all leave operations have
 * completed). Bootstrapping nodes will be added and removed one by one to that metadata and
 * checked what their ranges would be. This will give us the biggest possible ranges the
 * node could have. It might be that other bootstraps make our actual final ranges smaller,
 * but it does not matter as we can clean up the data afterwards.
 *
 * NOTE: This is a heavy and inefficient operation. It will be done only once when a node
 * changes state in the cluster, so it should be manageable.
 */
// public & static for testing purposes
public static void calculatePendingRanges(AbstractReplicationStrategy strategy, String keyspaceName) {
    TokenMetadata tm = StorageService.instance.getTokenMetadata();
    Multimap<Range<Token>, InetAddress> pendingRanges = HashMultimap.create();
    BiMultiValMap<Token, InetAddress> bootstrapTokens = tm.getBootstrapTokens();
    Set<InetAddress> leavingEndpoints = tm.getLeavingEndpoints();

    if (bootstrapTokens.isEmpty() && leavingEndpoints.isEmpty() && tm.getMovingEndpoints().isEmpty()) {
        if (logger.isDebugEnabled())
            logger.debug(
                    "No bootstrapping, leaving or moving nodes, and no relocating tokens -> empty pending ranges for {}",
                    keyspaceName);
        tm.setPendingRanges(keyspaceName, pendingRanges);
        return;
    }

    Multimap<InetAddress, Range<Token>> addressRanges = strategy.getAddressRanges();

    // Copy of metadata reflecting the situation after all leave operations are finished.
    TokenMetadata allLeftMetadata = tm.cloneAfterAllLeft();

    // get all ranges that will be affected by leaving nodes
    Set<Range<Token>> affectedRanges = new HashSet<Range<Token>>();
    for (InetAddress endpoint : leavingEndpoints)
        affectedRanges.addAll(addressRanges.get(endpoint));

    // for each of those ranges, find what new nodes will be responsible for the range when
    // all leaving nodes are gone.
    TokenMetadata metadata = tm.cloneOnlyTokenMap(); // don't do this in the loop! #7758
    for (Range<Token> range : affectedRanges) {
        Set<InetAddress> currentEndpoints = ImmutableSet
                .copyOf(strategy.calculateNaturalEndpoints(range.right, metadata));
        Set<InetAddress> newEndpoints = ImmutableSet
                .copyOf(strategy.calculateNaturalEndpoints(range.right, allLeftMetadata));
        pendingRanges.putAll(range, Sets.difference(newEndpoints, currentEndpoints));
    }

    // At this stage pendingRanges has been updated according to leave operations. We can
    // now continue the calculation by checking bootstrapping nodes.

    // For each of the bootstrapping nodes, simply add and remove them one by one to
    // allLeftMetadata and check in between what their ranges would be.
    Multimap<InetAddress, Token> bootstrapAddresses = bootstrapTokens.inverse();
    for (InetAddress endpoint : bootstrapAddresses.keySet()) {
        Collection<Token> tokens = bootstrapAddresses.get(endpoint);

        allLeftMetadata.updateNormalTokens(tokens, endpoint);
        for (Range<Token> range : strategy.getAddressRanges(allLeftMetadata).get(endpoint))
            pendingRanges.put(range, endpoint);
        allLeftMetadata.removeEndpoint(endpoint);
    }

    // At this stage pendingRanges has been updated according to leaving and bootstrapping nodes.
    // We can now finish the calculation by checking moving and relocating nodes.

    // For each of the moving nodes, we do the same thing we did for bootstrapping:
    // simply add and remove them one by one to allLeftMetadata and check in between what their ranges would be.
    for (Pair<Token, InetAddress> moving : tm.getMovingEndpoints()) {
        InetAddress endpoint = moving.right; // address of the moving node

        //  moving.left is a new token of the endpoint
        allLeftMetadata.updateNormalToken(moving.left, endpoint);

        for (Range<Token> range : strategy.getAddressRanges(allLeftMetadata).get(endpoint)) {
            pendingRanges.put(range, endpoint);
        }

        allLeftMetadata.removeEndpoint(endpoint);
    }

    tm.setPendingRanges(keyspaceName, pendingRanges);

    if (logger.isDebugEnabled())
        logger.debug("Pending ranges:\n" + (pendingRanges.isEmpty() ? "<empty>" : tm.printPendingRanges()));
}

From source file:com.bigdata.dastor.service.StorageService.java

public static void calculatePendingRanges(AbstractReplicationStrategy strategy, String table) {
    TokenMetadata tm = StorageService.instance.getTokenMetadata();
    Multimap<Range, InetAddress> pendingRanges = HashMultimap.create();
    Map<Token, InetAddress> bootstrapTokens = tm.getBootstrapTokens();
    Set<InetAddress> leavingEndPoints = tm.getLeavingEndPoints();

    if (bootstrapTokens.isEmpty() && leavingEndPoints.isEmpty()) {
        if (logger_.isDebugEnabled())
            logger_.debug("No bootstrapping or leaving nodes -> empty pending ranges for " + table);
        tm.setPendingRanges(table, pendingRanges);
        return;
    }

    Multimap<InetAddress, Range> addressRanges = strategy.getAddressRanges(table);

    // Copy of metadata reflecting the situation after all leave operations are finished.
    TokenMetadata allLeftMetadata = tm.cloneAfterAllLeft();

    // get all ranges that will be affected by leaving nodes
    Set<Range> affectedRanges = new HashSet<Range>();
    for (InetAddress endPoint : leavingEndPoints)
        affectedRanges.addAll(addressRanges.get(endPoint));

    // for each of those ranges, find what new nodes will be responsible for the range when
    // all leaving nodes are gone.
    for (Range range : affectedRanges) {
        List<InetAddress> currentEndPoints = strategy.getNaturalEndpoints(range.right, tm, table);
        List<InetAddress> newEndPoints = strategy.getNaturalEndpoints(range.right, allLeftMetadata, table);
        newEndPoints.removeAll(currentEndPoints);
        pendingRanges.putAll(range, newEndPoints);
    }

    // At this stage pendingRanges has been updated according to leave operations. We can
    // now finish the calculation by checking bootstrapping nodes.

    // For each of the bootstrapping nodes, simply add and remove them one by one to
    // allLeftMetadata and check in between what their ranges would be.
    for (Map.Entry<Token, InetAddress> entry : bootstrapTokens.entrySet()) {
        InetAddress endPoint = entry.getValue();

        allLeftMetadata.updateNormalToken(entry.getKey(), endPoint);
        for (Range range : strategy.getAddressRanges(allLeftMetadata, table).get(endPoint))
            pendingRanges.put(range, endPoint);
        allLeftMetadata.removeEndpoint(endPoint);
    }

    tm.setPendingRanges(table, pendingRanges);

    if (logger_.isDebugEnabled())
        logger_.debug("Pending ranges:\n" + (pendingRanges.isEmpty() ? "<empty>" : tm.printPendingRanges()));
}