Example usage for com.google.common.collect Sets newConcurrentHashSet

List of usage examples for com.google.common.collect Sets newConcurrentHashSet

Introduction

On this page you can find example usage for com.google.common.collect Sets newConcurrentHashSet.

Prototype

public static <E> Set<E> newConcurrentHashSet() 

Document

Creates a thread-safe set backed by a hash map.
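
Before the project examples below, here is a minimal standalone sketch (not taken from any of the projects on this page; the class name and thread counts are arbitrary) showing that the returned set can be populated from several threads without external synchronization:

import com.google.common.collect.Sets;

import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class NewConcurrentHashSetDemo {

    public static void main(String[] args) throws InterruptedException {
        // Thread-safe set backed by a concurrent hash map; no external locking required
        final Set<Integer> seen = Sets.newConcurrentHashSet();

        ExecutorService pool = Executors.newFixedThreadPool(4);
        final CountDownLatch done = new CountDownLatch(4);
        for (int t = 0; t < 4; t++) {
            final int offset = t * 1000;
            pool.execute(() -> {
                for (int i = 0; i < 1000; i++) {
                    seen.add(offset + i); // concurrent adds are safe
                }
                done.countDown();
            });
        }
        done.await(10, TimeUnit.SECONDS);
        pool.shutdown();

        System.out.println(seen.size()); // prints 4000
    }
}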

Usage

From source file:org.onosproject.net.edgeservice.impl.EdgeManager.java

private void addEdgePort(ConnectPoint point) {
    if (!topologyService.isInfrastructure(topology, point) && !point.port().isLogical()) {
        Set<ConnectPoint> set = connectionPoints.get(point.deviceId());
        if (set == null) {
            set = Sets.newConcurrentHashSet();
            connectionPoints.put(point.deviceId(), set);
        }
        if (set.add(point)) {
            post(new EdgePortEvent(EDGE_PORT_ADDED, point));
        }
    }
}
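
The get/null-check/put sequence above is not atomic on its own, so if addEdgePort can run concurrently, two threads could each create a set for the same device. The NullLinkProvider example further down uses Map.computeIfAbsent for this instead; here is a minimal self-contained sketch of that pattern (plain String keys stand in for the ONOS types, and the class and method names are made up for illustration):

import com.google.common.collect.Sets;

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class PerDeviceSets {

    private final ConcurrentMap<String, Set<String>> connectionPoints = new ConcurrentHashMap<>();

    // Atomically creates the per-device set on first use, then records the point
    public boolean addPoint(String deviceId, String point) {
        return connectionPoints
                .computeIfAbsent(deviceId, k -> Sets.newConcurrentHashSet())
                .add(point);
    }
}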

From source file:com.vmware.photon.controller.rootscheduler.service.SchedulerManager.java

/**
 * Tries to issue a place request to all managed schedulers and picks the appropriate one to return to caller.
 *
 * @param request Placement request
 * @return Place response or null if no one responded.
 * @throws InterruptedException
 */
public PlaceResponse place(PlaceRequest request) throws InterruptedException {
    long startTime = System.currentTimeMillis();

    PlaceParams rootPlaceParams = request.getRootSchedulerParams();
    if (rootPlaceParams == null) {
        rootPlaceParams = config.getRootPlaceParams();
    }

    /*
     * If the root scheduler has no children return error
     */
    if (getManagedSchedulersMap().isEmpty()) {
        logger.error("Place failure, root scheduler has no children");
        return new PlaceResponse(PlaceResultCode.SYSTEM_ERROR);
    }

    Collection<ManagedScheduler> placementSchedulers = getPlacementSchedulers(request, rootPlaceParams);

    /*
     * No children satisfying the constraints can be found, return error
     */
    if (placementSchedulers.isEmpty()) {
        assert (hasResourceConstraints(request));
        logger.warn("Place failure, constraints cannot be satisfied for request: {}", request);
        return new PlaceResponse(PlaceResultCode.NO_SUCH_RESOURCE);
    }

    /*
     * If the leaf scheduler place parameters are not set,
     * read it from the config and set it here.
     */
    PlaceParams leafPlaceParams = request.getLeafSchedulerParams();
    if (leafPlaceParams == null) {
        leafPlaceParams = config.getLeafPlaceParams();
        request.setLeafSchedulerParams(leafPlaceParams);
    }

    int fastPlaceResponseMinCount = (int) (rootPlaceParams.getFastPlaceResponseRatio()
            * placementSchedulers.size());
    fastPlaceResponseMinCount = Math.max(fastPlaceResponseMinCount,
            rootPlaceParams.getFastPlaceResponseMinCount());

    final Set<PlaceResponse> okResponses = Sets.newConcurrentHashSet();
    final Map<PlaceResultCode, Integer> responses = Collections
            .synchronizedMap(new HashMap<PlaceResultCode, Integer>());

    final CountDownLatch done = new CountDownLatch(placementSchedulers.size());

    final HashSet<PlaceResultCode> returnCode = new HashSet<>();

    long initialPlaceTimeout = Math
            .round(rootPlaceParams.getTimeout() * rootPlaceParams.getFastPlaceResponseTimeoutRatio());

    logger.info("Running {} placement scheduler(s) for placement with timeout {} ms",
            placementSchedulers.size(), initialPlaceTimeout);
    for (final ManagedScheduler scheduler : placementSchedulers) {
        Futures.addCallback(scheduler.place(request, rootPlaceParams.getTimeout()),
                new MdcContextCallback<PlaceResponse>() {
                    @Override
                    public void onSuccessWithContext(PlaceResponse response) {
                        logger.info("Received a placement response from {}: {}", scheduler, response);

                        PlaceResultCode result = response.getResult();

                        if (result == PlaceResultCode.OK) {
                            okResponses.add(response);
                        } else {
                            returnCode.add(result);
                        }

                        responses.put(result, responses.containsKey(result) ? responses.get(result) + 1 : 1);
                        done.countDown();
                    }

                    @Override
                    public void onFailureWithContext(Throwable t) {
                        logger.warn("Failed to get a placement response from {}: {}", scheduler.getId(), t);
                        done.countDown();
                    }
                });
    }

    done.await(initialPlaceTimeout, TimeUnit.MILLISECONDS);

    if (okResponses.size() < fastPlaceResponseMinCount) {
        long timeoutMs = rootPlaceParams.getTimeout() - initialPlaceTimeout;
        logger.warn("{} scheduler(s) responded OK in {} ms (need at least {}), waiting for another {} ms",
                okResponses.size(), initialPlaceTimeout, fastPlaceResponseMinCount, timeoutMs);
        done.await(timeoutMs, TimeUnit.MILLISECONDS);
    }
    // Log if we still haven't reached the minimum for fast placement
    if (okResponses.size() < fastPlaceResponseMinCount) {
        logger.warn("{} schedulers(s) responded OK in {} ms. Proceeding to select best match",
                okResponses.size(), rootPlaceParams.getTimeout());
    }

    // Whoever responded before the timeout gets to participate in placement.
    // Access to okResponses needs to be synchronized because some responses might still be
    // coming in if the initial place timeout already yielded a satisfactory number of schedulers.
    PlaceResponse bestResponse;

    logger.debug("{} scheduler(s) responded in {} ms ultimately", responses.size(), initialPlaceTimeout);
    for (Map.Entry<PlaceResultCode, Integer> responsesCount : responses.entrySet()) {
        logger.debug("PlaceResultCode: {} - Count: {}", responsesCount.getKey(), responsesCount.getValue());
    }

    bestResponse = scoreCalculator.pickBestResponse(okResponses);

    if (bestResponse == null) {
        if (returnCode.contains(PlaceResultCode.NOT_ENOUGH_CPU_RESOURCE)) {
            bestResponse = new PlaceResponse(PlaceResultCode.NOT_ENOUGH_CPU_RESOURCE);
        } else if (returnCode.contains(PlaceResultCode.NOT_ENOUGH_MEMORY_RESOURCE)) {
            bestResponse = new PlaceResponse(PlaceResultCode.NOT_ENOUGH_MEMORY_RESOURCE);
        } else if (returnCode.contains(PlaceResultCode.NOT_ENOUGH_DATASTORE_CAPACITY)) {
            bestResponse = new PlaceResponse(PlaceResultCode.NOT_ENOUGH_DATASTORE_CAPACITY);
        } else if (returnCode.contains(PlaceResultCode.NO_SUCH_RESOURCE)) {
            bestResponse = new PlaceResponse(PlaceResultCode.NO_SUCH_RESOURCE);
        } else if (returnCode.contains(PlaceResultCode.INVALID_SCHEDULER)) {
            bestResponse = new PlaceResponse(PlaceResultCode.INVALID_SCHEDULER);
        }

        if (bestResponse == null) {
            bestResponse = new PlaceResponse(PlaceResultCode.SYSTEM_ERROR);
            bestResponse.setError(
                    String.format("%d scheduler responded OK in %d ms out of %d placement scheduler(s)",
                            okResponses.size(), initialPlaceTimeout, placementSchedulers.size()));
        }
    }

    long endTime = System.currentTimeMillis();
    logger.info("Returning bestResponse: {} in roughly {} ms", bestResponse, (endTime - startTime));
    return bestResponse;
}

From source file:org.onosproject.net.intent.impl.compiler.PathCompiler.java

/**
 * Creates the flow rules for the path intent using MPLS
 * encapsulation.
 *
 * @param creator the flow rules creator
 * @param flows the list of flows to fill
 * @param devices the devices on the path
 * @param intent the PathIntent to compile
 */
private void manageMplsEncap(PathCompilerCreateFlow<T> creator, List<T> flows, List<DeviceId> devices,
        PathIntent intent) {

    Set<Link> linksSet = Sets.newConcurrentHashSet();
    for (int i = 1; i <= intent.path().links().size() - 2; i++) {
        linksSet.add(intent.path().links().get(i));
    }

    Map<LinkKey, Identifier<?>> mplsLabels = labelAllocator.assignLabelToLinks(linksSet, intent.id(),
            EncapsulationType.MPLS);
    Iterator<Link> links = intent.path().links().iterator();
    Link srcLink = links.next();

    Link link = links.next();
    // List of flow rules to be installed

    // Ingress traffic
    MplsLabel mplsLabel = (MplsLabel) mplsLabels.get(linkKey(link));
    if (mplsLabel == null) {
        throw new IntentCompilationException(ERROR_MPLS + link);
    }
    MplsLabel prevMplsLabel = mplsLabel;

    Optional<MplsCriterion> mplsCriterion = intent.selector().criteria().stream()
            .filter(criterion -> criterion.type() == Criterion.Type.MPLS_LABEL)
            .map(criterion -> (MplsCriterion) criterion).findAny();

    //Push MPLS if selector does not include MPLS
    TrafficTreatment.Builder treatBuilder = DefaultTrafficTreatment.builder();
    if (!mplsCriterion.isPresent()) {
        treatBuilder.pushMpls();
    }
    //Tag the traffic with the new encapsulation MPLS label
    treatBuilder.setMpls(mplsLabel);
    creator.createFlow(intent.selector(), treatBuilder.build(), srcLink.dst(), link.src(), intent.priority(),
            true, flows, devices);

    ConnectPoint prev = link.dst();

    while (links.hasNext()) {

        link = links.next();

        if (links.hasNext()) {
            // Transit traffic
            MplsLabel transitMplsLabel = (MplsLabel) mplsLabels.get(linkKey(link));
            if (transitMplsLabel == null) {
                throw new IntentCompilationException(ERROR_MPLS + link);
            }
            TrafficSelector transitSelector = DefaultTrafficSelector.builder().matchInPort(prev.port())
                    .matchEthType(Ethernet.MPLS_UNICAST).matchMplsLabel(prevMplsLabel).build();

            TrafficTreatment.Builder transitTreat = DefaultTrafficTreatment.builder();

            // Set the new MPLS label only if the previous one is different
            if (!prevMplsLabel.equals(transitMplsLabel)) {
                transitTreat.setMpls(transitMplsLabel);
            }
            creator.createFlow(transitSelector, transitTreat.build(), prev, link.src(), intent.priority(), true,
                    flows, devices);
            prevMplsLabel = transitMplsLabel;
            prev = link.dst();
        } else {
            TrafficSelector.Builder egressSelector = DefaultTrafficSelector.builder().matchInPort(prev.port())
                    .matchEthType(Ethernet.MPLS_UNICAST).matchMplsLabel(prevMplsLabel);
            TrafficTreatment.Builder egressTreat = DefaultTrafficTreatment.builder(intent.treatment());

            // Egress traffic
            // check if the treatment is popVlan or setVlan (rewrite);
            // then the selector needs to match any VlanId
            for (Instruction instruct : intent.treatment().allInstructions()) {
                if (instruct instanceof L2ModificationInstruction) {
                    L2ModificationInstruction l2Mod = (L2ModificationInstruction) instruct;
                    if (l2Mod.subtype() == L2ModificationInstruction.L2SubType.VLAN_PUSH) {
                        break;
                    }
                    if (l2Mod.subtype() == L2ModificationInstruction.L2SubType.VLAN_POP
                            || l2Mod.subtype() == L2ModificationInstruction.L2SubType.VLAN_ID) {
                        egressSelector.matchVlanId(VlanId.ANY);
                    }
                }
            }

            if (mplsCriterion.isPresent()) {
                egressTreat.setMpls(mplsCriterion.get().label());
            } else {
                egressTreat.popMpls(getEthType(intent.selector()));
            }

            creator.createFlow(egressSelector.build(), egressTreat.build(), prev, link.src(), intent.priority(),
                    true, flows, devices);
        }

    }

}

From source file:org.onosproject.provider.nil.link.impl.NullLinkProvider.java

private void configureWorkers() {
    if (eventRate > 0) {
        // now set to 'flicker', previously not flickering
        if (!flicker) {
            flicker = true;
            allocateLinks();
            // kill off refresh worker for symmetry
            if (driverMap.containsKey(DEFAULT)) {
                driverMap.get(DEFAULT).forEach(d -> d.setTasks(Lists.newArrayList()));
                driverMap.remove(DEFAULT);
            }
            for (int i = 0; i < linkTasks.size(); i++) {
                List<LinkDescription> links = linkTasks.get(i);
                LinkDriver driver = new LinkDriver(links);
                links.forEach(v -> {
                    DeviceId sd = v.src().deviceId();
                    DeviceId dd = v.dst().deviceId();
                    driverMap.computeIfAbsent(sd, k -> Sets.newConcurrentHashSet()).add(driver);
                    driverMap.computeIfAbsent(dd, k -> Sets.newConcurrentHashSet()).add(driver);
                });
                linkDriver.schedule(driver, eventRate, TimeUnit.MICROSECONDS);
            }
        }
        // no need to handle the 'was already flickering' case, since eventRate will be read by the workers
    } else {
        // now set to 'refresh', was 'flicker' before
        if (flicker) {
            driverMap.forEach((dev, lds) -> lds.forEach(l -> l.deviceRemoved(dev)));
            driverMap.clear();
            linkTasks.clear();
            flicker = false;
            LinkDriver driver = new LinkDriver(linkDescrs);
            driverMap.computeIfAbsent(DEFAULT, k -> Sets.newConcurrentHashSet()).add(driver);
            linkDriver.schedule(driver, DEFAULT_RATE, TimeUnit.SECONDS);
            // was 'refresh' - something changed or we're just starting.
        } else {
            if (driverMap.containsKey(DEFAULT)) {
                driverMap.forEach((dev, ld) -> ld.forEach(d -> d.setTasks(linkDescrs)));
                return;
            }
            LinkDriver driver = new LinkDriver(linkDescrs);
            driverMap.computeIfAbsent(DEFAULT, k -> Sets.newConcurrentHashSet()).add(driver);
            linkDriver.schedule(driver, DEFAULT_RATE, TimeUnit.SECONDS);
        }
    }
}

From source file:org.apache.jackrabbit.oak.plugins.index.lucene.IndexCopier.java

/**
 * Provide the corresponding shared state to enable COW inform COR
 * about new files it is creating while indexing. This would allow COR to ignore
 * such files while determining the deletion candidates.
 *
 * @param indexPath path of the index for which the directory is being created
 * @return a set to maintain the state of new files being created by the COW Directory
 */
private Set<String> getSharedWorkingSet(String indexPath) {
    Set<String> sharedSet;
    synchronized (sharedWorkingSetMap) {
        sharedSet = sharedWorkingSetMap.get(indexPath);
        if (sharedSet == null) {
            sharedSet = Sets.newConcurrentHashSet();
            sharedWorkingSetMap.put(indexPath, sharedSet);
        }
    }
    return sharedSet;
}

From source file:com.quancheng.saluki.core.registry.internal.FailbackRegistry.java

private void addFailedSubscribed(GrpcURL url, NotifyListener.NotifyServiceListener listener) {
    Set<NotifyListener.NotifyServiceListener> listeners = failedSubscribed.get(url);
    if (listeners == null) {
        Set<NotifyListener.NotifyServiceListener> created = Sets.newConcurrentHashSet();
        // putIfAbsent returns the previously mapped set, or null if the new one was installed
        listeners = failedSubscribed.putIfAbsent(url, created);
        if (listeners == null) {
            listeners = created;
        }
    }
    listeners.add(listener);
}

From source file:com.quancheng.saluki.registry.consul.ConsulRegistry.java

@Override
public void subscribe(String group, NotifyRouterListener listener) {
    Set<NotifyListener.NotifyRouterListener> listeners = notifyRouterListeners.get(group);
    if (listeners == null) {
        listeners = Sets.newConcurrentHashSet();
        listeners.add(listener);
    } else {
        listeners.add(listener);
    }
    notifyRouterListeners.put(group, listeners);
    if (!routerGroupLookUped.contains(group)) {
        routerGroupLookUped.add(group);
        RouterLookUper routerLookUper = new RouterLookUper(group);
        routerLookUper.setDaemon(true);
        routerLookUper.start();
    }
}

From source file:org.locationtech.geogig.remotes.internal.LocalRemoteRepo.java

private void copyNewObjects(RevTree oldTree, RevTree newTree, final ObjectDatabase fromDb,
        final ObjectDatabase toDb, final ProgressListener progress) {
    checkNotNull(oldTree);
    checkNotNull(newTree);
    checkNotNull(fromDb);
    checkNotNull(toDb);
    checkNotNull(progress);

    // the diff walk uses fromDb as both left and right data source since we're comparing what
    // we have in the "origin" database against trees on the same repository
    PostOrderDiffWalk diffWalk = new PostOrderDiffWalk(oldTree, newTree, fromDb, fromDb);

    // holds object ids that need to be copied to the target db. Pruned when it reaches a
    // threshold.
    final Set<ObjectId> ids = new HashSet<>();
    final ReadWriteLock lock = new ReentrantReadWriteLock();

    // This filter further refines the post order diff walk by making it ignore trees/buckets
    // that are already present in the target db
    Predicate<Bounded> filter = new Predicate<Bounded>() {

        @Override
        public boolean apply(@Nullable Bounded b) {
            if (b == null) {
                return false;
            }

            if (b instanceof NodeRef && FEATURE.equals(((NodeRef) b).getType())) {
                // check for existence of trees only. For features the diff filtering is good
                // enough and checking for existence on each feature would be killer
                // performance wise
                return true;
            }

            final ObjectId id = b.getObjectId();
            lock.readLock().lock();
            try {
                boolean exists = !progress.isCanceled() && (ids.contains(id) || toDb.exists(id));
                return !exists;
            } finally {
                lock.readLock().unlock();
            }
        }
    };

    // receives notifications of feature/bucket/tree diffs. Only interested in the "new"/right
    // side of the comparisons
    Consumer consumer = new Consumer() {
        final int bulkSize = 10_000;

        /**
         * Cache already inserted metadata ids, in order to avoid inserting the same
         * RevFeatureType over and over, yet handling the case where a feature node has a
         * different metadata id than its tree's default one
         */
        final Set<ObjectId> insertedMetadataIds = Sets.newConcurrentHashSet();

        @Override
        public void feature(@Nullable NodeRef left, NodeRef right) {
            // add(left);
            add(right);
        }

        @Override
        public void tree(@Nullable NodeRef left, NodeRef right) {
            // add(left);
            add(right);
        }

        private void add(@Nullable NodeRef node) {
            if (node == null) {
                return;
            }
            Optional<ObjectId> metadataId = node.getNode().getMetadataId();
            lock.writeLock().lock();
            try {
                ids.add(node.getObjectId());
                if (metadataId.isPresent()) {
                    ObjectId mdid = metadataId.get();
                    if (!insertedMetadataIds.contains(mdid)) {
                        ids.add(mdid);
                        insertedMetadataIds.add(mdid);
                    }
                }
            } finally {
                lock.writeLock().unlock();
            }
            checkLimitAndCopy();
        }

        @Override
        public void bucket(NodeRef lparent, NodeRef rparent, BucketIndex bucketIndex, @Nullable Bucket left,
                Bucket right) {
            // if (left != null) {
            // ids.add(left.getObjectId());
            // }
            if (right != null) {
                lock.writeLock().lock();
                try {
                    ids.add(right.getObjectId());
                } finally {
                    lock.writeLock().unlock();
                }
            }
            checkLimitAndCopy();
        }

        private void checkLimitAndCopy() {
            // double-checked locking on ids to reduce contention when pruning it, as this method can
            // be called from several concurrent threads from inside PreOrderDiffWalk
            Set<ObjectId> copyIds = null;
            lock.readLock().lock();
            try {
                if (ids.size() >= bulkSize) {
                    lock.readLock().unlock();
                    lock.writeLock().lock();
                    try {
                        copyIds = Sets.newHashSet(ids);
                        ids.clear();
                    } finally {
                        lock.writeLock().unlock();
                        lock.readLock().lock();
                    }
                }
            } finally {
                lock.readLock().unlock();
            }
            if (copyIds != null) {
                copy(copyIds, fromDb, toDb, progress);
            }
        }
    };
    diffWalk.walk(filter, consumer);
    // copy remaining objects
    copy(ids, fromDb, toDb, progress);
}

From source file:com.linkedin.pinot.broker.routing.HelixExternalViewBasedRouting.java

private void buildRoutingTable(String tableNameWithType, ExternalView externalView,
        List<InstanceConfig> instanceConfigs) {
    // Save the current version number of the external view to avoid unnecessary routing table updates
    int externalViewRecordVersion = externalView.getRecord().getVersion();
    _lastKnownExternalViewVersionMap.put(tableNameWithType, externalViewRecordVersion);

    RoutingTableBuilder routingTableBuilder = _routingTableBuilderMap.get(tableNameWithType);
    if (routingTableBuilder == null) {
        //TODO: warn
        return;
    }
    CommonConstants.Helix.TableType tableType = TableNameBuilder.getTableTypeFromTableName(tableNameWithType);

    LOGGER.info("Trying to compute routing table for table {} using {}", tableNameWithType,
            routingTableBuilder);
    long startTimeMillis = System.currentTimeMillis();

    try {
        Map<String, InstanceConfig> relevantInstanceConfigs = new HashMap<>();

        routingTableBuilder.computeRoutingTableFromExternalView(tableNameWithType, externalView,
                instanceConfigs);

        // Keep track of the instance configs that are used in that routing table
        updateInstanceConfigsMapFromExternalView(relevantInstanceConfigs, instanceConfigs, externalView);

        // Save the instance configs used so that we can avoid unnecessary routing table updates later
        _lastKnownInstanceConfigsForTable.put(tableNameWithType, relevantInstanceConfigs);
        for (InstanceConfig instanceConfig : relevantInstanceConfigs.values()) {
            _lastKnownInstanceConfigs.put(instanceConfig.getInstanceName(), instanceConfig);
        }

        // Ensure this table is registered with all relevant instances
        for (String instanceName : relevantInstanceConfigs.keySet()) {
            Set<String> tablesForCurrentInstance = _tablesForInstance.get(instanceName);

            // Ensure there is a table set for this instance
            if (tablesForCurrentInstance == null) {
                synchronized (_tablesForInstance) {
                    if (!_tablesForInstance.containsKey(instanceName)) {
                        tablesForCurrentInstance = Sets.newConcurrentHashSet();
                        _tablesForInstance.put(instanceName, tablesForCurrentInstance);
                    } else {
                        // Another thread has created a table set for this instance, use it
                        tablesForCurrentInstance = _tablesForInstance.get(instanceName);
                    }
                }
            }

            // Add the table to the set of tables for this instance
            tablesForCurrentInstance.add(tableNameWithType);
        }
    } catch (Exception e) {
        _brokerMetrics.addMeteredTableValue(tableNameWithType, BrokerMeter.ROUTING_TABLE_REBUILD_FAILURES, 1L);
        LOGGER.error("Failed to compute/update the routing table", e);

        // Mark the routing table as needing a rebuild
        _lastKnownExternalViewVersionMap.put(tableNameWithType, INVALID_EXTERNAL_VIEW_VERSION);
    }

    try {
        // We need to compute the time boundary only in two situations:
        // 1) We're adding/updating an offline table and there's a realtime table that we're serving
        // 2) We're adding a new realtime table and there's already an offline table, in which case we need to update the
        //    time boundary for the existing offline table
        String tableForTimeBoundaryUpdate = null;
        ExternalView externalViewForTimeBoundaryUpdate = null;

        if (tableType == CommonConstants.Helix.TableType.OFFLINE) {
            // Does a realtime table exist?
            String realtimeTableName = TableNameBuilder.REALTIME
                    .tableNameWithType(TableNameBuilder.extractRawTableName(tableNameWithType));
            if (_routingTableBuilderMap.containsKey(realtimeTableName)) {
                tableForTimeBoundaryUpdate = tableNameWithType;
                externalViewForTimeBoundaryUpdate = externalView;
            }
        }

        if (tableType == CommonConstants.Helix.TableType.REALTIME) {
            // Does an offline table exist?
            String offlineTableName = TableNameBuilder.OFFLINE
                    .tableNameWithType(TableNameBuilder.extractRawTableName(tableNameWithType));
            if (_routingTableBuilderMap.containsKey(offlineTableName)) {
                // Is there no time boundary?
                if (_timeBoundaryService.getTimeBoundaryInfoFor(offlineTableName) == null) {
                    tableForTimeBoundaryUpdate = offlineTableName;
                    externalViewForTimeBoundaryUpdate = fetchExternalView(offlineTableName);
                }
            }
        }

        if (tableForTimeBoundaryUpdate != null) {
            updateTimeBoundary(tableForTimeBoundaryUpdate, externalViewForTimeBoundaryUpdate);
        } else {
            LOGGER.info("No need to update time boundary for table {}", tableNameWithType);
        }
    } catch (Exception e) {
        LOGGER.error("Failed to update the TimeBoundaryService", e);
    }

    long updateTime = System.currentTimeMillis() - startTimeMillis;

    if (_brokerMetrics != null) {
        _brokerMetrics.addTimedValue(BrokerTimer.ROUTING_TABLE_UPDATE_TIME, updateTime, TimeUnit.MILLISECONDS);
    }

    LOGGER.info("Routing table update for table {} completed in {} ms", tableNameWithType, updateTime);
}

From source file:com.quancheng.saluki.core.registry.internal.FailbackRegistry.java

@Override
public void unsubscribe(GrpcURL url, NotifyListener.NotifyServiceListener listener) {
    super.unsubscribe(url, listener);
    removeFailedSubscribed(url, listener);
    try {
        // send the unsubscribe request to the server side
        doUnsubscribe(url, listener);
    } catch (Exception e) {
        logger.error("Failed to unsubscribe " + url + ", waiting for retry, cause: " + e.getMessage(), e);
        // record the failed unsubscribe request for periodic retry
        Set<NotifyListener.NotifyServiceListener> listeners = failedUnsubscribed.get(url);
        if (listeners == null) {
            Set<NotifyListener.NotifyServiceListener> created = Sets.newConcurrentHashSet();
            // putIfAbsent returns the previously mapped set, or null if the new one was installed
            listeners = failedUnsubscribed.putIfAbsent(url, created);
            if (listeners == null) {
                listeners = created;
            }
        }
        listeners.add(listener);
    }
}