Example usage for com.google.common.collect Sets newConcurrentHashSet

Introduction

On this page you can find example usage for com.google.common.collect Sets#newConcurrentHashSet.

Prototype

public static <E> Set<E> newConcurrentHashSet() 

Document

Creates a thread-safe set backed by a hash map.
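
Before the project examples below, here is a minimal, self-contained sketch of the method in use: several worker threads add to one set without any external locking. The class name, thread count, and element counts are illustrative only, not taken from any of the projects quoted on this page.

import com.google.common.collect.Sets;

import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class NewConcurrentHashSetDemo {
    public static void main(String[] args) throws InterruptedException {
        // Thread-safe set backed by a ConcurrentHashMap; rejects null elements.
        Set<Integer> seen = Sets.newConcurrentHashSet();

        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int t = 0; t < 4; t++) {
            int offset = t * 1000;
            pool.submit(() -> {
                for (int i = 0; i < 1000; i++) {
                    seen.add(offset + i); // safe to call concurrently
                }
            });
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);

        System.out.println(seen.size()); // prints 4000
    }
}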

Usage

From source file:org.apache.streams.converter.ActivityConverterProcessor.java

protected List<Class> detectClasses(Object document) {

    Set<Class> detectedClasses = Sets.newConcurrentHashSet();
    for (DocumentClassifier classifier : classifiers) {
        List<Class> detected = classifier.detectClasses(document);
        if (detected != null && detected.size() > 0)
            detectedClasses.addAll(detected);
    }

    return Lists.newArrayList(detectedClasses);
}

From source file:org.onosproject.store.consistent.impl.PartitionedDatabase.java

@Override
public CompletableFuture<Set<Entry<String, Versioned<byte[]>>>> mapEntrySet(String mapName) {
    checkState(isOpen.get(), DB_NOT_OPEN);
    Set<Entry<String, Versioned<byte[]>>> entrySet = Sets.newConcurrentHashSet();
    return CompletableFuture.allOf(partitions.stream()
            .map(p -> p.mapEntrySet(mapName).thenApply(entrySet::addAll)).toArray(CompletableFuture[]::new))
            .thenApply(v -> entrySet);
}

From source file:samza.examples.rss.system.RssFeed.java

/**
 * Reads the url and queues the data.
 *
 * @param feedDetail feedDetails object
 * @return set of all article urls that were read from the feed
 * @throws IOException                          when it cannot connect to the url or the url is malformed
 * @throws com.sun.syndication.io.FeedException when it cannot read the feed.
 */
protected Set<String> queueFeedEntries(FeedDetails feedDetail, List<Datum> dataQueue)
        throws IOException, FeedException {
    URL feedUrl = new URL(feedDetail.getUrl());
    URLConnection connection = feedUrl.openConnection();
    connection.setConnectTimeout(this.timeOut);
    SyndFeedInput input = new SyndFeedInput();
    SyndFeed feed = input.build(new InputStreamReader(connection.getInputStream()));
    Set<String> batch = Sets.newConcurrentHashSet();
    for (Object entryObj : feed.getEntries()) {
        SyndEntry entry = (SyndEntry) entryObj;
        ObjectNode nodeEntry = this.serializer.deserialize(entry);
        nodeEntry.put(RSS_KEY, feedDetail.getUrl());
        String entryId = determineId(nodeEntry);
        batch.add(entryId);
        Datum datum = new Datum(nodeEntry, entryId, DateTime.now());
        JsonNode published = nodeEntry.get(DATE_KEY);
        if (published != null) {
            try {
                DateTime date = RFC3339Utils.parseToUTC(published.asText());
                if (date.isAfter(this.publishedSince) && (!seenBefore(entryId, feedDetail.getUrl()))) {
                    dataQueue.add(datum);
                    log.debug("Added entry, {}, to provider queue.", entryId);
                }
            } catch (Exception e) {
                log.trace("Failed to parse date from object node, attempting to add node to queue by default.");
                if (!seenBefore(entryId, feedDetail.getUrl())) {
                    dataQueue.add(datum);
                    log.debug("Added entry, {}, to provider queue.", entryId);
                }
            }
        } else {
            log.debug("No published date present, attempting to add node to queue by default.");
            if (!seenBefore(entryId, feedDetail.getUrl())) {
                dataQueue.add(datum);
                log.debug("Added entry, {}, to provider queue.", entryId);
            }
        }
    }
    return batch;
}

From source file:org.opendaylight.lispflowmapping.implementation.lisp.MapResolver.java

private void updateSubscribers(Rloc itrRloc, Eid reqEid, Eid mapEid, Eid srcEid) {
    SubscriberRLOC subscriberRloc = new SubscriberRLOC(itrRloc, srcEid);
    Eid subscribedEid = mapEid;

    // If the eid in the matched mapping is SourceDest and the requested eid IS NOT, then we subscribe itrRloc only
    // to dst from the src/dst, since that's what's been requested. Note though that any updates to the src/dst
    // mapping will be pushed to dst as well (see sendSMRs in MapServer).
    if (mapEid.getAddressType().equals(SourceDestKeyLcaf.class)
            && !reqEid.getAddressType().equals(SourceDestKeyLcaf.class)) {
        subscribedEid = SourceDestKeyHelper.getDst(mapEid);
    }

    Set<SubscriberRLOC> subscribers = getSubscribers(subscribedEid);
    if (subscribers == null) {
        subscribers = Sets.newConcurrentHashSet();
    } else if (subscribers.contains(subscriberRloc)) {
        // If there is an entry already for this subscriberRloc, remove it, so that it gets the new
        // timestamp
        subscribers.remove(subscriberRloc);
    }
    LOG.trace("Adding new subscriber: " + subscriberRloc.toString());
    subscribers.add(subscriberRloc);
    addSubscribers(subscribedEid, subscribers);
}

From source file:org.illalabs.rss.RssStreamProviderTask.java

/**
 * Reads the url and queues the data.
 * 
 * @param feedUrl
 *            rss feed url
 * @return set of all article urls that were read from the feed
 * @throws IOException
 *             when it cannot connect to the url or the url is malformed
 * @throws FeedException
 *             when it cannot read the feed.
 */
@VisibleForTesting
protected Set<String> queueFeedEntries(URL feedUrl) throws IOException, FeedException {
    Set<String> batch = Sets.newConcurrentHashSet();
    URLConnection connection = feedUrl.openConnection();
    connection.setConnectTimeout(this.timeOut);
    SyndFeedInput input = new SyndFeedInput();
    SyndFeed feed = input.build(new InputStreamReader(connection.getInputStream()));
    for (Object entryObj : feed.getEntries()) {
        SyndEntry entry = (SyndEntry) entryObj;
        ObjectNode nodeEntry = this.serializer.deserialize(entry);
        nodeEntry.put(RSS_KEY, this.feedDetails.getUrl());
        String entryId = determineId(nodeEntry);
        batch.add(entryId);
        Datum datum = new Datum(nodeEntry, entryId, DateTime.now());
        try {
            JsonNode published = nodeEntry.get(DATE_KEY);
            if (published != null) {
                try {
                    DateTime date = RFC3339Utils.parseToUTC(published.asText());
                    if (date.isAfter(this.publishedSince)
                            && (!seenBefore(entryId, this.feedDetails.getUrl()))) {
                        this.dataQueue.put(datum);
                        LOGGER.debug("Added entry, {}, to provider queue.", entryId);
                    }
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                } catch (Exception e) {
                    LOGGER.trace(
                            "Failed to parse date from object node, attempting to add node to queue by default.");
                    if (!seenBefore(entryId, this.feedDetails.getUrl())) {
                        this.dataQueue.put(datum);
                        LOGGER.debug("Added entry, {}, to provider queue.", entryId);
                    }
                }
            } else {
                LOGGER.debug("No published date present, attempting to add node to queue by default.");
                if (!seenBefore(entryId, this.feedDetails.getUrl())) {
                    this.dataQueue.put(datum);
                    LOGGER.debug("Added entry, {}, to provider queue.", entryId);
                }
            }
        } catch (InterruptedException ie) {
            LOGGER.error("Interupted Exception.");
            Thread.currentThread().interrupt();
        }
    }
    return batch;
}

From source file:org.onosproject.incubator.net.virtual.impl.provider.DefaultVirtualFlowRuleProvider.java

@Override
public void executeBatch(NetworkId networkId, FlowRuleBatchOperation batch) {
    checkNotNull(batch);

    for (FlowRuleBatchEntry fop : batch.getOperations()) {
        devirtualize(networkId, fop.target()).forEach(f -> flowRuleService.applyFlowRules(f));
    }

    //FIXME: check the success of all the batch operations
    CompletedBatchOperation status = new CompletedBatchOperation(true, Sets.newConcurrentHashSet(),
            batch.deviceId());

    VirtualFlowRuleProviderService providerService = (VirtualFlowRuleProviderService) providerRegistryService
            .getProviderService(networkId, VirtualFlowRuleProvider.class);
    providerService.batchOperationCompleted(batch.id(), status);
}

From source file:com.navercorp.pinpoint.web.service.map.processor.RpcCallProcessor.java

private Set<AcceptApplication> getAcceptApplications(Application fromApplication, Range range) {
    AcceptApplicationCacheKey cacheKey = new AcceptApplicationCacheKey(fromApplication, range);
    Set<AcceptApplication> cachedAcceptApplications = acceptApplicationCache.get(cacheKey);
    if (cachedAcceptApplications == null) {
        logger.debug("acceptApplicationCache miss {}", fromApplication);
        Set<AcceptApplication> queriedAcceptApplications = hostApplicationMapDao
                .findAcceptApplicationName(fromApplication, range);

        final Set<AcceptApplication> filteredApplicationList = filterAlias(queriedAcceptApplications);
        logger.debug("filteredApplicationList" + filteredApplicationList);

        Set<AcceptApplication> acceptApplications = Sets.newConcurrentHashSet();
        if (!CollectionUtils.isEmpty(filteredApplicationList)) {
            acceptApplications.addAll(filteredApplicationList);
        }
        cachedAcceptApplications = acceptApplicationCache.putIfAbsent(cacheKey, acceptApplications);
        if (cachedAcceptApplications == null) {
            cachedAcceptApplications = acceptApplications;
        }
    } else {
        logger.debug("acceptApplicationCache hit {}", fromApplication);
    }
    return cachedAcceptApplications;
}

From source file:com.google.devtools.build.lib.skyframe.FilesystemValueChecker.java

/**
 * Return a collection of action values which have output files that are not in-sync with
 * the on-disk file value (were modified externally).
 */
Collection<SkyKey> getDirtyActionValues(Map<SkyKey, SkyValue> valuesMap, @Nullable final BatchStat batchStatter,
        ModifiedFileSet modifiedOutputFiles) throws InterruptedException {
    if (modifiedOutputFiles == ModifiedFileSet.NOTHING_MODIFIED) {
        LOG.info("Not checking for dirty actions since nothing was modified");
        return ImmutableList.of();
    }
    LOG.info("Accumulating dirty actions");
    final int numOutputJobs = Runtime.getRuntime().availableProcessors() * 4;
    final Set<SkyKey> actionSkyKeys = new HashSet<>();
    for (SkyKey key : valuesMap.keySet()) {
        if (ACTION_FILTER.apply(key)) {
            actionSkyKeys.add(key);
        }
    }
    final Sharder<Pair<SkyKey, ActionExecutionValue>> outputShards = new Sharder<>(numOutputJobs,
            actionSkyKeys.size());

    for (SkyKey key : actionSkyKeys) {
        outputShards.add(Pair.of(key, (ActionExecutionValue) valuesMap.get(key)));
    }
    LOG.info("Sharded action values for batching");

    ExecutorService executor = Executors.newFixedThreadPool(numOutputJobs,
            new ThreadFactoryBuilder().setNameFormat("FileSystem Output File Invalidator %d").build());

    Collection<SkyKey> dirtyKeys = Sets.newConcurrentHashSet();
    ThrowableRecordingRunnableWrapper wrapper = new ThrowableRecordingRunnableWrapper(
            "FileSystemValueChecker#getDirtyActionValues");

    modifiedOutputFilesCounter.set(0);
    modifiedOutputFilesIntraBuildCounter.set(0);
    final ImmutableSet<PathFragment> knownModifiedOutputFiles = modifiedOutputFiles == ModifiedFileSet.EVERYTHING_MODIFIED
            ? null
            : modifiedOutputFiles.modifiedSourceFiles();

    // Initialized lazily through a supplier because it is only used to check modified
    // TreeArtifacts, which are not frequently used in builds.
    Supplier<NavigableSet<PathFragment>> sortedKnownModifiedOutputFiles = Suppliers
            .memoize(new Supplier<NavigableSet<PathFragment>>() {
                @Override
                public NavigableSet<PathFragment> get() {
                    if (knownModifiedOutputFiles == null) {
                        return null;
                    } else {
                        return ImmutableSortedSet.copyOf(knownModifiedOutputFiles);
                    }
                }
            });

    for (List<Pair<SkyKey, ActionExecutionValue>> shard : outputShards) {
        Runnable job = (batchStatter == null)
                ? outputStatJob(dirtyKeys, shard, knownModifiedOutputFiles, sortedKnownModifiedOutputFiles)
                : batchStatJob(dirtyKeys, shard, batchStatter, knownModifiedOutputFiles,
                        sortedKnownModifiedOutputFiles);
        executor.submit(wrapper.wrap(job));
    }

    boolean interrupted = ExecutorUtil.interruptibleShutdown(executor);
    Throwables.propagateIfPossible(wrapper.getFirstThrownError());
    LOG.info("Completed output file stat checks");
    if (interrupted) {
        throw new InterruptedException();
    }
    return dirtyKeys;
}

From source file:com.vmware.photon.controller.rootscheduler.service.FlatSchedulerService.java

@Override
public PlaceResponse place(PlaceRequest request) throws TException {
    initRequestId(request);
    logger.info("Place request: {}", request);
    Stopwatch watch = Stopwatch.createStarted();

    int numSamples = config.getRootPlaceParams().getMaxFanoutCount();
    long timeoutMs = config.getRootPlaceParams().getTimeout();

    // Pick candidates that satisfy the resource constraints.
    Stopwatch getCandidatesStopwatch = Stopwatch.createStarted();
    List<ResourceConstraint> constraints = getResourceConstraints(request);
    Map<String, ServerAddress> candidates = checker.getCandidates(constraints, numSamples);
    logger.info("elapsed-time flat-place-get-candidates {} milliseconds",
            getCandidatesStopwatch.elapsed(TimeUnit.MILLISECONDS));

    if (candidates.isEmpty()) {
        logger.warn("Place failure, constraints cannot be satisfied for request: {}", request);
        return new PlaceResponse(PlaceResultCode.NO_SUCH_RESOURCE);
    }

    // Send place request to the candidates.
    logger.info("Sending place requests to {} with timeout {} ms", candidates, timeoutMs);
    Stopwatch scoreCandidatesStopwatch = Stopwatch.createStarted();
    final Set<PlaceResponse> okResponses = Sets.newConcurrentHashSet();
    final Set<PlaceResultCode> returnCodes = Sets.newConcurrentHashSet();
    final CountDownLatch done = new CountDownLatch(candidates.size());
    for (Map.Entry<String, ServerAddress> entry : candidates.entrySet()) {
        ServerAddress address = entry.getValue();
        try {
            HostClient hostClient = getHostClient();
            hostClient.setIpAndPort(address.getHost(), address.getPort());
            hostClient.place(request.getResource(), new AsyncMethodCallback<Host.AsyncClient.place_call>() {
                @Override
                public void onComplete(Host.AsyncClient.place_call call) {
                    initRequestId(request);
                    PlaceResponse response;
                    try {
                        response = call.getResult();
                    } catch (TException ex) {
                        onError(ex);
                        return;
                    }
                    logger.info("Received a place response from {}: {}", entry, response);
                    returnCodes.add(response.getResult());
                    if (response.getResult() == PlaceResultCode.OK) {
                        okResponses.add(response);
                    }
                    done.countDown();
                }

                @Override
                public void onError(Exception ex) {
                    initRequestId(request);
                    logger.warn("Failed to get a placement response from {}: {}", entry, ex);
                    done.countDown();
                }
            });
        } catch (RpcException ex) {
            logger.warn("Failed to get a placement response from {}: {}", entry, ex);
            done.countDown();
        }
    }

    // Wait for responses to come back.
    try {
        done.await(timeoutMs, TimeUnit.MILLISECONDS);
    } catch (InterruptedException ex) {
        logger.debug("Got interrupted waiting for place responses", ex);
    }
    logger.info("elapsed-time flat-place-score-candidates {} milliseconds",
            scoreCandidatesStopwatch.elapsed(TimeUnit.MILLISECONDS));

    // Return the best response.
    PlaceResponse response = scoreCalculator.pickBestResponse(okResponses);
    watch.stop();
    if (response == null) {
        // TODO(mmutsuzaki) Arbitrarily defining a precedence for return codes doesn't make sense.
        if (returnCodes.contains(PlaceResultCode.NOT_ENOUGH_CPU_RESOURCE)) {
            response = new PlaceResponse(PlaceResultCode.NOT_ENOUGH_CPU_RESOURCE);
        } else if (returnCodes.contains(PlaceResultCode.NOT_ENOUGH_MEMORY_RESOURCE)) {
            response = new PlaceResponse(PlaceResultCode.NOT_ENOUGH_MEMORY_RESOURCE);
        } else if (returnCodes.contains(PlaceResultCode.NOT_ENOUGH_DATASTORE_CAPACITY)) {
            response = new PlaceResponse(PlaceResultCode.NOT_ENOUGH_DATASTORE_CAPACITY);
        } else if (returnCodes.contains(PlaceResultCode.NO_SUCH_RESOURCE)) {
            response = new PlaceResponse(PlaceResultCode.NO_SUCH_RESOURCE);
        } else if (returnCodes.contains(PlaceResultCode.INVALID_SCHEDULER)) {
            response = new PlaceResponse(PlaceResultCode.INVALID_SCHEDULER);
        } else {
            response = new PlaceResponse(PlaceResultCode.SYSTEM_ERROR);
            String msg = String.format("Received no response in %d ms", watch.elapsed(TimeUnit.MILLISECONDS));
            response.setError(msg);
            logger.error(msg);
        }
    } else {
        logger.info("Returning bestResponse: {} in {} ms", response, watch.elapsed(TimeUnit.MILLISECONDS));
    }
    return response;
}

From source file:org.onosproject.store.trivial.impl.SimpleLinkStore.java

/**
 * Creates concurrent readable, synchronized HashMultimap.
 *
 * @return SetMultimap
 */
private static <K, V> SetMultimap<K, V> createSynchronizedHashMultiMap() {
    return synchronizedSetMultimap(Multimaps.newSetMultimap(new ConcurrentHashMap<K, Collection<V>>(),
            () -> Sets.newConcurrentHashSet()));
}
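
Since Multimaps.newSetMultimap takes a com.google.common.base.Supplier as its set factory, the lambda in this last example can equally be written as a method reference. A sketch of the same helper with that substitution (behavior unchanged):

private static <K, V> SetMultimap<K, V> createSynchronizedHashMultiMap() {
    // Sets::newConcurrentHashSet satisfies the single-method Supplier interface directly.
    return synchronizedSetMultimap(
            Multimaps.newSetMultimap(new ConcurrentHashMap<>(), Sets::newConcurrentHashSet));
}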