List of usage examples for com.google.common.collect.Sets#newConcurrentHashSet
public static <E> Set<E> newConcurrentHashSet()
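Before the real-world examples, here is a minimal sketch of basic usage (class and variable names are illustrative, not taken from any source below). Guava documents newConcurrentHashSet() as creating a thread-safe set backed by a hash map, so it can be shared across threads without external synchronization:

import com.google.common.collect.Sets;
import java.util.Set;

public class NewConcurrentHashSetSketch {
    public static void main(String[] args) {
        // Mutable, thread-safe set; safe to add/remove from multiple threads.
        Set<String> seen = Sets.newConcurrentHashSet();
        seen.add("alpha");
        boolean added = seen.add("alpha"); // false: element already present
        System.out.println(seen.contains("alpha") + " " + added); // true false
    }
}

The examples below use the set in two recurring ways: as a shared accumulator mutated concurrently (by futures, event listeners, or sink callbacks) or simply as a mutable set field initialized once in a constructor.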
From source file:org.onosproject.store.consistent.impl.PartitionedDatabase.java
@Override
public CompletableFuture<Set<String>> maps() {
    checkState(isOpen.get(), DB_NOT_OPEN);
    // Thread-safe accumulator: every partition's future adds its map names in parallel.
    Set<String> mapNames = Sets.newConcurrentHashSet();
    return CompletableFuture
            .allOf(partitions.stream()
                    .map(db -> db.maps().thenApply(mapNames::addAll))
                    .toArray(CompletableFuture[]::new))
            .thenApply(v -> mapNames);
}
From source file:org.onosproject.cli.net.WipeOutCommand.java
private void wipeOutIntents() {
    print("Wiping intents");
    IntentService intentService = get(IntentService.class);
    Set<Key> keysToWithdrawn = Sets.newConcurrentHashSet();
    Set<Intent> intentsToWithdrawn = Tools.stream(intentService.getIntents())
            .filter(intent -> intentService.getIntentState(intent.key()) != WITHDRAWN)
            .collect(Collectors.toSet());
    intentsToWithdrawn.stream().map(Intent::key).forEach(keysToWithdrawn::add);
    CompletableFuture<Void> completableFuture = new CompletableFuture<>();
    // The listener runs on the intent event thread and removes keys while this
    // thread waits, so the key set must be thread-safe.
    IntentListener listener = e -> {
        if (e.type() == IntentEvent.Type.WITHDRAWN) {
            keysToWithdrawn.remove(e.subject().key());
        }
        if (keysToWithdrawn.isEmpty()) {
            completableFuture.complete(null);
        }
    };
    intentService.addListener(listener);
    intentsToWithdrawn.forEach(intentService::withdraw);
    try {
        if (!intentsToWithdrawn.isEmpty()) {
            // Wait 1.5 seconds for each intent
            completableFuture.get(intentsToWithdrawn.size() * 1500L, TimeUnit.MILLISECONDS);
        }
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        print("Encountered exception while withdrawing intents: " + e.toString());
    } finally {
        intentService.removeListener(listener);
    }
    intentsToWithdrawn.forEach(intentService::purge);
}
From source file:gobblin.metrics.reporter.ContextAwareReporter.java
public ContextAwareReporter(String name, Config config) {
    this.name = name;
    this.config = config;
    this.started = false;
    RootMetricContext.get().addNewReporter(this);
    this.notificationTargetUUID = RootMetricContext.get()
            .addNotificationTarget(new Function<Notification, Void>() {
                @Nullable
                @Override
                public Void apply(Notification input) {
                    notificationCallback(input);
                    return null;
                }
            });
    this.contextFilter = ContextFilterFactory.createContextFilter(config);
    // Thread-safe set: contexts may be added from notification callbacks
    // while the reporter iterates over them.
    this.contextsToReport = Sets.newConcurrentHashSet();
    for (MetricContext context : this.contextFilter.getMatchingContexts()) {
        this.contextsToReport.add(context.getInnerMetricContext());
    }
}
From source file:com.android.build.gradle.shrinker.IncrementalShrinker.java
/**
 * Perform incremental shrinking, in the supported cases (where only code in pre-existing
 * methods has been modified).
 *
 * <p>The general idea is this: for every method in modified classes, remove all outgoing
 * "code reference" edges, add them again based on the current code and then set the counters
 * again (traverse the graph) using the new set of edges.
 *
 * <p>The counters are re-calculated every time from scratch (starting from known entry points
 * from the config file) to avoid cycles being left in the output.
 *
 * @throws IncrementalRunImpossibleException If incremental shrinking is impossible and a full
 *     run should be done instead.
 */
public void incrementalRun(@NonNull Iterable<TransformInput> inputs,
        @NonNull TransformOutputProvider output) throws IOException, IncrementalRunImpossibleException {
    // All three sets are populated concurrently while the inputs are processed in parallel.
    final Set<T> classesToWrite = Sets.newConcurrentHashSet();
    final Set<File> classFilesToDelete = Sets.newConcurrentHashSet();
    final Set<PostProcessingData.UnresolvedReference<T>> unresolvedReferences =
            Sets.newConcurrentHashSet();

    Stopwatch stopwatch = Stopwatch.createStarted();

    SetMultimap<T, String> oldState = resetState();
    logTime("resetState()", stopwatch);

    processInputs(inputs, classesToWrite, unresolvedReferences);
    logTime("processInputs", stopwatch);

    finishGraph(unresolvedReferences);
    logTime("finish graph", stopwatch);

    setCounters(CounterSet.SHRINK);
    logTime("set counters", stopwatch);

    chooseClassesToWrite(inputs, output, classesToWrite, classFilesToDelete, oldState);
    logTime("choose classes", stopwatch);

    updateClassFiles(classesToWrite, classFilesToDelete, inputs, output);
    logTime("update class files", stopwatch);

    mGraph.saveState();
    logTime("save state", stopwatch);
}
From source file:com.arpnetworking.metrics.mad.performance.FilePerfTestBase.java
/**
 * Runs a filter.
 *
 * @param pipelineConfigurationFile Pipeline configuration file.
 * @param duration Timeout period.
 * @param variables Substitution key-value pairs into pipeline configuration file.
 * @throws IOException if configuration cannot be loaded.
 */
protected void benchmark(final String pipelineConfigurationFile, final Duration duration,
        final ImmutableMap<String, String> variables) throws IOException {
    // Replace any variables in the configuration file
    String configuration = Resources.toString(Resources.getResource(pipelineConfigurationFile),
            Charsets.UTF_8);
    for (final Map.Entry<String, String> entry : variables.entrySet()) {
        configuration = configuration.replace(entry.getKey(), entry.getValue());
    }

    // Load the specified stock configuration
    final PipelineConfiguration stockPipelineConfiguration = new StaticConfiguration.Builder()
            .addSource(new JsonNodeLiteralSource.Builder().setSource(configuration).build())
            .setObjectMapper(PipelineConfiguration.createObjectMapper(_injector)).build()
            .getRequiredAs(PipelineConfiguration.class);

    // Canary tracking
    LOGGER.info(String.format("Expected canaries; periods=%s", stockPipelineConfiguration.getPeriods()));
    final CountDownLatch latch = new CountDownLatch(stockPipelineConfiguration.getPeriods().size());
    // Thread-safe set: the sink callback below runs on pipeline worker threads.
    final Set<Period> periods = Sets.newConcurrentHashSet();

    // Create custom "canary" sink
    final ListeningSink sink = new ListeningSink(periodicData -> {
        if (periodicData != null) {
            for (final String metricName : periodicData.getData().keys()) {
                if (TestFileGenerator.CANARY.equals(metricName)) {
                    if (periods.add(periodicData.getPeriod())) {
                        LOGGER.info(String.format("Canary flew; filter=%s, period=%s", this.getClass(),
                                periodicData.getPeriod()));
                        latch.countDown();
                    }
                }
            }
        }
        return null;
    });

    // Add the custom "canary" sink
    final List<Sink> benchmarkSinks = Lists.newArrayList(stockPipelineConfiguration.getSinks());
    benchmarkSinks.add(sink);

    // Create the custom configuration
    final PipelineConfiguration benchmarkPipelineConfiguration =
            OvalBuilder.<PipelineConfiguration, PipelineConfiguration.Builder>clone(stockPipelineConfiguration)
                    .setSinks(benchmarkSinks)
                    .build();

    // Instantiate the pipeline
    final Pipeline pipeline = new Pipeline(benchmarkPipelineConfiguration);

    // Execute the pipeline until the canary flies the coop
    try {
        LOGGER.debug(String.format("Launching pipeline; configuration=%s", pipelineConfigurationFile));
        final Stopwatch timer = Stopwatch.createUnstarted();
        timer.start();
        pipeline.launch();

        if (!latch.await(duration.getMillis(), TimeUnit.MILLISECONDS)) {
            LOGGER.error("Test timed out");
            throw new RuntimeException("Test timed out");
        }

        timer.stop();
        LOGGER.info(String.format("Performance filter result; filter=%s, seconds=%s", this.getClass(),
                timer.elapsed(TimeUnit.SECONDS)));
    } catch (final InterruptedException e) {
        Thread.interrupted();
        throw new RuntimeException("Test interrupted");
    } finally {
        pipeline.shutdown();
    }
}
From source file:io.wcm.caravan.jaxrs.publisher.impl.ServletContainerBridge.java
@Activate
void activate(ComponentContext componentContext) {
    // bundle which contains the JAX-RS services
    bundle = (Bundle) componentContext.getProperties().get(PROPERTY_BUNDLE);
    bundleContext = bundle.getBundleContext();

    // initialize component tracker to detect local and global JAX-RS components for current bundle
    localComponents = Sets.newConcurrentHashSet();
    globalComponents = Sets.newConcurrentHashSet();
    localComponentTracker = new JaxRsComponentTracker();
    localComponentTracker.open();

    // initialize JAX-RS application and Jersey Servlet container
    application = new JaxRsApplication(localComponents, globalComponents);
    servletContainer = new ServletContainer(ResourceConfig.forApplication(application));
}
From source file:org.eclipse.sirius.business.internal.dialect.description.InterpretedExpressionQueryProviderRegistry.java
/**
 * Constructor.
 *
 * @param registry
 *            the registry to look for registered providers.
 * @param context
 *            the plug-in in the context of which we're running; used for
 *            logging.
 */
public InterpretedExpressionQueryProviderRegistry(IExtensionRegistry registry, Plugin context) {
    this.registry = Preconditions.checkNotNull(registry);
    this.context = context;
    this.entries = Sets.newConcurrentHashSet();
}
From source file:org.onosproject.provider.nil.flow.impl.NullFlowRuleProvider.java
@Override
public void executeBatch(FlowRuleBatchOperation batch) {
    Set<FlowEntry> flowRules = flowTable.getOrDefault(batch.deviceId(), Sets.newConcurrentHashSet());
    for (FlowRuleBatchEntry fbe : batch.getOperations()) {
        switch (fbe.operator()) {
        case ADD:
            flowRules.add(new DefaultFlowEntry(fbe.target()));
            break;
        case REMOVE:
            flowRules.remove(new DefaultFlowEntry(fbe.target()));
            break;
        case MODIFY:
            FlowEntry entry = new DefaultFlowEntry(fbe.target());
            flowRules.remove(entry);
            flowRules.add(entry);
            break;
        default:
            log.error("Unknown flow operation: {}", fbe);
        }
    }
    flowTable.put(batch.deviceId(), flowRules);
    providerService.batchOperationCompleted(batch.id(),
            new CompletedBatchOperation(true, Collections.emptySet(), batch.deviceId()));
}
From source file:org.restcomm.media.control.mgcp.connection.AbstractMgcpConnection.java
public AbstractMgcpConnection(int identifier, int callId, int halfOpenTimeout, int openTimeout,
        MgcpEventProvider eventProvider, ListeningScheduledExecutorService executor) {
    // Connection State
    this.identifier = identifier;
    this.callIdentifier = callId;
    this.mode = ConnectionMode.INACTIVE;
    this.state = MgcpConnectionState.CLOSED;
    this.stateLock = new Object();

    // Events
    this.eventProvider = eventProvider;
    this.observers = Sets.newConcurrentHashSet();

    // Timers
    this.executor = executor;
    this.timerFuture = null;
    this.halfOpenTimeout = halfOpenTimeout;
    this.timeout = openTimeout;
}
From source file:io.druid.curator.inventory.CuratorInventoryManager.java
public CuratorInventoryManager(CuratorFramework curatorFramework, InventoryManagerConfig config,
        ExecutorService exec, CuratorInventoryManagerStrategy<ContainerClass, InventoryClass> strategy) {
    this.curatorFramework = curatorFramework;
    this.config = config;
    this.strategy = strategy;

    this.containers = new MapMaker().makeMap();
    this.uninitializedInventory = Sets.newConcurrentHashSet();

    // NOTE: cacheData is temporarily set to false and we get data directly from ZK on each event.
    // This is a workaround to solve curator's out-of-order events problem
    // https://issues.apache.org/jira/browse/CURATOR-191
    this.cacheFactory = new SimplePathChildrenCacheFactory(false, true,
            new ShutdownNowIgnoringExecutorService(exec));
}