List of usage examples for com.google.common.collect Sets newConcurrentHashSet
public static <E> Set<E> newConcurrentHashSet()
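Per the Guava javadoc, this creates a thread-safe set backed by a ConcurrentHashMap instance, so it carries the same concurrency guarantees as that map and needs no external locking. Before the project examples below, here is a minimal self-contained sketch (class and variable names are illustrative, not taken from any of the sources):

import com.google.common.collect.Sets;

import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class NewConcurrentHashSetDemo {
    public static void main(String[] args) throws InterruptedException {
        // Thread-safe set; concurrent adds from multiple threads are safe.
        final Set<Integer> seen = Sets.newConcurrentHashSet();
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 100; i++) {
            final int value = i % 10;
            pool.execute(() -> seen.add(value));
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
        System.out.println(seen.size()); // 10: duplicates collapsed, no lost updates
    }
}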
From source file:com.streamsets.pipeline.lib.jdbc.multithread.MultithreadedTableProvider.java
public MultithreadedTableProvider(
        Map<String, TableContext> tableContextMap,
        Queue<String> sortedTableOrder,
        Map<Integer, Integer> threadNumToMaxTableSlots,
        int numThreads,
        BatchTableStrategy batchTableStrategy) {
    this.tableContextMap = new ConcurrentHashMap<>(tableContextMap);
    initializeRemainingSchemasToTableContexts();
    this.numThreads = numThreads;
    this.batchTableStrategy = batchTableStrategy;

    final Map<String, Integer> tableNameToOrder = new HashMap<>();
    int order = 1;
    for (String tableName : sortedTableOrder) {
        tableNameToOrder.put(tableName, order++);
    }

    int sharedPartitionQueueSize = 0;
    for (TableContext tableContext : tableContextMap.values()) {
        sharedPartitionQueueSize += maxNumActivePartitions(tableContext);
    }
    sharedAvailableTablesQueue = new ArrayBlockingQueue<TableRuntimeContext>(
            sharedPartitionQueueSize * SHARED_QUEUE_SIZE_FUDGE_FACTOR, true);

    this.sortedTableOrder = new ArrayDeque<>(sortedTableOrder);

    // always construct initial values for partition queue based on table contexts
    // if stored offsets come into play, those will be handled by a subsequent invocation
    generateInitialPartitionsInSharedQueue(false, null, null);

    this.tablesWithNoMoreData = Sets.newConcurrentHashSet();
    this.threadNumToMaxTableSlots = threadNumToMaxTableSlots;
}
From source file:org.onosproject.cpman.impl.ControlPlaneMonitor.java
@Activate
public void activate() {
    cpuMetrics = genMDbBuilder(DEFAULT_RESOURCE, Type.CPU, CPU_METRICS);
    memoryMetrics = genMDbBuilder(DEFAULT_RESOURCE, Type.MEMORY, MEMORY_METRICS);
    controlMessageMap = Maps.newConcurrentMap();
    diskMetricsMap = Maps.newConcurrentMap();
    networkMetricsMap = Maps.newConcurrentMap();
    cpuBuf = Maps.newConcurrentMap();
    memoryBuf = Maps.newConcurrentMap();
    diskBuf = Maps.newConcurrentMap();
    networkBuf = Maps.newConcurrentMap();
    ctrlMsgBuf = Maps.newConcurrentMap();
    availableResourceMap = Maps.newConcurrentMap();
    availableDeviceIdSet = Sets.newConcurrentHashSet();

    communicationService.<ControlMetricsRequest, ControlLoadSnapshot>addSubscriber(CONTROL_STATS,
            SERIALIZER::decode, this::handleMetricsRequest, SERIALIZER::encode);
    communicationService.<ControlResourceRequest, Set<String>>addSubscriber(CONTROL_RESOURCE,
            SERIALIZER::decode, this::handleResourceRequest, SERIALIZER::encode);

    log.info("Started");
}
From source file:com.android.builder.shrinker.Shrinker.java
private void buildGraph(Iterable<TransformInput> programInputs, Iterable<TransformInput> libraryInputs)
        throws IOException {
    final Set<T> virtualMethods = Sets.newConcurrentHashSet();
    final Set<T> multipleInheritance = Sets.newConcurrentHashSet();
    final Set<UnresolvedReference<T>> unresolvedReferences = Sets.newConcurrentHashSet();
    readPlatformJar();
    for (TransformInput input : libraryInputs) {
        for (File folder : getAllDirectories(input)) {
            for (final File classFile : getClassFiles(folder)) {
                mExecutor.execute(new Callable<Void>() {
                    @Override
                    public Void call() throws Exception {
                        processLibraryClass(Files.toByteArray(classFile));
                        return null;
                    }
                });
            }
        }
    }
    for (TransformInput input : programInputs) {
        for (File folder : getAllDirectories(input)) {
            for (final File classFile : getClassFiles(folder)) {
                mExecutor.execute(new Callable<Void>() {
                    @Override
                    public Void call() throws Exception {
                        processNewClassFile(classFile, virtualMethods, multipleInheritance,
                                unresolvedReferences);
                        return null;
                    }
                });
            }
        }
    }
    waitForAllTasks();
    mGraph.allClassesAdded();
    handleOverrides(virtualMethods);
    handleMultipleInheritance(multipleInheritance);
    resolveReferences(unresolvedReferences);
    waitForAllTasks();
    mGraph.checkDependencies();
}
From source file:org.onosproject.store.consistent.impl.PartitionedDatabase.java
@Override
public CompletableFuture<Set<String>> mapKeySet(String mapName) {
    checkState(isOpen.get(), DB_NOT_OPEN);
    Set<String> keySet = Sets.newConcurrentHashSet();
    return CompletableFuture.allOf(partitions.stream()
            .map(p -> p.mapKeySet(mapName).thenApply(keySet::addAll))
            .toArray(CompletableFuture[]::new))
            .thenApply(v -> keySet);
}
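The snippet above shows a common fan-out pattern: each partition's CompletableFuture dumps its partial result into one shared concurrent set, and the combined future completes with that set. A stripped-down, self-contained sketch of the same pattern (the data and class name are illustrative):

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Sets;

import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CompletableFuture;

public class FanOutDemo {
    public static void main(String[] args) {
        List<List<String>> partitions = ImmutableList.of(
                Arrays.asList("a", "b"), Arrays.asList("b", "c"), Arrays.asList("d"));
        // Shared sink; completion callbacks may run on different threads.
        Set<String> keys = Sets.newConcurrentHashSet();
        CompletableFuture<Set<String>> all = CompletableFuture.allOf(
                partitions.stream()
                        .map(p -> CompletableFuture.supplyAsync(() -> p).thenApply(keys::addAll))
                        .toArray(CompletableFuture[]::new))
                .thenApply(v -> keys);
        System.out.println(all.join()); // [a, b, c, d] in some iteration order
    }
}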
From source file:com.google.gerrit.lucene.AbstractLuceneIndex.java
AbstractLuceneIndex(Schema<V> schema, SitePaths sitePaths, Directory dir, String name,
        String subIndex, GerritIndexWriterConfig writerConfig, SearcherFactory searcherFactory)
        throws IOException {
    this.schema = schema;
    this.sitePaths = sitePaths;
    this.dir = dir;
    this.name = name;
    String index = Joiner.on('_').skipNulls().join(name, subIndex);
    IndexWriter delegateWriter;
    long commitPeriod = writerConfig.getCommitWithinMs();
    if (commitPeriod < 0) {
        delegateWriter = new AutoCommitWriter(dir, writerConfig.getLuceneConfig());
    } else if (commitPeriod == 0) {
        delegateWriter = new AutoCommitWriter(dir, writerConfig.getLuceneConfig(), true);
    } else {
        final AutoCommitWriter autoCommitWriter = new AutoCommitWriter(dir, writerConfig.getLuceneConfig());
        delegateWriter = autoCommitWriter;
        autoCommitExecutor = new ScheduledThreadPoolExecutor(1,
                new ThreadFactoryBuilder().setNameFormat(index + " Commit-%d").setDaemon(true).build());
        @SuppressWarnings("unused") // Error handling within Runnable.
        Future<?> possiblyIgnoredError = autoCommitExecutor.scheduleAtFixedRate(() -> {
            try {
                if (autoCommitWriter.hasUncommittedChanges()) {
                    autoCommitWriter.manualFlush();
                    autoCommitWriter.commit();
                }
            } catch (IOException e) {
                log.error("Error committing " + index + " Lucene index", e);
            } catch (OutOfMemoryError e) {
                log.error("Error committing " + index + " Lucene index", e);
                try {
                    autoCommitWriter.close();
                } catch (IOException e2) {
                    log.error("SEVERE: Error closing " + index + " Lucene index after OOM;"
                            + " index may be corrupted.", e);
                }
            }
        }, commitPeriod, commitPeriod, MILLISECONDS);
    }
    writer = new TrackingIndexWriter(delegateWriter);
    searcherManager = new WrappableSearcherManager(writer.getIndexWriter(), true, searcherFactory);

    notDoneNrtFutures = Sets.newConcurrentHashSet();

    writerThread = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1,
            new ThreadFactoryBuilder().setNameFormat(index + " Write-%d").setDaemon(true).build()));

    reopenThread = new ControlledRealTimeReopenThread<>(writer, searcherManager,
            0.500 /* maximum stale age (seconds) */, 0.010 /* minimum stale age (seconds) */);
    reopenThread.setName(index + " NRT");
    reopenThread.setPriority(Math.min(Thread.currentThread().getPriority() + 2, Thread.MAX_PRIORITY));
    reopenThread.setDaemon(true);

    // This must be added after the reopen thread is created. The reopen thread
    // adds its own listener which copies its internally last-refreshed
    // generation to the searching generation. removeIfDone() depends on the
    // searching generation being up to date when calling
    // reopenThread.waitForGeneration(gen, 0), therefore the reopen thread's
    // internal listener needs to be called first.
    // TODO(dborowitz): This may have been fixed by
    // http://issues.apache.org/jira/browse/LUCENE-5461
    searcherManager.addListener(new RefreshListener() {
        @Override
        public void beforeRefresh() throws IOException {
        }

        @Override
        public void afterRefresh(boolean didRefresh) throws IOException {
            for (NrtFuture f : notDoneNrtFutures) {
                f.removeIfDone();
            }
        }
    });
    reopenThread.start();
}
From source file:org.sakaiproject.nakamura.lite.storage.mem.MemoryStorageClient.java
private void addIndexValue(String keySpace, String columnFamily, String key, String columnKey,
        Object columnValue) {
    String indexKey = keyHash(keySpace, columnFamily, columnKey, columnValue);
    @SuppressWarnings("unchecked")
    Set<String> index = (Set<String>) store.get(indexKey);
    if (index == null) {
        index = Sets.newConcurrentHashSet();
        store.put(indexKey, index);
    }
    index.add(rowHash(keySpace, columnFamily, key));
}
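One caveat worth noting about the example above: the null-check-then-put sequence is not atomic, so if store were mutated by several threads at once, two of them could each create a fresh set and index entries added to the loser would be lost. With a ConcurrentMap the get-or-create step can be made atomic via computeIfAbsent. A minimal sketch, assuming a hypothetical ConcurrentMap-backed store (not the actual Sakai class):

import com.google.common.collect.Sets;

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class AtomicIndexDemo {
    private final ConcurrentMap<String, Set<String>> store = new ConcurrentHashMap<>();

    // Atomically fetches or creates the index set, then records the row.
    public void addIndexValue(String indexKey, String rowKey) {
        store.computeIfAbsent(indexKey, k -> Sets.newConcurrentHashSet()).add(rowKey);
    }
}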
From source file:com.github.hilcode.versionator.DefaultPomParser.java
@Override
public ImmutableList<Property> findProperties(final Document pom) {
    final Set<Property> properties = Sets.newConcurrentHashSet();
    final NodeList propertiesNodeList = evaluateNodes(this.propertyExpr, pom);
    for (int i = 0; i < propertiesNodeList.getLength(); i++) {
        final Node propertyNode = propertiesNodeList.item(i);
        final Key key = Key.BUILDER.build(propertyNode.getNodeName());
        final String value = propertyNode.getFirstChild() != null
                ? propertyNode.getFirstChild().getTextContent().trim()
                : "";
        properties.add(Property.BUILDER.build(key, value));
    }
    final List<Property> sortedProperties = Lists.newArrayList(properties);
    Collections.sort(sortedProperties);
    return ImmutableList.copyOf(sortedProperties);
}
From source file:it.anyplace.sync.bep.BlockPusher.java
public FileUploadObserver pushFile(final DataSource dataSource, @Nullable FileInfo fileInfo,
        final String folder, final String path) {
    checkArgument(connectionHandler.hasFolder(folder),
            "supplied connection handler %s will not share folder %s", connectionHandler, folder);
    checkArgument(fileInfo == null || equal(fileInfo.getFolder(), folder));
    checkArgument(fileInfo == null || equal(fileInfo.getPath(), path));
    try {
        final ExecutorService monitoringProcessExecutorService = Executors.newCachedThreadPool();
        final long fileSize = dataSource.getSize();
        final Set<String> sentBlocks = Sets.newConcurrentHashSet();
        final AtomicReference<Exception> uploadError = new AtomicReference<>();
        final AtomicBoolean isCompleted = new AtomicBoolean(false);
        final Object updateLock = new Object();
        final Object listener = new Object() {
            @Subscribe
            public void handleRequestMessageReceivedEvent(RequestMessageReceivedEvent event) {
                BlockExchageProtos.Request request = event.getMessage();
                if (equal(request.getFolder(), folder) && equal(request.getName(), path)) {
                    try {
                        final String hash = BaseEncoding.base16().encode(request.getHash().toByteArray());
                        logger.debug("handling block request = {}:{}-{} ({})", request.getName(),
                                request.getOffset(), request.getSize(), hash);
                        byte[] data = dataSource.getBlock(request.getOffset(), request.getSize(), hash);
                        checkNotNull(data, "data not found for hash = %s", hash);
                        final Future future = connectionHandler.sendMessage(Response.newBuilder()
                                .setCode(BlockExchageProtos.ErrorCode.NO_ERROR)
                                .setData(ByteString.copyFrom(data))
                                .setId(request.getId()).build());
                        monitoringProcessExecutorService.submit(new Runnable() {
                            @Override
                            public void run() {
                                try {
                                    future.get();
                                    sentBlocks.add(hash);
                                    synchronized (updateLock) {
                                        updateLock.notifyAll();
                                    }
                                    // TODO retry on error, register error and throw on watcher
                                } catch (InterruptedException ex) {
                                    // return and do nothing
                                } catch (ExecutionException ex) {
                                    uploadError.set(ex);
                                    synchronized (updateLock) {
                                        updateLock.notifyAll();
                                    }
                                }
                            }
                        });
                    } catch (Exception ex) {
                        logger.error("error handling block request", ex);
                        connectionHandler.sendMessage(Response.newBuilder()
                                .setCode(BlockExchageProtos.ErrorCode.GENERIC)
                                .setId(request.getId()).build());
                        uploadError.set(ex);
                        synchronized (updateLock) {
                            updateLock.notifyAll();
                        }
                    }
                }
            }
        };
        connectionHandler.getEventBus().register(listener);
        logger.debug("send index update for file = {}", path);
        final Object indexListener = new Object() {
            @Subscribe
            public void handleIndexRecordAquiredEvent(IndexHandler.IndexRecordAquiredEvent event) {
                if (equal(event.getFolder(), folder)) {
                    for (FileInfo fileInfo : event.getNewRecords()) {
                        if (equal(fileInfo.getPath(), path)
                                && equal(fileInfo.getHash(), dataSource.getHash())) { // TODO check not invalid
                            // sentBlocks.addAll(dataSource.getHashes());
                            isCompleted.set(true);
                            synchronized (updateLock) {
                                updateLock.notifyAll();
                            }
                        }
                    }
                }
            }
        };
        if (indexHandler != null) {
            indexHandler.getEventBus().register(indexListener);
        }
        final IndexUpdate indexUpdate = sendIndexUpdate(folder,
                BlockExchageProtos.FileInfo.newBuilder().setName(path).setSize(fileSize)
                        .setType(BlockExchageProtos.FileInfoType.FILE)
                        .addAllBlocks(dataSource.getBlocks()),
                fileInfo == null ? null : fileInfo.getVersionList()).getRight();
        final FileUploadObserver messageUploadObserver = new FileUploadObserver() {
            @Override
            public void close() {
                logger.debug("closing upload process");
                try {
                    connectionHandler.getEventBus().unregister(listener);
                    monitoringProcessExecutorService.shutdown();
                    if (indexHandler != null) {
                        indexHandler.getEventBus().unregister(indexListener);
                    }
                } catch (Exception ex) {
                }
                if (closeConnection && connectionHandler != null) {
                    connectionHandler.close();
                }
                if (indexHandler != null) {
                    FileInfo fileInfo = indexHandler.pushRecord(indexUpdate.getFolder(),
                            Iterables.getOnlyElement(indexUpdate.getFilesList()));
                    logger.info("sent file info record = {}", fileInfo);
                }
            }

            @Override
            public double getProgress() {
                return isCompleted() ? 1d : sentBlocks.size() / ((double) dataSource.getHashes().size());
            }

            @Override
            public String getProgressMessage() {
                return (Math.round(getProgress() * 1000d) / 10d) + "% "
                        + sentBlocks.size() + "/" + dataSource.getHashes().size();
            }

            @Override
            public boolean isCompleted() {
                // return sentBlocks.size() == dataSource.getHashes().size();
                return isCompleted.get();
            }

            @Override
            public double waitForProgressUpdate() throws InterruptedException {
                synchronized (updateLock) {
                    updateLock.wait();
                }
                if (uploadError.get() != null) {
                    throw new RuntimeException(uploadError.get());
                }
                return getProgress();
            }

            @Override
            public DataSource getDataSource() {
                return dataSource;
            }
        };
        return messageUploadObserver;
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}
From source file:com.cinchapi.concourse.importer.cli.ImportCli.java
@Override
protected void doTask() {
    final ImportOptions opts = (ImportOptions) options;
    final Set<Long> records;
    final Constructor<? extends Importer> constructor = getConstructor(opts.type);
    if (opts.data == null) { // Import data from stdin
        Importer importer = Reflection.newInstance(constructor, concourse);
        if (!opts.dynamic.isEmpty()) {
            importer.setParams(options.dynamic);
        }
        if (importer instanceof Headered && !opts.header.isEmpty()) {
            ((Headered) importer).parseHeader(opts.header);
        }
        try {
            ConsoleReader reader = new ConsoleReader();
            String line;
            records = Sets.newLinkedHashSet();
            Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
                // Interactive import is ended when the user presses CTRL + C,
                // so we need this shutdown hook to ensure that they get
                // feedback about the import before the JVM dies.
                @Override
                public void run() {
                    if (options.verbose) {
                        System.out.println(records);
                    }
                    System.out.println(Strings.format("Imported data into {} records", records.size()));
                }
            }));
            try {
                final AtomicBoolean lock = new AtomicBoolean(false);
                new Thread(new Runnable() {
                    // If there is no input in 100ms, assume that the session is
                    // interactive (i.e. not piped) and display a prompt.
                    @Override
                    public void run() {
                        try {
                            Thread.sleep(100);
                            if (lock.compareAndSet(false, true)) {
                                System.out.println("Importing from stdin. Press CTRL + C when finished");
                            }
                        } catch (InterruptedException e) {
                        }
                    }
                }).start();
                while ((line = reader.readLine()) != null) {
                    try {
                        lock.set(true);
                        records.addAll(importer.importString(line));
                    } catch (Exception e) {
                        System.err.println(e);
                    }
                }
            } catch (IOException e) {
                throw Throwables.propagate(e);
            }
        } catch (IOException e) {
            throw Throwables.propagate(e);
        } finally {
            try {
                TerminalFactory.get().restore();
            } catch (Exception e) {
                throw Throwables.propagate(e);
            }
        }
    } else {
        String path = FileOps.expandPath(opts.data, getLaunchDirectory());
        Collection<String> files = FileOps.isDirectory(path) ? scan(Paths.get(path))
                : ImmutableList.of(path);
        Stopwatch watch = Stopwatch.createUnstarted();
        if (files.size() > 1) {
            records = Sets.newConcurrentHashSet();
            final Queue<String> filesQueue = (Queue<String>) files;
            List<Runnable> runnables = Lists.newArrayListWithCapacity(opts.numThreads);
            // Create just enough Runnables with instantiated Importers in
            // advance. Each of those Runnables will work until #filesQueue
            // is exhausted.
            opts.numThreads = Math.min(opts.numThreads, files.size());
            for (int i = 0; i < opts.numThreads; ++i) {
                final Importer importer0 = Reflection.newInstance(constructor,
                        i == 0 ? concourse
                                : Concourse.connect(opts.host, opts.port, opts.username,
                                        opts.password, opts.environment));
                if (!opts.dynamic.isEmpty()) {
                    importer0.setParams(opts.dynamic);
                }
                if (importer0 instanceof Headered && !opts.header.isEmpty()) {
                    ((Headered) importer0).parseHeader(opts.header);
                }
                runnables.add(new Runnable() {
                    private final Importer importer = importer0;

                    @Override
                    public void run() {
                        String file;
                        while ((file = filesQueue.poll()) != null) {
                            records.addAll(importer.importFile(file));
                        }
                    }
                });
            }
            ExecutorService executor = Executors.newFixedThreadPool(runnables.size());
            System.out.println("Starting import...");
            watch.start();
            for (Runnable runnable : runnables) {
                executor.execute(runnable);
            }
            executor.shutdown();
            try {
                if (!executor.awaitTermination(1, TimeUnit.MINUTES)) {
                    while (!executor.isTerminated()) {
                        // Block until all tasks are completed and provide some
                        // feedback to the user.
                        System.out.print('.');
                    }
                }
            } catch (InterruptedException e) {
                throw Throwables.propagate(e);
            }
        } else {
            Importer importer = Reflection.newInstance(constructor, concourse);
            if (!opts.dynamic.isEmpty()) {
                importer.setParams(opts.dynamic);
            }
            if (importer instanceof Headered && !opts.header.isEmpty()) {
                ((Headered) importer).parseHeader(opts.header);
            }
            System.out.println("Starting import...");
            watch.start();
            records = importer.importFile(files.iterator().next());
        }
        watch.stop();
        long elapsed = watch.elapsed(TimeUnit.MILLISECONDS);
        double seconds = elapsed / 1000.0;
        if (options.verbose) {
            System.out.println(records);
        }
        System.out.println(MessageFormat.format(
                "Imported data into {0} records in {1} seconds", records.size(), seconds));
    }
}
From source file:com.oakhole.Generate.java
/**
 * Scans the given package on the classpath for classes annotated with @Entity.
 *
 * @param packageToScan the package to scan
 * @return the set of discovered entity classes
 * @throws java.io.IOException
 */
private Set<Class<?>> autoScan(String packageToScan) throws IOException {
    Set<Class<?>> entities = Sets.newConcurrentHashSet();
    Enumeration<URL> dirs = Thread.currentThread().getContextClassLoader()
            .getResources(packageToScan.replace('.', '/'));
    while (dirs.hasMoreElements()) {
        URL url = dirs.nextElement();
        // Only plain file-system resources are scanned for @Entity classes.
        if ("file".equals(url.getProtocol())) {
            String filePath = URLDecoder.decode(url.getFile(), "utf-8");
            fetchEntities(packageToScan, filePath, entities);
        }
    }
    return entities;
}