Usage examples for com.google.common.collect.Maps.newConcurrentMap()
public static <K, V> ConcurrentMap<K, V> newConcurrentMap()
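The method returns a new, empty ConcurrentMap, currently backed by ConcurrentHashMap, with the key and value types inferred from the assignment target. A minimal sketch of typical usage (the class, field, and method names here are illustrative, not taken from any of the projects below):

import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Future;
import com.google.common.collect.Maps;

public class JobTracker {
    // Shared, thread-safe registry; newConcurrentMap() infers <String, Future<String>>
    // from the field declaration and returns an empty ConcurrentHashMap.
    private final ConcurrentMap<String, Future<String>> submittedJobs = Maps.newConcurrentMap();

    public void track(String jobId, Future<String> result) {
        // putIfAbsent is atomic, so concurrent callers cannot silently overwrite each other
        submittedJobs.putIfAbsent(jobId, result);
    }
}

Guava's documentation suggests that on Java 8 and later, calling new ConcurrentHashMap<>() directly is generally preferable; the factory method mainly saves a little generics boilerplate, as the examples below illustrate.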
From source file:org.icgc.dcc.download.server.config.CompletionServiceConfig.java
@Bean
public Map<String, Future<String>> submittedJobs() {
    return Maps.newConcurrentMap();
}
From source file:org.artifactory.storage.binstore.service.providers.FileCacheBinaryProviderImpl.java
@Override
public void initialize() {
    super.initialize();
    lruCache = Maps.newConcurrentMap();
    totalSize = new AtomicLong(0);
    maxTotalSize = getLongParam("maxSize", getStorageProperties().getBinaryProviderCacheMaxSize());
    cacheCleanerSemaphore = new Semaphore(1);
    syncCacheEntries();
}
From source file:co.cask.cdap.data.stream.service.LocalStreamService.java
@Inject
public LocalStreamService(StreamCoordinatorClient streamCoordinatorClient,
        StreamFileJanitorService janitorService,
        StreamMetaStore streamMetaStore,
        StreamAdmin streamAdmin,
        StreamWriterSizeCollector streamWriterSizeCollector,
        NotificationService notificationService,
        MetricStore metricStore) {
    super(streamCoordinatorClient, janitorService, streamWriterSizeCollector, metricStore);
    this.streamAdmin = streamAdmin;
    this.streamMetaStore = streamMetaStore;
    this.streamWriterSizeCollector = streamWriterSizeCollector;
    this.notificationService = notificationService;
    this.aggregators = Maps.newConcurrentMap();
}
From source file:org.sonatype.nexus.scheduling.internal.ThreadPoolTaskExecutorSPI.java
@Inject
public ThreadPoolTaskExecutorSPI(final TaskFactory taskFactory) {
    this.taskFactory = checkNotNull(taskFactory);
    this.executorService = (ThreadPoolExecutor) Executors.newFixedThreadPool(15);
    this.tasks = Maps.newConcurrentMap();
    this.taskFutures = Maps.newConcurrentMap();
}
From source file:co.cask.cdap.app.stream.DefaultStreamWriter.java
@Inject
public DefaultStreamWriter(@Assisted("namespace") Id.Namespace namespace,
        @Assisted("owners") List<Id> owners,
        UsageRegistry usageRegistry,
        DiscoveryServiceClient discoveryServiceClient) {
    this.namespace = namespace;
    this.owners = owners;
    this.endpointStrategy = new RandomEndpointStrategy(
            discoveryServiceClient.discover(Constants.Service.STREAMS));
    this.isStreamRegistered = Maps.newConcurrentMap();
    this.usageRegistry = usageRegistry;
}
From source file:com.facebook.buck.remoteexecution.event.listener.RemoteExecutionEventListener.java
public RemoteExecutionEventListener() {
    this.downloads = new AtomicInteger(0);
    this.donwloadBytes = new AtomicLong(0);
    this.uploads = new AtomicInteger(0);
    this.uploadBytes = new AtomicLong(0);
    this.totalBuildRules = new AtomicInteger(0);
    this.hasFirstRemoteActionStarted = new AtomicBoolean(false);
    localFallbackTotalExecutions = new AtomicInteger(0);
    localFallbackLocalExecutions = new AtomicInteger(0);
    localFallbackSuccessfulLocalExecutions = new AtomicInteger(0);
    this.actionStateCount = Maps.newConcurrentMap();
    for (State state : RemoteExecutionActionEvent.State.values()) {
        actionStateCount.put(state, new AtomicInteger(0));
    }
}
From source file:se.svt.helios.serviceregistration.consul.ConsulServiceRegistrar.java
public ConsulServiceRegistrar(final ConsulClient consulClient, final String serviceCheckScript,
        final String serviceCheckInterval) {
    this.consulClient = consulClient;
    this.serviceCheckScript = serviceCheckScript;
    this.serviceCheckInterval = serviceCheckInterval;
    this.handles = Maps.newConcurrentMap();
    this.endpoints = Sets.newConcurrentHashSet();
    this.executor = MoreExecutors.getExitingScheduledExecutorService(
            (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1,
                    new ThreadFactoryBuilder().setNameFormat("consul-registrar-%d").build()),
            0, TimeUnit.SECONDS);

    // If the Consul agent is restarted, all services will be forgotten. Therefore we sync the
    // state between services known by this plugin and services registered in Consul.
    Runnable registrationRunnable = new Runnable() {
        @Override
        public void run() {
            syncState();
        }
    };
    this.executor.scheduleAtFixedRate(registrationRunnable, CONSUL_UPDATE_INTERVAL,
            CONSUL_UPDATE_INTERVAL, TimeUnit.SECONDS);
}
From source file:org.apache.kylin.dimension.DimensionEncodingFactory.java
private synchronized static void initFactoryMap() {
    if (factoryMap == null) {
        Map<Pair<String, Integer>, DimensionEncodingFactory> map = Maps.newConcurrentMap();

        // built-in encodings, note dictionary is a special case
        {
            FixedLenDimEnc.Factory value = new FixedLenDimEnc.Factory();
            map.put(Pair.newPair(FixedLenDimEnc.ENCODING_NAME, value.getCurrentVersion()), value);
        }
        {
            IntDimEnc.Factory value = new IntDimEnc.Factory();
            map.put(Pair.newPair(IntDimEnc.ENCODING_NAME, value.getCurrentVersion()), value);
        }
        {
            IntegerDimEnc.Factory value = new IntegerDimEnc.Factory();
            map.put(Pair.newPair(IntegerDimEnc.ENCODING_NAME, value.getCurrentVersion()), value);
        }
        {
            FixedLenHexDimEnc.Factory value = new FixedLenHexDimEnc.Factory();
            map.put(Pair.newPair(FixedLenHexDimEnc.ENCODING_NAME, value.getCurrentVersion()), value);
        }
        {
            DateDimEnc.Factory value = new DateDimEnc.Factory();
            map.put(Pair.newPair(DateDimEnc.ENCODING_NAME, value.getCurrentVersion()), value);
        }
        {
            TimeDimEnc.Factory value = new TimeDimEnc.Factory();
            map.put(Pair.newPair(TimeDimEnc.ENCODING_NAME, value.getCurrentVersion()), value);
        }
        {
            BooleanDimEnc.Factory value = new BooleanDimEnc.Factory();
            map.put(Pair.newPair(BooleanDimEnc.ENCODING_NAME, value.getCurrentVersion()), value);
        }

        // custom encodings
        String[] clsNames = KylinConfig.getInstanceFromEnv().getCubeDimensionCustomEncodingFactories();
        for (String clsName : clsNames) {
            try {
                DimensionEncodingFactory factory = (DimensionEncodingFactory) ClassUtil.newInstance(clsName);
                map.put(Pair.newPair(factory.getSupportedEncodingName(), factory.getCurrentVersion()), factory);
            } catch (Exception ex) {
                logger.error("Failed to init dimension encoding factory " + clsName, ex);
            }
        }

        factoryMap = map;
    }
}
From source file:org.smartdeveloperhub.harvesters.it.notification.CountingNotificationListener.java
CountingNotificationListener(final CountDownLatch expectedNotifications) {
    this.expectedNotifications = expectedNotifications;
    this.notifications = Maps.newConcurrentMap();
    this.random = new Random(System.nanoTime());
}
From source file:com.cloudera.livy.client.local.driver.RemoteDriver.java
private RemoteDriver(String[] args) throws Exception {
    this.activeJobs = Maps.newConcurrentMap();
    this.jcLock = new Object();
    this.shutdownLock = new Object();
    localTmpDir = Files.createTempDir();

    SparkConf conf = new SparkConf();
    String serverAddress = null;
    int serverPort = -1;
    for (int idx = 0; idx < args.length; idx += 2) {
        String key = args[idx];
        if (key.equals("--remote-host")) {
            serverAddress = getArg(args, idx);
        } else if (key.equals("--remote-port")) {
            serverPort = Integer.parseInt(getArg(args, idx));
        } else if (key.equals("--client-id")) {
            conf.set(LocalConf.SPARK_CONF_PREFIX + CLIENT_ID.key, getArg(args, idx));
        } else if (key.equals("--secret")) {
            conf.set(LocalConf.SPARK_CONF_PREFIX + CLIENT_SECRET.key, getArg(args, idx));
        } else if (key.equals("--conf")) {
            String[] val = getArg(args, idx).split("[=]", 2);
            conf.set(val[0], val[1]);
        } else {
            throw new IllegalArgumentException("Invalid command line: " + Joiner.on(" ").join(args));
        }
    }

    executor = Executors.newCachedThreadPool();

    LOG.info("Connecting to: {}:{}", serverAddress, serverPort);

    LocalConf livyConf = new LocalConf(null);
    for (Tuple2<String, String> e : conf.getAll()) {
        if (e._1().startsWith(LocalConf.SPARK_CONF_PREFIX)) {
            String key = e._1().substring(LocalConf.SPARK_CONF_PREFIX.length());
            livyConf.set(key, e._2());
            LOG.debug("Remote Driver config: {} = {}", key, e._2());
        }
    }

    String clientId = livyConf.get(CLIENT_ID);
    Preconditions.checkArgument(clientId != null, "No client ID provided.");
    String secret = livyConf.get(CLIENT_SECRET);
    Preconditions.checkArgument(secret != null, "No secret provided.");

    System.out.println("MAPCONF-->");
    System.out.println(livyConf);

    this.egroup = new NioEventLoopGroup(livyConf.getInt(RPC_MAX_THREADS),
            new ThreadFactoryBuilder().setNameFormat("Driver-RPC-Handler-%d").setDaemon(true).build());
    this.serializer = new Serializer();
    this.protocol = new DriverProtocol(this, jcLock);

    // The RPC library takes care of timing out this.
    this.clientRpc = Rpc.createClient(livyConf, egroup, serverAddress, serverPort, clientId, secret, protocol)
            .get();
    this.running = true;

    this.clientRpc.addListener(new Rpc.Listener() {
        @Override
        public void rpcClosed(Rpc rpc) {
            LOG.warn("Shutting down driver because RPC channel was closed.");
            shutdown(null);
        }
    });

    try {
        long t1 = System.currentTimeMillis();
        LOG.info("Starting Spark context at {}", t1);
        JavaSparkContext sc = new JavaSparkContext(conf);
        LOG.info("Spark context finished initialization in {}ms", System.currentTimeMillis() - t1);
        sc.sc().addSparkListener(new DriverSparkListener(this));
        synchronized (jcLock) {
            jc = new JobContextImpl(sc, localTmpDir);
            jcLock.notifyAll();
        }
    } catch (Exception e) {
        LOG.error("Failed to start SparkContext: " + e, e);
        shutdown(e);
        synchronized (jcLock) {
            jcLock.notifyAll();
        }
        throw e;
    }

    synchronized (jcLock) {
        for (JobWrapper<?> job : jobQueue) {
            job.submit(executor);
        }
        jobQueue.clear();
    }
}