List of usage examples for com.google.common.collect Maps newConcurrentMap
public static <K, V> ConcurrentMap<K, V> newConcurrentMap()
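Before the project-specific examples below, here is a minimal, self-contained sketch of the typical usage pattern: the factory returns an empty ConcurrentMap (backed by a ConcurrentHashMap in current Guava versions), with both type parameters inferred from the assignment target. The RequestCounter class and its field and method names are illustrative only and are not taken from any of the projects listed below.

import java.util.concurrent.ConcurrentMap;

import com.google.common.collect.Maps;

public class RequestCounter {
    // Hypothetical example: per-client request counters shared across threads.
    private final ConcurrentMap<String, Long> requestCounts = Maps.newConcurrentMap();

    public void record(String clientId) {
        // merge() is atomic on ConcurrentHashMap, so no external synchronization is needed.
        requestCounts.merge(clientId, 1L, Long::sum);
    }

    public long countFor(String clientId) {
        return requestCounts.getOrDefault(clientId, 0L);
    }
}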
From source file:org.syncany.gui.tray.TrayIcon.java
public TrayIcon(Shell shell) {
    this.trayShell = shell;
    this.messages = new HashMap<String, String>();
    this.eventBus = GuiEventBus.getInstance();
    this.eventBus.register(this);
    this.syncing = new AtomicBoolean(false);
    this.clientSyncStatus = Maps.newConcurrentMap();
    this.clientUploadFileSize = Maps.newConcurrentMap();

    initInternationalization();
    initAnimationThread();
    initTrayImage();
}
From source file:org.apache.bookkeeper.stream.storage.impl.sc.ZkStorageContainerManager.java
public ZkStorageContainerManager(Endpoint myEndpoint, StorageConfiguration conf,
        ClusterMetadataStore clusterMetadataStore, StorageContainerRegistry registry,
        StatsLogger statsLogger) {
    super("zk-storage-container-manager", conf, statsLogger);
    this.endpoint = myEndpoint;
    this.metadataStore = clusterMetadataStore;
    this.registry = registry;
    this.executor = Executors.newSingleThreadScheduledExecutor(
        new ThreadFactoryBuilder().setNameFormat("zk-storage-container-manager").build());
    this.liveContainers = Collections.synchronizedMap(Maps.newConcurrentMap());
    this.pendingStartStopContainers = Collections.synchronizedSet(Sets.newConcurrentHashSet());
    this.containerAssignmentMap = new ConcurrentLongHashMap<>();
    this.clusterAssignmentMap = Maps.newHashMap();
    // Probe the containers every 1/2 of the controller scheduling interval. This ensures the
    // manager can attempt to start containers before the controller reassigns them.
    this.probeInterval = Duration.ofMillis(conf.getClusterControllerScheduleIntervalMs() / 2);
}
From source file:org.apache.drill.exec.memory.AccountorImpl.java
public AccountorImpl(DrillConfig config, boolean errorOnLeak, LimitConsumer context,
        AccountorImpl parent, long max, long preAllocated, boolean applyFragLimit) {
    // TODO: fix preallocation stuff
    this.errorOnLeak = errorOnLeak;
    AtomicRemainder parentRemainder = parent != null ? parent.remainder : null;
    this.parent = parent;

    boolean enableFragmentLimit;
    double fragmentMemOvercommitFactor;
    try {
        enableFragmentLimit = config.getBoolean(ENABLE_FRAGMENT_MEMORY_LIMIT);
        fragmentMemOvercommitFactor = config.getDouble(FRAGMENT_MEM_OVERCOMMIT_FACTOR);
    } catch (Exception e) {
        enableFragmentLimit = DEFAULT_ENABLE_FRAGMENT_LIMIT;
        fragmentMemOvercommitFactor = DEFAULT_FRAGMENT_MEM_OVERCOMMIT_FACTOR;
    }
    this.enableFragmentLimit = enableFragmentLimit;
    this.fragmentMemOvercommitFactor = fragmentMemOvercommitFactor;
    this.applyFragmentLimit = applyFragLimit;

    this.remainder = new AtomicRemainder(errorOnLeak, parentRemainder, max, preAllocated, applyFragmentLimit);
    this.total = max;
    this.limitConsumer = context;
    this.fragmentLimit = this.total; // Allow as much as possible to start with
    if (ENABLE_ACCOUNTING) {
        buffers = Maps.newConcurrentMap();
    } else {
        buffers = null;
    }
    this.limitConsumers = new ArrayList<LimitConsumer>();
    if (parent != null && parent.parent == null) {
        // Only add the fragment context to the fragment-level accountor
        synchronized (this) {
            addLimitConsumer(this.limitConsumer);
        }
    }
}
From source file:com.baidu.rigel.biplatform.ma.report.query.ReportRuntimeModel.java
/**
 * @param reportModelId the report model id
 */
public ReportRuntimeModel(String reportModelId) {
    this.reportModelId = reportModelId;
    this.datas = Maps.newConcurrentMap();
    queryActions = Maps.newConcurrentMap();
    this.localContext = Maps.newConcurrentMap();
    this.context = new QueryContext();
}
From source file:io.druid.sql.calcite.DruidSchema.java
@Inject
public DruidSchema(final QuerySegmentWalker walker, final TimelineServerView serverView,
        final PlannerConfig config) {
    this.walker = Preconditions.checkNotNull(walker, "walker");
    this.serverView = Preconditions.checkNotNull(serverView, "serverView");
    this.config = Preconditions.checkNotNull(config, "config");
    this.cacheExec = ScheduledExecutors.fixed(1, "DruidSchema-Cache-%d");
    this.tables = Maps.newConcurrentMap();
}
From source file:things.connectors.xstream.XstreamConnector.java
@Override
public Observable<? extends Thing<?>> findAllThings() {
    if (allThingsCache == null) {
        allThingsCache = Maps.newConcurrentMap();
        Observable<? extends Thing<?>> obs = Observable.create((Subscriber<? super Thing<?>> subscriber) -> {
            new Thread(() -> {
                try {
                    Files.walk(Paths.get(thingsFolder.toURI()))
                            .filter((path) -> path.toString().endsWith(".thing"))
                            .map(path -> assembleThing(path))
                            .peek(t -> addElement(t))
                            .forEach(t -> subscriber.onNext(t));
                    subscriber.onCompleted();
                } catch (IOException e) {
                    throw new ThingRuntimeException("Could not read thing files.", e);
                }
            }).start();
        });
        return obs;
    } else {
        Observable<? extends Thing<?>> obs = Observable.create((Subscriber<? super Thing<?>> subscriber) -> {
            for (String type : allThingsCache.keySet()) {
                for (String key : allThingsCache.get(type).keySet()) {
                    for (Thing<?> thing : allThingsCache.get(type).get(key)) {
                        subscriber.onNext(thing);
                    }
                }
            }
            subscriber.onCompleted();
        });
        return obs;
    }
}
From source file:com.spotify.helios.serviceregistration.skydns.SkyDnsServiceRegistrar.java
/**
 * @param etcdClient client to talk to etcd with.
 * @param timeToLiveSeconds how long entries in the discovery service should live.
 * @param format the hostname format.
 */
public SkyDnsServiceRegistrar(final MiniEtcdClient etcdClient, final int timeToLiveSeconds,
        final String format) {
    this.etcdClient = Preconditions.checkNotNull(etcdClient);
    this.timeToLiveSeconds = timeToLiveSeconds;
    this.handles = Maps.newConcurrentMap();
    this.executor = MoreExecutors.getExitingScheduledExecutorService(
        (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1, threadFactory), 0, SECONDS);

    // Dividing into thirds, since at least halves are necessary to ensure that the item doesn't
    // expire due to a slight delay, and went to thirds so that a single failure won't tank the
    // registration
    this.executor.scheduleAtFixedRate(registrationRunnable, timeToLiveSeconds / 3,
        timeToLiveSeconds / 3, SECONDS);

    this.srvFormat = format;
}
From source file:org.axonframework.ext.hazelcast.samples.executor.AxonService.java
public AxonService(final String nodeName, final HazelcastInstance hzInstance) {
    m_hzInstance = hzInstance;
    m_nodeName = nodeName;
    m_evtBusTer = null;
    m_connector = null;
    m_cmdBus = null;
    m_cmdGw = null;
    m_evtStore = null;
    m_evtBus = null;
    m_cache = HzAxon.cacheAdapter("default");
    m_eventListeners = Sets.newHashSet();
    m_eventHandlers = Maps.newConcurrentMap();
    m_aggregates = Maps.newConcurrentMap();
}
From source file:com.facebook.buck.parser.DaemonicCellState.java
DaemonicCellState(Cell cell, int parsingThreads) {
    this.cell = cell;
    this.parsingThreads = parsingThreads;
    this.cellRoot = cell.getRoot();
    this.buildFileDependents = HashMultimap.create();
    this.targetsCornucopia = HashMultimap.create();
    this.buildFileConfigs = new HashMap<>();
    this.buildFileEnv = new HashMap<>();
    this.allRawNodes = new ConcurrentMapCache<>(parsingThreads);
    this.typedNodeCaches = Maps.newConcurrentMap();
    this.rawAndComputedNodesLock = new AutoCloseableReadWriteUpdateLock();
}
From source file:org.apache.giraph.ooc.OutOfCoreIOStatistics.java
/**
 * Constructor
 *
 * @param conf configuration
 * @param numIOThreads number of disks/IO threads
 */
public OutOfCoreIOStatistics(ImmutableClassesGiraphConfiguration conf, int numIOThreads) {
    this.diskBandwidthEstimate = new AtomicLong(
        DISK_BANDWIDTH_ESTIMATE.get(conf) * (long) GiraphConstants.ONE_MB);
    this.maxHistorySize = IO_COMMAND_HISTORY_SIZE.get(conf);
    this.updateCoefficient = 1.0 / maxHistorySize;
    // Adding more entries to the capacity of the queue to leave some wiggle room
    // if all IO threads are adding/removing entries from the queue
    this.commandHistory = new ArrayBlockingQueue<>(maxHistorySize + numIOThreads);
    this.aggregateStats = Maps.newConcurrentMap();
    for (IOCommandType type : IOCommandType.values()) {
        aggregateStats.put(type, new StatisticsEntry(type, 0, 0, 0));
    }
}