List of usage examples for java.util.concurrent.ConcurrentSkipListMap
public ConcurrentSkipListMap()
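Before the project examples below, a minimal standalone sketch of the no-argument constructor (class and variable names here are illustrative, not taken from the examples): it creates an empty map that sorts keys by their natural ordering, so keys must be mutually Comparable.

import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;

public class SkipListMapDemo {
    public static void main(String[] args) {
        ConcurrentNavigableMap<String, Integer> scores = new ConcurrentSkipListMap<>();
        scores.put("carol", 3);
        scores.put("alice", 1);
        scores.put("bob", 2);
        // Iteration is always in sorted key order, regardless of insertion order
        System.out.println(scores.firstKey());       // alice
        System.out.println(scores.headMap("carol")); // {alice=1, bob=2}
    }
}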
From source file:org.apache.cassandra.db.index.SecondaryIndexManager.java
public SecondaryIndexManager(ColumnFamilyStore baseCfs) {
    indexesByColumn = new ConcurrentSkipListMap<>();
    rowLevelIndexMap = new ConcurrentHashMap<>();
    indexesByName = new ConcurrentHashMap<String, SecondaryIndex>();
    allIndexes = indexesByName.values();
    this.baseCfs = baseCfs;
}
From source file:org.apache.hadoop.hbase.client.HTableMultiplexer.java
/**
 * @param conf The HBaseConfiguration
 * @param perRegionServerBufferQueueSize determines the max number of buffered Put ops
 *          for each region server before dropping the request.
 */
public HTableMultiplexer(Configuration conf, int perRegionServerBufferQueueSize)
        throws ZooKeeperConnectionException {
    this.conf = conf;
    this.serverToBufferQueueMap = new ConcurrentHashMap<HRegionLocation, LinkedBlockingQueue<PutStatus>>();
    this.serverToFlushWorkerMap = new ConcurrentHashMap<HRegionLocation, HTableFlushWorker>();
    this.tableNameToHTableMap = new ConcurrentSkipListMap<TableName, HTable>();
    this.retryNum = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
            HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    this.perRegionServerBufferQueueSize = perRegionServerBufferQueueSize;
}
From source file:at.uni_salzburg.cs.ckgroup.cpcc.engmap.EngMapServlet.java
public void init(ServletConfig servletConfig) throws ServletException {
    this.servletConfig = servletConfig;
    regdata = new ConcurrentSkipListMap<String, IRegistrationData>();
    centralEngines = new ConcurrentSkipListSet<String>();
    neighborZones = new ConcurrentSkipListSet<IZone>();
    super.init();
    myInit();
}
From source file:org.polymap.p4.atlas.ui.SearchContentProvider.java
protected void updateMap(IMap elm, int currentChildCount) {
    updateChildrenLoading(elm);
    ConcurrentMap<String, ILayer> children = new ConcurrentSkipListMap<>();
    List<UIJob> jobs = new ArrayList<>();
    for (ILayer layer : elm.layers) {
        UIJob job = UIJob.schedule(layer.label.get(), monitor -> {
            if (AtlasFeatureLayer.of(layer).get().isPresent()) {
                children.put(layer.label.get(), layer);
            }
        });
        jobs.add(job);
    }
    jobs.forEach(job -> job.joinAndDispatch(5000));
    updateChildren(elm, children.values().toArray(), currentChildCount);
}
From source file:org.opennms.netmgt.selta.scheduler.LegacyScheduler.java
/**
 * Constructs a new instance of the scheduler. The maximum number of
 * executable threads is specified in the constructor. The executable
 * threads are part of a runnable thread pool where the scheduled runnables
 * are executed.
 *
 * @param parent
 *            String prepended to "Scheduler" to create fiber name
 * @param maxSize
 *            The maximum size of the thread pool.
 */
public LegacyScheduler(String parent, int maxSize) {
    m_status = START_PENDING;
    m_runner = Executors.newFixedThreadPool(maxSize,
            new LogPreservingThreadFactory(getClass().getSimpleName(), maxSize, false));
    m_queues = new ConcurrentSkipListMap<Long, PeekableFifoQueue<ReadyRunnable>>();
    m_scheduled = 0;
    m_worker = null;
}
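LegacyScheduler keys its queues by execution time, a common use of ConcurrentSkipListMap: the smallest (earliest) key can be inspected and removed atomically. A hedged sketch of that drain pattern, independent of the OpenNMS classes above (all names here are illustrative):

import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentSkipListMap;

public class TimeBucketDrain {
    private final ConcurrentSkipListMap<Long, Queue<Runnable>> buckets = new ConcurrentSkipListMap<>();

    public void schedule(long atMillis, Runnable task) {
        buckets.computeIfAbsent(atMillis, t -> new ConcurrentLinkedQueue<>()).add(task);
    }

    /** Run every bucket whose time has passed. */
    public void runDue(long nowMillis) {
        Map.Entry<Long, Queue<Runnable>> first;
        while ((first = buckets.firstEntry()) != null && first.getKey() <= nowMillis) {
            // pollFirstEntry() removes and returns the earliest bucket atomically
            Map.Entry<Long, Queue<Runnable>> polled = buckets.pollFirstEntry();
            if (polled != null) {
                polled.getValue().forEach(Runnable::run);
            }
        }
    }
}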
From source file:com.rsegismont.androlife.common.utils.ImageCacher.java
/**
 * Initialize the cache, providing all parameters.
 *
 * @param cacheParams
 *            The cache parameters to initialize the cache
 */
@SuppressLint("UseSparseArrays")
private void init(ImageCacheParams cacheParams) {
    mCacheParams = cacheParams;

    // Set up memory cache
    if (mCacheParams.memoryCacheEnabled) {
        if (BuildConfig.DEBUG) {
            Log.d(TAG, "Memory cache created (size = " + mCacheParams.memCacheSize + ")");
        }

        // If we're running on Honeycomb or newer, keep a store of reusable
        // bitmaps for possible use with inBitmap later
        if (Utils.hasHoneycomb()) {
            mReusableBitmaps = new ConcurrentSkipListMap<Integer, SoftReference<Bitmap>>();
        }
        mMemoryCacheSize = new HashMap<String, Point>();
        mMemoryCacheData = new LruCache<String, BitmapDrawable>(mCacheParams.memCacheSize) {

            /**
             * Notify the removed entry that it is no longer being cached
             */
            @Override
            protected void entryRemoved(boolean evicted, String key, BitmapDrawable oldValue,
                    BitmapDrawable newValue) {
                if (RecyclingBitmapDrawable.class.isInstance(oldValue)) {
                    // The removed entry is a recycling drawable, so notify it
                    // that it has been removed from the memory cache
                    ((RecyclingBitmapDrawable) oldValue).setIsCached(false);
                } else {
                    // The removed entry is a standard BitmapDrawable
                    if (Utils.hasHoneycomb()) {
                        // We're running on Honeycomb or later, so add the bitmap
                        // to a SoftReference set for possible use with inBitmap later
                        synchronized (mMemCacheLock) {
                            if (evicted) {
                                mReusableBitmaps.put(getBitmapSize(oldValue),
                                        new SoftReference<Bitmap>(oldValue.getBitmap()));
                            }
                        }
                    }
                }
            }

            /**
             * Measure item size in kilobytes rather than units, which is
             * more practical for a bitmap cache
             */
            @Override
            protected int sizeOf(String key, BitmapDrawable value) {
                final int bitmapSize = getBitmapSize(value) / 1024;
                return bitmapSize == 0 ? 1 : bitmapSize;
            }
        };
    }

    // By default the disk cache is not initialized here, as it should be
    // initialized on a separate thread due to disk access.
    if (cacheParams.initDiskCacheOnCreate) {
        // Set up disk cache
        initDiskCache();
    }
}
From source file:com.microsoft.wake.contrib.grouper.impl.AdaptiveSnowshovelGrouper.java
@Inject
public AdaptiveSnowshovelGrouper(Combiner<OutType, K, V> c, Partitioner<K> p, Extractor<InType, K, V> ext,
        @Parameter(StageConfiguration.StageObserver.class) Observer<Tuple<Integer, OutType>> o,
        @Parameter(StageConfiguration.StageName.class) String stageName,
        @Parameter(InitialPeriod.class) long initialPeriod, @Parameter(MaxPeriod.class) long maxPeriod,
        @Parameter(MinPeriod.class) long minPeriod, @Parameter(Interval.class) long interval)
        throws InjectionException {
    super(stageName);
    this.c = c;
    this.p = p;
    this.ext = ext;
    this.o = o;
    this.outputHandler = new OutputImpl<Long>();
    this.outputDriver = new InitialDelayStage<Long>(outputHandler, 1, stageName + "-output");
    this.doneHandler = ((InitialDelayStage<Long>) outputDriver).getDoneHandler();
    register = new ConcurrentSkipListMap<>();
    inputDone = false;
    this.inputObserver = this.new InputImpl();
    this.sleeping = new AtomicInteger();
    this.combiningMeter = new Meter(stageName);

    // There is no dependence from input finish to output start.
    // The alternative placement of this event is in the first call to onNext,
    // but Output onNext already provides blocking.
    outputDriver.onNext(new Long(initialPeriod));
    prevAggregatedCount = 0;
    prevCombiningRate = currCombiningRate = 0.0;
    prevFlushingPeriod = 0;
    currFlushingPeriod = initialPeriod;
    prevAdjustedTime = startTime = System.nanoTime();
    flushingPeriodInterval = interval;
    this.minPeriod = minPeriod;
    this.maxPeriod = maxPeriod;
}
From source file:org.apache.hadoop.raid.RaidHistogram.java
public synchronized void initialize(ArrayList<Long> newWindows) {
    windows = newWindows;
    Collections.sort(windows);
    points = new ConcurrentSkipListSet<Point>();
    windowNum = windows.size();
    totalPoints = new CounterArray(windowNum);
    totalFailedPaths = new CounterArray(windowNum);
    histo = new ConcurrentSkipListMap<Long, CounterArray>();
    failedRecoveredFiles = new ConcurrentHashMap<String, AtomicInteger>();
}
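RaidHistogram keeps counters keyed by timestamp; a skip-list map makes window queries cheap because a sub-map view can be taken over any key range without copying. A hedged sketch of that idea under assumed names (WindowedCounts, record, countInWindow are illustrative, not from the Hadoop code):

import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.LongAdder;

public class WindowedCounts {
    private final ConcurrentSkipListMap<Long, LongAdder> histo = new ConcurrentSkipListMap<>();

    public void record(long timestampMillis) {
        histo.computeIfAbsent(timestampMillis, t -> new LongAdder()).increment();
    }

    /** Sum of all events in [from, to): subMap is a live, concurrent view. */
    public long countInWindow(long from, long to) {
        long sum = 0;
        for (LongAdder a : histo.subMap(from, to).values()) {
            sum += a.sum();
        }
        return sum;
    }
}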
From source file:org.apache.sling.models.impl.AdapterImplementations.java
/**
 * Add implementation mapping for the given adapter type.
 * @param adapterType Adapter type
 * @param implType Implementation type
 */
@SuppressWarnings("unchecked")
public void add(Class<?> adapterType, Class<?> implType) {
    String key = adapterType.getName();
    if (adapterType == implType) {
        modelClasses.put(key, new ModelClass(implType, sortedStaticInjectAnnotationProcessorFactories));
    } else {
        // Although we already use a ConcurrentMap, synchronize explicitly
        // because we apply non-atomic operations on it
        synchronized (adapterImplementations) {
            ConcurrentNavigableMap<String, ModelClass<?>> implementations = adapterImplementations.get(key);
            if (implementations == null) {
                // To have a consistent ordering independent of bundle loading,
                // use a ConcurrentSkipListMap that sorts by class name
                implementations = new ConcurrentSkipListMap<String, ModelClass<?>>();
                adapterImplementations.put(key, implementations);
            }
            implementations.put(implType.getName(),
                    new ModelClass(implType, sortedStaticInjectAnnotationProcessorFactories));
        }
    }
}
From source file:org.apache.hadoop.hbase.client.coprocessor.TimeseriesAggregationClient.java
/**
 * It gives the maximum value of a column for a given column family for the given range. If the
 * qualifier is null, a max of all values for the given family is returned.
 * @param table
 * @param ci
 * @param scan
 * @return max values as a ConcurrentSkipListMap<Long, R> (the result arrives as proto from the
 *         region and needs to be passed out as a ConcurrentSkipListMap)
 * @throws Throwable The caller is supposed to handle the exceptions as they are thrown and
 *         propagated to it.
 */
public <R, S, P extends Message, Q extends Message, T extends Message> ConcurrentSkipListMap<Long, R> max(
        final HTable table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final TimeseriesAggregateRequest requestArg = validateArgAndGetPB(scan, ci, false, intervalSeconds,
            timestampSecondsMin, timestampSecondsMax, keyFilterPattern);

    class MaxCallBack implements Batch.Callback<TimeseriesAggregateResponse> {
        ConcurrentSkipListMap<Long, R> max = new ConcurrentSkipListMap<Long, R>();

        ConcurrentSkipListMap<Long, R> getMax() {
            return max;
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, TimeseriesAggregateResponse result) {
            List<TimeseriesAggregateResponseMapEntry> results = result.getEntryList();
            for (TimeseriesAggregateResponseMapEntry entry : results) {
                if (entry.getValue().getFirstPartCount() > 0) {
                    ByteString b = entry.getValue().getFirstPart(0);
                    Q q = null;
                    try {
                        q = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 3, b);
                    } catch (IOException e) {
                        // TODO Auto-generated catch block
                        e.printStackTrace();
                    }
                    // Only consider the entry if the proto was parsed successfully
                    if (q != null) {
                        R candidate = ci.getCellValueFromProto(q);
                        if (max.containsKey(entry.getKey())) {
                            R current = max.get(entry.getKey());
                            max.put(entry.getKey(),
                                    (current == null || (candidate != null && ci.compare(current, candidate) < 0))
                                            ? candidate : current);
                        } else {
                            max.put(entry.getKey(), candidate);
                        }
                    }
                }
            }
        }
    }

    MaxCallBack aMaxCallBack = new MaxCallBack();
    table.coprocessorService(TimeseriesAggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TimeseriesAggregateService, TimeseriesAggregateResponse>() {
                @Override
                public TimeseriesAggregateResponse call(TimeseriesAggregateService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TimeseriesAggregateResponse> rpcCallback =
                            new BlockingRpcCallback<TimeseriesAggregateResponse>();
                    instance.getMax(controller, requestArg, rpcCallback);
                    TimeseriesAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    return response;
                }
            }, aMaxCallBack);
    return aMaxCallBack.getMax();
}