List of usage examples for java.util.concurrent.ConcurrentLinkedQueue
public ConcurrentLinkedQueue()
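The no-argument constructor creates an empty, unbounded, thread-safe FIFO queue. A minimal sketch of typical usage (the class and variable names here are illustrative, not taken from the examples below):

    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;

    public class Example {
        public static void main(String[] args) {
            Queue<String> queue = new ConcurrentLinkedQueue<String>();

            // offer never blocks and always returns true for this unbounded queue
            queue.offer("first");
            queue.offer("second");

            // peek inspects the head without removing it; poll removes it
            System.out.println(queue.peek()); // first
            System.out.println(queue.poll()); // first
            System.out.println(queue.poll()); // second

            // poll returns null (rather than blocking) when the queue is empty
            System.out.println(queue.poll()); // null
        }
    }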
From source file:org.grails.datastore.mapping.core.AbstractSession.java
public void addPendingInsert(PendingInsert insert) {
    Collection<PendingInsert> inserts = pendingInserts.get(insert.getEntity());
    if (inserts == null) {
        inserts = new ConcurrentLinkedQueue<PendingInsert>();
        pendingInserts.put(insert.getEntity(), inserts);
    }
    inserts.add(insert);
}
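Note that the get/null-check/put sequence above is not atomic: two threads inserting for the same entity at once could each install a fresh queue, losing one queue's contents. If pendingInserts is a ConcurrentMap (this snippet does not show its declared type, so that is an assumption), the race-free idiom is computeIfAbsent:

    // Sketch only: assumes pendingInserts is a ConcurrentMap, which the
    // snippet above does not confirm. computeIfAbsent makes the
    // create-if-missing step atomic.
    Collection<PendingInsert> inserts = pendingInserts.computeIfAbsent(
            insert.getEntity(), k -> new ConcurrentLinkedQueue<PendingInsert>());
    inserts.add(insert);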
From source file:graph.module.OntologyEdgeModule.java
/**
 * Gets all edges but the one given by the key.
 *
 * @param node The edges must include this node.
 * @param butEdgeKey The key that is NOT added to the results.
 * @return A collection of edges that are indexed by node, but none from the
 *         butEdgeKey (though they may be added if included under other keys).
 */
public Collection<Edge> getAllButEdges(Node node, Object butEdgeKey) {
    MultiMap<Object, Edge> indexedEdges = relatedEdges_.get(node);
    if (indexedEdges == null) {
        return new ConcurrentLinkedQueue<>();
    }
    Collection<Edge> edges = new HashSet<>();
    for (Object key : indexedEdges.keySet()) {
        if (!key.equals(butEdgeKey)) {
            // Need to check same function level as butEdge
            if (StringUtils.countMatches((String) key, FUNC_SPLIT) == StringUtils
                    .countMatches((String) butEdgeKey, FUNC_SPLIT))
                edges.addAll(indexedEdges.get(key));
        }
    }
    return edges;
}
From source file:org.apache.streams.datasift.provider.DatasiftStreamProvider.java
@Override
public void prepare(Object configurationObject) {
    this.interactions = new ConcurrentLinkedQueue<Interaction>();
    this.clients = Maps.newHashMap();
    this.mapper = StreamsDatasiftMapper.getInstance();
}
From source file:org.openhab.binding.powermax.internal.message.PowermaxCommManager.java
/**
 * Connect to the Powermax alarm system
 *
 * @return true if connected or false if not
 */
public boolean open() {
    if (connector != null) {
        connector.open();
    }
    lastSendMsg = null;
    msgQueue = new ConcurrentLinkedQueue<PowermaxBaseMessage>();
    return isConnected();
}
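Replacing msgQueue with a fresh instance, rather than calling clear(), is a common idiom: ConcurrentLinkedQueue inherits clear() from AbstractQueue, which removes elements one at a time, so it is not atomic. Swapping the reference discards all pending messages in a single step. A sketch of the contrast, using the name from the snippet above:

    // Not atomic: other threads may enqueue while elements are being removed
    msgQueue.clear();

    // Single-step swap (what open() above does); any thread still holding
    // the old reference keeps seeing the old, abandoned queue
    msgQueue = new ConcurrentLinkedQueue<PowermaxBaseMessage>();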
From source file:org.grails.datastore.mapping.core.AbstractSession.java
public void addPendingUpdate(PendingUpdate update) {
    Collection<PendingUpdate> inserts = pendingUpdates.get(update.getEntity());
    if (inserts == null) {
        inserts = new ConcurrentLinkedQueue<PendingUpdate>();
        pendingUpdates.put(update.getEntity(), inserts);
    }
    inserts.add(update);
}
From source file:io.druid.indexing.common.task.AppenderatorDriverRealtimeIndexTask.java
@JsonCreator
public AppenderatorDriverRealtimeIndexTask(@JsonProperty("id") String id,
        @JsonProperty("resource") TaskResource taskResource,
        @JsonProperty("spec") RealtimeAppenderatorIngestionSpec spec,
        @JsonProperty("context") Map<String, Object> context,
        @JacksonInject ChatHandlerProvider chatHandlerProvider,
        @JacksonInject AuthorizerMapper authorizerMapper,
        @JacksonInject RowIngestionMetersFactory rowIngestionMetersFactory) {
    super(id == null ? makeTaskId(spec) : id,
            StringUtils.format("index_realtime_appenderator_%s", spec.getDataSchema().getDataSource()),
            taskResource, spec.getDataSchema().getDataSource(), context);
    this.spec = spec;
    this.pendingHandoffs = new ConcurrentLinkedQueue<>();
    this.chatHandlerProvider = Optional.fromNullable(chatHandlerProvider);
    this.authorizerMapper = authorizerMapper;
    if (spec.getTuningConfig().getMaxSavedParseExceptions() > 0) {
        savedParseExceptions = new CircularBuffer<>(spec.getTuningConfig().getMaxSavedParseExceptions());
    }
    this.ingestionState = IngestionState.NOT_STARTED;
    this.rowIngestionMeters = rowIngestionMetersFactory.createRowIngestionMeters();
}
From source file:com.mobilehelix.appserver.push.PushManager.java
public void storeSession(String uniqueID, String combinedUser, PushReceiver newReceiver) {
    idMap.put(uniqueID, newReceiver);
    ConcurrentLinkedQueue<PushReceiver> receivers = this.userPushMap.get(combinedUser);
    if (receivers == null) {
        receivers = new ConcurrentLinkedQueue<>();
        this.userPushMap.put(combinedUser, receivers);
    }
    receivers.add(newReceiver);
}
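ConcurrentLinkedQueue.remove(Object) removes a single matching element and is safe to call while other threads are offering to the same queue, which suits the matching tear-down path. A hypothetical removeSession counterpart (not part of PushManager's shown API, sketched here for illustration):

    // Hypothetical counterpart to storeSession, shown for illustration only.
    public void removeSession(String uniqueID, String combinedUser) {
        PushReceiver receiver = idMap.remove(uniqueID);
        ConcurrentLinkedQueue<PushReceiver> receivers = this.userPushMap.get(combinedUser);
        if (receiver != null && receivers != null) {
            // remove(Object) deletes the first matching element, if any
            receivers.remove(receiver);
        }
    }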
From source file:org.glassfish.grizzly.memcached.pool.BaseObjectPoolBenchmark.java
@Test
public void testBenchmarkingInMultiThreads() {
    final int threadCount = 1000;
    final int poolSize = 150;
    final KeyedPoolableObjectFactory<String, String> apacheFactory = new KeyedPoolableObjectFactory<String, String>() {
        private static final String VALUE_NAME = "value";
        private int id;
        private int count;

        @Override
        public synchronized String makeObject(String s) throws Exception {
            count++;
            return VALUE_NAME + ++id;
        }

        @Override
        public synchronized void destroyObject(String s, String s1) throws Exception {
            count--;
        }

        @Override
        public boolean validateObject(String s, String s1) {
            return true;
        }

        @Override
        public void activateObject(String s, String s1) throws Exception {
        }

        @Override
        public void passivateObject(String s, String s1) throws Exception {
        }
    };
    final GenericKeyedObjectPool<String, String> apachePool = new GenericKeyedObjectPool<String, String>(
            apacheFactory, poolSize, GenericKeyedObjectPool.WHEN_EXHAUSTED_FAIL, 0, poolSize, poolSize,
            poolSize, false, false, 1000 * 60 * 60, 0, 1000 * 60 * 60, false);
    final ConcurrentLinkedQueue<String> borrowObjects = new ConcurrentLinkedQueue<String>();
    final CountDownLatch startFlag = new CountDownLatch(1);
    final CountDownLatch finishFlag = new CountDownLatch(threadCount * 2);
    final AtomicInteger exceptionCnt = new AtomicInteger();
    final AtomicInteger successCnt = new AtomicInteger();
    for (int i = 0; i < threadCount; i++) {
        final Thread borrowThread = new Thread() {
            @Override
            public void run() {
                try {
                    startFlag.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
                for (int j = 0; j < 30; j++) {
                    try {
                        final String object = apachePool.borrowObject("key");
                        Assert.assertNotNull(object);
                        successCnt.incrementAndGet();
                        Assert.assertTrue(borrowObjects.offer(object));
                    } catch (Exception ignore) {
                        exceptionCnt.incrementAndGet();
                    }
                }
                finishFlag.countDown();
            }
        };
        borrowThread.start();
        final Thread returnThread = new Thread() {
            @Override
            public void run() {
                try {
                    startFlag.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
                for (int j = 0; j < 30; j++) {
                    try {
                        final String object = borrowObjects.poll();
                        if (object != null) {
                            apachePool.returnObject("key", object);
                        }
                    } catch (Exception e) {
                        Assert.fail(e.getMessage());
                    }
                }
                finishFlag.countDown();
            }
        };
        returnThread.start();
    }
    long startTime = System.currentTimeMillis();
    startFlag.countDown();
    try {
        finishFlag.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    logger.info("apache common-pool elapse = {}", (System.currentTimeMillis() - startTime));
    try {
        logger.info("apache common-pool max gen-id = {}", apacheFactory.makeObject("key"));
    } catch (Exception ignore) {
    }
    logger.info("apache common-pool success counts = {}", successCnt.get());
    logger.info("apache common-pool exception counts = {}", exceptionCnt.get());
    try {
        apachePool.close();
    } catch (Exception ignore) {
    }

    // grizzly
    final PoolableObjectFactory<String, String> grizzlyFactory = new PoolableObjectFactory<String, String>() {
        private static final String VALUE_NAME = "value";
        private int id;
        private int count;

        @Override
        public synchronized String createObject(String s) throws Exception {
            count++;
            return VALUE_NAME + ++id;
        }

        @Override
        public synchronized void destroyObject(String s, String s1) throws Exception {
            count--;
        }

        @Override
        public boolean validateObject(String s, String s1) {
            return true;
        }
    };
    final BaseObjectPool.Builder<String, String> builder = new BaseObjectPool.Builder<String, String>(
            grizzlyFactory);
    builder.disposable(false);
    builder.keepAliveTimeoutInSecs(-1);
    builder.borrowValidation(false);
    builder.returnValidation(false);
    builder.max(poolSize);
    builder.min(poolSize);
    final ObjectPool<String, String> grizzlyPool = builder.build();
    final ConcurrentLinkedQueue<String> borrowObjects2 = new ConcurrentLinkedQueue<String>();
    final CountDownLatch startFlag2 = new CountDownLatch(1);
    final CountDownLatch finishFlag2 = new CountDownLatch(threadCount * 2);
    final AtomicInteger exceptionCnt2 = new AtomicInteger();
    final AtomicInteger successCnt2 = new AtomicInteger();
    for (int i = 0; i < threadCount; i++) {
        final Thread borrowThread = new Thread() {
            @Override
            public void run() {
                try {
                    startFlag2.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
                for (int j = 0; j < 30; j++) {
                    try {
                        final String object = grizzlyPool.borrowObject("key", 0);
                        Assert.assertNotNull(object);
                        successCnt2.incrementAndGet();
                        Assert.assertTrue(borrowObjects2.offer(object));
                    } catch (Exception ignore) {
                        exceptionCnt2.incrementAndGet();
                    }
                }
                finishFlag2.countDown();
            }
        };
        borrowThread.start();
        final Thread returnThread = new Thread() {
            @Override
            public void run() {
                try {
                    startFlag2.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
                for (int j = 0; j < 30; j++) {
                    try {
                        final String object = borrowObjects2.poll();
                        if (object != null) {
                            grizzlyPool.returnObject("key", object);
                        }
                    } catch (Exception e) {
                        Assert.fail(e.getMessage());
                    }
                }
                finishFlag2.countDown();
            }
        };
        returnThread.start();
    }
    startTime = System.currentTimeMillis();
    startFlag2.countDown();
    try {
        finishFlag2.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    logger.info("grizzly pool elapse = {}", (System.currentTimeMillis() - startTime));
    try {
        logger.info("grizzly pool max gen-id = {}", grizzlyFactory.createObject("key"));
    } catch (Exception ignore) {
    }
    logger.info("grizzly pool success counts = {}", successCnt2.get());
    logger.info("grizzly pool exception counts= {}", exceptionCnt2.get());
    grizzlyPool.destroy();
}
From source file:com.linkedin.pinot.core.query.scheduler.PrioritySchedulerTest.java
@Test
public void testMultiThreaded() throws InterruptedException {
    // add queries from multiple threads and verify that all those are executed
    PropertiesConfiguration conf = new PropertiesConfiguration();
    conf.setProperty(ResourceManager.QUERY_WORKER_CONFIG_KEY, 60);
    conf.setProperty(ResourceManager.QUERY_RUNNER_CONFIG_KEY, 20);
    conf.setProperty(ResourceLimitPolicy.THREADS_PER_QUERY_PCT, 50);
    conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_HARD_LIMIT, 60);
    conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_SOFT_LIMIT, 40);
    conf.setProperty(MultiLevelPriorityQueue.MAX_PENDING_PER_GROUP_KEY, 10);
    final TestPriorityScheduler scheduler = TestPriorityScheduler.create(conf);
    scheduler.start();
    final Random random = new Random();
    final ConcurrentLinkedQueue<ListenableFuture<byte[]>> results = new ConcurrentLinkedQueue<>();
    final int numThreads = 3;
    final int queriesPerThread = 10;
    numQueries = new CountDownLatch(numThreads * queriesPerThread);
    for (int i = 0; i < numThreads; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                for (int j = 0; j < queriesPerThread; j++) {
                    results.add(scheduler.submit(createServerQueryRequest(Integer.toString(index), metrics)));
                    Uninterruptibles.sleepUninterruptibly(random.nextInt(100), TimeUnit.MILLISECONDS);
                }
            }
        }).start();
    }
    numQueries.await();
    scheduler.stop();
}
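The worker threads publish their futures into the queue; once numQueries.await() returns, the collecting thread can drain it with poll(), the standard consumption loop for a ConcurrentLinkedQueue. A sketch, assuming the caller wants to inspect each result in turn:

    // Sketch: drain the collected futures after all producers finish.
    // poll() returns null once the queue is empty, which ends the loop.
    private void drainResults(ConcurrentLinkedQueue<ListenableFuture<byte[]>> results)
            throws Exception {
        ListenableFuture<byte[]> future;
        while ((future = results.poll()) != null) {
            byte[] response = future.get(); // blocks until that query completes
            // ... assert on or otherwise process the response ...
        }
    }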
From source file:com.mellanox.r4h.DataXceiverServer.java
DataXceiverServer(DataNodeBridge dnBridge) throws URISyntaxException {
    super(new URI(String.format("rdma://%s", dnBridge.getDN().getDisplayName())), dnBridge.numOfMsgsToBind,
            dnBridge.msgInSize, dnBridge.msgOutSize, dnBridge.numSessionsLimit, 0,
            dnBridge.dynamicMsgAllocationAmount, !dnBridge.isForwardEnable);
    LOG.debug("After EventQueueHandler creation");
    this.dnBridge = dnBridge; // TODO: remove debug DNexpoable and rename top DataNodeBridge
    this.threadGroup = new ThreadGroup("R4H Datanode Threads");
    LOG.info("Creating DataXceiverServer - uri=" + uri);
    DataXceiverServer.DXSCallbacks dxsCbs = this.new DXSCallbacks();
    LOG.trace("writePacketSize=" + dnBridge.getWritePacketSize());
    URI workerUri = new URI(String.format("rdma://%s:0", this.uri.getHost()));
    this.spPool = new LinkedList<ServerPortalWorker>();
    isForwardEnable = dnBridge.isForwardEnable;
    if (isForwardEnable) {
        this.sp = new ServerPortal(eqh, uri, dxsCbs, new WorkerCache.WorkerProvider() {
            @Override
            public Worker getWorker() {
                return getNextFreePortalWorker();
            }
        });
        LOG.info("Using forward model");
        int spwAmount = dnBridge.spwAmount;
        LOG.info(String.format("Starting ahead %d server portal worker with sessions limit of %d sessions each",
                spwAmount, dnBridge.numSessionsLimit));
        for (int i = 1; i <= spwAmount; i++) {
            ServerPortalWorker spw = new ServerPortalWorker(workerUri, dnBridge.numOfMsgsToBind / spwAmount,
                    dnBridge.msgInSize, dnBridge.msgOutSize, dnBridge.numSessionsLimit, i,
                    dnBridge.dynamicMsgAllocationAmount);
            spw.start();
            spPool.add(spw);
            LOG.info("Started new server portal worker thread: " + spw);
        }
    } else {
        this.sp = new ServerPortal(eqh, uri, dxsCbs);
        LOG.info("Using accept model");
        LOG.info("Started a new worker thread: " + super.toString());
    }
    auxThreadFactory = new ThreadFactoryBuilder().setNameFormat("r4h-auxillary-thread-%d").build();
    ThreadFactory ioThreadFactory = new ThreadFactoryBuilder().setNameFormat("r4h-io-thread-%d").build();
    LOG.info(String.format("Allocating ahead %d IO executors", dnBridge.numOfioExecutors));
    this.ioExecutorPool = new LinkedList<R4HExecutor>();
    for (int i = 0; i < dnBridge.numOfioExecutors; i++) {
        ioExecutorPool.add(new R4HExecutor(ioThreadFactory));
    }
    LOG.info(String.format("Allocating ahead %d Auxillary executors", dnBridge.numOfAuxExecutors));
    this.auxExecutorPool = new ConcurrentLinkedQueue<R4HExecutor>();
    for (int i = 0; i < dnBridge.numOfAuxExecutors; i++) {
        auxExecutorPool.add(new R4HExecutor(auxThreadFactory));
    }
    LOG.trace(this.toString());
}
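Here the queue doubles as a simple lock-free pool of pre-allocated executors. The usual access pattern for such a pool, not shown in this constructor, is to poll an instance out, use it, and offer it back. A hedged sketch (whether R4H rotates executors exactly this way is an assumption; only the pool's construction appears above):

    // Sketch of the poll/add round-trip a ConcurrentLinkedQueue-backed
    // pool typically uses. poll() is lock-free and returns null if the
    // pool happens to be empty.
    R4HExecutor executor = auxExecutorPool.poll();
    if (executor != null) {
        try {
            // ... submit auxiliary work to the executor ...
        } finally {
            auxExecutorPool.add(executor); // return it for reuse
        }
    }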