List of usage examples for java.util.concurrent.ConcurrentLinkedQueue
public ConcurrentLinkedQueue()
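Constructs an initially empty queue. Before the real-world examples below, a minimal self-contained sketch (not taken from any of the source files on this page) of the basic non-blocking contract:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class BasicUsage {
    public static void main(String[] args) {
        Queue<String> queue = new ConcurrentLinkedQueue<>();
        queue.offer("a");           // offer always succeeds; the queue is unbounded
        queue.offer("b");
        String head = queue.poll(); // "a" -- FIFO order
        String none = new ConcurrentLinkedQueue<String>().poll(); // null: poll never blocks
        System.out.println(head + ", " + none);
    }
}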
From source file:org.codelibs.fess.helper.SearchLogHelper.java
public void storeSearchLog() {
    if (!searchLogQueue.isEmpty()) {
        final Queue<SearchLog> queue = searchLogQueue;
        searchLogQueue = new ConcurrentLinkedQueue<>();
        processSearchLogQueue(queue);
    }
    if (!clickLogQueue.isEmpty()) {
        final Queue<ClickLog> queue = clickLogQueue;
        clickLogQueue = new ConcurrentLinkedQueue<>();
        processClickLogQueue(queue);
    }
}
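The example above uses the swap-and-drain idiom: replace the shared queue reference with a fresh ConcurrentLinkedQueue, then process the detached queue without contending with concurrent producers. A minimal sketch of the same idiom in isolation (class and method names here are illustrative, not from the source above):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class SwapAndDrain {
    // volatile so producers see the new queue promptly after a swap
    private volatile Queue<String> events = new ConcurrentLinkedQueue<>();

    public void record(String event) {
        events.add(event); // producers append lock-free
    }

    public void flush() {
        if (events.isEmpty()) {
            return;
        }
        final Queue<String> drained = events;
        events = new ConcurrentLinkedQueue<>(); // new events go to the fresh queue
        for (String event : drained) {          // drain the detached snapshot
            System.out.println("processing " + event);
        }
    }
}

Note that the swap itself is not atomic: a producer that read the old reference just before the swap can still append to the detached queue during the drain, so designs using this idiom typically tolerate a small race window.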
From source file:com.cognifide.aet.executor.SuiteExecutor.java
private SuiteRunner createSuiteRunner(Suite suite) throws JMSException {
    Session session = jmsConnection.getJmsSession();
    SuiteRunner suiteRunner = new SuiteRunner(session, cacheUpdater, suiteStatusHandler, suite,
            RUNNER_IN_QUEUE, messageReceiveTimeout);
    suiteRunnerCache.put(suite.getCorrelationId(), suiteRunner);
    suiteStatusCache.put(suite.getCorrelationId(), new ConcurrentLinkedQueue<SuiteStatusResult>());
    return suiteRunner;
}
From source file:com.chinamobile.bcbsp.comm.BDBMap.java
/**
 * When duplicates are allowed, this method returns all values associated
 * with the key.
 */
public ConcurrentLinkedQueue<IMessage> getDupilcates(K key) {
    String tmp;
    int indexOf$;
    Collection<V> valueSet = storedMap.duplicates(key);
    ConcurrentLinkedQueue<IMessage> list = new ConcurrentLinkedQueue<IMessage>();
    Iterator<V> it = valueSet.iterator();
    while (it.hasNext()) {
        // Note: needs to be upgraded.
        IMessage tmpmsg = new BSPMessage();
        tmp = it.next().toString();
        indexOf$ = tmp.indexOf('$');
        tmpmsg.fromString(tmp.substring(indexOf$ + 1));
        list.add(tmpmsg);
    }
    storedMap.remove(key);
    bdbMapSize.addAndGet(-list.size());
    return list;
}
From source file:org.apache.streams.cassandra.CassandraPersistWriter.java
@Override
public void prepare(Object configurationObject) {
    this.persistQueue = new ConcurrentLinkedQueue<>();
    start();
}
From source file:org.hyperic.hq.product.JDBCMeasurementPlugin.java
protected Connection getCachedConnection(String url, String user, String pass) throws SQLException {
    String cacheKey = calculateKey(url, user, pass);
    Connection conn;
    Queue<Connection> pool;
    synchronized (connectionPools) {
        pool = connectionPools.get(cacheKey);
        if (pool == null) {
            pool = new ConcurrentLinkedQueue<Connection>();
            connectionPools.put(cacheKey, pool);
            log.debug("[getCC] Pool for '" + cacheKey + "' created");
        }
    }
    int count = 0;
    while (((conn = pool.poll()) == null) && (count++ < 5)) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException ex) {
            log.error(ex, ex);
        }
    }
    if (conn == null) {
        conn = getConnection(url, user, pass);
        log.debug("[getCC] Connection for '" + cacheKey + "' created (pool.size=" + pool.size() + ")");
    }
    log.debug("[getCC] Connection for '" + cacheKey + "' used (pool.size=" + pool.size() + ")");
    return conn;
}
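Because ConcurrentLinkedQueue.poll() never blocks, the example above emulates a bounded wait by retrying poll() with short sleeps before falling back to creating a new connection. A stripped-down sketch of that pattern (the generic type and method names are placeholders, not from the source above):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class PollRetryPool<T> {
    private final Queue<T> pool = new ConcurrentLinkedQueue<>();

    /** Polls up to maxAttempts times, sleeping between attempts; returns null on give-up. */
    public T tryAcquire(int maxAttempts, long sleepMillis) throws InterruptedException {
        for (int i = 0; i < maxAttempts; i++) {
            T item = pool.poll();      // non-blocking: null means "pool currently empty"
            if (item != null) {
                return item;
            }
            Thread.sleep(sleepMillis); // back off and let another thread release an item
        }
        return null;                   // caller creates a fresh resource instead
    }

    public void release(T item) {
        pool.offer(item);              // return the item for reuse
    }
}

If a genuinely blocking hand-off is wanted, a java.util.concurrent.BlockingQueue such as LinkedBlockingQueue with poll(timeout, unit) avoids the sleep loop entirely.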
From source file:org.apache.hadoop.yarn.server.resourcemanager.PendingEventRetrieval.java
protected RMNode convertHopToRMNode(RMNodeComps hopRMNodeFull) throws InvalidProtocolBufferException {
    RMNode rmNode = null;
    if (hopRMNodeFull != null) {
        NodeId nodeId = ConverterUtils.toNodeId(hopRMNodeFull.getHopRMNode().getNodeId());
        // Retrieve and initialize NodeBase for RMNode
        Node node = null;
        if (hopRMNodeFull.getHopRMNode().getNodeId() != null) {
            node = new NodeBase(hopRMNodeFull.getHopNode().getName(),
                    hopRMNodeFull.getHopNode().getLocation());
            if (hopRMNodeFull.getHopNode().getParent() != null) {
                node.setParent(new NodeBase(hopRMNodeFull.getHopNode().getParent()));
            }
            node.setLevel(hopRMNodeFull.getHopNode().getLevel());
        }
        // Retrieve nextHeartbeat
        boolean nextHeartbeat = hopRMNodeFull.getHopNextHeartbeat().isNextheartbeat();
        // Create Resource
        ResourceOption resourceOption = null;
        if (hopRMNodeFull.getHopResource() != null) {
            resourceOption = ResourceOption.newInstance(
                    Resource.newInstance(hopRMNodeFull.getHopResource().getMemory(),
                            hopRMNodeFull.getHopResource().getVirtualCores()),
                    hopRMNodeFull.getHopRMNode().getOvercommittimeout());
        }
        // Create RMNode from HopRMNode
        rmNode = new RMNodeImpl(nodeId, rmContext, hopRMNodeFull.getHopRMNode().getHostName(),
                hopRMNodeFull.getHopRMNode().getCommandPort(),
                hopRMNodeFull.getHopRMNode().getHttpPort(), node, resourceOption,
                hopRMNodeFull.getHopRMNode().getNodemanagerVersion(),
                hopRMNodeFull.getHopRMNode().getHealthReport(),
                hopRMNodeFull.getHopRMNode().getLastHealthReportTime(), nextHeartbeat,
                conf.getBoolean(YarnConfiguration.HOPS_DISTRIBUTED_RT_ENABLED,
                        YarnConfiguration.DEFAULT_HOPS_DISTRIBUTED_RT_ENABLED));
        ((RMNodeImpl) rmNode).setState(hopRMNodeFull.getHopRMNode().getCurrentState());
        // *** Recover maps/lists of RMNode ***
        // 1. Recover JustLaunchedContainers
        List<JustLaunchedContainers> hopJlcList = hopRMNodeFull.getHopJustLaunchedContainers();
        if (hopJlcList != null && !hopJlcList.isEmpty()) {
            Map<org.apache.hadoop.yarn.api.records.ContainerId,
                    org.apache.hadoop.yarn.api.records.ContainerStatus> justLaunchedContainers =
                    new HashMap<org.apache.hadoop.yarn.api.records.ContainerId,
                            org.apache.hadoop.yarn.api.records.ContainerStatus>();
            for (JustLaunchedContainers hop : hopJlcList) {
                // Create ContainerId
                org.apache.hadoop.yarn.api.records.ContainerId cid =
                        ConverterUtils.toContainerId(hop.getContainerId());
                // Find and create ContainerStatus
                ContainerStatus hopContainerStatus =
                        hopRMNodeFull.getHopContainersStatus().get(hop.getContainerId());
                org.apache.hadoop.yarn.api.records.ContainerStatus conStatus =
                        org.apache.hadoop.yarn.api.records.ContainerStatus.newInstance(cid,
                                ContainerState.valueOf(hopContainerStatus.getState()),
                                hopContainerStatus.getDiagnostics(),
                                hopContainerStatus.getExitstatus());
                justLaunchedContainers.put(cid, conStatus);
            }
            ((RMNodeImpl) rmNode).setJustLaunchedContainers(justLaunchedContainers);
        }
        // 2. Recover ContainerIdsToClean
        List<ContainerId> cidToCleanList = hopRMNodeFull.getHopContainerIdsToClean();
        if (cidToCleanList != null && !cidToCleanList.isEmpty()) {
            Set<org.apache.hadoop.yarn.api.records.ContainerId> containersToClean =
                    new TreeSet<org.apache.hadoop.yarn.api.records.ContainerId>();
            for (ContainerId hop : cidToCleanList) {
                // Create ContainerId
                containersToClean.add(ConverterUtils.toContainerId(hop.getContainerId()));
            }
            ((RMNodeImpl) rmNode).setContainersToClean(containersToClean);
        }
        // 3. Finished Applications
        List<FinishedApplications> hopFinishedAppsList = hopRMNodeFull.getHopFinishedApplications();
        if (hopFinishedAppsList != null && !hopFinishedAppsList.isEmpty()) {
            List<ApplicationId> finishedApps = new ArrayList<ApplicationId>();
            for (FinishedApplications hop : hopFinishedAppsList) {
                finishedApps.add(ConverterUtils.toApplicationId(hop.getApplicationId()));
            }
            ((RMNodeImpl) rmNode).setFinishedApplications(finishedApps);
        }
        // 4. UpdatedContainerInfo
        // Retrieve all UpdatedContainerInfo entries for this particular RMNode
        Map<Integer, List<UpdatedContainerInfo>> hopUpdatedContainerInfoMap =
                hopRMNodeFull.getHopUpdatedContainerInfo();
        if (hopUpdatedContainerInfoMap != null && !hopUpdatedContainerInfoMap.isEmpty()) {
            ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo>
                    updatedContainerInfoQueue = new ConcurrentLinkedQueue<
                            org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo>();
            for (int uciId : hopUpdatedContainerInfoMap.keySet()) {
                for (UpdatedContainerInfo hopUCI : hopUpdatedContainerInfoMap.get(uciId)) {
                    List<org.apache.hadoop.yarn.api.records.ContainerStatus> newlyAllocated =
                            new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>();
                    List<org.apache.hadoop.yarn.api.records.ContainerStatus> completed =
                            new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>();
                    // Retrieve ContainerStatus entries for the particular UpdatedContainerInfo
                    org.apache.hadoop.yarn.api.records.ContainerId cid =
                            ConverterUtils.toContainerId(hopUCI.getContainerId());
                    ContainerStatus hopContainerStatus =
                            hopRMNodeFull.getHopContainersStatus().get(hopUCI.getContainerId());
                    org.apache.hadoop.yarn.api.records.ContainerStatus conStatus =
                            org.apache.hadoop.yarn.api.records.ContainerStatus.newInstance(cid,
                                    ContainerState.valueOf(hopContainerStatus.getState()),
                                    hopContainerStatus.getDiagnostics(),
                                    hopContainerStatus.getExitstatus());
                    // Check the ContainerStatus state to add it to the appropriate list
                    if (conStatus != null) {
                        if (conStatus.getState().toString()
                                .equals(TablesDef.ContainerStatusTableDef.STATE_RUNNING)) {
                            newlyAllocated.add(conStatus);
                        } else if (conStatus.getState().toString()
                                .equals(TablesDef.ContainerStatusTableDef.STATE_COMPLETED)) {
                            completed.add(conStatus);
                        }
                    }
                    org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo uci =
                            new org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo(
                                    newlyAllocated, completed, hopUCI.getUpdatedContainerInfoId());
                    updatedContainerInfoQueue.add(uci);
                    ((RMNodeImpl) rmNode).setUpdatedContainerInfo(updatedContainerInfoQueue);
                    // Update uci counter
                    ((RMNodeImpl) rmNode).setUpdatedContainerInfoId(
                            hopRMNodeFull.getHopRMNode().getUciId());
                }
            }
        }
        // 5. Retrieve latestNodeHeartBeatResponse
        NodeHBResponse hopHB = hopRMNodeFull.getHopNodeHBResponse();
        if (hopHB != null && hopHB.getResponse() != null) {
            NodeHeartbeatResponse hb = new NodeHeartbeatResponsePBImpl(
                    YarnServerCommonServiceProtos.NodeHeartbeatResponseProto
                            .parseFrom(hopHB.getResponse()));
            ((RMNodeImpl) rmNode).setLatestNodeHBResponse(hb);
        }
    }
    return rmNode;
}
From source file:metlos.executors.batch.BatchExecutorTest.java
private void runSimpleDelayTest(int nofThreads) throws Exception {
    final ConcurrentLinkedQueue<Long> executionTimes = new ConcurrentLinkedQueue<Long>();
    Runnable task = new Runnable() {
        @Override
        public void run() {
            executionTimes.add(System.currentTimeMillis());
        }
    };
    BatchExecutor ex = getExecutor(nofThreads);
    // Start running my task... the task should "take" 0ms and there should be a delay
    // of 10ms between executions... the executionTimes collection should therefore
    // contain time stamps 10ms apart from each other.
    ex.submitWithPreferedDurationAndFixedDelay(Collections.singleton(task), 0, 0, 10, TimeUnit.MILLISECONDS);
    Thread.sleep(1000);
    ex.shutdown();
    assert executionTimes.size() > 1 : "There should have been more than 1 task executed.";
    long minDelay = 8; // 10ms +- 20%
    long maxDelay = 12;
    int nofElements = executionTimes.size();
    long previousTime = executionTimes.poll();
    long cummulativeDiff = 0;
    while (!executionTimes.isEmpty()) {
        long thisTime = executionTimes.poll();
        long diff = thisTime - previousTime;
        cummulativeDiff += diff;
        previousTime = thisTime;
    }
    long averageDelay = cummulativeDiff / (nofElements - 1);
    assert minDelay < averageDelay && averageDelay < maxDelay
            : "The average delay should be in <" + minDelay + ", " + maxDelay + "> but was " + averageDelay + ".";
}
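A detail worth noting in the test above: ConcurrentLinkedQueue.size() is an O(n) traversal, not a cached counter, and is only weakly consistent under concurrent modification; the test can rely on it because the executor has been shut down before the queue is inspected. A tiny standalone sketch of the same drain-and-average computation (the sample timestamps are invented, not from the test above):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class AverageGap {
    public static void main(String[] args) {
        Queue<Long> times = new ConcurrentLinkedQueue<>();
        for (long t : new long[] {0, 10, 21, 30}) {
            times.add(t);
        }
        int n = times.size();        // O(n) scan; safe here, no concurrent writers
        long prev = times.poll();
        long total = 0;
        while (!times.isEmpty()) {   // isEmpty() is cheap, unlike size()
            long cur = times.poll();
            total += cur - prev;
            prev = cur;
        }
        System.out.println("average gap = " + (total / (n - 1))); // prints 10
    }
}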
From source file:org.sakaiproject.nakamura.http.qos.QoSFilter.java
/**
 * @param componentContext
 * @throws ServletException
 */
@SuppressWarnings("unchecked")
@Activate
protected void activate(ComponentContext componentContext) throws ServletException {
    Dictionary<String, Object> properties = componentContext.getProperties();
    long defaultTimeout = PropertiesUtil.toInteger(properties.get(QOS_TIMEOUT_CONFIG), -1);
    int maxPriorityNumber = PropertiesUtil.toInteger(properties.get(QOS_MAX_PRIORITY_CONF), 2);
    priorityQueue = new Queue[maxPriorityNumber + 1];
    for (int i = 0; i < priorityQueue.length; i++) {
        priorityQueue[i] = new ConcurrentLinkedQueue<Continuation>();
    }
    // path, max requests, priority, timeout
    qoSControMap.clear();
    String[] qosLocations = PropertiesUtil.toStringArray(properties.get(QOS_CATEGORIES_CONFIG));
    if (qosLocations != null) {
        for (String qosLocation : qosLocations) {
            String[] settings = StringUtils.split(qosLocation, ";");
            if (settings != null) {
                if (settings.length > 3) {
                    qoSControMap.put(settings[0], new QoSControl(priorityQueue,
                            Integer.parseInt(settings[1]), Integer.parseInt(settings[2]),
                            Long.parseLong(settings[3])));
                } else if (settings.length > 2) {
                    qoSControMap.put(settings[0], new QoSControl(priorityQueue,
                            Integer.parseInt(settings[1]), Integer.parseInt(settings[2]),
                            defaultTimeout));
                } else if (settings.length > 1) {
                    qoSControMap.put(settings[0], new QoSControl(priorityQueue,
                            Integer.parseInt(settings[1]), maxPriorityNumber, defaultTimeout));
                }
            }
        }
    }
    // defaults
    int qosDefaultPriority = PropertiesUtil.toInteger(properties.get(QOS_DEFAULT_PRIORITY_CONF), 0);
    int qosDefaultLimit = PropertiesUtil.toInteger(properties.get(QOS_DEFAULT_LIMIT_CONF), 10);
    long qosDefaultTimeout = PropertiesUtil.toLong(properties.get(QOS_DEFAULT_REQUEST_TIMEOUT_CONF),
            defaultTimeout);
    waitMs = PropertiesUtil.toLong(properties.get(QOS_SEMAPHOREWAIT_CONF), 50);
    defaultQoSControl = new QoSControl(priorityQueue, qosDefaultLimit, qosDefaultPriority, qosDefaultTimeout);
    int filterPriority = PropertiesUtil.toInteger(properties.get(FILTER_PRIORITY_CONF), 10);
    extHttpService.registerFilter(this, ".*", null, filterPriority, null);
}
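The raw new Queue[maxPriorityNumber + 1] plus @SuppressWarnings("unchecked") in the example above is the standard workaround for Java's ban on generic array creation: new Queue<Continuation>[n] does not compile. A minimal sketch of the same workaround in isolation (the class and element type are hypothetical, not from the source above):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class PriorityBuckets {
    private final Queue<Runnable>[] buckets;

    @SuppressWarnings("unchecked") // new Queue<Runnable>[n] is illegal; create raw and assign
    public PriorityBuckets(int priorities) {
        buckets = new Queue[priorities];
        for (int i = 0; i < buckets.length; i++) {
            buckets[i] = new ConcurrentLinkedQueue<>();
        }
    }

    public void enqueue(int priority, Runnable r) {
        buckets[priority].add(r);
    }
}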
From source file:org.apache.bookkeeper.client.LedgerHandle.java
LedgerHandle(ClientContext clientCtx, long ledgerId, Versioned<LedgerMetadata> versionedMetadata,
        BookKeeper.DigestType digestType, byte[] password, EnumSet<WriteFlag> writeFlags)
        throws GeneralSecurityException, NumberFormatException {
    this.clientCtx = clientCtx;
    this.versionedMetadata = versionedMetadata;
    this.pendingAddOps = new ConcurrentLinkedQueue<PendingAddOp>();
    this.writeFlags = writeFlags;

    LedgerMetadata metadata = versionedMetadata.getValue();
    if (metadata.isClosed()) {
        lastAddConfirmed = lastAddPushed = metadata.getLastEntryId();
        length = metadata.getLength();
    } else {
        lastAddConfirmed = lastAddPushed = INVALID_ENTRY_ID;
        length = 0;
    }
    this.pendingAddsSequenceHead = lastAddConfirmed;
    this.ledgerId = ledgerId;

    if (clientCtx.getConf().enableStickyReads
            && getLedgerMetadata().getEnsembleSize() == getLedgerMetadata().getWriteQuorumSize()) {
        stickyBookieIndex = clientCtx.getPlacementPolicy().getStickyReadBookieIndex(metadata, Optional.empty());
    } else {
        stickyBookieIndex = -1;
    }

    if (clientCtx.getConf().throttleValue > 0) {
        this.throttler = RateLimiter.create(clientCtx.getConf().throttleValue);
    } else {
        this.throttler = null;
    }

    macManager = DigestManager.instantiate(ledgerId, password,
            BookKeeper.DigestType.toProtoDigestType(digestType), clientCtx.getByteBufAllocator(),
            clientCtx.getConf().useV2WireProtocol);

    // If the password is empty, pass the same random ledger key which is generated by the hash of the empty
    // password, so that the bookie can avoid processing the keys for each entry
    this.ledgerKey = DigestManager.generateMasterKey(password);
    distributionSchedule = new RoundRobinDistributionSchedule(metadata.getWriteQuorumSize(),
            metadata.getAckQuorumSize(), metadata.getEnsembleSize());
    this.bookieFailureHistory = CacheBuilder.newBuilder()
            .expireAfterWrite(clientCtx.getConf().bookieFailureHistoryExpirationMSec, TimeUnit.MILLISECONDS)
            .build(new CacheLoader<BookieSocketAddress, Long>() {
                @Override
                public Long load(BookieSocketAddress key) {
                    return -1L;
                }
            });
    this.bookiesHealthInfo = new BookiesHealthInfo() {
        @Override
        public long getBookieFailureHistory(BookieSocketAddress bookieSocketAddress) {
            Long lastFailure = bookieFailureHistory.getIfPresent(bookieSocketAddress);
            return lastFailure == null ? -1L : lastFailure;
        }

        @Override
        public long getBookiePendingRequests(BookieSocketAddress bookieSocketAddress) {
            return clientCtx.getBookieClient().getNumPendingRequests(bookieSocketAddress, ledgerId);
        }
    };

    ensembleChangeCounter = clientCtx.getClientStats().getEnsembleChangeCounter();
    lacUpdateHitsCounter = clientCtx.getClientStats().getLacUpdateHitsCounter();
    lacUpdateMissesCounter = clientCtx.getClientStats().getLacUpdateMissesCounter();
    clientChannelWriteWaitStats = clientCtx.getClientStats().getClientChannelWriteWaitLogger();

    clientCtx.getClientStats().registerPendingAddsGauge(new Gauge<Integer>() {
        @Override
        public Integer getDefaultValue() {
            return 0;
        }

        @Override
        public Integer getSample() {
            return pendingAddOps.size();
        }
    });
    initializeWriteHandleState();
}
From source file:org.glassfish.jersey.examples.sseitemstore.jaxrs.JaxrsItemStoreResourceTest.java
/**
 * Test the {@link SseEventSource} reconnect feature.
 *
 * @throws Exception in case of a test failure.
 */
@Test
public void testEventSourceReconnect() throws Exception {
    final WebTarget itemsTarget = target("items");
    // countdown only on new item events
    final CountDownLatch latch = new CountDownLatch(MAX_ITEMS * MAX_LISTENERS * 2);
    final List<Queue<String>> receivedQueues = new ArrayList<>(MAX_LISTENERS);
    final SseEventSource[] sources = new SseEventSource[MAX_LISTENERS];

    for (int i = 0; i < MAX_LISTENERS; i++) {
        final int id = i;
        final SseEventSource es = SseEventSource.target(itemsTarget.path("events"))
                .reconnectingEvery(1, TimeUnit.MILLISECONDS).build();
        sources[id] = es;
        final Queue<String> received = new ConcurrentLinkedQueue<>();
        receivedQueues.add(received);
        es.register(inboundEvent -> {
            try {
                if (null == inboundEvent.getName()) {
                    final String data = inboundEvent.readData();
                    LOGGER.info("[-i-] SOURCE " + id + ": Received event id=" + inboundEvent.getId()
                            + " data=" + data);
                    received.add(data);
                    latch.countDown();
                }
            } catch (Exception ex) {
                LOGGER.log(Level.SEVERE, "[-x-] SOURCE " + id + ": Error getting event data.", ex);
                received.add("[data processing error]");
            }
        });
    }

    final String[] postedItems = new String[MAX_ITEMS * 2];
    try {
        open(sources);
        for (int i = 0; i < MAX_ITEMS; i++) {
            final String item = String.format("round-1-%02d", i);
            postItem(itemsTarget, item);
            postedItems[i] = item;
            sendCommand(itemsTarget, "disconnect");
            Thread.sleep(200);
        }
        final int reconnectDelay = 1;
        sendCommand(itemsTarget, "reconnect " + reconnectDelay);
        sendCommand(itemsTarget, "disconnect");
        Thread.sleep(reconnectDelay * 1000);
        for (int i = 0; i < MAX_ITEMS; i++) {
            final String item = String.format("round-2-%02d", i);
            postedItems[i + MAX_ITEMS] = item;
            postItem(itemsTarget, item);
        }
        sendCommand(itemsTarget, "reconnect now");
        assertTrue("Waiting to receive all events has timed out.",
                latch.await((1 + MAX_LISTENERS * (MAX_ITEMS + 1) * reconnectDelay) * getAsyncTimeoutMultiplier(),
                        TimeUnit.SECONDS));
        // need to force disconnect on server in order for EventSource.close(...) to succeed with HttpUrlConnection
        sendCommand(itemsTarget, "disconnect");
    } finally {
        close(sources);
    }

    final String storedItems = itemsTarget.request().get(String.class);
    for (String item : postedItems) {
        assertThat("Posted item '" + item + "' stored on server", storedItems, containsString(item));
    }

    int sourceId = 0;
    for (Queue<String> queue : receivedQueues) {
        assertThat("Received events in source " + sourceId, queue,
                describedAs("Collection containing %0", hasItems(postedItems),
                        Arrays.asList(postedItems).toString()));
        assertThat("Size of received queue for source " + sourceId, queue.size(),
                equalTo(postedItems.length));
        sourceId++;
    }
}