List of usage examples for java.util.concurrent.LinkedBlockingQueue.take()
public E take() throws InterruptedException
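Before the project examples, here is a minimal, self-contained sketch of the contract (the class name and values are illustrative, not taken from any example below): take() removes the head of the queue, waiting if necessary until an element becomes available, and throws InterruptedException if the thread is interrupted while waiting.

import java.util.concurrent.LinkedBlockingQueue;

public class TakeDemo {
    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>();

        Thread producer = new Thread(() -> {
            try {
                Thread.sleep(500);   // simulate work before producing
                queue.put("hello");  // unblocks the waiting take()
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();

        String item = queue.take();  // blocks until the producer puts an element
        System.out.println("Took: " + item);
        producer.join();
    }
}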
From source file:Test.java
public static void main(String[] args) throws Exception {
    LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<String>();
    FileOutputStream fos = new FileOutputStream("out.log");
    DataOutputStream dos = new DataOutputStream(fos);
    // Drain the queue to the log file. take() never blocks here because
    // the loop body runs only while the queue is non-empty; note that in
    // this skeleton the queue starts empty, so nothing is written until
    // it is populated.
    while (!queue.isEmpty()) {
        dos.writeUTF(queue.take());
    }
    dos.close();
}
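Because take() blocks indefinitely on an empty queue, guarding it with isEmpty() as above is only safe when no other thread consumes from the queue concurrently. A non-blocking drain (a sketch, not part of the original example) would use poll(), which returns null instead of waiting:

String s;
while ((s = queue.poll()) != null) {
    dos.writeUTF(s);
}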
From source file:com.inmobi.messaging.consumer.util.TestUtil.java
public static void assertBuffer(StreamFile file, int fileNum, int startIndex, int numMsgs, PartitionId pid,
        LinkedBlockingQueue<QueueEntry> buffer, boolean isDatabusData,
        Map<Integer, PartitionCheckpoint> expectedDeltaPck) throws InterruptedException, IOException {
    int fileIndex = (fileNum - 1) * 100;
    for (int i = startIndex; i < (startIndex + numMsgs); i++) {
        QueueEntry entry = buffer.take();
        Assert.assertEquals(entry.getPartitionId(), pid);
        if (entry.getMessageChkpoint() instanceof DeltaPartitionCheckPoint) {
            int min = Integer.parseInt(new Path(file.toString()).getParent().getName());
            Map<Integer, PartitionCheckpoint> actualDeltaPck = ((DeltaPartitionCheckPoint) entry
                    .getMessageChkpoint()).getDeltaCheckpoint();
            // get expected delta pck
            expectedDeltaPck = new DeltaPartitionCheckPoint(file, i + 1, min, expectedDeltaPck)
                    .getDeltaCheckpoint();
            // assert on expected and actual delta pck
            Assert.assertEquals(actualDeltaPck, expectedDeltaPck);
            expectedDeltaPck.clear();
        } else {
            Assert.assertEquals(entry.getMessageChkpoint(), new PartitionCheckpoint(file, i + 1));
        }
        if (isDatabusData) {
            Assert.assertEquals(new String(((Message) entry.getMessage()).getData().array()),
                    MessageUtil.constructMessage(fileIndex + i));
        } else {
            Assert.assertEquals(MessageUtil.getTextMessage(((Message) entry.getMessage()).getData().array()),
                    new Text(MessageUtil.constructMessage(fileIndex + i)));
        }
    }
}
From source file:cn.wanghaomiao.seimi.def.DefaultLocalQueue.java
@Override
public Request bPop(String crawlerName) {
    try {
        LinkedBlockingQueue<Request> queue = getQueue(crawlerName);
        return queue.take();
    } catch (InterruptedException e) {
        logger.error(e.getMessage(), e);
    }
    return null;
}
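A design note on this pattern: catching InterruptedException and returning null discards the thread's interrupt status. A common variant (a sketch, not the library's actual code) re-asserts the flag so callers can still observe the interruption:

} catch (InterruptedException e) {
    Thread.currentThread().interrupt();  // restore the interrupt flag
    logger.error(e.getMessage(), e);
}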
From source file:com.twitter.distributedlog.admin.DistributedLogAdmin.java
private static Map<String, StreamCandidate> checkStreams(
        final com.twitter.distributedlog.DistributedLogManagerFactory factory,
        final Collection<String> streams, final ExecutorService executorService, final BookKeeperClient bkc,
        final String digestpw, final int concurrency) throws IOException {
    final LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<String>();
    streamQueue.addAll(streams);
    final Map<String, StreamCandidate> candidateMap = new ConcurrentSkipListMap<String, StreamCandidate>();
    final AtomicInteger numPendingStreams = new AtomicInteger(streams.size());
    final CountDownLatch doneLatch = new CountDownLatch(1);
    Runnable checkRunnable = new Runnable() {
        @Override
        public void run() {
            while (!streamQueue.isEmpty()) {
                String stream;
                try {
                    stream = streamQueue.take();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                }
                StreamCandidate candidate;
                try {
                    LOG.info("Checking stream {}.", stream);
                    candidate = checkStream(factory, stream, executorService, bkc, digestpw);
                    LOG.info("Checked stream {} - {}.", stream, candidate);
                } catch (IOException e) {
                    LOG.error("Error on checking stream {} : ", stream, e);
                    doneLatch.countDown();
                    break;
                }
                if (null != candidate) {
                    candidateMap.put(stream, candidate);
                }
                if (numPendingStreams.decrementAndGet() == 0) {
                    doneLatch.countDown();
                }
            }
        }
    };
    Thread[] threads = new Thread[concurrency];
    for (int i = 0; i < concurrency; i++) {
        threads[i] = new Thread(checkRunnable, "check-thread-" + i);
        threads[i].start();
    }
    try {
        doneLatch.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    if (numPendingStreams.get() != 0) {
        throw new IOException(numPendingStreams.get() + " streams left w/o checked");
    }
    for (int i = 0; i < concurrency; i++) {
        threads[i].interrupt();
        try {
            threads[i].join();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
    return candidateMap;
}
From source file:com.turn.splicer.SuggestHttpWorker.java
@Override
public String call() throws Exception {
    LinkedBlockingQueue<String> TSDs;
    // TODO: have it implement its own RegionChecker to get hbase locality looking for metric names
    // lets have it just pick a random host
    String hostname = getRandomHost();
    TSDs = HttpWorker.TSDMap.get(hostname);
    if (TSDs == null) {
        LOG.error("We are not running TSDs on regionserver={}. Choosing a random host failed", hostname);
        return "{'error': 'Choice of hostname=" + hostname + " failed.'}";
    }
    String server = TSDs.take();
    String uri = "http://" + server + "/api/suggest?" + suggestQuery;
    CloseableHttpClient postman = HttpClientBuilder.create().build();
    try {
        HttpGet getRequest = new HttpGet(uri);
        LOG.info("Sending query=" + uri + " to TSD running on host=" + hostname);
        HttpResponse response = postman.execute(getRequest);
        if (response.getStatusLine().getStatusCode() != 200) {
            throw new RuntimeException(
                    "Failed : HTTP error code : " + response.getStatusLine().getStatusCode());
        }
        List<String> dl = IOUtils.readLines(response.getEntity().getContent());
        String result = StringUtils.join(dl, "");
        LOG.info("Result={}", result);
        return result;
    } finally {
        IOUtils.closeQuietly(postman);
        TSDs.put(server);
        LOG.info("Returned {} into the available queue", server);
    }
}
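This example uses the queue as a blocking object pool: take() borrows a server, blocking until one is free, and put() in the finally block returns it even when the request fails. The pattern distilled (names are illustrative):

String server = pool.take();   // blocks until a server is available
try {
    // ... use the server ...
} finally {
    pool.put(server);          // always return it, even on failure
}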
From source file:org.kurento.rabbitmq.RabbitTemplate.java
protected Message doSendAndReceiveWithFixed(final String exchange, final String routingKey,
        final Message message) {
    return this.execute(new ChannelCallback<Message>() {
        @Override
        public Message doInRabbit(Channel channel) throws Exception {
            final PendingReply pendingReply = new PendingReply();
            byte[] messageTagBytes = message.getMessageProperties().getCorrelationId();
            String messageTag;
            if (messageTagBytes != null) {
                messageTag = new String(messageTagBytes);
            } else {
                messageTag = UUID.randomUUID().toString();
            }
            RabbitTemplate.this.replyHolder.put(messageTag, pendingReply);
            // Save any existing replyTo and correlation data
            String savedReplyTo = message.getMessageProperties().getReplyTo();
            pendingReply.setSavedReplyTo(savedReplyTo);
            if (StringUtils.hasLength(savedReplyTo) && logger.isDebugEnabled()) {
                logger.debug("Replacing replyTo header:" + savedReplyTo
                        + " in favor of template's configured reply-queue:"
                        + RabbitTemplate.this.replyQueue.getName());
            }
            message.getMessageProperties().setReplyTo(RabbitTemplate.this.replyQueue.getName());
            String savedCorrelation = null;
            if (RabbitTemplate.this.correlationKey == null) { // using standard correlationId property
                byte[] correlationId = message.getMessageProperties().getCorrelationId();
                if (correlationId != null) {
                    savedCorrelation = new String(correlationId, RabbitTemplate.this.encoding);
                }
            } else {
                savedCorrelation = (String) message.getMessageProperties().getHeaders()
                        .get(RabbitTemplate.this.correlationKey);
            }
            pendingReply.setSavedCorrelation(savedCorrelation);
            if (RabbitTemplate.this.correlationKey == null) { // using standard correlationId property
                message.getMessageProperties()
                        .setCorrelationId(messageTag.getBytes(RabbitTemplate.this.encoding));
            } else {
                message.getMessageProperties().setHeader(RabbitTemplate.this.correlationKey, messageTag);
            }
            if (logger.isDebugEnabled()) {
                logger.debug("Sending message with tag " + messageTag);
            }
            doSend(channel, exchange, routingKey, message, null);
            LinkedBlockingQueue<Message> replyHandoff = pendingReply.getQueue();
            Message reply = (replyTimeout < 0) ? replyHandoff.take()
                    : replyHandoff.poll(replyTimeout, TimeUnit.MILLISECONDS);
            RabbitTemplate.this.replyHolder.remove(messageTag);
            return reply;
        }
    });
}
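The reply handoff above shows the usual choice between the two blocking reads, taken directly from the code: take() when the caller is willing to wait indefinitely, poll() with a timeout when the wait must be bounded:

Message reply = (replyTimeout < 0)
        ? replyHandoff.take()                                      // wait indefinitely
        : replyHandoff.poll(replyTimeout, TimeUnit.MILLISECONDS);  // bounded wait; null on timeout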
From source file:disko.flow.analyzers.FullRelexAnalyzer.java
public void process(AnalysisContext<TextDocument> ctx, Ports ports) throws InterruptedException {
    if (pool == null)
        init();
    final InputPort<EntityMaintainer> inputPort = ports.getInput(EntityAnalyzer.ENTITY_CHANNEL);
    final OutputPort<RelexTaskResult> outputPort = ports.getOutput(PARSE_CHANNEL);
    final LinkedBlockingQueue<Future<RelexTaskResult>> futureResults = new LinkedBlockingQueue<Future<RelexTaskResult>>(
            outputPort.getChannel().getCapacity());
    log.debug("Starting LinkGrammarAnalyzer...");
    exec.submit(new Callable<Integer>() {
        public Integer call() throws Exception {
            try {
                log.debug("LinkGrammarAnalyzer from channel + " + inputPort.getChannel());
                for (EntityMaintainer em = inputPort.take(); !inputPort.isEOS(em); em = inputPort.take())
                    submitTask(em, futureResults);
            } catch (Throwable t) {
                log.error("Unable to submit parsing task.", t);
            } finally {
                futureResults.put(new FutureRelexTaskResultEOS());
            }
            return (futureResults.size() - 1);
        }
    });
    try {
        while (true) {
            try {
                Future<RelexTaskResult> futureResult = futureResults.take();
                RelexTaskResult relexTaskResult;
                relexTaskResult = futureResult.get();
                if (relexTaskResult == null)
                    break;
                log.debug("LinkGrammarAnalyzer received " + relexTaskResult.index + ": "
                        + relexTaskResult.result.getParses().size() + " parses of sentences "
                        + relexTaskResult.sentence);
                relexTaskResult.result.setSentence(relexTaskResult.entityMaintainer.getOriginalSentence());
                outputPort.put(relexTaskResult);
            } catch (InterruptedException e) {
                for (Future<RelexTaskResult> future : futureResults) {
                    try {
                        future.cancel(true);
                    } catch (Throwable t) {
                        log.error(t);
                    }
                }
                break;
            }
        }
        for (Future<RelexTaskResult> future : futureResults) {
            future.cancel(true);
        }
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    } finally {
        outputPort.close();
        /*
         * exec.shutdown(); for (RelexContext context: pool){
         * context.getLinkParserClient().close(); }
         */
        destroy();
    }
}
From source file:org.apache.bookkeeper.bookie.InterleavedLedgerStorageTest.java
@Test
public void testConsistencyCheckConcurrentGC() throws Exception {
    final long signalDone = -1;
    final List<Exception> asyncErrors = new ArrayList<>();
    final LinkedBlockingQueue<Long> toCompact = new LinkedBlockingQueue<>();
    final Semaphore awaitingCompaction = new Semaphore(0);

    interleavedStorage.flush();
    final long lastLogId = entryLogger.getLeastUnflushedLogId();

    final MutableInt counter = new MutableInt(0);
    entryLogger.setCheckEntryTestPoint((ledgerId, entryId, entryLogId, pos) -> {
        if (entryLogId < lastLogId) {
            if (counter.intValue() % 100 == 0) {
                try {
                    toCompact.put(entryLogId);
                    awaitingCompaction.acquire();
                } catch (InterruptedException e) {
                    asyncErrors.add(e);
                }
            }
            counter.increment();
        }
    });

    Thread mutator = new Thread(() -> {
        EntryLogCompactor compactor = new EntryLogCompactor(conf, entryLogger, interleavedStorage,
                entryLogger::removeEntryLog);
        while (true) {
            Long next = null;
            try {
                next = toCompact.take();
                if (next == null || next == signalDone) {
                    break;
                }
                compactor.compact(entryLogger.getEntryLogMetadata(next));
            } catch (BufferedChannelBase.BufferedChannelClosedException e) {
                // next was already removed, ignore
            } catch (Exception e) {
                asyncErrors.add(e);
                break;
            } finally {
                if (next != null) {
                    awaitingCompaction.release();
                }
            }
        }
    });
    mutator.start();

    List<LedgerStorage.DetectedInconsistency> inconsistencies = interleavedStorage
            .localConsistencyCheck(Optional.empty());
    for (LedgerStorage.DetectedInconsistency e : inconsistencies) {
        LOG.error("Found: {}", e);
    }
    Assert.assertEquals(0, inconsistencies.size());

    toCompact.offer(signalDone);
    mutator.join();
    for (Exception e : asyncErrors) {
        throw e;
    }

    if (!conf.isEntryLogPerLedgerEnabled()) {
        Assert.assertNotEquals(0,
                statsProvider.getCounter(BOOKIE_SCOPE + "." + STORAGE_SCRUB_PAGE_RETRIES).get().longValue());
    }
}
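The test shuts its compaction thread down with a sentinel ("poison pill"): signalDone is offered onto the queue and the consumer breaks when it takes that value, so the blocking take() never has to be interrupted. The skeleton of the pattern (a sketch with illustrative names; process() is hypothetical):

static final long POISON = -1;

// consumer loop
while (true) {
    Long next = queue.take();   // blocks until work or the sentinel arrives
    if (next == POISON) {
        break;                  // orderly shutdown
    }
    process(next);
}

// producer, once all work is submitted
queue.offer(POISON);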
From source file:org.apache.bookkeeper.bookie.LedgerCacheTest.java
/**
 * Race where a flush would fail because a garbage collection occurred at
 * the wrong time.
 * {@link https://issues.apache.org/jira/browse/BOOKKEEPER-604}
 */
@Test(timeout = 60000)
public void testFlushDeleteRace() throws Exception {
    newLedgerCache();
    final AtomicInteger rc = new AtomicInteger(0);
    final LinkedBlockingQueue<Long> ledgerQ = new LinkedBlockingQueue<Long>(1);
    final byte[] masterKey = "masterKey".getBytes();
    Thread newLedgerThread = new Thread() {
        public void run() {
            try {
                for (int i = 0; i < 1000 && rc.get() == 0; i++) {
                    ledgerCache.setMasterKey(i, masterKey);
                    ledgerQ.put((long) i);
                }
            } catch (Exception e) {
                rc.set(-1);
                LOG.error("Exception in new ledger thread", e);
            }
        }
    };
    newLedgerThread.start();

    Thread flushThread = new Thread() {
        public void run() {
            try {
                while (true) {
                    Long id = ledgerQ.peek();
                    if (id == null) {
                        continue;
                    }
                    LOG.info("Put entry for {}", id);
                    try {
                        ledgerCache.putEntryOffset((long) id, 1, 0);
                    } catch (Bookie.NoLedgerException nle) {
                        // ignore
                    }
                    ledgerCache.flushLedger(true);
                }
            } catch (Exception e) {
                rc.set(-1);
                LOG.error("Exception in flush thread", e);
            }
        }
    };
    flushThread.start();

    Thread deleteThread = new Thread() {
        public void run() {
            try {
                while (true) {
                    long id = ledgerQ.take();
                    LOG.info("Deleting {}", id);
                    ledgerCache.deleteLedger(id);
                }
            } catch (Exception e) {
                rc.set(-1);
                LOG.error("Exception in delete thread", e);
            }
        }
    };
    deleteThread.start();

    newLedgerThread.join();
    assertEquals("Should have been no errors", rc.get(), 0);
    deleteThread.interrupt();
    flushThread.interrupt();
}
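Here the shutdown goes the other way: the delete thread blocks in take(), and deleteThread.interrupt() makes the blocked call throw InterruptedException, which exits the while (true) loop. Interrupting is the standard way to release a thread parked in take() when no sentinel value is practical.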
From source file:org.apache.bookkeeper.metadata.etcd.EtcdLedgerManagerTest.java
@Test
public void testRegisterLedgerMetadataListener() throws Exception {
    long ledgerId = System.currentTimeMillis();

    // create a ledger metadata
    LedgerMetadata metadata = LedgerMetadataBuilder.create().withEnsembleSize(3).withWriteQuorumSize(3)
            .withAckQuorumSize(2).withPassword("test-password".getBytes(UTF_8))
            .withDigestType(DigestType.CRC32C.toApiDigestType()).newEnsembleEntry(0L, createNumBookies(3))
            .build();
    result(lm.createLedgerMetadata(ledgerId, metadata));
    Versioned<LedgerMetadata> readMetadata = lm.readLedgerMetadata(ledgerId).get();
    log.info("Create ledger metadata : {}", readMetadata.getValue());

    // register first listener
    LinkedBlockingQueue<Versioned<LedgerMetadata>> metadataQueue1 = new LinkedBlockingQueue<>();
    LedgerMetadataListener listener1 = (lid, m) -> {
        log.info("[listener1] Received ledger {} metadata : {}", lid, m);
        metadataQueue1.add(m);
    };
    log.info("Registered first listener for ledger {}", ledgerId);
    lm.registerLedgerMetadataListener(ledgerId, listener1);
    // we should receive a metadata notification when a ledger is created
    Versioned<LedgerMetadata> notifiedMetadata = metadataQueue1.take();
    assertEquals(readMetadata, notifiedMetadata);
    ValueStream<LedgerMetadata> lms = lm.getLedgerMetadataStream(ledgerId);
    assertNotNull(lms.waitUntilWatched());
    assertNotNull(result(lms.waitUntilWatched()));

    // register second listener
    LinkedBlockingQueue<Versioned<LedgerMetadata>> metadataQueue2 = new LinkedBlockingQueue<>();
    LedgerMetadataListener listener2 = (lid, m) -> {
        log.info("[listener2] Received ledger {} metadata : {}", lid, m);
        metadataQueue2.add(m);
    };
    log.info("Registered second listener for ledger {}", ledgerId);
    lm.registerLedgerMetadataListener(ledgerId, listener2);
    Versioned<LedgerMetadata> notifiedMetadata2 = metadataQueue2.take();
    assertEquals(readMetadata, notifiedMetadata2);
    assertNotNull(lm.getLedgerMetadataStream(ledgerId));

    // update the metadata
    lm.writeLedgerMetadata(ledgerId,
            LedgerMetadataBuilder.from(metadata).newEnsembleEntry(10L, createNumBookies(3)).build(),
            notifiedMetadata.getVersion()).get();
    readMetadata = lm.readLedgerMetadata(ledgerId).get();
    assertEquals(readMetadata, metadataQueue1.take());
    assertEquals(readMetadata, metadataQueue2.take());
    lms = lm.getLedgerMetadataStream(ledgerId);
    assertNotNull(lms);
    assertEquals(2, lms.getNumConsumers());

    // remove listener2
    lm.unregisterLedgerMetadataListener(ledgerId, listener2);
    lms = lm.getLedgerMetadataStream(ledgerId);
    assertNotNull(lms);
    assertEquals(1, lms.getNumConsumers());

    // update the metadata again
    lm.writeLedgerMetadata(ledgerId,
            LedgerMetadataBuilder.from(metadata).newEnsembleEntry(20L, createNumBookies(3)).build(),
            readMetadata.getVersion()).get();
    readMetadata = lm.readLedgerMetadata(ledgerId).get();
    assertEquals(readMetadata, metadataQueue1.take());
    assertNull(metadataQueue2.poll());

    // remove listener1
    lm.unregisterLedgerMetadataListener(ledgerId, listener1);
    // the value stream will be removed
    while (lm.getLedgerMetadataStream(ledgerId) != null) {
        TimeUnit.MILLISECONDS.sleep(100);
    }
    assertEquals(0, lms.getNumConsumers());

    // update the metadata again
    lm.writeLedgerMetadata(ledgerId,
            LedgerMetadataBuilder.from(metadata).newEnsembleEntry(30L, createNumBookies(3)).build(),
            readMetadata.getVersion()).get();
    readMetadata = lm.readLedgerMetadata(ledgerId).get();
    assertNull(metadataQueue1.poll());
    assertNull(metadataQueue2.poll());

    log.info("Registered first listener for ledger {} again", ledgerId);
    lm.registerLedgerMetadataListener(ledgerId, listener1);
    notifiedMetadata = metadataQueue1.take();
    assertEquals(readMetadata, notifiedMetadata);
    lms = lm.getLedgerMetadataStream(ledgerId);
    assertNotNull(lms);
    assertEquals(1, lms.getNumConsumers());

    // delete the ledger
    lm.removeLedgerMetadata(ledgerId, readMetadata.getVersion()).get();
    // the listener will eventually be removed
    while (lm.getLedgerMetadataStream(ledgerId) != null) {
        TimeUnit.MILLISECONDS.sleep(100);
    }
    assertEquals(1, lms.getNumConsumers());
    assertNull(metadataQueue1.poll());
    assertNull(metadataQueue2.poll());
}
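A test-side pattern worth noting in this example: each listener pushes notifications into its own LinkedBlockingQueue, so the test uses take() to block until the next notification arrives and poll() to assert that no further notification was delivered.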