List of usage examples for java.util.concurrent.LinkedBlockingQueue.offer
public boolean offer(E e)
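A minimal, self-contained sketch of the offer contract (the capacity of 2 is chosen purely for illustration): offer returns true while the bounded queue has room and false, without blocking, once the capacity bound is reached.

import java.util.concurrent.LinkedBlockingQueue;

public class OfferExample {
    public static void main(String[] args) {
        // Bounded queue with a capacity of 2 (capacity chosen only for illustration).
        LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>(2);

        System.out.println(queue.offer("a")); // true, the queue has room
        System.out.println(queue.offer("b")); // true, the queue is now full
        System.out.println(queue.offer("c")); // false, offer never blocks when the queue is full
    }
}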
From source file: org.apache.bookkeeper.bookie.InterleavedLedgerStorageTest.java
@Test
public void testConsistencyCheckConcurrentGC() throws Exception {
    final long signalDone = -1;
    final List<Exception> asyncErrors = new ArrayList<>();
    final LinkedBlockingQueue<Long> toCompact = new LinkedBlockingQueue<>();
    final Semaphore awaitingCompaction = new Semaphore(0);

    interleavedStorage.flush();
    final long lastLogId = entryLogger.getLeastUnflushedLogId();

    final MutableInt counter = new MutableInt(0);
    entryLogger.setCheckEntryTestPoint((ledgerId, entryId, entryLogId, pos) -> {
        if (entryLogId < lastLogId) {
            if (counter.intValue() % 100 == 0) {
                try {
                    toCompact.put(entryLogId);
                    awaitingCompaction.acquire();
                } catch (InterruptedException e) {
                    asyncErrors.add(e);
                }
            }
            counter.increment();
        }
    });

    Thread mutator = new Thread(() -> {
        EntryLogCompactor compactor = new EntryLogCompactor(conf, entryLogger, interleavedStorage,
                entryLogger::removeEntryLog);
        while (true) {
            Long next = null;
            try {
                next = toCompact.take();
                if (next == null || next == signalDone) {
                    break;
                }
                compactor.compact(entryLogger.getEntryLogMetadata(next));
            } catch (BufferedChannelBase.BufferedChannelClosedException e) {
                // next was already removed, ignore
            } catch (Exception e) {
                asyncErrors.add(e);
                break;
            } finally {
                if (next != null) {
                    awaitingCompaction.release();
                }
            }
        }
    });
    mutator.start();

    List<LedgerStorage.DetectedInconsistency> inconsistencies = interleavedStorage
            .localConsistencyCheck(Optional.empty());
    for (LedgerStorage.DetectedInconsistency e : inconsistencies) {
        LOG.error("Found: {}", e);
    }
    Assert.assertEquals(0, inconsistencies.size());

    toCompact.offer(signalDone);
    mutator.join();
    for (Exception e : asyncErrors) {
        throw e;
    }

    if (!conf.isEntryLogPerLedgerEnabled()) {
        Assert.assertNotEquals(0,
                statsProvider.getCounter(BOOKIE_SCOPE + "." + STORAGE_SCRUB_PAGE_RETRIES).get().longValue());
    }
}
From source file: org.apache.hadoop.hbase.client.HTableMultiplexer.java
/**
 * The put request will be buffered by its corresponding buffer queue and will be
 * retried before the request is dropped.
 * Returns false if the queue is already full.
 *
 * @param tableName
 * @param put
 * @param retry
 * @return true if the request can be accepted by its corresponding buffer queue.
 * @throws IOException
 */
public boolean put(final TableName tableName, final Put put, int retry) throws IOException {
    if (retry <= 0) {
        return false;
    }

    LinkedBlockingQueue<PutStatus> queue;
    HTable htable = getHTable(tableName);
    try {
        htable.validatePut(put);
        HRegionLocation loc = htable.getRegionLocation(put.getRow(), false);
        if (loc != null) {
            // Add the put pair into its corresponding queue.
            queue = addNewRegionServer(loc, htable);
            // Generate a PutStatus object and offer it into the queue.
            PutStatus s = new PutStatus(loc.getRegionInfo(), put, retry);
            return queue.offer(s);
        }
    } catch (Exception e) {
        LOG.debug("Cannot process the put " + put + " because of " + e);
    }
    return false;
}
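The put() example above uses offer's boolean result as back-pressure: when the per-region buffer queue is full, the request is rejected rather than blocking the caller. The following is only a sketch of that pattern with hypothetical names (BufferedSubmitter, Request, submit), not code from HBase.

import java.util.concurrent.LinkedBlockingQueue;

class BufferedSubmitter {
    // Hypothetical request type standing in for HBase's PutStatus.
    static class Request {
        final String payload;
        Request(String payload) { this.payload = payload; }
    }

    // Bounded buffer; the bound is what makes offer's false return value meaningful.
    private final LinkedBlockingQueue<Request> bufferQueue = new LinkedBlockingQueue<>(1000);

    /** Returns true if the request was buffered, false if the buffer is full. */
    boolean submit(Request request) {
        return bufferQueue.offer(request);
    }
}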
From source file: org.apache.hedwig.server.proxy.HedwigProxy.java
public void start() throws InterruptedException {
    final LinkedBlockingQueue<Boolean> queue = new LinkedBlockingQueue<Boolean>();
    new Thread(tg, new Runnable() {
        @Override
        public void run() {
            client = new HedwigClient(cfg);

            ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
            serverSocketChannelFactory = new NioServerSocketChannelFactory(
                    Executors.newCachedThreadPool(tfb.setNameFormat("HedwigProxy-NIOBoss-%d").build()),
                    Executors.newCachedThreadPool(tfb.setNameFormat("HedwigProxy-NIOWorker-%d").build()));

            initializeHandlers();
            initializeNetty();

            // Signal the caller that startup has finished.
            queue.offer(true);
        }
    }).start();

    // Block until the startup thread signals completion.
    queue.take();
}
From source file: org.apache.nifi.processor.util.listen.AbstractListenEventProcessor.java
/**
 * Creates a pool of ByteBuffers with the given size.
 *
 * @param poolSize the number of buffers to initialize the pool with
 * @param bufferSize the size of each buffer
 * @return a blocking queue with size equal to poolSize and each buffer equal to bufferSize
 */
protected BlockingQueue<ByteBuffer> createBufferPool(final int poolSize, final int bufferSize) {
    final LinkedBlockingQueue<ByteBuffer> bufferPool = new LinkedBlockingQueue<>(poolSize);
    for (int i = 0; i < poolSize; i++) {
        bufferPool.offer(ByteBuffer.allocate(bufferSize));
    }
    return bufferPool;
}
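A typical way to consume such a pool, sketched here with a hypothetical PooledBufferReader class rather than taken from the NiFi source, is to take a buffer, use it, and offer it back so the pool keeps a fixed footprint.

import java.nio.ByteBuffer;
import java.util.concurrent.BlockingQueue;

class PooledBufferReader {
    // Hypothetical consumer of the pool returned by createBufferPool.
    void readWithPooledBuffer(BlockingQueue<ByteBuffer> bufferPool) throws InterruptedException {
        ByteBuffer buffer = bufferPool.take();   // borrow a buffer, blocking until one is available
        try {
            buffer.clear();
            // ... fill and process the buffer here ...
        } finally {
            bufferPool.offer(buffer);            // return the buffer so the pool keeps its fixed size
        }
    }
}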
From source file: org.jivesoftware.openfire.clearspace.ClearspaceManager.java
/**
 * Sends an IQ packet to the Clearspace external component and returns the IQ packet
 * returned by CS, or <tt>null</tt> if no answer was received before the specified
 * timeout.
 *
 * @param packet IQ packet to send.
 * @param timeout milliseconds to wait before timing out.
 * @return IQ packet returned by Clearspace in response to the packet we sent.
 */
public IQ query(final IQ packet, int timeout) {
    // Complain if FROM is empty.
    if (packet.getFrom() == null) {
        throw new IllegalStateException("IQ packets with no FROM cannot be sent to Clearspace");
    }
    // If CS is not connected then return null.
    if (clearspaces.isEmpty()) {
        return null;
    }
    // Set the target address on the IQ packet. Rotate the list so we distribute load.
    String component;
    synchronized (clearspaces) {
        component = clearspaces.get(0);
        Collections.rotate(clearspaces, 1);
    }
    packet.setTo(component);

    final LinkedBlockingQueue<IQ> answer = new LinkedBlockingQueue<IQ>(8);
    final IQRouter router = XMPPServer.getInstance().getIQRouter();
    router.addIQResultListener(packet.getID(), new IQResultListener() {
        public void receivedAnswer(IQ packet) {
            answer.offer(packet);
        }

        public void answerTimeout(String packetId) {
            Log.warn("No answer from Clearspace was received for IQ stanza: " + packet);
        }
    });
    XMPPServer.getInstance().getIQRouter().route(packet);

    IQ reply = null;
    try {
        reply = answer.poll(timeout, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        // Ignore
    }
    return reply;
}
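The query() example above pairs offer with poll(timeout): a listener callback offers the answer into a single-use queue while the calling thread polls with a timeout and gets null if nothing arrived in time. A stripped-down sketch of that rendezvous pattern, with hypothetical names (RendezvousExample, answer) and not taken from Openfire:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class RendezvousExample {
    public static void main(String[] args) throws InterruptedException {
        // Single-use mailbox: the responder offers the result, the caller polls with a timeout.
        LinkedBlockingQueue<String> answer = new LinkedBlockingQueue<>(1);

        new Thread(() -> {
            // Simulated asynchronous responder (stands in for the IQResultListener callback).
            answer.offer("reply-payload");
        }).start();

        // Wait up to one second for the reply; null means the call timed out.
        String reply = answer.poll(1, TimeUnit.SECONDS);
        System.out.println(reply != null ? reply : "timed out");
    }
}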