List of usage examples for java.util.concurrent BlockingQueue offer

The timed offer inserts the specified element into the queue, waiting up to the specified wait time if necessary for space to become available. It returns true if successful, or false if the waiting time elapses before space is available.

boolean offer(E e, long timeout, TimeUnit unit) throws InterruptedException;
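A minimal, self-contained sketch of that contract (the queue type and capacity here are chosen only for illustration):

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class OfferExample {
        public static void main(String[] args) throws InterruptedException {
            // bounded queue; capacity 2 chosen only for illustration
            BlockingQueue<String> queue = new ArrayBlockingQueue<>(2);
            queue.offer("a", 100, TimeUnit.MILLISECONDS); // succeeds immediately
            queue.offer("b", 100, TimeUnit.MILLISECONDS); // succeeds immediately
            // queue is now full: this call waits up to 100 ms, then returns false
            boolean accepted = queue.offer("c", 100, TimeUnit.MILLISECONDS);
            System.out.println("third offer accepted: " + accepted); // prints false
        }
    }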
From source file:com.addthis.hydra.kafka.consumer.KafkaSource.java
static <E> void putWhileRunning(BlockingQueue<E> queue, E value, AtomicBoolean running) {
    boolean offered = false;
    while (!offered) {
        if (!running.get()) {
            throw BenignKafkaException.INSTANCE;
        }
        try {
            offered = queue.offer(value, 1, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // ignored: the loop retries until the value is queued or the source stops
        }
    }
}
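The consumer side of this pattern can apply the same cooperative cancellation with a timed poll. A hypothetical counterpart (takeWhileRunning is not in KafkaSource; BenignKafkaException is the source's own sentinel exception):

    // Hypothetical consumer-side counterpart to putWhileRunning (not in the source):
    // polls with a short timeout so the running flag is rechecked about once a second.
    static <E> E takeWhileRunning(BlockingQueue<E> queue, AtomicBoolean running) {
        while (true) {
            if (!running.get()) {
                throw BenignKafkaException.INSTANCE;
            }
            try {
                E value = queue.poll(1, TimeUnit.SECONDS);
                if (value != null) {
                    return value;
                }
            } catch (InterruptedException e) {
                // ignored, matching the producer side: the loop retries
            }
        }
    }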
From source file:org.apache.streams.local.tasks.BaseStreamsTask.java
/**
 * Adds a StreamsDatum to the outgoing queues. If there are multiple queues, it uses serialization to create
 * clones of the datum and adds a new clone to each queue.
 * @param datum the datum to distribute to every outgoing queue
 */
protected void addToOutgoingQueue(StreamsDatum datum) throws InterruptedException {
    if (this.outQueues.size() == 1) {
        outQueues.get(0).put(datum);
    } else {
        List<BlockingQueue<StreamsDatum>> toOutput = Lists.newLinkedList(this.outQueues);
        while (!toOutput.isEmpty()) {
            // iterate with an explicit Iterator so a queue can be removed mid-loop
            // without triggering a ConcurrentModificationException
            Iterator<BlockingQueue<StreamsDatum>> it = toOutput.iterator();
            while (it.hasNext()) {
                BlockingQueue<StreamsDatum> queue = it.next();
                StreamsDatum newDatum = cloneStreamsDatum(datum);
                if (newDatum != null) {
                    if (queue.offer(newDatum, 500, TimeUnit.MILLISECONDS)) {
                        it.remove();
                    }
                }
            }
        }
    }
}
From source file:com.sm.store.cluster.Connection.java
public void putIfAbsent(BlockingQueue<Connection> queue) {
    lock.lock();
    try {
        if (!inQueue) {
            try {
                logger.info("put in reconnectQueue " + toString());
                boolean flag = queue.offer(this, 200, TimeUnit.MILLISECONDS);
                if (flag)
                    inQueue = true;
                else
                    logger.warn("unable to put in reconnectQueue " + toString());
            } catch (InterruptedException e) {
                // swallow the interrupt; the reconnect attempt can be retried later
            }
        }
    } finally {
        lock.unlock();
    }
}
From source file:org.apache.hadoop.ipc.FairCallQueue.java
@Override
public boolean offer(E e, long timeout, TimeUnit unit) throws InterruptedException {
    int priorityLevel = e.getPriorityLevel();
    BlockingQueue<E> q = this.queues.get(priorityLevel);
    boolean ret = q.offer(e, timeout, unit);
    if (ret) {
        signalNotEmpty();
    }
    return ret;
}
From source file:org.apache.solr.handler.dataimport.processor.XPathEntityProcessor.java
private Iterator<Map<String, Object>> getRowIterator(final Reader data, final String s) {
    // nothing atomic about it; I just needed a StrongReference
    final AtomicReference<Exception> exp = new AtomicReference<Exception>();
    final BlockingQueue<Map<String, Object>> blockingQueue = new ArrayBlockingQueue<Map<String, Object>>(
            blockingQueueSize);
    final AtomicBoolean isEnd = new AtomicBoolean(false);
    final AtomicBoolean throwExp = new AtomicBoolean(true);
    publisherThread = new Thread() {
        @Override
        public void run() {
            try {
                xpathReader.streamRecords(data, new XPathRecordReader.Handler() {
                    @Override
                    @SuppressWarnings("unchecked")
                    public void handle(Map<String, Object> record, String xpath) {
                        if (isEnd.get()) {
                            throwExp.set(false);
                            // end the streaming; otherwise the parsing will go on forever
                            // even though the consumer has gone away
                            throw new RuntimeException("BREAK");
                        }
                        Map<String, Object> row;
                        try {
                            row = readRow(record, xpath);
                        } catch (final Exception e) {
                            isEnd.set(true);
                            return;
                        }
                        offer(row);
                    }
                });
            } catch (final Exception e) {
                if (throwExp.get())
                    exp.set(e);
            } finally {
                closeIt(data);
                if (!isEnd.get()) {
                    offer(END_MARKER);
                }
            }
        }

        private void offer(Map<String, Object> row) {
            try {
                while (!blockingQueue.offer(row, blockingQueueTimeOut, blockingQueueTimeOutUnits)) {
                    if (isEnd.get())
                        return;
                    LOG.debug("Timeout elapsed writing records. Perhaps buffer size should be increased.");
                }
            } catch (final InterruptedException e) {
                return;
            } finally {
                synchronized (this) {
                    notifyAll();
                }
            }
        }
    };
    publisherThread.start();

    return new Iterator<Map<String, Object>>() {
        private Map<String, Object> lastRow;
        int count = 0;

        @Override
        public boolean hasNext() {
            return !isEnd.get();
        }

        @Override
        public Map<String, Object> next() {
            Map<String, Object> row;
            do {
                try {
                    row = blockingQueue.poll(blockingQueueTimeOut, blockingQueueTimeOutUnits);
                    if (row == null) {
                        LOG.debug("Timeout elapsed reading records.");
                    }
                } catch (final InterruptedException e) {
                    LOG.debug("Caught InterruptedException while waiting for row. Aborting.");
                    isEnd.set(true);
                    return null;
                }
            } while (row == null);
            if (row == END_MARKER) {
                isEnd.set(true);
                if (exp.get() != null) {
                    String msg = "Parsing failed for xml, url:" + s + " rows processed in this xml:" + count;
                    if (lastRow != null)
                        msg += " last row in this xml:" + lastRow;
                    if (ABORT.equals(onError)) {
                        wrapAndThrow(SEVERE, exp.get(), msg);
                    } else if (SKIP.equals(onError)) {
                        wrapAndThrow(DataImportHandlerException.SKIP, exp.get());
                    } else {
                        LOG.warn(msg, exp.get());
                    }
                }
                return null;
            }
            count++;
            return lastRow = row;
        }

        @Override
        public void remove() {
            /* no op */
        }
    };
}
From source file:org.apache.solr.handler.dataimport.XPathEntityProcessor.java
private Iterator<Map<String, Object>> getRowIterator(final Reader data, final String s) {
    // nothing atomic about it; I just needed a StrongReference
    final AtomicReference<Exception> exp = new AtomicReference<>();
    final BlockingQueue<Map<String, Object>> blockingQueue = new ArrayBlockingQueue<>(blockingQueueSize);
    final AtomicBoolean isEnd = new AtomicBoolean(false);
    final AtomicBoolean throwExp = new AtomicBoolean(true);
    publisherThread = new Thread() {
        @Override
        public void run() {
            try {
                xpathReader.streamRecords(data, (record, xpath) -> {
                    if (isEnd.get()) {
                        throwExp.set(false);
                        // end the streaming; otherwise the parsing will go on forever
                        // even though the consumer has gone away
                        throw new RuntimeException("BREAK");
                    }
                    Map<String, Object> row;
                    try {
                        row = readRow(record, xpath);
                    } catch (Exception e) {
                        isEnd.set(true);
                        return;
                    }
                    offer(row);
                });
            } catch (Exception e) {
                if (throwExp.get())
                    exp.set(e);
            } finally {
                closeIt(data);
                if (!isEnd.get()) {
                    offer(END_MARKER);
                }
            }
        }

        private void offer(Map<String, Object> row) {
            try {
                while (!blockingQueue.offer(row, blockingQueueTimeOut, blockingQueueTimeOutUnits)) {
                    if (isEnd.get())
                        return;
                    LOG.debug("Timeout elapsed writing records. Perhaps buffer size should be increased.");
                }
            } catch (InterruptedException e) {
                return;
            } finally {
                synchronized (this) {
                    notifyAll();
                }
            }
        }
    };
    publisherThread.start();

    return new Iterator<Map<String, Object>>() {
        private Map<String, Object> lastRow;
        int count = 0;

        @Override
        public boolean hasNext() {
            return !isEnd.get();
        }

        @Override
        public Map<String, Object> next() {
            Map<String, Object> row;
            do {
                try {
                    row = blockingQueue.poll(blockingQueueTimeOut, blockingQueueTimeOutUnits);
                    if (row == null) {
                        LOG.debug("Timeout elapsed reading records.");
                    }
                } catch (InterruptedException e) {
                    LOG.debug("Caught InterruptedException while waiting for row. Aborting.");
                    isEnd.set(true);
                    return null;
                }
            } while (row == null);
            if (row == END_MARKER) {
                isEnd.set(true);
                if (exp.get() != null) {
                    String msg = "Parsing failed for xml, url:" + s + " rows processed in this xml:" + count;
                    if (lastRow != null)
                        msg += " last row in this xml:" + lastRow;
                    if (ABORT.equals(onError)) {
                        wrapAndThrow(SEVERE, exp.get(), msg);
                    } else if (SKIP.equals(onError)) {
                        wrapAndThrow(DataImportHandlerException.SKIP, exp.get());
                    } else {
                        LOG.warn(msg, exp.get());
                    }
                }
                return null;
            }
            count++;
            return lastRow = row;
        }

        @Override
        public void remove() {
            /* no op */
        }
    };
}
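Both XPathEntityProcessor variants above use the same handshake: the producer thread offers rows with a timeout so it can notice when the consumer has gone away, and a sentinel END_MARKER row tells the consumer the stream is finished. A self-contained sketch of that pattern (all names here are illustrative, not Solr's):

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class PoisonPillExample {
        // sentinel object: identity comparison marks the end of the stream
        private static final String END_MARKER = "END";

        public static void main(String[] args) throws InterruptedException {
            BlockingQueue<String> queue = new ArrayBlockingQueue<>(4);
            AtomicBoolean consumerGone = new AtomicBoolean(false);

            Thread producer = new Thread(() -> {
                try {
                    for (int i = 0; i < 10; i++) {
                        String row = "row-" + i;
                        // timed offer: retry until accepted, but give up if the consumer left
                        while (!queue.offer(row, 100, TimeUnit.MILLISECONDS)) {
                            if (consumerGone.get()) {
                                return;
                            }
                        }
                    }
                    queue.offer(END_MARKER, 100, TimeUnit.MILLISECONDS);
                } catch (InterruptedException ignored) {
                }
            });
            producer.start();

            while (true) {
                String row = queue.poll(100, TimeUnit.MILLISECONDS);
                if (row == null) {
                    continue; // timeout: poll again
                }
                if (row == END_MARKER) { // identity check, like the Solr code
                    break;
                }
                System.out.println(row);
            }
            consumerGone.set(true);
        }
    }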
From source file:org.springframework.integration.util.CallerBlocksPolicy.java
@Override
public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
    if (!executor.isShutdown()) {
        try {
            BlockingQueue<Runnable> queue = executor.getQueue();
            if (logger.isDebugEnabled()) {
                logger.debug("Attempting to queue task execution for " + this.maxWait + " milliseconds");
            }
            if (!queue.offer(r, this.maxWait, TimeUnit.MILLISECONDS)) {
                throw new RejectedExecutionException("Max wait time expired to queue task");
            }
            if (logger.isDebugEnabled()) {
                logger.debug("Task execution queued");
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RejectedExecutionException("Interrupted", e);
        }
    } else {
        throw new RejectedExecutionException("Executor has been shut down");
    }
}
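A sketch of wiring this policy into an executor: the pool sizes and queue capacity below are illustrative, and CallerBlocksPolicy takes the maximum wait in milliseconds. When the bounded queue is full, the submitting thread blocks in offer(...) instead of being rejected outright.

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import org.springframework.integration.util.CallerBlocksPolicy;

    // Illustrative wiring: a full queue makes the caller block for up to 5 seconds
    // before a RejectedExecutionException is finally thrown.
    ThreadPoolExecutor executor = new ThreadPoolExecutor(
            2, 4, 60, TimeUnit.SECONDS,
            new ArrayBlockingQueue<>(10),
            new CallerBlocksPolicy(5000));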