List of usage examples for java.util.concurrent BlockingQueue take
E take() throws InterruptedException;
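take() blocks the calling thread until an element becomes available, throwing InterruptedException if the thread is interrupted while waiting. Before the per-project examples below, here is a minimal, self-contained producer/consumer sketch; the class name, queue contents, and delay are illustrative assumptions, not taken from any of the sources that follow.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class TakeExample {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> queue = new LinkedBlockingQueue<>();

        // producer: puts a message after a short delay
        Thread producer = new Thread(() -> {
            try {
                Thread.sleep(500);
                queue.put("hello");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();

        // take() blocks here until the producer offers an element
        String msg = queue.take();
        System.out.println("received: " + msg);
        producer.join();
    }
}

Because take() can throw InterruptedException, every example below either propagates the exception, swallows it, or records it and restores the thread's interrupt flag.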
From source file:org.pentaho.telemetry.TelemetryEventKeeper.java
/**
 * Takes an event from the event queue and stores it in the file system.
 *
 * @throws InterruptedException
 */
protected void processEvent() throws InterruptedException {
    BlockingQueue<TelemetryEvent> eventQueue = this.getEventQueue();
    TelemetryEvent event = eventQueue.take();

    try {
        String filename = System.currentTimeMillis() + FILE_EXT;
        FileOutputStream fout = new FileOutputStream(this.getTelemetryDirPath() + "/" + filename);
        ObjectOutputStream oos = new ObjectOutputStream(fout);
        oos.writeObject(event);
        oos.close();
    } catch (FileNotFoundException fnfe) {
        this.getLogger().warn(UNABLE_TO_CREATE_FILE_MESSAGE, fnfe);
    } catch (IOException ioe) {
        this.getLogger().error(ERROR_CREATING_FILE_MESSAGE, ioe);
    }
}
From source file:fi.jumi.core.suite.SuiteFactoryTest.java
@Test
public void sets_the_context_class_loader_for_test_threads() throws InterruptedException {
    createSuiteFactory();
    factory.start(new NullSuiteListener());

    BlockingQueue<ClassLoader> spy = new LinkedBlockingQueue<>();
    factory.testThreadPool.execute(() -> {
        spy.add(Thread.currentThread().getContextClassLoader());
    });

    ClassLoader contextClassLoader = spy.take();
    assertThat(contextClassLoader, is(factory.testClassLoader));
}
From source file:com.amazonaws.kinesis.dataviz.twitter.TwitterProducer.java
private boolean process(BlockingQueue<String> msgQueue, ProducerClient producer) {
    int exceptionCount = 0;
    while (true) {
        try {
            // take the next message from the HBC queue
            String msg = msgQueue.take();
            // use a 'random' partition key
            String key = String.valueOf(System.currentTimeMillis());
            // send to Kinesis
            producer.post(key, msg);
        } catch (Exception e) {
            // didn't get a record - move on to the next one
            e.printStackTrace();
            if (++exceptionCount > 5) {
                // too many exceptions - let's reconnect and try again
                return false;
            }
        }
    }
}
From source file:gov.pnnl.cloud.producer.twitter.TwitterProducerKinesis.java
private boolean process(BlockingQueue<String> msgQueue, ProducerClient producer,
        StatisticsCollection stats) {
    int exceptionCount = 0;
    while (true) {
        try {
            // take the next message from the HBC queue
            String msg = msgQueue.take();
            // use a 'random' partition key
            String key = String.valueOf(System.currentTimeMillis());
            // send to Kinesis
            producer.post(key, msg);
        } catch (Exception e) {
            // didn't get a record - move on to the next one
            e.printStackTrace();
            if (++exceptionCount > 5) {
                // too many exceptions - let's reconnect and try again
                return false;
            }
        }
    }
}
From source file:org.bpmscript.integration.internal.memory.MemorySyncChannel.java
public Object get(String id, long duration) {
    BlockingQueue<Object> blockingQueue = replies.get(id);
    if (blockingQueue != null) {
        try {
            if (duration > 0) {
                return blockingQueue.poll(duration, TimeUnit.MILLISECONDS);
            } else {
                return blockingQueue.take();
            }
        } catch (InterruptedException e) {
            return null;
        }
    } else {
        return null;
    }
}
From source file:gov.pnnl.cloud.producer.twitter.TwitterProducerKafka.java
private boolean process(BlockingQueue<String> msgQueue, KafkaProducerClient producer,
        StatisticsCollection stats) {
    int exceptionCount = 0;
    while (true) {
        try {
            // take the next message from the HBC queue
            String msg = msgQueue.take();
            // use a 'random' partition key
            String key = String.valueOf(System.currentTimeMillis());
            // send to Kafka
            producer.post(key, msg);
        } catch (Exception e) {
            // didn't get a record - move on to the next one
            e.printStackTrace();
            if (++exceptionCount > 5) {
                // too many exceptions - let's reconnect and try again
                return false;
            }
        }
    }
}
From source file:org.bpmscript.integration.spring.SpringSyncChannel.java
/**
 * @see org.bpmscript.channel.reply.ISyncService#get(java.lang.String, long)
 */
public Object get(String id, long duration) {
    BlockingQueue<Object> blockingQueue = replies.get(id);
    if (blockingQueue != null) {
        try {
            if (duration > 0) {
                return blockingQueue.poll(duration, TimeUnit.MILLISECONDS);
            } else {
                return blockingQueue.take();
            }
        } catch (InterruptedException e) {
            return null;
        }
    } else {
        return null;
    }
}
From source file:gemlite.core.internal.admin.service.ImportDataService.java
@Override
public Object doExecute(Map<String, Object> args) {
    Cache cache = CacheFactory.getAnyInstance();
    String memberId = (String) args.get("MEMBERID");
    String ip = (String) args.get("IP");
    String regionName = (String) args.get("REGIONPATH");
    String filePath = (String) args.get("FILEPATH");
    String showLog = (String) args.get("showLog");
    if (StringUtils.isEmpty(filePath)) {
        filePath = System.getProperty(ITEMS.GS_WORK.name());
    }
    if ("ALL".equalsIgnoreCase(regionName)) {
        // importing all regions: reduce a concrete .gfd file path to its parent directory
        if (filePath.endsWith(".gfd")) {
            int loc1 = filePath.lastIndexOf("\\");
            int loc2 = filePath.lastIndexOf("/");
            filePath = filePath.substring(0, loc1 > loc2 ? loc1 : loc2);
        }
    }
    if (!filePath.endsWith(".gfd") && !filePath.endsWith("\\") && !filePath.endsWith("/"))
        filePath += File.separator;
    args.put("FILEPATH", filePath);
    DistributedMember targetMember = AdminUtil.getDistributedMemberById(cache, memberId, ip);
    try {
        Set<String> regions = new HashSet<String>();
        if ("ALL".equals(regionName)) {
            regions = AdminUtil.getRegionNames(cache);
        } else {
            regions.add(regionName);
        }
        if (targetMember == null) {
            LogUtil.getAppLog().error("error:targetMember is null!memberId is:" + memberId
                    + " and total members:" + AdminUtil.getAllMemberIds(cache));
            return "error:targetMember is null!memberId is:" + memberId + " and total members:"
                    + AdminUtil.getAllMemberIds(cache);
        }
        if (regions == null || regions.isEmpty()) {
            LogUtil.getAppLog().error("error:regions is empty!");
            return "error:regions is empty!";
        }
        // run the import function once per region
        for (String r : regions) {
            Execution execution = null;
            String path = filePath;
            if (!filePath.endsWith(".gfd"))
                path += (r + ".gfd");
            args.put("REGIONPATH", r);
            args.put("FILEPATH", path);
            String last = "last";
            MsgCollector msgCollector = new MsgCollector(last);
            execution = FunctionService.onMember(targetMember).withArgs(args).withCollector(msgCollector);
            ResultCollector rc = execution.execute(new ImportDataFunction().getId());
            if ("Y".equals(showLog)) {
                BlockingQueue queue = msgCollector.getResult();
                String msg = "";
                try {
                    // take messages from the collector queue until the 'last' marker is seen
                    while ((msg = (String) queue.take()) != last) {
                        if (LogUtil.getAppLog().isDebugEnabled())
                            LogUtil.getAppLog().debug(" send to client:" + msg);
                        return msg;
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                    LogUtil.getAppLog().error("send msg error:", e);
                    return e.getMessage();
                }
            } else {
                return rc.getResult();
            }
        }
    } catch (CacheClosedException e) {
        e.printStackTrace();
        LogUtil.getAppLog().error("error:CacheClosedException", e);
        return e.getMessage();
    } catch (FunctionInvocationTargetException e) {
        e.printStackTrace();
        LogUtil.getAppLog().error("error:FunctionInvocationTargetException", e);
        return e.getMessage();
    }
    return true;
}
From source file:org.apache.hadoop.hbase.ipc.SimpleRpcScheduler.java
private void consumerLoop(BlockingQueue<CallRunner> myQueue) {
    boolean interrupted = false;
    try {
        while (running) {
            try {
                CallRunner task = myQueue.take();
                try {
                    activeHandlerCount.incrementAndGet();
                    task.run();
                } finally {
                    activeHandlerCount.decrementAndGet();
                }
            } catch (InterruptedException e) {
                interrupted = true;
            }
        }
    } finally {
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }
}
From source file:org.spf4j.recyclable.impl.ObjectPoolVsApache.java
private long testPool(final RetryExecutor<Integer> exec, final RecyclingSupplier<ExpensiveTestObject> pool,
        final BlockingQueue<Future<Integer>> completionQueue) throws InterruptedException, ExecutionException {
    long startTime = System.currentTimeMillis();
    for (int i = 0; i < TEST_TASKS; i++) {
        exec.submit(new TestCallable(pool, i));
    }
    for (int i = 0; i < TEST_TASKS; i++) {
        completionQueue.take().get();
    }
    long elapsedTime = System.currentTimeMillis() - startTime;
    System.out.println("Completed all " + TEST_TASKS + " tasks in " + elapsedTime + "ms ");
    return elapsedTime;
}