List of usage examples for java.util.concurrent.Future#isDone()
boolean isDone();
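Per the Future contract, isDone() returns true once the task has completed for any reason: normal termination, an exception, or cancellation all count as done, so a true result does not by itself mean the computation succeeded. Before the examples, a minimal, self-contained sketch of the common polling pattern (the class name and values are illustrative, not taken from the examples below):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class IsDoneExample {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Future<Integer> future = pool.submit(() -> {
            Thread.sleep(500); // simulate work
            return 42;
        });
        // isDone() reports completion for any reason: normal return,
        // exception, or cancellation all make it return true.
        while (!future.isDone()) {
            Thread.sleep(50); // in real code, do useful work between polls
        }
        System.out.println("result = " + future.get()); // no longer blocks
        pool.shutdown();
    }
}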
From source file:com.baifendian.swordfish.execserver.runner.flow.FlowRunner.java
/**
 * Update the status of nodes that have not finished yet.
 */
private void updateUnfinishNodeStatus(boolean updateKilled) {
    Date now = new Date();

    // Walk over every node runner still registered as active.
    for (Map.Entry<NodeRunner, Future<Boolean>> entry : activeNodeRunners.entrySet()) {
        NodeRunner nodeRunner = entry.getKey();
        Future<Boolean> future = entry.getValue();

        if (!future.isDone()) {
            // Still running: mark it killed, unless it belongs to a scheduled or
            // complement-data execution that should be left alone.
            if (updateKilled || (nodeRunner.getExecType() != ExecType.SCHEDULER
                    && nodeRunner.getExecType() != ExecType.COMPLEMENT_DATA)) {
                ExecutionNode executionNode = nodeRunner.getExecutionNode();
                updateNodeToKilled(executionNode);
            }
        } else {
            // Finished: record success; any other outcome falls through to the
            // kill handling in the finally block.
            Boolean value = false;
            try {
                value = future.get();
                if (value) {
                    ExecutionNode executionNode = nodeRunner.getExecutionNode();
                    executionNode.setStatus(FlowStatus.SUCCESS);
                    executionNode.setEndTime(now);
                    flowDao.updateExecutionNode(executionNode);
                }
            } catch (InterruptedException e) {
                logger.error(e.getMessage(), e);
            } catch (ExecutionException e) {
                logger.error(e.getMessage(), e);
            } catch (CancellationException e) {
                logger.error("task has been cancelled, name:{}", nodeRunner.getNodename());
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
            } finally {
                if (!value) {
                    if (updateKilled || (nodeRunner.getExecType() != ExecType.SCHEDULER
                            && nodeRunner.getExecType() != ExecType.COMPLEMENT_DATA)) {
                        ExecutionNode executionNode = nodeRunner.getExecutionNode();
                        updateNodeToKilled(executionNode);
                    }
                }
            }
        }
    }
}
From source file:com.tc.stats.DSO.java
@Override
public Map<ObjectName, Object> invoke(Set<ObjectName> onSet, String operation, long timeout, TimeUnit unit,
        Object[] args, String[] sigs) {
    Map<ObjectName, Object> result = new HashMap<ObjectName, Object>();
    List<Callable<SimpleInvokeResult>> tasks = new ArrayList<Callable<SimpleInvokeResult>>();
    Iterator<ObjectName> onIter = onSet.iterator();
    while (onIter.hasNext()) {
        tasks.add(new SimpleInvokeTask(onIter.next(), operation, args, sigs));
    }
    try {
        List<Future<SimpleInvokeResult>> results = pool.invokeAll(tasks, timeout, unit);
        Iterator<Future<SimpleInvokeResult>> resultIter = results.iterator();
        while (resultIter.hasNext()) {
            Future<SimpleInvokeResult> future = resultIter.next();
            // Collect only tasks that completed normally; invokeAll cancels
            // any task still running when the timeout expires.
            if (future.isDone() && !future.isCancelled()) {
                try {
                    SimpleInvokeResult sir = future.get();
                    result.put(sir.objectName, sir.result);
                } catch (CancellationException ce) {
                    /* ignored */
                } catch (ExecutionException ee) {
                    /* ignored */
                }
            }
        }
    } catch (InterruptedException ie) {
        /* ignored */
    }
    return result;
}
From source file:org.wso2.siddhi.extension.input.transport.kafka.KafkaSourceTestCase.java
@Test
public void testRecoveryOnFailureOfSingleNodeWithKafka() throws InterruptedException {
    try {
        log.info("Test to verify recovering process of a Siddhi node on a failure when Kafka is the event source");
        String[] topics = new String[] { "kafka_topic4" };
        createTopic(topics, 1);
        PersistenceStore persistenceStore = new InMemoryPersistenceStore();
        SiddhiManager siddhiManager = new SiddhiManager();
        siddhiManager.setPersistenceStore(persistenceStore);
        siddhiManager.setExtension("source.mapper:text", TextSourceMapper.class);
        String query = "@Plan:name('TestExecutionPlan') "
                + "define stream BarStream (count long); "
                + "@info(name = 'query1') "
                + "@source(type='kafka', topic='kafka_topic4', group.id='test', "
                + "threading.option='topic.wise', bootstrap.servers='localhost:9092', partition.no.list='0', "
                + "@map(type='text'))"
                + "Define stream FooStream (symbol string, price float, volume long);"
                + "from FooStream select count(symbol) as count insert into BarStream;";
        ExecutionPlanRuntime executionPlanRuntime = siddhiManager.createExecutionPlanRuntime(query);
        executionPlanRuntime.addCallback("BarStream", new StreamCallback() {
            @Override
            public void receive(Event[] events) {
                for (Event event : events) {
                    eventArrived = true;
                    System.out.println(event);
                    count = Math.toIntExact((long) event.getData(0));
                }
            }
        });
        // start publishing events to Kafka
        Future eventSender = executorService.submit(new Runnable() {
            @Override
            public void run() {
                kafkaPublisher(topics, 1, 50, 1000);
            }
        });
        Thread.sleep(2000);
        // start the execution plan
        executionPlanRuntime.start();
        // wait for some time
        Thread.sleep(28000);
        // initiate a checkpointing task
        Future persistor = executionPlanRuntime.persist();
        // wait until the checkpointing task is done
        while (!persistor.isDone()) {
            Thread.sleep(100);
        }
        // let a few more events be published
        Thread.sleep(5000);
        // initiate an execution plan shutdown - to demonstrate a node failure
        executionPlanRuntime.shutdown();
        // let a few events be published while the execution plan is down
        Thread.sleep(5000);
        // recreate the execution plan
        executionPlanRuntime = siddhiManager.createExecutionPlanRuntime(query);
        executionPlanRuntime.addCallback("BarStream", new StreamCallback() {
            @Override
            public void receive(Event[] events) {
                for (Event event : events) {
                    eventArrived = true;
                    System.out.println(event);
                    count = Math.toIntExact((long) event.getData(0));
                }
            }
        });
        // start the execution plan
        executionPlanRuntime.start();
        // immediately trigger a restore from the last revision
        executionPlanRuntime.restoreLastRevision();
        Thread.sleep(5000);
        // wait until all the events are published
        while (!eventSender.isDone()) {
            Thread.sleep(2000);
        }
        Thread.sleep(20000);
        assertTrue(eventArrived);
        // assert the count
        assertEquals(50, count);
        executionPlanRuntime.shutdown();
    } catch (ZkTimeoutException ex) {
        log.warn("Zookeeper may not be available.", ex);
    }
}
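A side note on the polling above: spinning on isDone() with Thread.sleep() is a hand-rolled blocking wait. When nothing useful happens between polls, Future.get() expresses the same wait directly, and get(timeout, unit) bounds it. A sketch reusing the persistor and eventSender futures from the test above; both calls also throw InterruptedException/ExecutionException, which the caller would need to handle:

// Instead of: while (!persistor.isDone()) { Thread.sleep(100); }
persistor.get(); // blocks until the checkpointing task completes

// A bounded alternative to polling eventSender until it is done:
eventSender.get(60, TimeUnit.SECONDS); // throws TimeoutException if not finished in time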
From source file:com.tc.stats.DSO.java
@Override
public Map<ObjectName, Map<String, Object>> getAttributeMap(Map<ObjectName, Set<String>> attributeMap,
        long timeout, TimeUnit unit) {
    Map<ObjectName, Map<String, Object>> result = new HashMap<ObjectName, Map<String, Object>>();
    List<Callable<SourcedAttributeList>> tasks = new ArrayList<Callable<SourcedAttributeList>>();
    Iterator<Entry<ObjectName, Set<String>>> entryIter = attributeMap.entrySet().iterator();
    while (entryIter.hasNext()) {
        Entry<ObjectName, Set<String>> entry = entryIter.next();
        tasks.add(new AttributeListTask(entry.getKey(), entry.getValue()));
    }
    try {
        List<Future<SourcedAttributeList>> results = pool.invokeAll(tasks, timeout, unit);
        Iterator<Future<SourcedAttributeList>> resultIter = results.iterator();
        while (resultIter.hasNext()) {
            Future<SourcedAttributeList> future = resultIter.next();
            // Collect only tasks that completed normally; invokeAll cancels
            // any task still running when the timeout expires.
            if (future.isDone() && !future.isCancelled()) {
                try {
                    SourcedAttributeList sal = future.get();
                    Iterator<Object> attrIter = sal.attributeList.iterator();
                    Map<String, Object> onMap = new HashMap<String, Object>();
                    while (attrIter.hasNext()) {
                        Attribute attr = (Attribute) attrIter.next();
                        onMap.put(attr.getName(), attr.getValue());
                    }
                    result.put(sal.objectName, onMap);
                } catch (CancellationException ce) {
                    /* ignored */
                } catch (ExecutionException ee) {
                    /* ignored */
                }
            }
        }
    } catch (InterruptedException ie) {
        /* ignored */
    }
    return result;
}
From source file:org.apache.asterix.external.feed.test.InputHandlerTest.java
@Test
public void testMemoryVariableSizeFrameNoSpillWithDiscard() {
    try {
        int discardTestFrames = 100;
        Random random = new Random();
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        // Spill budget = memory budget, no discard
        FeedPolicyAccessor fpa = createFeedPolicyAccessor(false, true, DEFAULT_FRAME_SIZE, DISCARD_ALLOWANCE);
        // Non-active writer
        TestControlledFrameWriter writer = FrameWriterTestUtils.create(DEFAULT_FRAME_SIZE, false);
        writer.freeze();
        // Frame pool
        ConcurrentFramePool framePool = new ConcurrentFramePool(NODE_ID, discardTestFrames * DEFAULT_FRAME_SIZE,
                DEFAULT_FRAME_SIZE);
        FeedRuntimeInputHandler handler = createInputHandler(ctx, writer, fpa, framePool);
        handler.open();
        ByteBuffer buffer = ByteBuffer.allocate(DEFAULT_FRAME_SIZE);
        int multiplier = 1;
        int numFrames = 0;
        // add frames of random size until the pool budget is exhausted
        while (multiplier <= framePool.remaining()) {
            numFrames++;
            handler.nextFrame(buffer);
            multiplier = random.nextInt(10) + 1;
            buffer = ByteBuffer.allocate(DEFAULT_FRAME_SIZE * multiplier);
        }
        // The next calls should NOT block but should discard.
        double numDiscarded = 0.0;
        boolean nextShouldDiscard = ((numDiscarded + 1.0) / (handler.getTotal() + 1.0)) <= fpa.getMaxFractionDiscard();
        while (nextShouldDiscard) {
            handler.nextFrame(buffer);
            numDiscarded++;
            nextShouldDiscard = ((numDiscarded + 1.0) / (handler.getTotal() + 1.0)) <= fpa.getMaxFractionDiscard();
        }
        // Next call should block since we're exceeding the discard allowance
        Future<?> result = EXECUTOR.submit(new Pusher(buffer, handler));
        if (result.isDone()) {
            Assert.fail("The producer should switch to stall mode since it is exceeding the discard allowance");
        } else {
            // Check that no extra records were discarded
            Assert.assertEquals((int) numDiscarded, handler.getNumDiscarded());
            // Check that no frames were spilled
            Assert.assertEquals(handler.getNumSpilled(), 0);
        }
        // consume memory frames
        writer.unfreeze();
        result.get();
        handler.close();
        Assert.assertEquals(writer.nextFrameCount(), numFrames + 1);
    } catch (Throwable th) {
        th.printStackTrace();
        Assert.fail();
    }
    Assert.assertNull(cause);
}
From source file:org.apache.asterix.external.feed.test.InputHandlerTest.java
@Test
public void testMemoryFixedSizeFrameNoSpillWithDiscard() {
    try {
        int discardTestFrames = 100;
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        // Spill budget = memory budget, no discard
        FeedPolicyAccessor fpa = createFeedPolicyAccessor(false, true, DEFAULT_FRAME_SIZE, DISCARD_ALLOWANCE);
        // Non-active writer
        TestControlledFrameWriter writer = FrameWriterTestUtils.create(DEFAULT_FRAME_SIZE, false);
        writer.freeze();
        // Frame pool
        ConcurrentFramePool framePool = new ConcurrentFramePool(NODE_ID, discardTestFrames * DEFAULT_FRAME_SIZE,
                DEFAULT_FRAME_SIZE);
        FeedRuntimeInputHandler handler = createInputHandler(ctx, writer, fpa, framePool);
        handler.open();
        VSizeFrame frame = new VSizeFrame(ctx);
        // fill the memory budget
        for (int i = 0; i < discardTestFrames; i++) {
            handler.nextFrame(frame.getBuffer());
        }
        // The next calls should NOT block but should discard.
        double numDiscarded = 0.0;
        boolean nextShouldDiscard = ((numDiscarded + 1.0) / (handler.getTotal() + 1.0)) <= fpa.getMaxFractionDiscard();
        while (nextShouldDiscard) {
            handler.nextFrame(frame.getBuffer());
            numDiscarded++;
            nextShouldDiscard = ((numDiscarded + 1.0) / (handler.getTotal() + 1.0)) <= fpa.getMaxFractionDiscard();
        }
        // Next call should block since we're exceeding the discard allowance
        Future<?> result = EXECUTOR.submit(new Pusher(frame.getBuffer(), handler));
        if (result.isDone()) {
            Assert.fail("The producer should switch to stall mode since it is exceeding the discard allowance");
        } else {
            // Check that no extra records were discarded
            Assert.assertEquals((int) numDiscarded, handler.getNumDiscarded());
            // Check that no frames were spilled
            Assert.assertEquals(handler.getNumSpilled(), 0);
        }
        // consume memory frames
        writer.unfreeze();
        result.get();
        handler.close();
        Assert.assertEquals(writer.nextFrameCount(), discardTestFrames + 1);
    } catch (Throwable th) {
        th.printStackTrace();
        Assert.fail();
    }
    Assert.assertNull(cause);
}
From source file:org.apache.asterix.external.feed.test.InputHandlerTest.java
@Test
public void testMemoryFixedSizeFrameWithSpillWithDiscard() {
    try {
        int numberOfMemoryFrames = 50;
        int numberOfSpillFrames = 50;
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        // Spill budget = memory budget, no discard
        FeedPolicyAccessor fpa = createFeedPolicyAccessor(true, true, DEFAULT_FRAME_SIZE * numberOfSpillFrames,
                DISCARD_ALLOWANCE);
        // Non-active writer
        TestControlledFrameWriter writer = FrameWriterTestUtils.create(DEFAULT_FRAME_SIZE, false);
        writer.freeze();
        // Frame pool
        ConcurrentFramePool framePool = new ConcurrentFramePool(NODE_ID, numberOfMemoryFrames * DEFAULT_FRAME_SIZE,
                DEFAULT_FRAME_SIZE);
        FeedRuntimeInputHandler handler = createInputHandler(ctx, writer, fpa, framePool);
        handler.open();
        VSizeFrame frame = new VSizeFrame(ctx);
        for (int i = 0; i < numberOfMemoryFrames; i++) {
            handler.nextFrame(frame.getBuffer());
        }
        // Now we need to verify that the frame pool memory has been consumed!
        Assert.assertEquals(0, framePool.remaining());
        Assert.assertEquals(numberOfMemoryFrames, handler.getTotal());
        Assert.assertEquals(0, handler.getNumSpilled());
        Assert.assertEquals(0, handler.getNumStalled());
        Assert.assertEquals(0, handler.getNumDiscarded());
        for (int i = 0; i < numberOfSpillFrames; i++) {
            handler.nextFrame(frame.getBuffer());
        }
        Assert.assertEquals(0, framePool.remaining());
        Assert.assertEquals(numberOfMemoryFrames + numberOfSpillFrames, handler.getTotal());
        Assert.assertEquals(numberOfSpillFrames, handler.getNumSpilled());
        Assert.assertEquals(0, handler.getNumStalled());
        Assert.assertEquals(0, handler.getNumDiscarded());
        // Discard frames until the discard allowance is used up
        double numDiscarded = 0;
        boolean nextShouldDiscard = ((numDiscarded + 1.0) / (handler.getTotal() + 1.0)) <= fpa.getMaxFractionDiscard();
        while (nextShouldDiscard) {
            handler.nextFrame(frame.getBuffer());
            numDiscarded++;
            nextShouldDiscard = (numDiscarded + 1.0) / (handler.getTotal() + 1.0) <= fpa.getMaxFractionDiscard();
        }
        Assert.assertEquals(0, framePool.remaining());
        Assert.assertEquals((int) (numberOfMemoryFrames + numberOfSpillFrames + numDiscarded), handler.getTotal());
        Assert.assertEquals(numberOfSpillFrames, handler.getNumSpilled());
        Assert.assertEquals(0, handler.getNumStalled());
        Assert.assertEquals((int) numDiscarded, handler.getNumDiscarded());
        // Next call should block since we're exceeding the discard allowance
        Future<?> result = EXECUTOR.submit(new Pusher(frame.getBuffer(), handler));
        if (result.isDone()) {
            Assert.fail("The producer should switch to stall mode since it is exceeding the discard allowance");
        } else {
            Assert.assertEquals((int) numDiscarded, handler.getNumDiscarded());
        }
        // consume memory frames
        writer.unfreeze();
        result.get();
        handler.close();
        Assert.assertTrue(result.isDone());
        Assert.assertEquals(writer.nextFrameCount(), numberOfMemoryFrames + numberOfSpillFrames + 1);
    } catch (Throwable th) {
        th.printStackTrace();
        Assert.fail();
    }
    Assert.assertNull(cause);
}
From source file:org.apache.asterix.external.feed.test.InputHandlerTest.java
@Test
public void testMemoryVarSizeFrameNoDiskNoDiscard() {
    try {
        Random random = new Random();
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        // No spill, no discard
        FeedPolicyAccessor fpa = createFeedPolicyAccessor(false, false, 0L, DISCARD_ALLOWANCE);
        // Non-active writer
        TestControlledFrameWriter writer = FrameWriterTestUtils.create(DEFAULT_FRAME_SIZE, false);
        writer.freeze();
        // Frame pool
        ConcurrentFramePool framePool = new ConcurrentFramePool(NODE_ID, FEED_MEM_BUDGET, DEFAULT_FRAME_SIZE);
        FeedRuntimeInputHandler handler = createInputHandler(ctx, writer, fpa, framePool);
        handler.open();
        ByteBuffer buffer = ByteBuffer.allocate(DEFAULT_FRAME_SIZE);
        int multiplier = 1;
        // add frames of random size until the pool budget is exhausted
        while (multiplier <= framePool.remaining()) {
            handler.nextFrame(buffer);
            multiplier = random.nextInt(10) + 1;
            buffer = ByteBuffer.allocate(DEFAULT_FRAME_SIZE * multiplier);
        }
        // We can't satisfy the next request, so it should block; run it in a different thread.
        Future<?> result = EXECUTOR.submit(new Pusher(buffer, handler));
        // Check that nextFrame didn't return
        if (result.isDone()) {
            Assert.fail();
        }
        // Check that no records were discarded
        Assert.assertEquals(handler.getNumDiscarded(), 0);
        // Check that no records were spilled
        Assert.assertEquals(handler.getNumSpilled(), 0);
        // Check that the number of stalls is not greater than 1
        Assert.assertTrue(handler.getNumStalled() <= 1);
        writer.unfreeze();
        handler.close();
        result.get();
    } catch (Throwable th) {
        th.printStackTrace();
        Assert.fail();
    }
    Assert.assertNull(cause);
}
From source file:ubic.gemma.core.analysis.preprocess.batcheffects.ComBat.java
private void runNonParametric(final DoubleMatrix2D sdata, DoubleMatrix2D gammastar, DoubleMatrix2D deltastar) {
    final ConcurrentHashMap<String, DoubleMatrix1D[]> results = new ConcurrentHashMap<>();
    int numThreads = Math.min(batches.size(), Runtime.getRuntime().availableProcessors());
    ComBat.log.info("Running nonparametric estimation on " + numThreads + " threads");
    Future<?>[] futures = new Future[numThreads];
    ExecutorService service = Executors.newCachedThreadPool();
    /*
     * Divvy up batches over threads.
     */
    int batchesPerThread = batches.size() / numThreads;
    final String[] batchIds = batches.keySet().toArray(new String[] {});
    for (int i = 0; i < numThreads; i++) {
        final int firstBatch = i * batchesPerThread;
        final int lastBatch = i == (numThreads - 1) ? batches.size() : firstBatch + batchesPerThread;
        futures[i] = service.submit(new Runnable() {
            @Override
            public void run() {
                for (int k = firstBatch; k < lastBatch; k++) {
                    String batchId = batchIds[k];
                    DoubleMatrix2D batchData = ComBat.this.getBatchData(sdata, batchId);
                    DoubleMatrix1D[] batchResults = ComBat.this.nonParametricFit(batchData, gammaHat.viewRow(k),
                            deltaHat.viewRow(k));
                    results.put(batchId, batchResults);
                }
            }
        });
    }
    service.shutdown();
    // Busy-wait until every worker future reports done (or cancelled).
    boolean allDone = false;
    do {
        for (Future<?> f : futures) {
            allDone = true;
            if (!f.isDone() && !f.isCancelled()) {
                allDone = false;
                break;
            }
        }
    } while (!allDone);
    for (int i = 0; i < batchIds.length; i++) {
        String batchId = batchIds[i];
        DoubleMatrix1D[] batchResults = results.get(batchId);
        for (int j = 0; j < batchResults[0].size(); j++) {
            gammastar.set(i, j, batchResults[0].get(j));
        }
        for (int j = 0; j < batchResults[1].size(); j++) {
            deltastar.set(i, j, batchResults[1].get(j));
        }
    }
}
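The do/while loop above busy-waits on isDone()/isCancelled(), keeping a core spinning until every batch thread finishes. Since shutdown() has already been called on the pool, ExecutorService.awaitTermination gives the same rendezvous without spinning. A sketch of a drop-in replacement for the polling loop, assuming the same service variable and an imported java.util.concurrent.TimeUnit; the one-hour timeout is an illustrative choice, not from the source:

try {
    // Blocks until all submitted batch tasks have finished, or gives up
    // after the timeout; replaces the do/while polling over futures.
    if (!service.awaitTermination(1, TimeUnit.HOURS)) {
        throw new IllegalStateException("Nonparametric estimation did not finish in time");
    }
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
}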