Example usage for java.util.concurrent Future isDone

List of usage examples for java.util.concurrent Future isDone

Introduction

On this page you can find example usage for java.util.concurrent.Future.isDone().

Prototype

boolean isDone();

Document

Returns true if this task completed. Completion may be due to normal termination, an exception, or cancellation; in all of these cases, this method will return true.

Usage
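
The excerpts below share a common pattern: obtain a Future (usually by submitting work to an ExecutorService), poll isDone() while sleeping briefly or doing other work, and call get() only once the task has finished so the call does not block. The following minimal, self-contained sketch illustrates that pattern; the class and the simulated task are illustrative only and are not taken from any of the projects listed below.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class IsDoneExample {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();

        // Submit a task that takes a little while to finish.
        Future<String> future = executor.submit(() -> {
            Thread.sleep(2000); // simulated work
            return "result";
        });

        // Poll isDone() instead of blocking right away on get(),
        // doing other work (here just printing and sleeping) in between.
        while (!future.isDone()) {
            System.out.println("Still working...");
            Thread.sleep(200);
        }

        // isDone() has returned true, so get() returns immediately.
        System.out.println("Done: " + future.get());
        executor.shutdown();
    }
}

Note that isDone() also returns true when the task failed or was cancelled, so the subsequent get() may still throw ExecutionException or CancellationException.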

From source file: org.geowebcache.diskquota.LayerCacheInfoBuilder.java

/**
 * Returns whether cache information is still being gathered for the layer named after
 * {@code layerName}.
 * 
 * @param layerName
 * @return {@code true} if the cache information gathering for {@code layerName} is not finished
 */
public boolean isRunning(String layerName) {
    try {
        List<Future<ZoomLevelVisitor.Stats>> layerTasks = perLayerRunningTasks.get(layerName);
        if (layerTasks == null) {
            return false;
        }

        int numRunning = 0;
        Future<ZoomLevelVisitor.Stats> future;
        for (Iterator<Future<ZoomLevelVisitor.Stats>> it = layerTasks.iterator(); it.hasNext();) {
            future = it.next();
            if (future.isDone()) {
                it.remove();
            } else {
                numRunning++;
            }
        }
        return numRunning > 0;
    } catch (Exception e) {
        e.printStackTrace();
        return false;
    }
}

From source file: grakn.core.daemon.executor.Storage.java

/**
 * Attempt to start Storage and perform periodic polling until it is ready. The readiness check is performed with nodetool.
 * <p>
 * A {@link GraknDaemonException} will be thrown if Storage does not start within the timeout specified
 * by the 'STORAGE_STARTUP_TIMEOUT_SECOND' field.
 *
 * @throws GraknDaemonException
 */
private void start() {
    System.out.print("Starting " + DISPLAY_NAME + "...");
    System.out.flush();

    // Consume configuration from Grakn config file into Cassandra config file
    initialiseConfig();

    Future<Executor.Result> result = daemonExecutor.executeAsync(storageCommand(), graknHome.toFile());

    LocalDateTime timeout = LocalDateTime.now().plusSeconds(STORAGE_STARTUP_TIMEOUT_SECOND);

    while (LocalDateTime.now().isBefore(timeout) && !result.isDone()) {
        System.out.print(".");
        System.out.flush();

        if (storageStatus().equals("running")) {
            System.out.println("SUCCESS");
            return;
        }

        try {
            Thread.sleep(WAIT_INTERVAL_SECOND * 1000);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }

    try {
        System.out.println("FAILED!");
        System.err.println("Unable to start " + DISPLAY_NAME + ".");
        String errorMessage = "Process exited with code '" + result.get().exitCode() + "': '"
                + result.get().stderr() + "'";
        System.err.println(errorMessage);
        throw new GraknDaemonException(errorMessage);
    } catch (InterruptedException | ExecutionException e) {
        throw new GraknDaemonException(e.getMessage(), e);
    }
}

From source file: org.pentaho.reporting.platform.plugin.output.CachingPageableHTMLOutputIT.java

@Test
public void testScheduledReportContainsAllPages() throws Exception {

    ReportListenerThreadHolder.clear();
    final CountDownLatch latch1 = new CountDownLatch(1);
    final CountDownLatch latch2 = new CountDownLatch(1);
    final ExecutorService executorService = Executors.newFixedThreadPool(2);

    final SimpleReportingComponent rc = new SimpleReportingComponent();
    final ResourceManager mgr = new ResourceManager();
    final File src = new File("target/test/resource/solution/test/reporting/BigReport.prpt");
    final MasterReport masterReport = (MasterReport) mgr.createDirectly(src, MasterReport.class).getResource();
    final String key = "test";
    masterReport.setContentCacheKey(key);
    masterReport.setQueryLimit(500);
    rc.setReport(masterReport);

    rc.setOutputType("text/html"); //$NON-NLS-1$
    // turn on pagination, by way of input (typical mode for xaction)
    final HashMap<String, Object> inputs = new HashMap<String, Object>();
    inputs.put("paginate", "true"); //$NON-NLS-1$ //$NON-NLS-2$
    inputs.put("accepted-page", "0"); //$NON-NLS-1$ //$NON-NLS-2$
    rc.setInputs(inputs);

    final TestListener future1Listener = new TestListener("1", UUID.randomUUID(), "");
    future1Listener.setStatus(AsyncExecutionStatus.SCHEDULED);
    final TestListener future2Listener = new TestListener("1", UUID.randomUUID(), "");

    final Future<byte[]> future1 = executorService
            .submit(new CachingPageableHTMLOutputIT.TestTask(latch1, rc, future1Listener));
    final Future<byte[]> future2 = executorService
            .submit(new CachingPageableHTMLOutputIT.TestTask(latch2, rc, future2Listener));

    latch2.countDown();
    while (!future2.isDone()) {
        if (future2Listener.isOnFirstPage()) {
            latch1.countDown();
            break;
        }
        Thread.sleep(10);
    }

    final String content2 = new String(future2.get(), "UTF-8");
    assertFalse(content2.contains("Scheduled paginated HTML report"));

    final String content1 = new String(future1.get(), "UTF-8");
    assertTrue(content1.contains("Scheduled paginated HTML report"));

    assertEquals(future2Listener.getState().getPage(), future1Listener.getState().getTotalPages());
    assertEquals(future1Listener.getState().getPage(), future1Listener.getState().getTotalPages());
}

From source file: io.prestosql.plugin.accumulo.index.ColumnCardinalityCache.java

/**
 * Gets the cardinality for each {@link AccumuloColumnConstraint}.
 * Given constraints are expected to be indexed! Who knows what would happen if they weren't!
 *
 * @param schema Schema name
 * @param table Table name
 * @param auths Scan authorizations
 * @param idxConstraintRangePairs Mapping of all ranges for a given constraint
 * @param earlyReturnThreshold Smallest acceptable cardinality to return early while other tasks complete
 * @param pollingDuration Duration for polling the cardinality completion service
 * @return An immutable multimap of cardinality to column constraint, sorted by cardinality from smallest to largest
 * @throws TableNotFoundException If the metrics table does not exist
 * @throws ExecutionException If another error occurs; I really don't even know anymore.
 */
public Multimap<Long, AccumuloColumnConstraint> getCardinalities(String schema, String table,
        Authorizations auths, Multimap<AccumuloColumnConstraint, Range> idxConstraintRangePairs,
        long earlyReturnThreshold, Duration pollingDuration) {
    // Submit tasks to the executor to fetch column cardinality, adding it to the Guava cache if necessary
    CompletionService<Pair<Long, AccumuloColumnConstraint>> executor = new ExecutorCompletionService<>(
            executorService);
    idxConstraintRangePairs.asMap().forEach((key, value) -> executor.submit(() -> {
        long cardinality = getColumnCardinality(schema, table, auths, key.getFamily(), key.getQualifier(),
                value);
        LOG.debug("Cardinality for column %s is %s", key.getName(), cardinality);
        return Pair.of(cardinality, key);
    }));

    // Create a multi map sorted by cardinality
    ListMultimap<Long, AccumuloColumnConstraint> cardinalityToConstraints = MultimapBuilder.treeKeys()
            .arrayListValues().build();
    try {
        boolean earlyReturn = false;
        int numTasks = idxConstraintRangePairs.asMap().entrySet().size();
        do {
            // Sleep for the polling duration to allow concurrent tasks to run for this time
            Thread.sleep(pollingDuration.toMillis());

            // Poll each task, retrieving the result if it is done
            for (int i = 0; i < numTasks; ++i) {
                Future<Pair<Long, AccumuloColumnConstraint>> futureCardinality = executor.poll();
                if (futureCardinality != null && futureCardinality.isDone()) {
                    Pair<Long, AccumuloColumnConstraint> columnCardinality = futureCardinality.get();
                    cardinalityToConstraints.put(columnCardinality.getLeft(), columnCardinality.getRight());
                }
            }

            // If the smallest cardinality is present and below the threshold, set the earlyReturn flag
            Optional<Entry<Long, AccumuloColumnConstraint>> smallestCardinality = cardinalityToConstraints
                    .entries().stream().findFirst();
            if (smallestCardinality.isPresent()) {
                if (smallestCardinality.get().getKey() <= earlyReturnThreshold) {
                    LOG.info("Cardinality %s, is below threshold. Returning early while other tasks finish",
                            smallestCardinality);
                    earlyReturn = true;
                }
            }
        } while (!earlyReturn && cardinalityToConstraints.entries().size() < numTasks);
    } catch (ExecutionException | InterruptedException e) {
        if (e instanceof InterruptedException) {
            Thread.currentThread().interrupt();
        }
        throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Exception when getting cardinality", e);
    }

    // Create a copy of the cardinalities
    return ImmutableMultimap.copyOf(cardinalityToConstraints);
}

From source file: com.facebook.presto.accumulo.index.ColumnCardinalityCache.java

/**
 * Gets the cardinality for each {@link AccumuloColumnConstraint}.
 * Given constraints are expected to be indexed! Who knows what would happen if they weren't!
 *
 * @param schema Schema name
 * @param table Table name
 * @param auths Scan authorizations
 * @param idxConstraintRangePairs Mapping of all ranges for a given constraint
 * @param earlyReturnThreshold Smallest acceptable cardinality to return early while other tasks complete
 * @param pollingDuration Duration for polling the cardinality completion service
 * @return An immutable multimap of cardinality to column constraint, sorted by cardinality from smallest to largest
 * @throws TableNotFoundException If the metrics table does not exist
 * @throws ExecutionException If another error occurs; I really don't even know anymore.
 */
public Multimap<Long, AccumuloColumnConstraint> getCardinalities(String schema, String table,
        Authorizations auths, Multimap<AccumuloColumnConstraint, Range> idxConstraintRangePairs,
        long earlyReturnThreshold, Duration pollingDuration) throws ExecutionException, TableNotFoundException {
    // Submit tasks to the executor to fetch column cardinality, adding it to the Guava cache if necessary
    CompletionService<Pair<Long, AccumuloColumnConstraint>> executor = new ExecutorCompletionService<>(
            executorService);
    idxConstraintRangePairs.asMap().forEach((key, value) -> executor.submit(() -> {
        long cardinality = getColumnCardinality(schema, table, auths, key.getFamily(), key.getQualifier(),
                value);
        LOG.debug("Cardinality for column %s is %s", key.getName(), cardinality);
        return Pair.of(cardinality, key);
    }));

    // Create a multi map sorted by cardinality
    ListMultimap<Long, AccumuloColumnConstraint> cardinalityToConstraints = MultimapBuilder.treeKeys()
            .arrayListValues().build();
    try {
        boolean earlyReturn = false;
        int numTasks = idxConstraintRangePairs.asMap().entrySet().size();
        do {
            // Sleep for the polling duration to allow concurrent tasks to run for this time
            Thread.sleep(pollingDuration.toMillis());

            // Poll each task, retrieving the result if it is done
            for (int i = 0; i < numTasks; ++i) {
                Future<Pair<Long, AccumuloColumnConstraint>> futureCardinality = executor.poll();
                if (futureCardinality != null && futureCardinality.isDone()) {
                    Pair<Long, AccumuloColumnConstraint> columnCardinality = futureCardinality.get();
                    cardinalityToConstraints.put(columnCardinality.getLeft(), columnCardinality.getRight());
                }
            }

            // If the smallest cardinality is present and below the threshold, set the earlyReturn flag
            Optional<Entry<Long, AccumuloColumnConstraint>> smallestCardinality = cardinalityToConstraints
                    .entries().stream().findFirst();
            if (smallestCardinality.isPresent()) {
                if (smallestCardinality.get().getKey() <= earlyReturnThreshold) {
                    LOG.info("Cardinality %s, is below threshold. Returning early while other tasks finish",
                            smallestCardinality);
                    earlyReturn = true;
                }
            }
        } while (!earlyReturn && cardinalityToConstraints.entries().size() < numTasks);
    } catch (ExecutionException | InterruptedException e) {
        if (e instanceof InterruptedException) {
            Thread.currentThread().interrupt();
        }
        throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Exception when getting cardinality", e);
    }

    // Create a copy of the cardinalities
    return ImmutableMultimap.copyOf(cardinalityToConstraints);
}

From source file: org.venice.piazza.servicecontroller.messaging.ServiceMessageWorkerTest.java

@Before
/**
 * Called for each test setup
 */
public void setup() {
    // Setup a Service with some Resource Metadata
    rm = new ResourceMetadata();
    rm.name = "toUpper Params";
    rm.description = "Service to convert string to uppercase";

    service = new Service();
    service.method = "POST";
    service.setResourceMetadata(rm);
    service.setUrl("http://localhost:8082/string/toUpper");

    // Create the executeService Job
    ExecuteServiceJob esJob = new ExecuteServiceJob();
    // Setup valid data
    ExecuteServiceData edata = new ExecuteServiceData();
    String serviceId = "a842aae2-bd74-4c4b-9a65-c45e8cd9060f";
    edata.setServiceId(serviceId);
    TextDataType dataType = new TextDataType();
    dataType.mimeType = "application/json";
    List<DataType> dataTypes = new ArrayList<>();
    dataTypes.add(dataType);
    edata.setDataOutput(dataTypes);
    // Now tie the data to the job
    esJob.data = edata;
    validJob = new Job();
    validJob.jobId = "b842aae2-ed70-5c4b-9a65-c45e8cd9060g";
    validJob.jobType = esJob;

    // Mock the Kafka response that Producers will send. This will always
    // return a Future that completes immediately and simply returns true.
    Mockito.when(producerMock.send(isA(ProducerRecord.class))).thenAnswer(new Answer<Future<Boolean>>() {
        @Override
        public Future<Boolean> answer(InvocationOnMock invocation) throws Throwable {
            Future<Boolean> future = Mockito.mock(FutureTask.class);
            Mockito.when(future.isDone()).thenReturn(true);
            Mockito.when(future.get()).thenReturn(true);
            return future;
        }
    });

    MockitoAnnotations.initMocks(this);

}

From source file: org.jasig.schedassist.web.owner.relationships.CSVFileImportFormController.java

/**
 *
 * @param request
 * @param dismiss
 * @return
 * @throws InterruptedException
 * @throws ExecutionException
 * @throws NotRegisteredException 
 */
@RequestMapping(method = RequestMethod.GET)
@SuppressWarnings("unchecked")
protected String showForm(final ModelMap model, final HttpServletRequest request,
        @RequestParam(value = "dismiss", required = false, defaultValue = "false") final boolean dismiss)
        throws InterruptedException, ExecutionException, NotRegisteredException {
    //CalendarAccountUserDetails currentUser = (CalendarAccountUserDetails) SecurityContextHolder.getContext().getAuthentication().getPrincipal();
    //IScheduleOwner owner = currentUser.getScheduleOwner();

    HttpSession currentSession = request.getSession();
    if (null != currentSession) {
        if (dismiss) {
            // remove the future from the session
            currentSession.setAttribute(IMPORT_FUTURE_NAME, null);
        }
        Future<CSVFileImportResult> f = (Future<CSVFileImportResult>) currentSession
                .getAttribute(IMPORT_FUTURE_NAME);
        if (null != f) {
            if (f.isDone()) {
                CSVFileImportResult importResult = f.get();
                model.addAttribute("processing", false);
                model.addAttribute("importResult", importResult);
                currentSession.setAttribute(IMPORT_FUTURE_NAME, null);
                return getStatusViewName();
            } else {
                model.addAttribute("processing", true);
                return getStatusViewName();
            }
        }
    }
    // no upload being processed, display form
    return getFormViewName();
}

From source file: org.apache.storm.grouping.LoadAwareShuffleGroupingTest.java

private void runMultithreadedBenchmark(LoadAwareCustomStreamGrouping grouper, List<Integer> availableTaskIds,
        LoadMapping loadMapping, int numThreads) throws InterruptedException, ExecutionException {
    // Task Id not used, so just pick a static value
    final int inputTaskId = 100;

    final WorkerTopologyContext context = mockContext(availableTaskIds);

    // Call prepare with our available taskIds
    grouper.prepare(context, null, availableTaskIds);

    // periodically calls refreshLoad in 1 sec to simulate worker load update timer
    ScheduledExecutorService refreshService = MoreExecutors
            .getExitingScheduledExecutorService(new ScheduledThreadPoolExecutor(1));
    refreshService.scheduleAtFixedRate(() -> grouper.refreshLoad(loadMapping), 1, 1, TimeUnit.SECONDS);

    long current = System.currentTimeMillis();
    int idx = 0;
    while (true) {
        grouper.chooseTasks(inputTaskId, Lists.newArrayList());

        idx++;
        if (idx % 100000 == 0) {
            // warm up 60 seconds
            if (System.currentTimeMillis() - current >= 60_000) {
                break;
            }
        }
    }

    final int groupingExecutionsPerThread = 2_000_000_000;

    List<Callable<Long>> threadTasks = Lists.newArrayList();
    for (int x = 0; x < numThreads; x++) {
        Callable<Long> threadTask = new Callable<Long>() {
            @Override
            public Long call() throws Exception {
                long current = System.currentTimeMillis();
                for (int i = 1; i <= groupingExecutionsPerThread; i++) {
                    grouper.chooseTasks(inputTaskId, Lists.newArrayList());
                }
                return System.currentTimeMillis() - current;
            }
        };

        // Add to our collection.
        threadTasks.add(threadTask);
    }

    ExecutorService executor = Executors.newFixedThreadPool(threadTasks.size());
    List<Future<Long>> taskResults = executor.invokeAll(threadTasks);

    // Wait for all tasks to complete
    Long maxDurationMillis = 0L;
    for (Future taskResult : taskResults) {
        while (!taskResult.isDone()) {
            Thread.sleep(100);
        }
        Long durationMillis = (Long) taskResult.get();
        if (maxDurationMillis < durationMillis) {
            maxDurationMillis = durationMillis;
        }
    }

    LOG.info("Max duration among threads is : {} ms", maxDurationMillis);

    refreshService.shutdownNow();
}

From source file: com.kenshoo.freemarker.services.FreeMarkerService.java

/**
 * @param templateSourceCode
 *            The FTL to execute; not {@code null}.
 * @param dataModel
 *            The FreeMarker data-model to execute the template with; may be {@code null}.
 * @param outputFormat
 *            The output format to execute the template with; may be {@code null}.
 * @param locale
 *            The locale to execute the template with; may be {@code null}.
 * @param timeZone
 *            The time zone to execute the template with; may be {@code null}.
 * 
 * @return The result of the template parsing and evaluation. The method won't throw an exception if that fails due to
 *         errors in the template provided; instead it indicates this fact in the response object. That's because
 *         this is a service for trying out the template language, so such errors are part of the normal operation.
 * 
 * @throws RejectedExecutionException
 *             If the service is overburden and thus doing the calculation was rejected.
 * @throws FreeMarkerServiceException
 *             If the calculation fails from a reason that's not a mistake in the template and doesn't fit the
 *             meaning of {@link RejectedExecutionException} either.
 */
public FreeMarkerServiceResponse calculateTemplateOutput(String templateSourceCode, Object dataModel,
        OutputFormat outputFormat, Locale locale, TimeZone timeZone) throws RejectedExecutionException {
    Objects.requireNonNull(templateExecutor, "templateExecutor was null - was postConstruct ever called?");

    final CalculateTemplateOutput task = new CalculateTemplateOutput(templateSourceCode, dataModel,
            outputFormat, locale, timeZone);
    Future<FreeMarkerServiceResponse> future = templateExecutor.submit(task);

    synchronized (task) {
        while (!task.isTemplateExecutionStarted() && !task.isTaskEnded() && !future.isDone()) {
            try {
                task.wait(50); // Timeout is needed to periodically check future.isDone()
            } catch (InterruptedException e) {
                throw new FreeMarkerServiceException("Template execution task was interrupted.", e);
            }
        }
    }

    try {
        return future.get(maxTemplateExecutionTime, TimeUnit.MILLISECONDS);
    } catch (ExecutionException e) {
        throw new FreeMarkerServiceException("Template execution task unexpectedly failed", e.getCause());
    } catch (InterruptedException e) {
        throw new FreeMarkerServiceException("Template execution task was interrupted.", e);
    } catch (TimeoutException e) {
        // Exactly one interruption should be enough, and it should abort template processing pretty much
        // immediately. But to be on the safe side we will interrupt in a loop, with a timeout.
        final long abortionLoopStartTime = System.currentTimeMillis();
        long timeLeft = ABORTION_LOOP_TIME_LIMIT;
        boolean templateExecutionEnded = false;
        do {
            synchronized (task) {
                Thread templateExecutorThread = task.getTemplateExecutorThread();
                if (templateExecutorThread == null) {
                    templateExecutionEnded = true;
                } else {
                    FreeMarkerInternalsAccessor.interruptTemplateProcessing(templateExecutorThread);
                    logger.debug(
                            "Trying to interrupt overly long template processing (" + timeLeft + " ms left).");
                }
            }
            if (!templateExecutionEnded) {
                try {
                    timeLeft = ABORTION_LOOP_TIME_LIMIT - (System.currentTimeMillis() - abortionLoopStartTime);
                    if (timeLeft > 0) {
                        Thread.sleep(ABORTION_LOOP_INTERRUPTION_DISTANCE);
                    }
                } catch (InterruptedException eInt) {
                    logger.error("Template execution abortion loop was interrupted", eInt);
                    timeLeft = 0;
                }
            }
        } while (!templateExecutionEnded && timeLeft > 0);

        if (templateExecutionEnded) {
            logger.debug("Long template processing has ended.");
            try {
                return future.get();
            } catch (InterruptedException | ExecutionException e1) {
                throw new FreeMarkerServiceException("Failed to get result from template executor task", e);
            }
        } else {
            throw new FreeMarkerServiceException(
                    "Couldn't stop long running template processing within " + ABORTION_LOOP_TIME_LIMIT
                            + " ms. It's possibly stuck forever. Such problems can exhaust the executor pool. "
                            + "Template (quoted): " + StringEscapeUtils.escapeJava(templateSourceCode));
        }
    }
}

From source file: org.grouplens.lenskit.eval.script.ConfigMethodInvoker.java

public Object invokeConfigurationMethod(final Object target, final String name, Object... args) {
    Preconditions.checkNotNull(target, "target object");

    if (args.length == 1 && args[0] instanceof Future) {
        Future<?> f = (Future<?>) args[0];
        if (f.isDone()) {
            try {
                Object arg = f.get();
                return invokeConfigurationMethod(target, name, arg);
            } catch (InterruptedException e) {
                throw new RuntimeException("interrupted waiting for dependency", e);
            } catch (ExecutionException e) {
                throw new RuntimeException(e.getCause());
            }
        } else {
            Function<Object, Object> recur = new Function<Object, Object>() {
                @Nullable
                @Override
                public Object apply(@Nullable Object input) {
                    return invokeConfigurationMethod(target, name, input);
                }
            };
            ListenableFuture<?> f2 = Futures.transform(listenInPoolThread(f), recur);
            registerDep(target, f2);
            return f2;
        }
    }

    final String setterName = "set" + StringUtils.capitalize(name);
    final String adderName = "add" + StringUtils.capitalize(name);
    Supplier<Object> inv;
    // directly invoke
    inv = findMethod(target, name, args);
    if (inv == null) {
        inv = findBuildableMethod(target, name, args);
    }
    // invoke a setter
    if (inv == null) {
        inv = findMethod(target, setterName, args);
    }
    // invoke a buildable setter
    if (inv == null) {
        inv = findBuildableMethod(target, setterName, args);
    }
    // invoke an adder
    if (inv == null) {
        inv = findMethod(target, adderName, args);
    }
    // add from a list
    if (inv == null) {
        inv = findMultiMethod(target, adderName, args);
    }
    // invoke a buildable adder
    if (inv == null) {
        inv = findBuildableMethod(target, adderName, args);
    }

    if (inv != null) {
        return inv.get();
    } else {
        // try to invoke the method directly
        return DefaultGroovyMethods.invokeMethod(target, name, args);
    }

}