Example usage for java.util.concurrent Future cancel

Introduction

On this page you can find usage examples for java.util.concurrent.Future.cancel.

Prototype

boolean cancel(boolean mayInterruptIfRunning);

Document

Attempts to cancel execution of this task. The attempt fails if the task has already completed, has already been cancelled, or cannot be cancelled for some other reason. If the task has already started, the mayInterruptIfRunning parameter determines whether the thread executing it should be interrupted in an attempt to stop it.
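
Before the real-world examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects) illustrating the basic contract: cancel() returns true if the task was cancelled, get() on a cancelled future throws CancellationException, and the mayInterruptIfRunning flag decides whether an already-running worker thread is interrupted.

import java.util.concurrent.*;

public class FutureCancelDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();

        // A task that responds to interruption, since Thread.sleep throws InterruptedException.
        Future<String> future = executor.submit(() -> {
            Thread.sleep(10_000); // simulate long-running work
            return "done";
        });

        TimeUnit.MILLISECONDS.sleep(100); // let the task start

        // true: interrupt the worker thread if the task is already running
        boolean cancelled = future.cancel(true);
        System.out.println("cancel() returned: " + cancelled);
        System.out.println("isCancelled(): " + future.isCancelled());

        try {
            future.get();
        } catch (CancellationException expected) {
            System.out.println("get() threw CancellationException");
        }

        executor.shutdown();
    }
}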

Usage

From source file: org.pentaho.reporting.platform.plugin.async.AsyncIT.java

@Test
public void testCancelNotInterruptable() {

    try {

        final CountDownLatch latch = new CountDownLatch(1);

        final PentahoAsyncExecutor exec = new PentahoAsyncExecutor(10, 0);
        final PentahoAsyncReportExecution pentahoAsyncReportExecution = new PentahoAsyncReportExecution(
                "junit-path", component, handler, session, "not null", AuditWrapper.NULL) {

            @Override
            public IFixedSizeStreamingContent call() throws Exception {
                latch.await();
                return new NullSizeStreamingContent();
            }
        };

        final UUID uuid = UUID.randomUUID();

        exec.addTask(pentahoAsyncReportExecution, session, uuid);

        final Future future = exec.getFuture(uuid, session);

        final AbstractReportProcessor processor = mock(AbstractReportProcessor.class);

        ReportProcessorThreadHolder.setProcessor(processor);

        future.cancel(false);

        pentahoAsyncReportExecution.getListener().reportProcessingUpdate(
                new ReportProgressEvent(this, ReportProgressEvent.PAGINATING, 0, 0, 0, 0, 0, 0));

        verify(processor, never()).cancel();

        latch.countDown();
    } finally {
        ReportProcessorThreadHolder.clear();
    }
}

From source file: org.apache.phoenix.cache.ServerCacheClient.java

public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr,
        final ServerCacheFactory cacheFactory, final TableRef cacheUsingTableRef) throws SQLException {
    ConnectionQueryServices services = connection.getQueryServices();
    MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
    List<Closeable> closeables = new ArrayList<Closeable>();
    closeables.add(chunk);
    ServerCache hashCacheSpec = null;
    SQLException firstException = null;
    final byte[] cacheId = generateId();
    /**
     * Execute EndPoint in parallel on each server to send compressed hash cache 
     */
    // TODO: generalize and package as a per region server EndPoint caller
    // (ideally this would be functionality provided by the coprocessor framework)
    boolean success = false;
    ExecutorService executor = services.getExecutor();
    List<Future<Boolean>> futures = Collections.emptyList();
    try {
        PTable cacheUsingTable = cacheUsingTableRef.getTable();
        List<HRegionLocation> locations = services
                .getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
        int nRegions = locations.size();
        // Size these based on worst case
        futures = new ArrayList<Future<Boolean>>(nRegions);
        Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
        for (HRegionLocation entry : locations) {
            // Keep track of servers we've sent to and only send once
            byte[] regionStartKey = entry.getRegionInfo().getStartKey();
            byte[] regionEndKey = entry.getRegionInfo().getEndKey();
            if (!servers.contains(entry) && keyRanges.intersects(regionStartKey, regionEndKey,
                    cacheUsingTable.getIndexType() == IndexType.LOCAL
                            ? ScanUtil.getRowKeyOffset(regionStartKey, regionEndKey)
                            : 0,
                    true)) {
                // Call RPC once per server
                servers.add(entry);
                if (LOG.isDebugEnabled()) {
                    LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));
                }
                final byte[] key = entry.getRegionInfo().getStartKey();
                final HTableInterface htable = services
                        .getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
                closeables.add(htable);
                futures.add(executor.submit(new JobCallable<Boolean>() {

                    @Override
                    public Boolean call() throws Exception {
                        final Map<byte[], AddServerCacheResponse> results;
                        try {
                            results = htable.coprocessorService(ServerCachingService.class, key, key,
                                    new Batch.Call<ServerCachingService, AddServerCacheResponse>() {
                                        @Override
                                        public AddServerCacheResponse call(ServerCachingService instance)
                                                throws IOException {
                                            ServerRpcController controller = new ServerRpcController();
                                            BlockingRpcCallback<AddServerCacheResponse> rpcCallback = new BlockingRpcCallback<AddServerCacheResponse>();
                                            AddServerCacheRequest.Builder builder = AddServerCacheRequest
                                                    .newBuilder();
                                            if (connection.getTenantId() != null) {
                                                builder.setTenantId(
                                                        ByteStringer.wrap(connection.getTenantId().getBytes()));
                                            }
                                            builder.setCacheId(ByteStringer.wrap(cacheId));
                                            builder.setCachePtr(
                                                    org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
                                            ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuilder = ServerCacheFactoryProtos.ServerCacheFactory
                                                    .newBuilder();
                                            svrCacheFactoryBuilder
                                                    .setClassName(cacheFactory.getClass().getName());
                                            builder.setCacheFactory(svrCacheFactoryBuilder.build());
                                            instance.addServerCache(controller, builder.build(), rpcCallback);
                                            if (controller.getFailedOn() != null) {
                                                throw controller.getFailedOn();
                                            }
                                            return rpcCallback.get();
                                        }
                                    });
                        } catch (Throwable t) {
                            throw new Exception(t);
                        }
                        if (results != null && results.size() == 1) {
                            return results.values().iterator().next().getReturn();
                        }
                        return false;
                    }

                    /**
                     * Defines the grouping for round robin behavior.  All threads spawned to process
                     * this scan will be grouped together and time sliced with other simultaneously
                     * executing parallel scans.
                     */
                    @Override
                    public Object getJobId() {
                        return ServerCacheClient.this;
                    }

                    @Override
                    public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                        return NO_OP_INSTANCE;
                    }
                }));
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry
                            + " since one already exists for that entry", connection));
                }
            }
        }

        hashCacheSpec = new ServerCache(cacheId, servers, cachePtr.getLength());
        // Execute in parallel
        int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB,
                QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
        for (Future<Boolean> future : futures) {
            future.get(timeoutMs, TimeUnit.MILLISECONDS);
        }

        cacheUsingTableRefMap.put(Bytes.mapKey(cacheId), cacheUsingTableRef);
        success = true;
    } catch (SQLException e) {
        firstException = e;
    } catch (Exception e) {
        firstException = new SQLException(e);
    } finally {
        try {
            if (!success) {
                SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
                for (Future<Boolean> future : futures) {
                    future.cancel(true);
                }
            }
        } finally {
            try {
                Closeables.closeAll(closeables);
            } catch (IOException e) {
                if (firstException == null) {
                    firstException = new SQLException(e);
                }
            } finally {
                if (firstException != null) {
                    throw firstException;
                }
            }
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));
    }
    return hashCacheSpec;
}

From source file: org.apache.syncope.core.provisioning.java.ConnectorFacadeProxy.java

@Override
public ConnectorObject getObject(final ObjectClass objectClass, final Uid uid, final OperationOptions options) {
    Future<ConnectorObject> future = null;

    if (connInstance.getCapabilities().contains(ConnectorCapability.SEARCH)) {
        future = asyncFacade.getObject(connector, objectClass, uid, options);
    } else {
        LOG.info("Search was attempted, although the connector only has these capabilities: {}. No action.",
                connInstance.getCapabilities());
    }

    try {
        return future == null ? null : future.get(connInstance.getConnRequestTimeout(), TimeUnit.SECONDS);
    } catch (java.util.concurrent.TimeoutException e) {
        if (future != null) {
            future.cancel(true);
        }
        throw new TimeoutException("Request timeout");
    } catch (Exception e) {
        LOG.error("Connector request execution failure", e);
        if (e.getCause() instanceof RuntimeException) {
            throw (RuntimeException) e.getCause();
        } else {
            throw new IllegalArgumentException(e.getCause());
        }
    }
}
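
Several examples on this page, including the one above, share the same shape: wait on the future with a timeout, and cancel the task when the timeout fires. A generic sketch of that pattern follows; the helper class and method names are invented for illustration.

import java.util.concurrent.*;

class TimeoutCancelSketch {
    // Wait up to timeoutSeconds for a result; on timeout, cancel the task and rethrow.
    static <T> T getOrCancel(Future<T> future, long timeoutSeconds) throws Exception {
        try {
            return future.get(timeoutSeconds, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            future.cancel(true); // give up and interrupt the worker
            throw e;
        }
    }
}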

From source file: com.github.lindenb.mscheduler.MScheduler.java

protected int updateJobStatus(final Task task) {
    final StatusChecker call = createStatusChecker(task);
    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final Future<Integer> future = executor.submit(call);
    int return_status = -1;

    try {
        //allow 10 seconds to get status
        return_status = future.get(10, TimeUnit.SECONDS);

        return return_status;
    } catch (TimeoutException e) {
        future.cancel(true);
        LOG.error("Timeout for gettting job status and " + task);
        return -1;
    } catch (Exception e) {
        future.cancel(true);
        LOG.error("Failure:", e);
        return -1;
    } finally {
        executor.shutdown();
    }
}

From source file: com.baifendian.swordfish.execserver.runner.flow.FlowRunner.java

/**
 * Kill all node runners that are still active for this execution flow.
 */
private void kill() {
    synchronized (this) {
        if (activeNodeRunners.isEmpty()) {
            return;
        }

        logger.info("Kill has been called on exec id: {}, num: {}", executionFlow.getId(),
                activeNodeRunners.size());

        // iterate over all active node runners
        for (Map.Entry<NodeRunner, Future<Boolean>> entry : activeNodeRunners.entrySet()) {
            NodeRunner nodeRunner = entry.getKey();
            Future<Boolean> future = entry.getValue();

            if (!future.isDone()) {
                // the node is still running and must be killed
                logger.info("kill exec, id: {}, node: {}", executionFlow.getId(), nodeRunner.getNodename());

                // ask the node runner itself to stop
                nodeRunner.kill();

                // then cancel the future, interrupting the worker thread if needed
                future.cancel(true);
            }
        }
    }
}

From source file: de.unisb.cs.st.javalanche.mutation.runtime.testDriver.MutationTestDriver.java

/**
 * This method tries to stop a thread by disabling the current mutation.
 * It is called when a thread that executes a mutation does not return,
 * e.g. because it is stuck in an endless loop.
 *
 * @param future
 *            the future that executes the mutation
 */
private void switchOfMutation(Future<?> future) {
    String message1 = "Could not kill thread for mutation: " + currentMutation;
    logger.info(message1 + " - switching mutation off");
    if (mutationSwitcher != null) {
        mutationSwitcher.switchOff();
    }
    future.cancel(true);
    try {
        logger.info("Sleeping   ");
        Thread.sleep(configuration.getTimeoutInSeconds());
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}
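
The example above pairs future.cancel(true) with a cooperative kill switch, because interruption alone cannot stop code that never checks its interrupted status. Below is a condensed sketch of that two-step pattern, with a hypothetical volatile flag standing in for Javalanche's MutationSwitcher.

import java.util.concurrent.*;

class KillSwitchDemo {
    // Hypothetical cooperative kill switch.
    private static volatile boolean mutationEnabled = true;

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<?> future = executor.submit(() -> {
            // Exits when either the flag flips or the thread is interrupted.
            while (mutationEnabled && !Thread.currentThread().isInterrupted()) {
                // busy work that never blocks
            }
        });

        Thread.sleep(100);
        mutationEnabled = false; // step 1: ask the task to stop on its own
        future.cancel(true);     // step 2: interrupt it, in case it is blocked
        executor.shutdown();
    }
}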

From source file: com.microsoft.azure.AzureVMCloud.java

/**
 * Wait till a node that connects through JNLP comes online and connects to
 * Jenkins.
 *
 * @param agent Node to wait for
 * @throws Exception Throws if the wait time expires or other exception
 * happens.
 */
private void waitUntilJNLPNodeIsOnline(final AzureVMAgent agent) throws Exception {
    LOGGER.log(Level.INFO, "Azure Cloud: waitUntilOnline: for agent {0}", agent.getDisplayName());
    Callable<String> callableTask = new Callable<String>() {

        @Override
        public String call() {
            try {
                Computer computer = agent.toComputer();
                if (computer != null)
                    computer.waitUntilOnline();
            } catch (InterruptedException e) {
                // just ignore
            }
            return "success";
        }
    };
    Future<String> future = getThreadPool().submit(callableTask);

    try {
        // 30 minutes is a decent time for the node to come online
        String result = future.get(30, TimeUnit.MINUTES);
        LOGGER.log(Level.INFO, "Azure Cloud: waitUntilOnline: node is alive, result {0}", result);
    } catch (Exception ex) {
        throw new AzureCloudException("Azure Cloud: waitUntilOnline: Failure waiting till online", ex);
    } finally {
        future.cancel(true);
    }
}

From source file: com.ebay.pulsar.analytics.cache.MemcachedCache.java

@Override
public byte[] get(NamedKey key) {
    Future<Object> future;
    long start = System.nanoTime();
    try {
        future = client.asyncGet(computeKeyHash(memcachedPrefix, key));
    } catch (IllegalStateException e) {
        // operation did not get queued in time (queue is full)
        errorCount.incrementAndGet();
        logger.warn("Unable to queue cache operation: " + e.getMessage());
        return null;
    }
    try {
        byte[] bytes = (byte[]) future.get(timeout, TimeUnit.MILLISECONDS);
        cacheGetTime.addAndGet(System.nanoTime() - start);
        if (bytes != null) {
            getBytes.addAndGet(bytes.length);
            hitCount.incrementAndGet();
        } else {
            missCount.incrementAndGet();
        }
        return bytes == null ? null : deserializeValue(key, bytes);
    } catch (TimeoutException e) {
        timeoutCount.incrementAndGet();
        future.cancel(false);
        return null;
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw Throwables.propagate(e);
    } catch (ExecutionException e) {
        errorCount.incrementAndGet();
        logger.warn("Exception pulling item from cache: " + e.getMessage());
        return null;
    }
}

From source file: com.rapid7.diskstorage.dynamodb.DynamoDBDelegate.java

public void parallelMutate(List<MutateWorker> workers) throws BackendException {
    CompletionService<Void> completion = new ExecutorCompletionService<>(clientThreadPool);
    List<Future<Void>> futures = Lists.newLinkedList();
    for (MutateWorker worker : workers) {
        futures.add(completion.submit(worker));
    }

    // Block until all the futures have completed or thrown, rather than using a latch, since we need to check future status anyway.
    boolean interrupted = false;
    try {
        for (int i = 0; i < workers.size(); i++) {
            try {
                completion.take().get(); //Void
            } catch (InterruptedException e) {
                interrupted = true;
                // fail out because Titan does not poll this thread for interruption anywhere
                throw new BackendRuntimeException("was interrupted during parallelMutate");
            } catch (ExecutionException e) {
                throw unwrapExecutionException(e, MUTATE_ITEM);
            }
        }
    } finally {
        for (Future<Void> future : futures) {
            if (!future.isDone()) {
                future.cancel(interrupted /* mayInterruptIfRunning */);
            }
        }
        if (interrupted) {
            // set interrupted on this thread
            Thread.currentThread().interrupt();
        }
    }
}

From source file: com.amazon.janusgraph.diskstorage.dynamodb.DynamoDBDelegate.java

public void parallelMutate(List<MutateWorker> workers) throws BackendException {
    CompletionService<Void> completion = new ExecutorCompletionService<>(clientThreadPool);
    List<Future<Void>> futures = Lists.newLinkedList();
    for (MutateWorker worker : workers) {
        futures.add(completion.submit(worker));
    }

    // Block until all the futures have completed or thrown, rather than using a latch, since we need to check future status anyway.
    boolean interrupted = false;
    try {
        for (int i = 0; i < workers.size(); i++) {
            try {
                completion.take().get(); //Void
            } catch (InterruptedException e) {
                interrupted = true;
                // fail out because JanusGraph does not poll this thread for interruption anywhere
                throw new BackendRuntimeException("was interrupted during parallelMutate");
            } catch (ExecutionException e) {
                throw unwrapExecutionException(e, MUTATE_ITEM);
            }
        }
    } finally {
        for (Future<Void> future : futures) {
            if (!future.isDone()) {
                future.cancel(interrupted /* mayInterruptIfRunning */);
            }
        }
        if (interrupted) {
            // set interrupted on this thread
            Thread.currentThread().interrupt();
        }
    }
}
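
Both parallelMutate variants above end with the same cleanup idiom: in a finally block, every future that is not yet done is cancelled, interrupting its worker only if the waiting thread itself was interrupted, and the interrupt flag is then restored. A stripped-down sketch of that idiom, with the worker type reduced to a plain Callable:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;

class ParallelCleanupSketch {
    static void runAll(ExecutorService pool, List<Callable<Void>> workers) throws Exception {
        CompletionService<Void> completion = new ExecutorCompletionService<>(pool);
        List<Future<Void>> futures = new ArrayList<>();
        for (Callable<Void> worker : workers) {
            futures.add(completion.submit(worker));
        }
        boolean interrupted = false;
        try {
            for (int i = 0; i < workers.size(); i++) {
                try {
                    completion.take().get();
                } catch (InterruptedException e) {
                    interrupted = true;
                    throw new RuntimeException("interrupted while waiting for workers", e);
                }
            }
        } finally {
            for (Future<Void> future : futures) {
                if (!future.isDone()) {
                    future.cancel(interrupted /* mayInterruptIfRunning */);
                }
            }
            if (interrupted) {
                Thread.currentThread().interrupt(); // restore the interrupt flag
            }
        }
    }
}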