Example usage for java.util.concurrent Future cancel

List of usage examples for java.util.concurrent Future cancel

Introduction

On this page you can find example usages of java.util.concurrent Future cancel.

Prototype

boolean cancel(boolean mayInterruptIfRunning);

Document

Attempts to cancel execution of this task. The attempt fails if the task has already completed, has already been cancelled, or could not be cancelled for some other reason. If the task has not started when cancel is called, it should never run; if it has already started, the mayInterruptIfRunning parameter determines whether the thread executing the task is interrupted in an attempt to stop it.
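
A minimal, self-contained sketch of the typical pattern follows (the class name FutureCancelExample is illustrative only, not taken from any of the projects below): submit a task to an ExecutorService, call cancel(true) so that a running task is interrupted, and handle the CancellationException that a subsequent get() throws.

import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class FutureCancelExample {

    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();

        // Submit a task that sleeps long enough for us to cancel it.
        Future<String> future = executor.submit(new Callable<String>() {
            @Override
            public String call() throws Exception {
                TimeUnit.SECONDS.sleep(10); // throws InterruptedException when cancel(true) interrupts this thread
                return "finished";
            }
        });

        TimeUnit.MILLISECONDS.sleep(100); // give the task a chance to start

        // cancel(true) interrupts the worker thread if the task is already running;
        // cancel(false) would only prevent a not-yet-started task from running.
        boolean cancelled = future.cancel(true);
        System.out.println("cancel(true) returned: " + cancelled);
        System.out.println("isCancelled: " + future.isCancelled());
        System.out.println("isDone:      " + future.isDone());

        try {
            future.get(); // get() on a cancelled Future throws CancellationException
        } catch (CancellationException e) {
            System.out.println("get() threw CancellationException as expected");
        } catch (ExecutionException e) {
            // not expected here; would wrap an exception thrown by the task itself
            e.printStackTrace();
        }

        executor.shutdown();
    }
}

Note that cancel(true) only delivers an interrupt; the task must cooperate by responding to interruption, for example through blocking calls such as sleep that throw InterruptedException.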

Usage

From source file:org.wicketstuff.nashorn.resource.NashornResource.java

/**
 * Executes a given script callable and the corresponding script
 * @param executeScript
 *            the script callable to execute
 * @param watch
 *            if the script execution should be watched
 * @return the script result
 * @throws Exception
 */
private Object executeScript(NashornScriptCallable executeScript, boolean watch) throws Exception {
    Future<Object> scriptTask = scheduledExecutorService.submit(executeScript);
    if (watch && waitUnit != null) {
        scheduledExecutorService.execute(new NashornMemoryWatcher(executeScript, scriptTask, wait, waitUnit,
                maxScriptMemorySize, isDebug(), getErrorWriter()));
    }
    scheduledExecutorService.schedule(() -> {
        scriptTask.cancel(true);
    }, this.delay, this.delayUnit);
    return scriptTask.get();
}

From source file:com.cloudera.oryx.kmeans.computation.local.WeightedPointsByFold.java

@Override
public List<List<WeightedRealVector>> call() throws InterruptedException, ExecutionException {
    Config config = ConfigUtils.getDefaultConfig();
    ClusterSettings cluster = ClusterSettings.create(config);
    KSketchIndex index = buildIndex(foldVecs, cluster);
    int pointsPerIteration = cluster.getSketchPoints();
    RandomGenerator random = RandomManager.getRandom();

    ListeningExecutorService exec = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(config.getInt("model.parallelism"),
                    new ThreadFactoryBuilder().setNameFormat("KSKETCH-%d").setDaemon(true).build()));
    for (int iter = 0; iter < cluster.getSketchIterations(); iter++) {
        log.info("Starting sketch iteration {}", iter + 1);
        List<ListenableFuture<Collection<RealVector>>> futures = Lists.newArrayList();
        for (int foldId = 0; foldId < foldVecs.size(); foldId++) {
            futures.add(exec.submit(
                    new SamplingRun(index, random, foldId, foldVecs.get(foldId), pointsPerIteration)));
        }
        // At the end of each iteration, gather up the sampled points to add to the index
        Future<List<Collection<RealVector>>> all = Futures.allAsList(futures);
        try {
            List<Collection<RealVector>> newSamples = all.get();
            for (int foldId = 0; foldId < foldVecs.size(); foldId++) {
                for (RealVector v : newSamples.get(foldId)) {
                    index.add(v, foldId);
                }
            }
        } catch (ExecutionException e) {
            ExecutorUtils.shutdownNowAndAwait(exec);
            all.cancel(true);
            throw e;
        }
        index.rebuildIndices();
    }

    List<ListenableFuture<List<WeightedRealVector>>> ret = Lists.newArrayList();
    for (int foldId = 0; foldId < foldVecs.size(); foldId++) {
        ret.add(exec.submit(new AssignmentRun(index, foldId, foldVecs.get(foldId))));
    }
    try {
        return Futures.allAsList(ret).get();
    } finally {
        ExecutorUtils.shutdownNowAndAwait(exec);
    }
}

From source file:gda.scan.ConcurrentScanChild.java

/**
 * Asynchronously read out the detectors in parallel threads, add the resulting data to the ScanDataPoint and
 * put the point onto the pipeline for possible completion and publishing. Call
 * {@link ConcurrentScanChild#waitForDetectorReadoutAndPublishCompletion()} to wait for this task to complete,
 * or {@link #cancelReadoutAndPublishCompletion()} to cancel and interrupt it.
 * <p>
 * If the property {@link LocalProperties#GDA_SCAN_CONCURRENTSCAN_READOUT_CONCURRENTLY} has its default value of
 * false, this method simply blocks while reading out each detector in series and then adds the ScanDataPoint to
 * the pipeline.
 * 
 * @param point
 * @throws Exception
 */
@Override
protected void readoutDetectorsAndPublish(final ScanDataPoint point) throws Exception {

    final boolean lastPointInLine = (getPointPositionInLine() == PointPositionInLine.LAST); // latch value

    if (!isReadoutConcurrent()) {
        super.readoutDetectorsAndPublish(point);
        return;
    }

    // Make sure the previous point has been read out and published
    // (If the scan contains a detector this method will already have been called)
    waitForDetectorReadoutAndPublishCompletion();

    final String threadName = "ConcurrentScanChild.readoutDetectorsAndPublish(point '" + point.toString()
            + "')";
    detectorReadoutTask = new FutureTask<Void>(new Callable<Void>() {

        List<Future<Object>> readoutTasks;

        /**
         * Readout each detector in a thread, add the resulting data to the ScanDataPoint and publish.
         */
        @Override
        public Void call() throws Exception {

            try {
                Vector<Detector> detectors = point.getDetectors();

                // if there are detectors then readout in parallel threads
                if (detectors.size() != 0) {

                    readoutTasks = new ArrayList<Future<Object>>(detectors.size());

                    // Start readout tasks
                    for (Detector detector : point.getDetectors()) {
                        FutureTask<Object> readoutTask = new FutureTask<Object>(new ReadoutDetector(detector));
                        new Thread(readoutTask, threadName + ": readout '" + detector.getName() + "'").start();
                        readoutTasks.add(readoutTask);
                    }

                    // Wait for readout results and put into point
                    for (int i = 0; i < detectors.size(); i++) {
                        checkThreadInterrupted();
                        Object data = readoutTasks.get(i).get();
                        point.addDetectorData(data, ScannableUtils.getExtraNamesFormats(detectors.get(i)));
                    }

                }

                // Put point onto pipeline
                checkThreadInterrupted(); // probably voodoo and not required here
                scanDataPointPipeline.put(point); // may block
                checkThreadInterrupted(); // probably voodoo and not required here

                // The main scan thread cannot call atPointEnd (and subsequently atPointStart) in the correct order
                // with respect to readout so call these here instead.

                for (Detector detector : detectors) {
                    detector.atPointEnd();
                }

                // Unless this is the last point in the line, call atPointStart hooks for the next point (the one
                // that the main scan thread is now working on).
                if (!lastPointInLine) {
                    for (Detector detector : detectors) {
                        detector.atPointStart();
                    }
                }

            } catch (Exception e) {
                // could be the normal result of cancelling this task
                // (detector.readout() unfortunately doesn't distinguish InterruptedException from DeviceException)
                logger.info("'" + representThrowable(e)
                        + "' --- while reading out detectors. *Canceling any remaining readout tasks.*");
                for (Future<Object> task : readoutTasks) {
                    task.cancel(true);
                }
                throw e;
            }
            return null;
        }
    });

    new Thread(detectorReadoutTask, threadName).start();
}

From source file:com.epam.reportportal.apache.http.impl.conn.PoolingHttpClientConnectionManager.java

public ConnectionRequest requestConnection(final HttpRoute route, final Object state) {
    Args.notNull(route, "HTTP route");
    if (this.log.isDebugEnabled()) {
        this.log.debug("Connection request: " + format(route, state) + formatStats(route));
    }
    final Future<CPoolEntry> future = this.pool.lease(route, state, null);
    return new ConnectionRequest() {

        public boolean cancel() {
            return future.cancel(true);
        }

        public HttpClientConnection get(final long timeout, final TimeUnit tunit)
                throws InterruptedException, ExecutionException, ConnectionPoolTimeoutException {
            return leaseConnection(future, timeout, tunit);
        }

    };

}

From source file:com.intel.cosbench.driver.service.MissionHandler.java

public void abort() {
    String id = missionContext.getId();
    Future<?> future = missionContext.getFuture();
    /* for strong consistency: a lock should be employed here */
    if (future != null) {
        if (future.isCancelled())
            return; // already aborted
        if (future.cancel(true))
            return; // abort request submitted
    }
    if (isStopped(missionContext.getState())) {
        LOGGER.warn("mission {} not aborted as it is already stopped", id);
        return; // do nothing -- it is already stopped
    }
    missionContext.setState(ABORTED); // abort it directly
    LOGGER.info("mission {} has been aborted successfully", id);
}

From source file:com.uwindsor.elgg.project.http.AsyncHttpClient.java

/**
 * Cancels any pending (or potentially active) requests associated with the passed Context.
 * <p>
 * <b>Note:</b> This will only affect requests which were created with a non-null android Context. This method is
 * intended to be used in the onDestroy method of your android activities to destroy all requests which are no
 * longer required.
 * 
 * @param context
 *            the android Context instance associated to the request.
 * @param mayInterruptIfRunning
 *            specifies if active requests should be cancelled along with pending requests.
 */
public void cancelRequests(Context context, boolean mayInterruptIfRunning) {

    List<WeakReference<Future<?>>> requestList = requestMap.get(context);
    if (requestList != null) {
        for (WeakReference<Future<?>> requestRef : requestList) {
            Future<?> request = requestRef.get();
            if (request != null) {
                request.cancel(mayInterruptIfRunning);
            }
        }
    }
    requestMap.remove(context);
}

From source file:fi.hip.sicx.jclouds.JCloudClient.java

/**
 * Cancels reading of the data from the cloud. After the reading has been cancelled, reading from the InputStream
 * will not be possible.
 * 
 * @return true if cancel success, otherwise false
 */
public boolean cancelReadData() {
    if (this.cis != null) {
        for (Future<Blob> future : this.futures) {
            future.cancel(true);
        }
        try {
            this.cis.close();
        } catch (IOException e) {
            e.printStackTrace();
            return false;
        }
    }
    return true;
}

From source file:es.upv.grycap.coreutils.fiber.test.FetchCancellationTest.java

@Test
public void testFetch() throws Exception {
    // create fetcher
    final HttpDataFetcher fetcher = new HttpDataFetcher(2);
    assertThat("Fetcher was created", fetcher, notNullValue());

    // create output folder
    final File outDir = tmpFolder.newFolder(randomAlphanumeric(12));
    assertThat("Output dir was created", outDir, notNullValue());
    assertThat("Output dir is writable", outDir.canWrite());

    // submit request and cancel
    final ExecutorService executorService = Executors.newFixedThreadPool(2);
    final Future<Map<String, FetchStatus>> future = executorService
            .submit(new Callable<Map<String, FetchStatus>>() {
                @Override
                public Map<String, FetchStatus> call() throws Exception {
                    return AsyncCompletionStage
                            .get(fetcher.fetchToDir(new URL(MOCK_SERVER_BASE_URL + "/fetch/long-waiting"),
                                    ImmutableList.of("1"), outDir), 120000l, TimeUnit.SECONDS);
                }
            });
    assertThat("Request was cancelled", future.cancel(true));
    assertThat("File does not exist", not(new File(outDir, "1").exists()));
}

From source file:org.alfresco.repo.content.transform.AbstractContentTransformer2.java

/**
 * Cancels <code>task</code> and closes content accessors
 *
 * @param task - {@link Future} task instance which specifies a transformation action
 * @param proxiedReader - {@link AbstractStreamAwareProxy} instance which represents channel closing mechanism for content reader
 * @param proxiedWriter - {@link AbstractStreamAwareProxy} instance which represents channel closing mechanism for content writer
 */
private void releaseResources(Future<?> task, AbstractStreamAwareProxy proxiedReader,
        AbstractStreamAwareProxy proxiedWriter) {
    if (null != task) {
        task.cancel(true);
    }

    if (null != proxiedReader) {
        proxiedReader.release();
    }

    if (null != proxiedWriter) {
        proxiedWriter.release();
    }
}

From source file:com.bt.sitb.opendaylight.controller.sample.btil.provider.OpendaylightBtil.java

/**
 * RPC call implemented from the BtilService interface that cancels the current
 * toast, if any.
 */
@Override
public Future<RpcResult<Void>> cancelToast() {

    LOG.info("cancelToast");
    Future<?> current = currentMakeToastTask.getAndSet(null);
    if (current != null) {
        current.cancel(true);
    }

    // Always return success from the cancel toast call.
    return Futures.immediateFuture(RpcResultBuilder.<Void>success().build());
}