Example usage for java.util.concurrent Future isCancelled

Introduction

On this page you can find example usages of java.util.concurrent.Future.isCancelled() from open-source projects.

Prototype

boolean isCancelled();

Document

Returns true if this task was cancelled before it completed normally.
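
As a quick illustration of this contract, here is a minimal self-contained sketch (the class name IsCancelledDemo and the executor setup are illustrative, not drawn from the projects below). It shows isCancelled() reporting true once a running task has been cancelled, and that a cancelled task also counts as done:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class IsCancelledDemo {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<?> future = executor.submit(() -> {
            try {
                Thread.sleep(60_000); // stand-in for long-running work
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore interrupt status
            }
        });

        future.cancel(true); // request cancellation, interrupting the running task

        System.out.println(future.isCancelled()); // true: cancelled before normal completion
        System.out.println(future.isDone());      // true: cancellation counts as completion

        executor.shutdown();
    }
}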

Usage

From source file: com.mozilla.fhr.consumer.FHRConsumer.java

@Override
public void poll() {
    final CountDownLatch latch = new CountDownLatch(streams.size());
    workers = new ArrayList<Future<Void>>(streams.size());
    for (final KafkaStream<Message> stream : streams) {
        workers.add(executor.submit(new FHRConsumerWorker(stream, latch)));
    }

    // Wait for all tasks to complete; in the normal case they will
    // run indefinitely unless killed
    try {
        while (true) {
            latch.await(10, TimeUnit.SECONDS);
            if (latch.getCount() != streams.size()) {
                // we have a dead thread and should exit
                break;
            }
        }
    } catch (InterruptedException e) {
        LOG.info("Interrupted during polling", e);
    }

    // Spit out errors if there were any
    for (Future<Void> worker : workers) {
        try {
            if (worker.isDone() && !worker.isCancelled()) {
                worker.get(1, TimeUnit.SECONDS);
            }
        } catch (InterruptedException e) {
            LOG.error("Thread was interrupted:", e);
        } catch (ExecutionException e) {
            LOG.error("Exception occured in thread:", e);
        } catch (TimeoutException e) {
            LOG.error("Timed out waiting for thread result:", e);
        } catch (CancellationException e) {
            LOG.error("Thread has been canceled: ", e);
        }
    }
}

From source file: org.apache.streams.mongo.MongoPersistReader.java

@Override
public void startStream() {

    LOGGER.debug("startStream");
    MongoPersistReaderTask readerTask = new MongoPersistReaderTask(this);
    Thread readerTaskThread = new Thread(readerTask);
    Future future = executor.submit(readerTaskThread);

    while (!future.isDone() && !future.isCancelled()) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException interrupt) {
            LOGGER.trace("Interrupt", interrupt);
        }
    }

    executor.shutdown();

}

From source file: com.soulgalore.crawler.core.impl.DefaultCrawler.java

/**
 * Verify that all urls in allUrls return 200. If not, they will be removed from that set and
 * instead added to the nonworking list.
 *
 * @param allUrls all the links that have been fetched
 * @param nonWorkingUrls links that are not working
 */
private void verifyUrls(Set<CrawlerURL> allUrls, Set<HTMLPageResponse> verifiedUrls,
        Set<HTMLPageResponse> nonWorkingUrls, Map<String, String> requestHeaders) {

    Set<CrawlerURL> urlsThatNeedsVerification = new LinkedHashSet<CrawlerURL>(allUrls);

    urlsThatNeedsVerification.removeAll(verifiedUrls);

    final Set<Callable<HTMLPageResponse>> tasks = new HashSet<Callable<HTMLPageResponse>>(
            urlsThatNeedsVerification.size());

    for (CrawlerURL testURL : urlsThatNeedsVerification) {
        tasks.add(new HTMLPageResponseCallable(testURL, responseFetcher, true, requestHeaders, false));
    }

    try {
        // wait for all urls to verify
        List<Future<HTMLPageResponse>> responses = service.invokeAll(tasks);

        for (Future<HTMLPageResponse> future : responses) {
            if (!future.isCancelled()) {
                HTMLPageResponse response = future.get();
                if (response.getResponseCode() == HttpStatus.SC_OK
                        && response.getResponseType().indexOf("html") > 0) {
                    // remove; also a way of catching interrupted / execution exceptions
                    urlsThatNeedsVerification.remove(response.getPageUrl());
                    verifiedUrls.add(response);
                } else if (response.getResponseCode() == HttpStatus.SC_OK) {
                    // it is not HTML
                    urlsThatNeedsVerification.remove(response.getPageUrl());
                } else {
                    nonWorkingUrls.add(response);
                }
            }
        }

    } catch (InterruptedException e1) {
        // TODO add some logging
        e1.printStackTrace();
    } catch (ExecutionException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    // TODO: We can have a delta here if the exception occurs

}

From source file: org.dllearner.algorithms.qtl.experiments.SPARQLLearningProblemsGenerator.java

public void generateBenchmark(int nrOfSPARQLQueries, final int minDepth, final int maxDepth,
        int minNrOfExamples) {
    Collection<OWLClass> classes = getClasses();
    ArrayList<OWLClass> classesList = new ArrayList<>(classes);
    Collections.shuffle(classesList, new Random(123));
    classes = classesList;
    //      classes = Sets.newHashSet(new OWLClassImpl(IRI.create("http://semantics.crl.ibm.com/univ-bench-dl.owl#TennisFan")));

    //      ExecutorService tp = Executors.newFixedThreadPool(threadCount);
    List<Path> allPaths = new ArrayList<>();

    //      ThreadPoolExecutor tp = new CustomFutureReturningExecutor(
    //            threadCount, threadCount,
    //                5000L, TimeUnit.MILLISECONDS,
    //                new ArrayBlockingQueue<Runnable>(classes.size(), true));

    ExecutorService tp = Executors.newFixedThreadPool(threadCount);

    CompletionService<List<Path>> ecs = new ExecutorCompletionService<List<Path>>(tp);

    JDKRandomGenerator rndGen = new JDKRandomGenerator();
    rndGen.setSeed(123);

    int nrOfQueriesPerDepth = nrOfSPARQLQueries / (maxDepth - minDepth + 1);

    // for each depth <= maxDepth
    for (int depth = minDepth; depth <= maxDepth; depth++) {
        System.out.println("Generating " + nrOfQueriesPerDepth + " queries for depth " + depth);

        Iterator<OWLClass> iterator = classes.iterator();

        // generate paths of depths <= maxDepth
        List<Path> pathsForDepth = new ArrayList<>();

        while (pathsForDepth.size() < nrOfQueriesPerDepth && iterator.hasNext()) {

            Collection<Future<List<Path>>> futures = new ArrayList<>();

            try {
                int cnt = 0;
                while (iterator.hasNext() && (pathsForDepth.size() + ++cnt < nrOfQueriesPerDepth)) {
                    // pick next class
                    OWLClass cls = iterator.next();

                    //            int depth = rndGen.nextInt(maxDepth) + 1;

                    Future<List<Path>> future = ecs
                            .submit(new PathDetectionTask(dataDir, ks, schema, cls, depth, minNrOfExamples));
                    futures.add(future);
                }

                int n = futures.size();
                try {
                    for (int i = 0; i < n; ++i) {
                        Future<List<Path>> f = ecs.take();
                        if (!f.isCancelled()) {
                            List<Path> paths = f.get();

                            if (paths != null) {
                                for (int j = 0; j < Math.min(paths.size(), maxPathsPerClassAndDepth); j++) {
                                    pathsForDepth.add(paths.get(j));
                                }
                            }
                            //                        System.out.println("#Paths: " + paths.size());
                            //                        paths.forEach(p -> System.out.println(p));

                            if (pathsForDepth.size() >= nrOfQueriesPerDepth) {
                                break;
                            }
                        }
                    }
                } catch (InterruptedException | ExecutionException e) {
                    e.printStackTrace();
                }
            } finally {
                for (Future<List<Path>> f : futures) {
                    f.cancel(true);
                }
            }
        }

        allPaths.addAll(pathsForDepth);
    }

    //      for (Future<Path> future : futures) {
    //            try {
    //               Path path = future.get();
    //               if(path != null) {
    //                  paths.add(path);
    //               }
    //               if(paths.size() == nrOfSPARQLQueries) {
    //                  System.err.println("Benchmark generation finished. Stopping all running threads.");
    //                  tp.shutdownNow();
    //               }
    //         } catch (InterruptedException | ExecutionException e) {
    //            e.printStackTrace();
    //         }
    //            if(paths.size() == nrOfSPARQLQueries) {
    //               System.err.println("Benchmark generation finished. Stopping all running threads.");
    //               tp.shutdownNow();
    //            }
    //      }

    tp.shutdownNow();
    try {
        tp.awaitTermination(1, TimeUnit.HOURS);
    } catch (InterruptedException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    //      try {
    //         tp.awaitTermination(1, TimeUnit.DAYS);
    //      } catch (InterruptedException e) {
    //         e.printStackTrace();
    //      }

    // write queries to disk
    String queries = "";
    for (Path path : allPaths) {
        System.out.println(path);
        queries += path.asSPARQLQuery(Var.alloc("s")) + "\n";
    }
    File file = new File(benchmarkDirectory,
            "queries_" + nrOfSPARQLQueries + "_" + minDepth + "-" + maxDepth + "_" + minNrOfExamples + ".txt");
    try {
        Files.write(queries, file, Charsets.UTF_8);
    } catch (IOException e) {
        e.printStackTrace();
    }
}

From source file: com.mozilla.bagheera.consumer.KafkaConsumer.java

@Override
public void poll() {
    final CountDownLatch latch = new CountDownLatch(streams.size());
    for (final KafkaStream<byte[], byte[]> stream : streams) {
        workers.add(executor.submit(new Callable<Void>() {
            @Override
            public Void call() {
                try {
                    for (MessageAndMetadata<byte[], byte[]> mam : stream) {
                        BagheeraMessage bmsg = BagheeraMessage.parseFrom(mam.message());
                        // get the sink for this message's namespace 
                        // (typically only one sink unless a regex pattern was used to listen to multiple topics)
                        KeyValueSink sink = sinkFactory.getSink(bmsg.getNamespace());
                        if (sink == null) {
                            LOG.error("Could not obtain sink for namespace: " + bmsg.getNamespace());
                            break;
                        }
                        if (bmsg.getOperation() == Operation.CREATE_UPDATE && bmsg.hasId()
                                && bmsg.hasPayload()) {
                            if (validationPipeline == null
                                    || validationPipeline.isValid(bmsg.getPayload().toByteArray())) {
                                if (bmsg.hasTimestamp()) {
                                    sink.store(bmsg.getId(), bmsg.getPayload().toByteArray(),
                                            bmsg.getTimestamp());
                                } else {
                                    sink.store(bmsg.getId(), bmsg.getPayload().toByteArray());
                                }
                            } else {
                                invalidMessageMeter.mark();
                                // TODO: sample out an example payload
                                LOG.warn("Invalid payload for namespace: " + bmsg.getNamespace());
                            }
                        } else if (bmsg.getOperation() == Operation.DELETE && bmsg.hasId()) {
                            sink.delete(bmsg.getId());
                        }
                        consumed.mark();
                    }
                } catch (InvalidProtocolBufferException e) {
                    LOG.error("Invalid protocol buffer in data stream", e);
                } catch (UnsupportedEncodingException e) {
                    LOG.error("Message ID was not in UTF-8 encoding", e);
                } catch (IOException e) {
                    LOG.error("IO error while storing to data sink", e);
                } finally {
                    latch.countDown();
                }

                return null;
            }
        }));
    }

    // Wait for all tasks to complete; in the normal case they will
    // run indefinitely unless we detect that a thread exited
    try {
        while (true) {
            latch.await(10, TimeUnit.SECONDS);
            if (latch.getCount() != streams.size()) {
                // we have a dead thread and should exit
                break;
            }
        }
    } catch (InterruptedException e) {
        LOG.info("Interrupted during polling", e);
    }

    // Spit out errors if there were any
    for (Future<Void> worker : workers) {
        try {
            if (worker.isDone() && !worker.isCancelled()) {
                worker.get(1, TimeUnit.SECONDS);
            }
        } catch (InterruptedException e) {
            LOG.error("Thread was interrupted:", e);
        } catch (ExecutionException e) {
            LOG.error("Exception occured in thread:", e);
        } catch (TimeoutException e) {
            LOG.error("Timed out waiting for thread result:", e);
        } catch (CancellationException e) {
            LOG.error("Thread has been canceled: ", e);
        }
    }
}

From source file: org.hupo.psi.mi.psicquic.registry.PsicquicRegistryStatusChecker.java

private void checkAndResumeRegistryTasks() {

    for (Future f : runningTasks) {
        try {
            f.get(threadTimeOut, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            log.error("The registry task was interrupted, we cancel the task.", e);
            if (!f.isCancelled()) {
                f.cancel(true);
            }
        } catch (ExecutionException e) {
            log.error("The registry task could not be executed, we cancel the task.", e);
            if (!f.isCancelled()) {
                f.cancel(true);
            }
        } catch (TimeoutException e) {
            log.error("Service task stopped because of time out " + threadTimeOut + " seconds.");

            if (!f.isCancelled()) {
                f.cancel(true);
            }
        } catch (Throwable e) {
            log.error("Service task stopped.", e);
            if (!f.isCancelled()) {
                f.cancel(true);
            }
        }
    }

    runningTasks.clear();
}

From source file: cn.ctyun.amazonaws.services.s3.transfer.internal.UploadMonitor.java

/**
 * Polls for a result from a multipart upload and either returns it if
 * complete, or reschedules to poll again later if not.
 */
private UploadResult poll() throws InterruptedException {
    for (Future<PartETag> f : futures) {
        if (!f.isDone()) {
            reschedule();
            return null;
        }
    }

    for (Future<PartETag> f : futures) {
        if (f.isCancelled()) {
            throw new CancellationException();
        }
    }

    return completeMultipartUpload();
}

From source file: org.jaqpot.core.service.client.jpdi.JPDIClientImpl.java

@Override
public boolean cancel(String taskId) {
    Future future = futureMap.get(taskId);
    if (future != null && !future.isCancelled() && !future.isDone()) {
        future.cancel(true);
        return true;
    }
    return false;
}

From source file: com.intel.cosbench.driver.service.MissionHandler.java

private void executeAgents(List<Agent> agents, int timeout) {
    int num = agents.size();
    LOGGER.debug("begin to execute agents, {} in total", num);
    try {
        if (timeout == 0)
            executor.invokeAll(agents); // wait until finish
        else {
            List<Future<Agent>> futures = executor.invokeAll(agents, timeout, TimeUnit.SECONDS);
            for (Future<Agent> future : futures)
                if (future.isCancelled()) // test timeout status
                    throw new TimeoutException(); // force mission abort
        }
    } catch (InterruptedException ie) {
        throw new AbortedException(); // mission aborted
    }
    LOGGER.debug("all {} agents have finished execution", num);
    List<Integer> errIds = new ArrayList<Integer>();
    for (WorkerContext worker : missionContext.getWorkerRegistry())
        if (worker.isError() || worker.isAborted())
            errIds.add(worker.getIndex());
    if (errIds.isEmpty())
        return; // all of the workers are fine
    LOGGER.error("detected workers {} have encountered errors", errIds);
    throw new MissionException(); // mark termination
}

From source file: com.google.acre.appengine.script.AppEngineAsyncUrlfetch.java

public void wait_on_result(long time, TimeUnit tunit) {
    int i = 0;
    long endtime = System.currentTimeMillis() + tunit.toMillis(time);

    Context ctx = Context.getCurrentContext();
    while (_requests.size() > 0) {
        long pass_start_time = System.currentTimeMillis();

        if (i > _requests.size() - 1)
            i = 0;

        if (time != -1 && endtime <= System.currentTimeMillis()) {
            for (AsyncRequest r : _requests) {
                r.request.cancel(true);
            }
            throw new JSURLTimeoutError("Time limit exceeded").newJSException(_scope);
        }

        AsyncRequest asyncreq = _requests.get(i);
        Future<HTTPResponse> futr = asyncreq.request;
        Function callback = asyncreq.callback;
        if (futr.isCancelled()) {
            JSURLError jse = new JSURLError("Request cancelled");

            callback.call(ctx, _scope, null, new Object[] { asyncreq.url.toString(), jse.toJSError(_scope) });
            _requests.remove(i);
            continue;
        }

        try {
            HTTPResponse res = futr.get(10, TimeUnit.MILLISECONDS);
            callback.call(ctx, _scope, null,
                    new Object[] { asyncreq.url.toString(), callback_result(asyncreq, res) });
            _requests.remove(i);
            continue;
        } catch (TimeoutException e) {
            // This is timeout on the futr.get() call, not a request timeout
        } catch (CancellationException e) {
            // pass, handled by isCancelled
        } catch (ExecutionException e) {
            JSURLError jse = new JSURLError(e.getMessage());

            callback.call(ctx, _scope, null, new Object[] { asyncreq.url.toString(), jse.toJSError(_scope) });
            _requests.remove(i);
            continue;
        } catch (InterruptedException e) {
            JSURLError jse = new JSURLError(e.getMessage());

            callback.call(ctx, _scope, null, new Object[] { asyncreq.url.toString(), jse.toJSError(_scope) });
            _requests.remove(i);
            continue;
        }

        _costCollector.collect("auub", System.currentTimeMillis() - pass_start_time);
        i++;
    }
}