Example usage for java.util.concurrent Future cancel

Introduction

This page collects usage examples for java.util.concurrent Future cancel, drawn from open-source projects.

Prototype

boolean cancel(boolean mayInterruptIfRunning);

Document

Attempts to cancel execution of this task.
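
Before the project examples, a minimal, self-contained sketch (the class name and printed values are illustrative, not part of the JDK documentation) of what cancel returns and what the mayInterruptIfRunning flag controls:

import java.util.concurrent.*;

public class FutureCancelDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<String> future = executor.submit(() -> {
            Thread.sleep(5000); // simulate slow work
            return "done";
        });

        // true  = interrupt the worker thread if the task is already running;
        // false = only prevent the task from starting if it is still queued.
        boolean cancelled = future.cancel(true);
        System.out.println("cancelled: " + cancelled);
        System.out.println("isCancelled: " + future.isCancelled());
        System.out.println("isDone: " + future.isDone()); // a cancelled task also counts as done

        executor.shutdown();
    }
}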

Usage

From source file:com.mirth.connect.connectors.tcp.TcpReceiver.java

/**
 * Attempts to get the result of any Future tasks which may still be running. Any completed
 * tasks are removed from the Future list.
 *
 * This ensures that all client socket threads are disposed of, so that a remote client
 * cannot still send a message after the channel has been stopped or undeployed (even though
 * such a message would not be processed through the channel anyway).
 * 
 * @param block
 *            - If true, then each Future task will be joined to this one, blocking until the
 *            task thread dies.
 * @param interrupt
 *            - If true, each currently running task thread will be interrupted in an attempt to
 *            stop the task. Any interrupted exceptions will be caught and not thrown, in a best
 *            effort to ensure that all results are taken care of.
 * @param remove
 *            - If true, each completed result will be removed from the Future set during
 *            iteration.
 */
private void cleanup(boolean block, boolean interrupt, boolean remove) throws InterruptedException {
    for (Iterator<Future<Throwable>> it = results.iterator(); it.hasNext();) {
        Future<Throwable> result = it.next();

        if (interrupt) {
            // Cancel the task, interrupting it if it is already running
            result.cancel(true);
        }

        if (block) {
            // Attempt to get the result (which blocks until it returns)
            Throwable t = null;
            try {
                // If the return value is not null, then an exception was raised somewhere in the client socket thread
                if ((t = result.get()) != null) {
                    logger.debug("Client socket thread returned unsuccessfully ("
                            + connectorProperties.getName() + " \"Source\" on channel " + getChannelId() + ").",
                            t);
                }
            } catch (Exception e) {
                logger.debug("Error retrieving client socket thread result for " + connectorProperties.getName()
                        + " \"Source\" on channel " + getChannelId() + ".", e);

                Throwable cause;
                // Unwrap the caught exception (t stays null whenever get() throws)
                if (e instanceof ExecutionException) {
                    cause = e.getCause();
                } else {
                    cause = e;
                }

                if (cause instanceof InterruptedException) {
                    Thread.currentThread().interrupt();
                    if (!interrupt) {
                        throw (InterruptedException) cause;
                    }
                }
            }
        }

        if (remove) {
            // Remove the task from the list if it's done, or if it's been cancelled
            if (result.isDone()) {
                it.remove();
            }
        }
    }
}

From source file:org.apache.brooklyn.util.http.HttpTool.java

/**
 * Connects to the given url and returns the connection.
 * Caller should {@code connection.getInputStream().close()} the result of this
 * (especially if they are making heavy use of this method).
 */
public static URLConnection connectToUrl(String u) throws Exception {
    final URL url = new URL(u);
    final AtomicReference<Exception> exception = new AtomicReference<Exception>();

    // sometimes openConnection hangs, so run in background
    Future<URLConnection> f = executor.submit(new Callable<URLConnection>() {
        public URLConnection call() {
            try {
                HttpsURLConnection.setDefaultHostnameVerifier(new HostnameVerifier() {
                    @Override
                    public boolean verify(String s, SSLSession sslSession) {
                        return true;
                    }
                });
                URLConnection connection = url.openConnection();
                TrustingSslSocketFactory.configure(connection);
                connection.connect();

                connection.getContentLength(); // Make sure the connection is made.
                return connection;
            } catch (Exception e) {
                exception.set(e);
                LOG.debug("Error connecting to url " + url + " (propagating): " + e, e);
            }
            return null;
        }
    });
    try {
        URLConnection result = null;
        try {
            result = f.get(60, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            throw e;
        } catch (Exception e) {
            LOG.debug("Error connecting to url " + url + ", probably timed out (rethrowing): " + e);
            throw new IllegalStateException(
                    "Connect to URL not complete within 60 seconds, for url " + url + ": " + e);
        }
        if (exception.get() != null) {
            LOG.debug("Error connecting to url " + url + ", thread caller of " + exception,
                    new Throwable("source of rethrown error " + exception));
            throw exception.get();
        } else {
            return result;
        }
    } finally {
        f.cancel(true);
    }
}
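
The idiom above (submit the hang-prone call, get the result with a timeout, then cancel in the finally block so the background thread does not linger) recurs in several of the examples below. A distilled sketch of the pattern, with illustrative class and helper names:

import java.util.concurrent.*;

public class TimedGetWithCleanup {
    static <T> T getWithTimeout(ExecutorService executor, Callable<T> work,
            long timeout, TimeUnit unit) throws Exception {
        Future<T> future = executor.submit(work);
        try {
            return future.get(timeout, unit);
        } catch (TimeoutException e) {
            throw new IllegalStateException("Task did not complete within " + timeout + " " + unit, e);
        } finally {
            future.cancel(true); // a no-op if the task already completed
        }
    }

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        System.out.println(getWithTimeout(executor, () -> "fast result", 1, TimeUnit.SECONDS));
        executor.shutdown();
    }
}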

From source file:org.dllearner.algorithms.qtl.experiments.SPARQLLearningProblemsGenerator.java

public void generateBenchmark(int nrOfSPARQLQueries, final int minDepth, final int maxDepth,
        int minNrOfExamples) {
    Collection<OWLClass> classes = getClasses();
    ArrayList<OWLClass> classesList = new ArrayList<>(classes);
    Collections.shuffle(classesList, new Random(123));
    classes = classesList;

    List<Path> allPaths = new ArrayList<>();

    ExecutorService tp = Executors.newFixedThreadPool(threadCount);

    CompletionService<List<Path>> ecs = new ExecutorCompletionService<List<Path>>(tp);

    JDKRandomGenerator rndGen = new JDKRandomGenerator();
    rndGen.setSeed(123);

    int nrOfQueriesPerDepth = nrOfSPARQLQueries / (maxDepth - minDepth + 1);

    // for each depth <= maxDepth
    for (int depth = minDepth; depth <= maxDepth; depth++) {
        System.out.println("Generating " + nrOfQueriesPerDepth + " queries for depth " + depth);

        Iterator<OWLClass> iterator = classes.iterator();

        // generate paths of depths <= maxDepth
        List<Path> pathsForDepth = new ArrayList<>();

        while (pathsForDepth.size() < nrOfQueriesPerDepth && iterator.hasNext()) {

            Collection<Future<List<Path>>> futures = new ArrayList<>();

            try {
                int cnt = 0;
                while (iterator.hasNext() && (pathsForDepth.size() + ++cnt < nrOfQueriesPerDepth)) {
                    // pick next class
                    OWLClass cls = iterator.next();

                    //            int depth = rndGen.nextInt(maxDepth) + 1;

                    Future<List<Path>> future = ecs
                            .submit(new PathDetectionTask(dataDir, ks, schema, cls, depth, minNrOfExamples));
                    futures.add(future);
                }

                int n = futures.size();
                try {
                    for (int i = 0; i < n; ++i) {
                        Future<List<Path>> f = ecs.take();
                        if (!f.isCancelled()) {
                            List<Path> paths = f.get();

                            if (paths != null) {
                                for (int j = 0; j < Math.min(paths.size(), maxPathsPerClassAndDepth); j++) {
                                    pathsForDepth.add(paths.get(j));
                                }
                            }

                            if (pathsForDepth.size() >= nrOfQueriesPerDepth) {
                                break;
                            }
                        }
                    }
                } catch (InterruptedException | ExecutionException e) {
                    e.printStackTrace();
                }
            } finally {
                for (Future<List<Path>> f : futures) {
                    f.cancel(true);
                }
            }
        }

        allPaths.addAll(pathsForDepth);
    }

    tp.shutdownNow();
    try {
        tp.awaitTermination(1, TimeUnit.HOURS);
    } catch (InterruptedException e) {
        // Interrupted while waiting for the pool to terminate; fall through and write what we have.
        e.printStackTrace();
    }

    // write queries to disk
    StringBuilder queries = new StringBuilder();
    for (Path path : allPaths) {
        System.out.println(path);
        queries.append(path.asSPARQLQuery(Var.alloc("s"))).append("\n");
    }
    File file = new File(benchmarkDirectory,
            "queries_" + nrOfSPARQLQueries + "_" + minDepth + "-" + maxDepth + "_" + minNrOfExamples + ".txt");
    try {
        Files.write(queries, file, Charsets.UTF_8);
    } catch (IOException e) {
        e.printStackTrace();
    }
}

From source file:com.clustercontrol.agent.SendQueue.java

/**
 * Sends the given message to the manager.<BR>
 * 
 * The send runs on a separate worker thread with a timeout; on failure this
 * method sleeps for the reconnection interval and then retries.<BR>
 * 
 * @param info the message to send
 * @return true if the message was sent; false if sending was abandoned
 */
public boolean put(Object info) {
    m_log.debug("put() start : " + info.getClass().getCanonicalName());

    while (!ReceiveTopic.isHistoryClear()) {
        m_log.debug("put() while (!ReceiveTopic.isHistoryClear()) is true");

        boolean sendQueueStatus = false;
        ExecutorService es = null;
        Future<Boolean> task = null;
        try {
            String id = "";
            // Create a single-thread executor to run the sender task.
            SenderThreadFactory threadFactory = new SenderThreadFactory(id);
            es = Executors.newSingleThreadExecutor(threadFactory);

            // Submit the send; Future.get() with a timeout guards against
            // the call blocking indefinitely.
            m_log.debug("put() submit");
            task = es.submit(new Sender(info));
            sendQueueStatus = task.get(SEND_TIMEOUT, TimeUnit.MILLISECONDS);

        } catch (Exception e) {
            // The send failed or Future.get() timed out; log a warning and retry below.
            m_log.warn("put() : Failed to connect to MGR " + e.getMessage(), e);

        } finally {
            // Always cancel the sender task and shut down the executor.
            if (task != null) {
                task.cancel(true);
            }

            if (es != null) {
                es.shutdown();
            }
            m_log.debug("put() end    : " + info.getClass().getCanonicalName());
        }

        // If the send succeeded, return; otherwise sleep and retry.
        if (sendQueueStatus) {
            m_log.debug("put() return true : " + info.getClass().getCanonicalName());
            return true;
        } else {
                // Sleep before retrying so the QueueConnection/QueueSession can be re-established.
            try {
                m_log.debug("put() reput interval sleep: " + m_sendQueueReconnectionInterval + " sec");
                Thread.sleep(m_sendQueueReconnectionInterval);
            } catch (InterruptedException e1) {
                m_log.error("put() reput interval sleep: ", e1);
            }
        }
    } // End While Loop
    return false;
}

From source file:org.apache.flume.sink.hdfs.BucketWriter.java

/**
 * Execute the callable on a separate thread and wait for completion for
 * the specified amount of time in milliseconds. In case of timeout, cancel
 * the callable and throw an IOException.
 */
private <T> T callWithTimeout(final CallRunner<T> callRunner) throws IOException, InterruptedException {
    Future<T> future = callTimeoutPool.submit(new Callable<T>() {
        @Override
        public T call() throws Exception {
            return proxyUser.execute(new PrivilegedExceptionAction<T>() {
                @Override
                public T run() throws Exception {
                    return callRunner.call();
                }
            });
        }
    });
    try {
        if (callTimeout > 0) {
            return future.get(callTimeout, TimeUnit.MILLISECONDS);
        } else {
            return future.get();
        }
    } catch (TimeoutException eT) {
        future.cancel(true);
        sinkCounter.incrementConnectionFailedCount();
        throw new IOException("Callable timed out after " + callTimeout + " ms" + " on file: " + bucketPath,
                eT);
    } catch (ExecutionException e1) {
        sinkCounter.incrementConnectionFailedCount();
        Throwable cause = e1.getCause();
        if (cause instanceof IOException) {
            throw (IOException) cause;
        } else if (cause instanceof InterruptedException) {
            throw (InterruptedException) cause;
        } else if (cause instanceof RuntimeException) {
            throw (RuntimeException) cause;
        } else if (cause instanceof Error) {
            throw (Error) cause;
        } else {
            throw new RuntimeException(e1);
        }
    } catch (CancellationException ce) {
        throw new InterruptedException("Blocked callable interrupted by rotation event");
    } catch (InterruptedException ex) {
        LOG.warn("Unexpected Exception " + ex.getMessage(), ex);
        throw ex;
    }
}
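
Note how the code above translates CancellationException back into InterruptedException: once a future has been cancelled (here by a rotation event on another thread), any get() on it throws CancellationException instead of returning a value. A minimal sketch of that behavior, with illustrative names:

import java.util.concurrent.*;

public class GetAfterCancel {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<Integer> future = executor.submit(() -> {
            Thread.sleep(1000);
            return 42;
        });

        future.cancel(true); // cancel before the result is ready

        try {
            future.get(); // never returns a value for a cancelled future
        } catch (CancellationException e) {
            System.out.println("get() on a cancelled future throws CancellationException");
        }

        executor.shutdown();
    }
}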

From source file:org.talend.repository.hbaseprovider.provider.HBaseMetadataProvider.java

@Override
public ConnectionStatus testConnection(IMetadataConnection metadataConnection) {
    classLoader = HBaseClassLoaderFactory.getClassLoader(metadataConnection);
    ConnectionStatus connectionStatus = new ConnectionStatus();
    connectionStatus.setResult(false);
    ClassLoader oldClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(classLoader);
        Object config = getConfiguration(metadataConnection);
        Callable<Object> callable = checkHBaseAvailable(config);
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<Object> future = executor.submit(callable);
        try {
            int timeout = 15;
            if (GlobalServiceRegister.getDefault().isServiceRegistered(IDesignerCoreService.class)) {
                IDesignerCoreService designerService = (IDesignerCoreService) GlobalServiceRegister.getDefault()
                        .getService(IDesignerCoreService.class);
                timeout = designerService.getDBConnectionTimeout();
            }
            future.get(timeout, TimeUnit.SECONDS);
            connectionStatus.setResult(true);
        } catch (Exception e) {
            future.cancel(true);
            connectionStatus.setResult(false);
            connectionStatus.setMessageException(ExceptionUtils.getFullStackTrace(e));
            ExceptionHandler.process(e);
        }
    } catch (Exception e) {
        ExceptionHandler.process(e);
        connectionStatus.setResult(false);
        connectionStatus.setMessageException(ExceptionUtils.getFullStackTrace(e));
    } finally {
        Thread.currentThread().setContextClassLoader(oldClassLoader);
    }
    return connectionStatus;
}

From source file:org.apache.helix.messaging.handling.HelixTaskExecutor.java

private boolean cancelNotStartedStateTransition(Message message,
        Map<String, MessageHandler> stateTransitionHandlers, HelixDataAccessor accessor, String instanceName) {
    String targetMessageName = getMessageTarget(message.getResourceName(), message.getPartitionName());
    ProcessedMessageState messageState;
    Message targetStateTransitionMessage;

    // State transition message and cancel message are in same batch
    if (stateTransitionHandlers.containsKey(targetMessageName)) {
        targetStateTransitionMessage = stateTransitionHandlers.get(targetMessageName).getMessage();
        if (isCancelingSameStateTransition(targetStateTransitionMessage, message)) {
            stateTransitionHandlers.remove(targetMessageName);
            messageState = ProcessedMessageState.COMPLETED;
        } else {
            messageState = ProcessedMessageState.DISCARDED;
        }
    } else if (_messageTaskMap.containsKey(targetMessageName)) {
        // Cancel the task future without interrupting a state transition that has
        // already started. If the transition is already running, cancellation must
        // go through the state model's cancel instead.
        String taskId = _messageTaskMap.get(targetMessageName);
        HelixTask task = (HelixTask) _taskMap.get(taskId).getTask();
        Future<HelixTaskResult> future = _taskMap.get(taskId).getFuture();
        targetStateTransitionMessage = task.getMessage();

        if (isCancelingSameStateTransition(task.getMessage(), message)) {
            boolean success = task.cancel();
            if (!success) {
                // the state transition is already started, need further cancellation.
                return false;
            }

            future.cancel(false);
            _messageTaskMap.remove(targetMessageName);
            _taskMap.remove(taskId);
            messageState = ProcessedMessageState.COMPLETED;
        } else {
            messageState = ProcessedMessageState.DISCARDED;
        }
    } else {
        return false;
    }

    // Remove the original state-transition message that was cancelled.
    removeMessageFromZK(accessor, targetStateTransitionMessage, instanceName);
    _monitor.reportProcessedMessage(targetStateTransitionMessage,
            ParticipantMessageMonitor.ProcessedMessageState.DISCARDED);

    // remove the state transition cancellation message
    reportAndRemoveMessage(message, accessor, instanceName, messageState);

    return true;
}
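
The future.cancel(false) call above is deliberate: with mayInterruptIfRunning set to false, a task that is already executing is left to finish on its own, while the Future itself is marked cancelled. A minimal sketch of the difference, with illustrative names:

import java.util.concurrent.*;

public class CancelWithoutInterrupt {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<?> running = executor.submit(() -> {
            try {
                Thread.sleep(2000); // cancel(false) will not interrupt this sleep
            } catch (InterruptedException e) {
                System.out.println("interrupted"); // not reached with cancel(false)
            }
        });

        Thread.sleep(100); // give the task time to start
        // Marks the future cancelled but lets the running task complete on its own.
        System.out.println("cancel(false) returned " + running.cancel(false));

        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.SECONDS);
    }
}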

From source file:org.springframework.amqp.rabbit.admin.RabbitBrokerAdmin.java

private boolean waitForState(final StatusCallback callable, String state) {

    if (timeout <= 0) {
        return true;
    }

    RabbitStatus status = getStatus();

    if (!callable.get(status)) {

        logger.info("Waiting for broker to enter state: " + state);

        Future<RabbitStatus> started = executor.submit(new Callable<RabbitStatus>() {
            public RabbitStatus call() throws Exception {
                RabbitStatus status = getStatus();
                while (!callable.get(status)) {
                    // Any less than 1000L and we tend to clog up the socket?
                    Thread.sleep(500L);
                    status = getStatus();
                }
                return status;
            }
        });

        try {
            status = started.get(timeout, TimeUnit.MILLISECONDS);
            // This seems to help... really it just means we didn't get the right status data
            Thread.sleep(500L);
        } catch (TimeoutException e) {
            started.cancel(true);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        } catch (ExecutionException e) {
            logger.error("Exception checking broker status for " + state, e.getCause());
        }

        if (!callable.get(status)) {
            logger.error("Rabbit broker not in " + state + " state after timeout. Stopping process.");
            stopNode();
            return false;
        } else {
            logger.info("Finished waiting for broker to enter state: " + state);
            if (logger.isDebugEnabled()) {
                logger.info("Status: " + status);
            }
            return true;
        }

    } else {
        logger.info("Broker already in state: " + state);
    }

    return true;

}

From source file:com.hygenics.parser.BreakMultiple.java

/**
 * run the class
 */
public void run() {
    int j = 0;
    checkTable();
    rows = new ArrayList<String>();
    log.info("Starting Break");

    // the pool
    ForkJoinPool fjp = new ForkJoinPool(Runtime.getRuntime().availableProcessors() * procnum);

    // for returned results
    List<Future<ArrayList<String>>> futures = new ArrayList<Future<ArrayList<String>>>();

    // for parsing
    Set<Callable<ArrayList<String>>> collect = new HashSet<Callable<ArrayList<String>>>();

    // for querying
    Set<Callable<ArrayList<String>>> qcollect = new HashSet<Callable<ArrayList<String>>>();

    // results
    ArrayList<String> jsons = new ArrayList<String>();

    String condition = null;
    int size = (int) Math.ceil(pullsize / qnum);
    // get initial data from user
    for (int i = 0; i < qnum; i++) {
        condition = " WHERE " + idcolumn + " > " + Integer.toString(offset + (Math.round(pullsize / qnum) * i))
                + " AND " + idcolumn + " <= "
                + Integer.toString(offset + (Math.round(pullsize / qnum) * (i + 1)));

        if (extracondition != null) {
            condition += " " + extracondition.trim();
        }

        qcollect.add(new GetFromDB((select + condition), template));
        log.info("SELECTING " + select + " " + condition);
    }

    log.info("Getting From DB @" + Calendar.getInstance().getTime().toString());
    futures = fjp.invokeAll(qcollect);

    int w = 0;
    while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) {
        w++;
    }

    log.info("Waited for " + w + "Cycles");

    for (Future<ArrayList<String>> f : futures) {
        try {
            rows.addAll(f.get());
            f.cancel(true);
        } catch (InterruptedException | ExecutionException e) {
            e.printStackTrace();
        }
    }

    qcollect = new HashSet<Callable<ArrayList<String>>>();
    futures = null;

    log.info("Breaking");
    // process while there is still data to process
    while (rows.size() > 0) {
        log.info("Iteration Contains " + rows.size() + " Rows");
        // add to the commit size for future processing
        offset += pullsize;
        log.info("Submitting Tasks");
        // submit for breaking apart

        for (String r : rows) {

            if (fjp.isShutdown()) {
                fjp = new ForkJoinPool(Runtime.getRuntime().availableProcessors() * procnum);
            }

            if (r != null) {

                if (mustcontain != null) {
                    if (r.contains(mustcontain)) {
                        if (cannotcontain != null) {
                            if (r.contains(cannotcontain) == false) {
                                Map<String, Json> rowmap = Json.read(r).asJsonMap();

                                // final getDAOTemplate template, final
                                // String row, final String token, final
                                // String replacementPattern, final
                                // Map<String,String> positions,final String
                                // date, final String table, final String
                                // offenderhash
                                if (rowmap.size() > 0) {
                                    collect.add(new Break(unescape, repeatkeys, template,
                                            rowmap.get(rowcolumn).asString(), token, replacementPattern,
                                            positions, (Calendar.getInstance().getTime().toString()),
                                            targettable, rowmap.get("offenderhash").asString(), maxpos,
                                            genhash));
                                }
                            }
                        } else {
                            Map<String, Json> rowmap = Json.read(r).asJsonMap();

                            if (rowmap.size() > 0) {
                                collect.add(new Break(unescape, repeatkeys, template,
                                        rowmap.get(rowcolumn).asString(), token, replacementPattern, positions,
                                        (Calendar.getInstance().getTime().toString()), targettable,
                                        rowmap.get("offenderhash").asString(), maxpos, genhash));
                            }
                        }
                    }
                } else {

                    if (cannotcontain != null) {
                        if (r.contains(cannotcontain) == false) {
                            Map<String, Json> rowmap = Json.read(r).asJsonMap();

                            if (rowmap.size() > 0) {
                                collect.add(new Break(unescape, repeatkeys, template,
                                        rowmap.get(rowcolumn).asString(), token, replacementPattern, positions,
                                        (Calendar.getInstance().getTime().toString()), targettable,
                                        rowmap.get("offenderhash").asString(), maxpos, genhash));
                            }
                        }
                    } else {
                        Map<String, Json> rowmap = Json.read(r).asJsonMap();

                        if (rowmap.size() > 0) {
                            collect.add(new Break(unescape, repeatkeys, template,
                                    rowmap.get(rowcolumn).asString(), token, replacementPattern, positions,
                                    (Calendar.getInstance().getTime().toString()), targettable,
                                    rowmap.get("offenderhash").asString(), maxpos, genhash));
                        }
                    }
                }
            }
        }

        log.info("SUBMITTED " + collect.size() + " tasks");

        futures = fjp.invokeAll(collect);

        w = 0;

        while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) {
            w++;
        }

        log.info("Waited for " + w + " Cycles");

        jsons.clear();
        log.info("Getting Strings");
        try {

            for (Future<ArrayList<String>> p : futures) {
                ArrayList<String> retlist = p.get();

                if (retlist != null) {
                    if (retlist.size() > 0) {
                        jsons.addAll(retlist);
                    }

                    if (jsons.size() >= commit_size) {
                        // send to db
                        if (jsons.size() > SPLITSIZE) {
                            log.info("Split True: Sending to DB @ "
                                    + Calendar.getInstance().getTime().toString());

                            postToDb(jsons, true);
                            jsons = new ArrayList<String>();
                            log.info("Posted to DB @ " + Calendar.getInstance().getTime().toString());
                        } else {
                            log.info("Split False: Sending to DB @ "
                                    + Calendar.getInstance().getTime().toString());
                            postToDb(jsons, false);
                            jsons = new ArrayList<String>();
                            log.info("Posted to DB @ " + Calendar.getInstance().getTime().toString());
                        }
                    }
                }
                p.cancel(true);
            }
        } catch (InterruptedException | ExecutionException e) {
            e.printStackTrace();
        }

        futures = null;
        collect = new HashSet<Callable<ArrayList<String>>>();

        // send to db
        if (jsons.size() > SPLITSIZE) {
            log.info("Split True: Sending to DB @" + Calendar.getInstance().getTime().toString());
            postToDb(jsons, true);
            jsons = new ArrayList<String>();
            log.info("Posted to DB @ " + Calendar.getInstance().getTime().toString());
        } else {
            log.info("Split False: Sending to DB @" + Calendar.getInstance().getTime().toString());
            postToDb(jsons, false);
            jsons = new ArrayList<String>();
            log.info("Posted to DB @ " + Calendar.getInstance().getTime().toString());
        }

        // get more information
        rows = new ArrayList<String>();

        if (Runtime.getRuntime().freeMemory() < 500000 || (waitloops != 0 && (loops % waitloops) == 0)) {
            log.info("Paused Free Memory Left: " + Runtime.getRuntime().freeMemory());
            System.gc();
            Runtime.getRuntime().gc();

            try {
                Thread.sleep(2000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }

            while (Runtime.getRuntime().freeMemory() < 500000) {
                try {
                    Thread.sleep(2000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }

            log.info("Restart Free Memory Left: " + Runtime.getRuntime().freeMemory());
        }

        rows = new ArrayList<String>();

        // attempt to query the database from multiple threads
        for (int conn = 1; conn <= qnum; conn++) {
            // change condition
            condition = " WHERE " + idcolumn + " > "
                    + Integer.toString(offset + (Math.round(pullsize / qnum) * conn)) + " AND " + idcolumn
                    + " <= " + Integer.toString(offset + (Math.round(pullsize / qnum) * (conn + 1)));

            if (extracondition != null) {
                condition += " " + extracondition.trim();
            }

            qcollect.add(new GetFromDB((select + condition), template));
            log.info("SELECTING " + select + " " + condition);
        }

        futures = fjp.invokeAll(qcollect);

        w = 0;

        while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) {
            w++;
        }

        log.info("Waited for " + w + " Cycles");

        for (Future<ArrayList<String>> f : futures) {
            try {

                ArrayList<String> test = f.get();

                if (test != null) {
                    if (test.size() > 0) {
                        rows.addAll(test);
                    }
                }

                f.cancel(true);

            } catch (InterruptedException | ExecutionException e) {
                e.printStackTrace();
            }
        }

        futures = null;
        qcollect = new HashSet<Callable<ArrayList<String>>>(4);

        j++;

        Runtime.getRuntime().gc();
        System.gc();

    }

    // send to db
    if (jsons.size() > SPLITSIZE) {
        log.info("Split True: Sending to DB @" + Calendar.getInstance().getTime().toString());
        postToDb(jsons, true);
        jsons = new ArrayList<String>();
    } else if (jsons.size() > 0) {
        log.info("Split False: Sending to DB @" + Calendar.getInstance().getTime().toString());
        postToDb(jsons, false);
        jsons = new ArrayList<String>();
    }

    Runtime.getRuntime().gc();
    System.gc();

    log.info("Shutting Down Forkjoin Pool");
    if (fjp.isShutdown() == false) {
        fjp.shutdownNow();
    }
}

From source file:de.blizzy.documentr.search.PageIndex.java

Bits getVisibleDocIds(IndexSearcher searcher, Authentication authentication)
        throws IOException, TimeoutException {
    Future<BitSet> branchPagesFuture = taskExecutor
            .submit(new GetVisibleBranchDocIdsTask(searcher, authentication, permissionEvaluator));
    Future<BitSet> inaccessibleDocsFuture = taskExecutor.submit(new GetInaccessibleDocIdsTask(searcher,
            Permission.VIEW, authentication, userStore, permissionEvaluator));
    try {
        BitSet docIds = branchPagesFuture.get(INTERACTIVE_TIMEOUT, TimeUnit.SECONDS);
        docIds.andNot(inaccessibleDocsFuture.get(INTERACTIVE_TIMEOUT, TimeUnit.SECONDS));
        return new DocIdBitSet(docIds);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        Throwable cause = e.getCause();
        if (cause instanceof IOException) {
            throw (IOException) cause;
        } else {
            throw Util.toRuntimeException(cause);
        }
    } finally {
        branchPagesFuture.cancel(false);
        inaccessibleDocsFuture.cancel(false);
    }
}