Example usage for java.util.concurrent ExecutorService shutdownNow

Introduction

On this page you can find usage examples for java.util.concurrent ExecutorService shutdownNow, collected from a variety of open-source projects.

Prototype

List<Runnable> shutdownNow();

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution. There are no guarantees beyond best-effort attempts to stop processing: the typical implementation cancels tasks via Thread.interrupt, so any task that fails to respond to interrupts may never terminate.
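
Below is a minimal, self-contained sketch (not taken from any of the projects below) of the two-phase shutdown pattern recommended by the ExecutorService Javadoc: call shutdown() first so queued tasks can drain, and fall back to shutdownNow() only if the pool fails to terminate in time.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownNowExample {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        pool.submit(() -> System.out.println("task running"));

        pool.shutdown(); // stop accepting new tasks; queued tasks still run
        try {
            if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
                // Interrupt running tasks and collect those never started.
                List<Runnable> neverStarted = pool.shutdownNow();
                System.out.println(neverStarted.size() + " task(s) never started");
            }
        } catch (InterruptedException e) {
            pool.shutdownNow(); // re-cancel if the current thread was interrupted
            Thread.currentThread().interrupt(); // preserve the interrupt status
        }
    }
}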

Usage

From source file:org.codice.ddf.commands.catalog.DumpCommand.java

@Override
protected Object executeWithSubject() throws Exception {
    final File dumpDir = new File(dirPath);

    if (!dumpDir.exists()) {
        printErrorMessage("Directory [" + dirPath + "] must exist.");
        console.println("If the directory does indeed exist, try putting the path in quotes.");
        return null;
    }

    if (!dumpDir.isDirectory()) {
        printErrorMessage("Path [" + dirPath + "] must be a directory.");
        return null;
    }

    if (!DEFAULT_TRANSFORMER_ID.matches(transformerId)) {
        transformers = getTransformers();
        if (transformers == null) {
            console.println(transformerId + " is an invalid metacard transformer.");
            return null;
        }
    }

    CatalogFacade catalog = getCatalog();
    FilterBuilder builder = getFilterBuilder();

    Filter createdFilter = null;
    if ((createdAfter != null) && (createdBefore != null)) {
        DateTime createStartDateTime = DateTime.parse(createdAfter);
        DateTime createEndDateTime = DateTime.parse(createdBefore);
        createdFilter = builder.attribute(Metacard.CREATED).is().during().dates(createStartDateTime.toDate(),
                createEndDateTime.toDate());
    } else if (createdAfter != null) {
        DateTime createStartDateTime = DateTime.parse(createdAfter);
        createdFilter = builder.attribute(Metacard.CREATED).is().after().date(createStartDateTime.toDate());
    } else if (createdBefore != null) {
        DateTime createEndDateTime = DateTime.parse(createdBefore);
        createdFilter = builder.attribute(Metacard.CREATED).is().before().date(createEndDateTime.toDate());
    }

    Filter modifiedFilter = null;
    if ((modifiedAfter != null) && (modifiedBefore != null)) {
        DateTime modifiedStartDateTime = DateTime.parse(modifiedAfter);
        DateTime modifiedEndDateTime = DateTime.parse(modifiedBefore);
        modifiedFilter = builder.attribute(Metacard.MODIFIED).is().during()
                .dates(modifiedStartDateTime.toDate(), modifiedEndDateTime.toDate());
    } else if (modifiedAfter != null) {
        DateTime modifiedStartDateTime = DateTime.parse(modifiedAfter);
        modifiedFilter = builder.attribute(Metacard.MODIFIED).is().after().date(modifiedStartDateTime.toDate());
    } else if (modifiedBefore != null) {
        DateTime modifiedEndDateTime = DateTime.parse(modifiedBefore);
        modifiedFilter = builder.attribute(Metacard.MODIFIED).is().before().date(modifiedEndDateTime.toDate());
    }

    Filter filter = null;
    if ((createdFilter != null) && (modifiedFilter != null)) {
        // Filter by both created and modified dates
        filter = builder.allOf(createdFilter, modifiedFilter);
    } else if (createdFilter != null) {
        // Only filter by created date
        filter = createdFilter;
    } else if (modifiedFilter != null) {
        // Only filter by modified date
        filter = modifiedFilter;
    } else {
        // Don't filter by date range
        filter = builder.attribute(Metacard.ID).is().like().text(WILDCARD);
    }

    if (cqlFilter != null) {
        filter = CQL.toFilter(cqlFilter);
    }

    QueryImpl query = new QueryImpl(filter);
    query.setRequestsTotalResultsCount(false);
    query.setPageSize(pageSize);

    Map<String, Serializable> props = new HashMap<String, Serializable>();
    // Avoid caching all results while dumping with native query mode
    props.put("mode", "native");

    final AtomicLong resultCount = new AtomicLong(0);
    long start = System.currentTimeMillis();

    SourceResponse response = catalog.query(new QueryRequestImpl(query, props));

    BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<Runnable>(multithreaded);
    RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.CallerRunsPolicy();
    final ExecutorService executorService = new ThreadPoolExecutor(multithreaded, multithreaded, 0L,
            TimeUnit.MILLISECONDS, blockingQueue, rejectedExecutionHandler);

    while (response.getResults().size() > 0) {
        response = catalog.query(new QueryRequestImpl(query, props));

        if (multithreaded > 1) {
            final List<Result> results = new ArrayList<Result>(response.getResults());
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    boolean transformationFailed = false;
                    for (final Result result : results) {
                        Metacard metacard = result.getMetacard();
                        try {
                            exportMetacard(dumpDir, metacard);
                        } catch (IOException | CatalogTransformerException e) {
                            transformationFailed = true;
                            LOGGER.debug("Failed to dump metacard {}", metacard.getId(), e);
                            executorService.shutdownNow();
                        }
                        printStatus(resultCount.incrementAndGet());
                    }
                    if (transformationFailed) {
                        LOGGER.error(
                                "One or more metacards failed to transform. Enable debug log for more details.");
                    }
                }
            });
        } else {
            for (final Result result : response.getResults()) {
                Metacard metacard = result.getMetacard();
                exportMetacard(dumpDir, metacard);
                printStatus(resultCount.incrementAndGet());
            }
        }

        if (response.getResults().size() < pageSize || pageSize == -1) {
            break;
        }

        if (pageSize > 0) {
            query.setStartIndex(query.getStartIndex() + pageSize);
        }
    }

    executorService.shutdown();

    while (!executorService.isTerminated()) {
        try {
            TimeUnit.MILLISECONDS.sleep(100);
        } catch (InterruptedException e) {
            // ignore
        }
    }

    long end = System.currentTimeMillis();
    String elapsedTime = timeFormatter.print(new Period(start, end).withMillis(0));
    console.printf(" %d file(s) dumped in %s\t%n", resultCount.get(), elapsedTime);
    LOGGER.info("{} file(s) dumped in {}", resultCount.get(), elapsedTime);
    console.println();

    return null;
}
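
Note how this example calls shutdownNow() from inside a submitted task to abort the dump on the first transformation failure. Since shutdownNow() only delivers interrupts, and the export loop never checks the interrupt flag, the failing task still finishes its current page of results. The isTerminated() polling loop at the end works, but awaitTermination() (as in the sketch above) is the more idiomatic way to block until the pool drains.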

From source file:org.jodconverter.office.OfficeProcessManagerPoolEntryITest.java

/**
 * Tests that an office process is restarted successfully after a crash.
 *
 * @throws Exception if an error occurs.
 */
@Test
public void execute_WhenOfficeProcessCrash_ShouldRestartAfterCrash() throws Exception {

    final OfficeProcessManagerPoolEntry officeManager = new OfficeProcessManagerPoolEntry(CONNECT_URL);

    try {
        officeManager.start();
        assertThat(officeManager.isRunning()).isTrue();
        assertThat(officeManager)
                .extracting("officeProcessManager.process.running", "officeProcessManager.connection.connected")
                .containsExactly(true, true);

        // Submit the task to an executor
        final ExecutorService pool = Executors.newFixedThreadPool(1);
        try {
            final Callable<Boolean> task = new RestartAfterCrashTask(officeManager);
            final Future<Boolean> future = pool.submit(task);

            Thread.sleep(500); // NOSONAR

            // Simulate crash
            final Process underlyingProcess = (Process) FieldUtils.readField(getOfficeProcess(officeManager),
                    "process", true);
            assertThat(underlyingProcess).isNotNull();
            LOGGER.debug("Simulating the crash");
            underlyingProcess.destroy();

            // Wait until the task is completed
            try {
                future.get();
                fail("Exception expected");
            } catch (ExecutionException ex) {
                assertThat(ex.getCause()).isInstanceOf(OfficeException.class);
                assertThat(ex.getCause().getCause()).isInstanceOf(CancellationException.class);
            }

        } finally {
            pool.shutdownNow();
        }

        assertRestartedAndReconnected(officeManager, RESTART_INITIAL_WAIT, RESTART_WAIT_TIMEOUT);

        final MockOfficeTask goodTask = new MockOfficeTask();
        officeManager.execute(goodTask);
        assertThat(goodTask.isCompleted()).isTrue();

    } finally {

        officeManager.stop();
        assertThat(officeManager.isRunning()).isFalse();
        assertThat(officeManager)
                .extracting("officeProcessManager.process.running", "officeProcessManager.connection.connected")
                .containsExactly(false, false);
        assertThat(getOfficeProcess(officeManager).getExitCode(0, 0)).isEqualTo(0);
    }
}
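
Calling shutdownNow() on the helper pool in a finally block guarantees that the background task is interrupted even when an assertion fails, so the test cannot leak a non-daemon thread that would keep the JVM alive.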

From source file:org.yccheok.jstock.gui.IndicatorScannerJPanel.java

private void initAlertDataStructures() {
    AlertStateManager oldAlertStateManager = alertStateManager;
    if (oldAlertStateManager != null) {
        oldAlertStateManager.dettachAll();
        oldAlertStateManager.clearState();
    }

    final ExecutorService oldSystemTrayAlertPool = systemTrayAlertPool;
    final ExecutorService oldEmailAlertPool = emailAlertPool;

    Utils.getZoombiePool().execute(new Runnable() {
        @Override
        public void run() {
            if (oldSystemTrayAlertPool != null) {
                log.info("Prepare to shut down " + oldSystemTrayAlertPool + "...");
                oldSystemTrayAlertPool.shutdownNow();
                try {
                    oldSystemTrayAlertPool.awaitTermination(100, TimeUnit.DAYS);
                } catch (InterruptedException exp) {
                    log.error(null, exp);
                }
                log.info("Shut down " + oldSystemTrayAlertPool + " peacefully.");

                log.info("Prepare to shut down " + oldEmailAlertPool + "...");
            }

            if (oldEmailAlertPool != null) {
                oldEmailAlertPool.shutdownNow();
                try {
                    oldEmailAlertPool.awaitTermination(100, TimeUnit.DAYS);
                } catch (InterruptedException exp) {
                    log.error(null, exp);
                }
                log.info("Shut down " + oldEmailAlertPool + " peacefully.");
            }
        }
    });

    alertStateManager = new AlertStateManager();
    alertStateManager.attach(this);

    emailAlertPool = Executors.newFixedThreadPool(1);
    systemTrayAlertPool = Executors.newFixedThreadPool(1);
}
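
Here the old alert pools are torn down on a separate "zombie" pool so the Swing event thread is never blocked: shutdownNow() interrupts any alert task still running, and awaitTermination() with a generous timeout waits for those interrupts to take effect, while the calling thread creates the replacement pools immediately.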

From source file:io.pravega.test.integration.MultiReadersEndToEndTest.java

private Collection<Integer> readAllEvents(final int numParallelReaders, ClientFactory clientFactory,
        final String readerGroupName, final int numSegments) {
    ConcurrentLinkedQueue<Integer> read = new ConcurrentLinkedQueue<>();
    final ExecutorService executorService = Executors.newFixedThreadPool(numParallelReaders,
            new ThreadFactoryBuilder().setNameFormat("testreader-pool-%d").build());
    List<Future<?>> futures = new ArrayList<>();
    for (int i = 0; i < numParallelReaders; i++) {
        futures.add(executorService.submit(() -> {
            final String readerId = UUID.randomUUID().toString();
            @Cleanup
            final EventStreamReader<Integer> reader = clientFactory.createReader(readerId, readerGroupName,
                    new IntegerSerializer(), ReaderConfig.builder().build());
            int emptyCount = 0;
            while (emptyCount <= numSegments) {
                try {
                    final Integer integerEventRead = reader.readNextEvent(100).getEvent();
                    if (integerEventRead != null) {
                        read.add(integerEventRead);
                        emptyCount = 0;
                    } else {
                        emptyCount++;
                    }
                } catch (ReinitializationRequiredException e) {
                    throw new RuntimeException(e);
                }
            }
        }));
    }

    // Wait until all readers are done.
    futures.forEach(f -> FutureHelpers.getAndHandleExceptions(f, RuntimeException::new));
    executorService.shutdownNow();
    return read;
}
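
Because every reader future is joined before the pool is torn down, shutdownNow() here only reclaims idle worker threads; no task is interrupted mid-read.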

From source file:com.oneops.ops.dao.PerfDataAccessor.java

/**
 * Initializes the DAOs/connections.
 */
public void init() {
    logger.info("PerfDataAccessor: " + ":" + clusterName + ":" + keyspaceName);
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<String> future = executor.submit(this::connectToCluster);

    try {
        logger.info("Started connecting.. with timeOut " + TIMEOUT_IN_SECONDS);
        logger.info(future.get(TIMEOUT_IN_SECONDS, TimeUnit.SECONDS));
        logger.info("Finished connecting!");

    } catch (TimeoutException e) {
        logger.error("no cassandra hosts available - shutting down");
        throw new HectorException("Timeout occurred in getting the cassandra connection");
    } catch (InterruptedException e) {
        e.printStackTrace();
    } catch (ExecutionException e) {
        e.printStackTrace();
    }

    executor.shutdownNow();
    initCluster();
}
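
The same idea generalizes to any blocking call that offers no timeout of its own: run it on a single-thread executor, bound it with Future.get(timeout), and call shutdownNow() afterwards so a worker stuck in the blocking call cannot keep the JVM alive. A minimal sketch, assuming a hypothetical blocking connect() that responds to interruption:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class BoundedCall {

    // Hypothetical stand-in for a blocking connect; sleeps so the timeout fires.
    static String connect() throws InterruptedException {
        Thread.sleep(10_000);
        return "connected";
    }

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<String> future = executor.submit(BoundedCall::connect);
        try {
            System.out.println(future.get(2, TimeUnit.SECONDS));
        } catch (TimeoutException e) {
            System.err.println("connect timed out");
        } finally {
            executor.shutdownNow(); // interrupt the worker if it is still blocked
        }
    }
}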

From source file:com.twitter.hraven.etl.JobFileProcessor.java

/**
 * Run the jobs and wait for all of them to complete.
 *
 * @param threadCount
 *          up to how many jobs to run in parallel
 * @param jobRunners
 *          the list of jobs to run.
 * @return whether all jobs completed successfully or not.
 * @throws InterruptedException
 *           when interrupted while running jobs.
 * @throws ExecutionException
 *           when at least one of the jobs could not be scheduled.
 */
private boolean runJobs(int threadCount, List<JobRunner> jobRunners)
        throws InterruptedException, ExecutionException {
    ExecutorService execSvc = Executors.newFixedThreadPool(threadCount);

    if ((jobRunners == null) || (jobRunners.size() == 0)) {
        return true;
    }

    boolean success = true;
    try {
        List<Future<Boolean>> jobFutures = new LinkedList<Future<Boolean>>();
        for (JobRunner jobRunner : jobRunners) {
            Future<Boolean> jobFuture = execSvc.submit(jobRunner);
            jobFutures.add(jobFuture);
        }

        // Wait for all jobs to complete.
        for (Future<Boolean> jobFuture : jobFutures) {
            success = jobFuture.get();
            if (!success) {
                // Stop the presses as soon as we see an error. Note that several
                // other jobs may have already been scheduled. Others will never be
                // scheduled.
                break;
            }
        }
    } finally {
        // Shut down the executor so that the JVM can exit.
        List<Runnable> neverRan = execSvc.shutdownNow();
        if (neverRan != null && neverRan.size() > 0) {
            System.err.println("Interrupted run. Currently running Hadoop jobs will continue unless cancelled. "
                    + neverRan + " jobs never scheduled.");
        }
    }
    return success;
}
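
This is one of the few examples that uses the List<Runnable> returned by shutdownNow(): the list holds the tasks that were queued but never started, which makes it easy to report how much work was abandoned when a job fails.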

From source file:org.apache.hadoop.hdfs.TestDFSOpsCountStatistics.java

@Test
public void testCurrentAccess() throws InterruptedException {
    final int numThreads = 10;
    final ExecutorService threadPool = newFixedThreadPool(numThreads);

    try {
        final CountDownLatch allReady = new CountDownLatch(numThreads);
        final CountDownLatch startBlocker = new CountDownLatch(1);
        final CountDownLatch allDone = new CountDownLatch(numThreads);
        final AtomicReference<Throwable> childError = new AtomicReference<>();

        for (int i = 0; i < numThreads; i++) {
            threadPool.submit(new Runnable() {
                @Override
                public void run() {
                    allReady.countDown();
                    try {
                        startBlocker.await();
                        incrementOpsCountByRandomNumbers();
                    } catch (Throwable t) {
                        LOG.error("Child failed when calling mkdir", t);
                        childError.compareAndSet(null, t);
                    } finally {
                        allDone.countDown();
                    }
                }
            });
        }

        allReady.await(); // wait until all threads are ready
        startBlocker.countDown(); // all threads start making directories
        allDone.await(); // wait until all threads are done

        assertNull("Child failed with exception.", childError.get());
        verifyStatistics();
    } finally {
        threadPool.shutdownNow();
    }
}

From source file:org.zeroturnaround.exec.ProcessExecutor.java

/**
 * Waits until the process stops, a timeout occurs, or the caller thread gets interrupted.
 * In the latter two cases the process is destroyed as well.
 */
private ProcessResult waitFor(WaitForProcess task) throws IOException, InterruptedException, TimeoutException {
    ProcessResult result;
    if (timeout == null) {
        // Use the current thread
        result = task.call();
    } else {
        // Fork another thread to invoke Process.waitFor()
        ExecutorService service = Executors.newSingleThreadScheduledExecutor();
        try {
            result = service.submit(task).get(timeout, timeoutUnit);
        } catch (ExecutionException e) {
            Throwable c = e.getCause();
            if (c instanceof IOException)
                throw (IOException) c;
            if (c instanceof InterruptedException)
                throw (InterruptedException) c;
            if (c instanceof InvalidExitValueException)
                throw (InvalidExitValueException) c;
            throw new IllegalStateException("Error occurred while waiting for process to finish:", c);
        } catch (TimeoutException e) {
            log.debug("{} is running too long", task);
            throw e;
        } finally {
            // Interrupt the task if it's still running and release the ExecutorService's resources
            service.shutdownNow();
        }
    }
    return result;
}

From source file:org.openrdf.http.server.ProtocolTest.java

/**
 * Test for SES-1861.
 * 
 * @throws Exception
 */
@Test
public void testConcurrentNamespaceUpdates() throws Exception {
    int limitCount = 1000;
    int limitPrefix = 50;

    Random prng = new Random();

    // String repositoryLocation =
    // Protocol.getRepositoryLocation("http://localhost:8080/openrdf-sesame",
    // "Test-NativeStore");
    String repositoryLocation = TestServer.REPOSITORY_URL;

    ExecutorService threadPool = Executors.newFixedThreadPool(20);

    for (int count = 0; count < limitCount; count++) {
        final int number = count;
        final int i = prng.nextInt(limitPrefix);
        final String prefix = "prefix" + i;
        final String ns = "http://example.org/namespace" + i;

        final String location = Protocol.getNamespacePrefixLocation(repositoryLocation, prefix);

        Runnable runner = new Runnable() {

            public void run() {
                try {
                    if (number % 2 == 0) {
                        putNamespace(location, ns);
                    } else {
                        deleteNamespace(location);
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                    fail("Failed in test: " + number);
                }
            }
        };
        threadPool.execute(runner);
    }
    threadPool.shutdown();
    threadPool.awaitTermination(30000, TimeUnit.MILLISECONDS);
    threadPool.shutdownNow();
}
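
The closing sequence here — shutdown(), awaitTermination(...), then shutdownNow() — is the standard two-phase shutdown shown in the sketch under Document above: the pool gets 30 seconds to drain on its own before any stragglers are interrupted.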

From source file:org.structr.common.SystemTest.java

@Test
public void testTransactionIsolation() {

    // Tests the transaction isolation of the underlying database layer.

    // Create a node and use many different threads to set a property on
    // it in a transaction. Observe the property value to check that the
    // threads do not interfere with each other.

    try {

        final TestOne test = createTestNode(TestOne.class);
        final ExecutorService executor = Executors.newCachedThreadPool();
        final List<TestRunner> tests = new LinkedList<>();
        final List<Future> futures = new LinkedList<>();

        // create and run test runners
        for (int i = 0; i < 25; i++) {

            final TestRunner runner = new TestRunner(app, test);

            futures.add(executor.submit(runner));
            tests.add(runner);
        }

        // wait for termination
        for (final Future future : futures) {
            future.get();
            System.out.print(".");
        }

        System.out.println();

        // check for success
        for (final TestRunner runner : tests) {
            assertTrue("Could not validate transaction isolation", runner.success());
        }

        executor.shutdownNow();

    } catch (Throwable fex) {
        fail("Unexpected exception");
    }
}