Example usage for java.util.concurrent ExecutorService shutdownNow

Introduction

This page collects example usages of java.util.concurrent ExecutorService shutdownNow from open source projects.

Prototype

List<Runnable> shutdownNow();

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
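
There are no guarantees beyond best-effort attempts to stop the tasks: typical implementations cancel via Thread.interrupt, so a task that never responds to interruption may never terminate, and the returned List<Runnable> holds only the tasks that were queued but never started. As a point of reference for the examples below, here is a minimal sketch of the two-phase shutdown idiom recommended by the ExecutorService Javadoc (the 60-second timeouts are illustrative):

void shutdownAndAwaitTermination(ExecutorService pool) {
    pool.shutdown(); // stop accepting new tasks, let running ones finish
    try {
        if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // interrupt running tasks, discard queued ones
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                System.err.println("Pool did not terminate");
            }
        }
    } catch (InterruptedException ie) {
        pool.shutdownNow(); // re-cancel if the current thread was interrupted
        Thread.currentThread().interrupt(); // preserve the interrupt status
    }
}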

Usage

From source file: org.apache.phoenix.execute.UpsertSelectOverlappingBatchesIT.java

/**
 * Tests that UPSERT SELECT doesn't indefinitely block region closes
 */
@Test
public void testRegionCloseDuringUpsertSelect() throws Exception {
    int numUpsertSelectRunners = 4;
    ExecutorService exec = Executors.newFixedThreadPool(numUpsertSelectRunners);
    try (Connection conn = driver.connect(url, props)) {
        final UpsertSelectRunner upsertSelectRunner = new UpsertSelectRunner(dataTable, 0, 105, 1);
        // keep running slow upsert selects
        SlowBatchRegionObserver.SLOW_MUTATE = true;
        for (int i = 0; i < numUpsertSelectRunners; i++) {
            exec.submit(new UpsertSelectLooper(upsertSelectRunner));
            Thread.sleep(300);
        }

        final HBaseTestingUtility utility = getUtility();
        // try to close the region while UPSERT SELECTs are happening,
        final HRegionServer dataRs = utility.getHBaseCluster().getRegionServer(0);
        final Admin admin = utility.getAdmin();
        final RegionInfo dataRegion = admin.getRegions(TableName.valueOf(dataTable)).get(0);
        logger.info("Closing data table region");
        admin.unassign(dataRegion.getEncodedNameAsBytes(), true);
        // make sure the region is offline
        utility.waitFor(60000L, 1000, new Waiter.Predicate<Exception>() {
            @Override
            public boolean evaluate() throws Exception {
                List<RegionInfo> onlineRegions = admin.getRegions(dataRs.getServerName());
                for (RegionInfo onlineRegion : onlineRegions) {
                    if (onlineRegion.equals(dataRegion)) {
                        logger.info("Data region still online");
                        return false;
                    }
                }
                logger.info("Region is no longer online");
                return true;
            }
        });
    } finally {
        SlowBatchRegionObserver.SLOW_MUTATE = false;
        exec.shutdownNow();
        exec.awaitTermination(60, TimeUnit.SECONDS);
    }
}

From source file: org.apache.activemq.store.jdbc.JDBCCleanupLimitedPoolTest.java

@Test
public void testNoDeadlockOnXaPoolExhaustion() throws Exception {
    final CountDownLatch done = new CountDownLatch(1);
    final CountDownLatch doneCommit = new CountDownLatch(1000);

    final ActiveMQXAConnectionFactory factory = new ActiveMQXAConnectionFactory(
            broker.getTransportConnectorByScheme("tcp").getPublishableConnectString());

    ExecutorService executorService = Executors.newCachedThreadPool();
    // some contention over pool of 2
    for (int i = 0; i < 3; i++) {
        executorService.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    ActiveMQXAConnection conn = (ActiveMQXAConnection) factory.createXAConnection();
                    conn.start();
                    XASession sess = conn.createXASession();
                    while (done.getCount() > 0 && doneCommit.getCount() > 0) {
                        Xid xid = createXid();
                        sess.getXAResource().start(xid, XAResource.TMNOFLAGS);
                        MessageProducer producer = sess.createProducer(sess.createQueue("test"));
                        producer.send(sess.createTextMessage("test"));
                        sess.getXAResource().end(xid, XAResource.TMSUCCESS);
                        sess.getXAResource().prepare(xid);
                        sess.getXAResource().commit(xid, false);
                        doneCommit.countDown();
                    }

                    conn.close();

                } catch (Exception ignored) {
                    ignored.printStackTrace();
                }
            }
        });
    }

    executorService.execute(new Runnable() {
        @Override
        public void run() {
            try {
                while (!done.await(10, TimeUnit.MILLISECONDS) && doneCommit.getCount() > 0) {
                    jdbcPersistenceAdapter.cleanup();
                }
            } catch (Exception ignored) {
            }

        }
    });

    executorService.shutdown();
    boolean allComplete = executorService.awaitTermination(40, TimeUnit.SECONDS);
    done.countDown();
    assertTrue("all complete", allComplete);
    executorService.shutdownNow();

    assertTrue("xa tx done", doneCommit.await(10, TimeUnit.SECONDS));
}

From source file: com.google.cloud.hadoop.gcsio.GoogleCloudStorageIntegrationHelper.java

/**
 * Creates objects with the given names in the given bucket.
 */
private void createObjects(final String bucketName, String[] objectNames) throws IOException {

    final ExecutorService threadPool = Executors.newCachedThreadPool();
    final CountDownLatch counter = new CountDownLatch(objectNames.length);
    List<Future<?>> futures = new ArrayList<>();
    // Do each creation asynchronously.
    for (final String objectName : objectNames) {
        Future<?> future = threadPool.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    if (objectName.endsWith(GoogleCloudStorage.PATH_DELIMITER)) {
                        mkdir(bucketName, objectName);
                    } else {
                        // Just use objectName as file contents.
                        writeTextFile(bucketName, objectName, objectName);
                    }
                } catch (Throwable ioe) {
                    throw new RuntimeException(
                            String.format("Exception creating %s/%s", bucketName, objectName), ioe);
                } finally {
                    counter.countDown();
                }
            }
        });
        futures.add(future);
    }

    try {
        counter.await();
    } catch (InterruptedException ie) {
        throw new IOException("Interrupted while awaiting object creation!", ie);
    } finally {
        threadPool.shutdown();
        try {
            if (!threadPool.awaitTermination(10L, TimeUnit.SECONDS)) {
                System.err.println("Failed to awaitTermination! Forcing executor shutdown.");
                threadPool.shutdownNow();
            }
        } catch (InterruptedException ie) {
            throw new IOException("Interrupted while shutting down threadpool!", ie);
        }
    }

    for (Future<?> future : futures) {
        try {
            // We should already be done.
            future.get(10, TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            throw new IOException("Creation of file failed with exception", e);
        }
    }
}

From source file: io.pravega.client.stream.impl.ControllerImplTest.java

@Test
public void testParallelCreateStream() throws Exception {
    final ExecutorService executorService = Executors.newFixedThreadPool(10);
    Semaphore createCount = new Semaphore(-19);
    AtomicBoolean success = new AtomicBoolean(true);
    for (int i = 0; i < 10; i++) {
        executorService.submit(() -> {
            for (int j = 0; j < 2; j++) {
                try {
                    CompletableFuture<Boolean> createStreamStatus;
                    createStreamStatus = controllerClient
                            .createStream(StreamConfiguration.builder().streamName("streamparallel")
                                    .scope("scope1").scalingPolicy(ScalingPolicy.fixed(1)).build());
                    log.info("{}", createStreamStatus.get());
                    assertTrue(createStreamStatus.get());
                    createCount.release();
                } catch (Exception e) {
                    log.error("Exception when creating stream: {}", e);

                    // Don't wait for other threads to complete.
                    success.set(false);
                    createCount.release(20);
                }
            }
        });
    }
    createCount.acquire();
    executorService.shutdownNow();
    assertTrue(success.get());
}

From source file: org.apache.hadoop.hbase.util.RegionMover.java

@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DLS_DEAD_LOCAL_STORE", justification = "FB is wrong; its size is read")
private void unloadRegions(Admin admin, String server, ArrayList<String> regionServers, boolean ack,
        List<HRegionInfo> movedRegions) throws Exception {
    List<HRegionInfo> regionsToMove = new ArrayList<HRegionInfo>();// FindBugs: DLS_DEAD_LOCAL_STORE
    regionsToMove = getRegions(this.conf, server);
    if (regionsToMove.size() == 0) {
        LOG.info("No Regions to move....Quitting now");
        return;
    } else if (regionServers.size() == 0) {
        LOG.warn("No Regions were moved - no servers available");
        throw new Exception("No online region servers");
    }
    while (true) {
        regionsToMove = getRegions(this.conf, server);
        regionsToMove.removeAll(movedRegions);
        if (regionsToMove.size() == 0) {
            break;
        }
        int counter = 0;
        LOG.info("Moving " + regionsToMove.size() + " regions from " + this.hostname + " to "
                + regionServers.size() + " servers using " + this.maxthreads + " threads .Ack Mode:" + ack);
        ExecutorService moveRegionsPool = Executors.newFixedThreadPool(this.maxthreads);
        List<Future<Boolean>> taskList = new ArrayList<Future<Boolean>>();
        int serverIndex = 0;
        while (counter < regionsToMove.size()) {
            if (ack) {
                Future<Boolean> task = moveRegionsPool.submit(new MoveWithAck(admin, regionsToMove.get(counter),
                        server, regionServers.get(serverIndex), movedRegions));
                taskList.add(task);
            } else {
                Future<Boolean> task = moveRegionsPool.submit(new MoveWithoutAck(admin,
                        regionsToMove.get(counter), server, regionServers.get(serverIndex), movedRegions));
                taskList.add(task);
            }
            counter++;
            serverIndex = (serverIndex + 1) % regionServers.size();
        }
        moveRegionsPool.shutdown();
        long timeoutInSeconds = regionsToMove.size()
                * admin.getConfiguration().getInt(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX);
        try {
            if (!moveRegionsPool.awaitTermination(timeoutInSeconds, TimeUnit.SECONDS)) {
                moveRegionsPool.shutdownNow();
            }
        } catch (InterruptedException e) {
            moveRegionsPool.shutdownNow();
            Thread.currentThread().interrupt();
        }
        for (Future<Boolean> future : taskList) {
            try {
                // even if threads are still stuck after shutdownNow, wait at most 5 seconds
                if (!future.get(5, TimeUnit.SECONDS)) {
                    LOG.error("Was Not able to move region....Exiting Now");
                    throw new Exception("Could not move region Exception");
                }
            } catch (InterruptedException e) {
                LOG.error("Interrupted while waiting for Thread to Complete " + e.getMessage(), e);
                throw e;
            } catch (ExecutionException e) {
                LOG.error("Got Exception From Thread While moving region " + e.getMessage(), e);
                throw e;
            } catch (CancellationException e) {
                LOG.error("Thread for moving region cancelled. Timeout for cancellation:" + timeoutInSeconds
                        + "secs", e);
                throw e;
            }
        }
    }
}

From source file: org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.java

public static List<ColumnStatisticsObj> aggrPartitionStats(
        Map<ColumnStatsAggregator, List<ColStatsObjWithSourceInfo>> colStatsMap, final List<String> partNames,
        final boolean areAllPartsFound, final boolean useDensityFunctionForNDVEstimation, final double ndvTuner)
        throws MetaException {
    List<ColumnStatisticsObj> aggrColStatObjs = new ArrayList<ColumnStatisticsObj>();
    int numProcessors = Runtime.getRuntime().availableProcessors();
    final ExecutorService pool = Executors.newFixedThreadPool(Math.min(colStatsMap.size(), numProcessors),
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("aggr-col-stats-%d").build());
    final List<Future<ColumnStatisticsObj>> futures = Lists.newLinkedList();
    LOG.debug("Aggregating column stats. Threads used: {}", Math.min(colStatsMap.size(), numProcessors));
    long start = System.currentTimeMillis();
    for (final Map.Entry<ColumnStatsAggregator, List<ColStatsObjWithSourceInfo>> entry : colStatsMap
            .entrySet()) {
        futures.add(pool.submit(new Callable<ColumnStatisticsObj>() {
            @Override
            public ColumnStatisticsObj call() throws MetaException {
                List<ColStatsObjWithSourceInfo> colStatWithSourceInfo = entry.getValue();
                ColumnStatsAggregator aggregator = entry.getKey();
                try {
                    ColumnStatisticsObj statsObj = aggregator.aggregate(colStatWithSourceInfo, partNames,
                            areAllPartsFound);
                    return statsObj;
                } catch (MetaException e) {
                    LOG.debug(e.getMessage());
                    throw e;
                }
            }
        }));
    }
    pool.shutdown();
    if (!futures.isEmpty()) {
        for (Future<ColumnStatisticsObj> future : futures) {
            try {
                if (future.get() != null) {
                    aggrColStatObjs.add(future.get());
                }
            } catch (InterruptedException | ExecutionException e) {
                LOG.debug(e.getMessage());
                pool.shutdownNow();
                throw new MetaException(e.toString());
            }

        }
    }
    LOG.debug("Time for aggr col stats in seconds: {} Threads used: {}",
            ((System.currentTimeMillis() - (double) start)) / 1000,
            Math.min(colStatsMap.size(), numProcessors));
    return aggrColStatObjs;
}

From source file: org.rhq.enterprise.server.core.concurrency.LatchedServiceController.java

public void executeServices() throws LatchedServiceCircularityException {
    checkForCircularDependencies();

    ExecutorService threadPool = Executors.newFixedThreadPool(this.threadPoolSize);
    log.debug("Will execute latched services with a concurrency of [" + this.threadPoolSize + "]");

    // submit all latched services, but they'll block either in the thread pool queue or our startup latch
    Map<String, Future<?>> threads = new HashMap<String, Future<?>>();
    for (LatchedService service : this.latchedServices) {
        log.debug("Submitting [" + service.getServiceName() + "] to thread pool");
        Future<?> thread = threadPool.submit(service);
        threads.put(service.getServiceName(), thread);
    }

    // allow them to go
    serviceStartupLatch.countDown();

    try {
        // and then wait for all of them to complete
        int elapsedMinutes = 0;
        final int MINUTES_BETWEEN_UPDATES = 3;
        while (!this.serviceCompletionLatch.await(MINUTES_BETWEEN_UPDATES, TimeUnit.MINUTES)) {
            elapsedMinutes += MINUTES_BETWEEN_UPDATES;
            boolean stillRunning = false;
            for (Map.Entry<String, Future<?>> thread : threads.entrySet()) {
                if (!thread.getValue().isDone()) {
                    stillRunning = true;
                    log.warn("Still processing [" + thread.getKey() + "] after " + elapsedMinutes
                            + " minutes - is it hung?");
                }
            }
            if (!stillRunning) {
                log.error(
                        "The controller is waiting for threads that are already dead, breaking deadlock now!");
                break;
            }
        }
    } catch (InterruptedException ie) {
        log.warn("Controller was interrupted; can not be sure if all services have begun");
    } finally {
        threadPool.shutdownNow();
    }

    log.debug("All services have begun");
}

From source file: org.apache.hadoop.hbase.client.example.MultiThreadedClientExample.java

@Override
public int run(String[] args) throws Exception {

    if (args.length < 1 || args.length > 2) {
        System.out.println("Usage: " + this.getClass().getName() + " tableName [num_operations]");
        return -1;
    }

    final TableName tableName = TableName.valueOf(args[0]);
    int numOperations = DEFAULT_NUM_OPERATIONS;

    // the second arg is the number of operations to send.
    if (args.length == 2) {
        numOperations = Integer.parseInt(args[1]);
    }

    // Threads for the client only.
    //
    // We don't want to mix hbase and business logic.
    //
    ExecutorService service = new ForkJoinPool(threads * 2);

    // Create two different connections showing how it's possible to
    // separate different types of requests onto different connections
    final Connection writeConnection = ConnectionFactory.createConnection(getConf(), service);
    final Connection readConnection = ConnectionFactory.createConnection(getConf(), service);

    // At this point the entire cache for the region locations is full.
    // Only do this if the number of regions in a table is easy to fit into memory.
    //
    // If you are interacting with more than 25k regions on a client then it's probably not good
    // to do this at all.
    warmUpConnectionCache(readConnection, tableName);
    warmUpConnectionCache(writeConnection, tableName);

    List<Future<Boolean>> futures = new ArrayList<>(numOperations);
    for (int i = 0; i < numOperations; i++) {
        double r = ThreadLocalRandom.current().nextDouble();
        Future<Boolean> f;

        // For the sake of generating some synthetic load this queues
        // some different callables.
        // These callables are meant to represent real work done by your application.
        if (r < .30) {
            f = internalPool.submit(new WriteExampleCallable(writeConnection, tableName));
        } else if (r < .50) {
            f = internalPool.submit(new SingleWriteExampleCallable(writeConnection, tableName));
        } else {
            // reads go through the separate read connection
            f = internalPool.submit(new ReadExampleCallable(readConnection, tableName));
        }
        futures.add(f);
    }

    // Wait a long time for all the reads/writes to complete
    for (Future<Boolean> f : futures) {
        f.get(10, TimeUnit.MINUTES);
    }

    // Clean up after ourselves.
    internalPool.shutdownNow();
    service.shutdownNow();
    return 0;
}

From source file: com.google.cloud.hadoop.gcsio.GoogleCloudStorageIntegrationTest.java

/**
 * Creates objects with the given names in the given bucket.
 */
private void createObjects(final String bucketName, String[] objectNames) throws IOException {

    final ExecutorService threadPool = Executors.newCachedThreadPool();
    final CountDownLatch counter = new CountDownLatch(objectNames.length);
    List<Future<?>> futures = new ArrayList<>();
    // Do each creation asynchronously.
    for (final String objectName : objectNames) {
        Future<?> future = threadPool.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    if (objectName.endsWith(GoogleCloudStorage.PATH_DELIMITER)) {
                        mkdir(bucketName, objectName);
                    } else {
                        // Just use objectName as file contents.
                        writeTextFile(bucketName, objectName, objectName);
                    }
                } catch (Throwable ioe) {
                    throw new RuntimeException(
                            String.format("Exception creating %s/%s", bucketName, objectName), ioe);
                } finally {
                    counter.countDown();
                }
            }
        });
        futures.add(future);
    }

    try {
        counter.await();
    } catch (InterruptedException ie) {
        throw new IOException("Interrupted while awaiting object creation!", ie);
    } finally {
        threadPool.shutdown();
        try {
            if (!threadPool.awaitTermination(10L, TimeUnit.SECONDS)) {
                System.err.println("Failed to awaitTermination! Forcing executor shutdown.");
                threadPool.shutdownNow();
            }
        } catch (InterruptedException ie) {
            throw new IOException("Interrupted while shutting down threadpool!", ie);
        }
    }

    for (Future<?> future : futures) {
        try {
            // We should already be done.
            future.get(10, TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            throw new IOException("Creation of file failed with exception", e);
        }
    }
}

From source file: org.wso2.carbon.analytics.dataservice.core.indexing.AnalyticsDataIndexer.java

private void shutdownTaxonomyWorkerThreadPool(ExecutorService pool) throws AnalyticsIndexException {
    if (pool == null) {
        return;
    }
    pool.shutdown();
    try {
        if (!pool.awaitTermination(TAXONOMYWORKER_TIMEOUT, TimeUnit.SECONDS)) {
            pool.shutdownNow();
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        log.error("Error while shutting down the taxonomy worker thread pool: " + e.getMessage(), e);
        throw new AnalyticsIndexException(
                "Error while shutting down the taxonomy worker thread pool: " + e.getMessage(), e);
    }
}
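
A pattern common to the examples above: shutdownNow is the fallback once a graceful shutdown() and awaitTermination() have timed out, and it only stops tasks that cooperate by checking their interrupt status. A self-contained sketch (class and task names are illustrative) showing both the interrupt-driven cancellation and the returned list of never-started tasks:

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ShutdownNowDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService exec = Executors.newSingleThreadExecutor();
        // An interruption-aware task: it re-checks the interrupt flag between
        // units of work, so shutdownNow can actually stop it.
        exec.submit(() -> {
            while (!Thread.currentThread().isInterrupted()) {
                // do one bounded unit of work, then loop and re-check the flag
            }
        });
        // Queued behind the running task; it never gets to start.
        exec.submit(() -> System.out.println("never runs"));
        Thread.sleep(100); // give the first task a chance to start
        List<Runnable> neverStarted = exec.shutdownNow();
        System.out.println(neverStarted.size() + " queued task(s) never started");
    }
}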