Example usage for java.util.concurrent ExecutorService shutdownNow


Introduction

On this page you can find usage examples for java.util.concurrent.ExecutorService.shutdownNow().

Prototype

List<Runnable> shutdownNow();

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
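
The list returned by shutdownNow() contains the tasks that were still waiting in the queue; tasks that are already running typically only receive an interrupt and must cooperate to stop. Below is a minimal sketch of the common idiom of calling shutdownNow() and then waiting a bounded time with awaitTermination(); the class and task used here are illustrative only, not taken from the examples below.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownNowSketch {

    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);

        // Hypothetical long-running task that honors interruption.
        Runnable task = () -> {
            try {
                Thread.sleep(60_000);
            } catch (InterruptedException e) {
                // shutdownNow() interrupts running tasks; restore the flag and exit.
                Thread.currentThread().interrupt();
            }
        };

        for (int i = 0; i < 5; i++) {
            pool.submit(task);
        }

        // Attempt to stop running tasks and collect the ones that never started.
        List<Runnable> neverStarted = pool.shutdownNow();
        System.out.println("Tasks that were awaiting execution: " + neverStarted.size());

        // Give interrupted tasks a bounded amount of time to finish.
        if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
            System.err.println("Pool did not terminate within the timeout");
        }
    }
}

Many of the examples below call shutdownNow() in a finally block or a shutdown hook so the pool's threads are released even when the surrounding work fails.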

Usage

From source file: com.bbytes.jfilesync.sync.ftp.FTPClientFactory.java

/**
 * Get an {@link FTPClient} with an initialized connection to the server given in the properties file.
 * @return the connected {@link FTPClient} instance
 */
public FTPClient getClientInstance() {

    ExecutorService ftpclientConnThreadPool = Executors.newSingleThreadExecutor();
    Future<FTPClient> future = ftpclientConnThreadPool.submit(new Callable<FTPClient>() {

        FTPClient ftpClient = new FTPClient();

        boolean connected;

        public FTPClient call() throws Exception {

            try {
                while (!connected) {
                    try {
                        ftpClient.connect(host, port);
                        if (!ftpClient.login(username, password)) {
                            ftpClient.logout();
                        }
                        connected = true;
                        return ftpClient;
                    } catch (Exception e) {
                        connected = false;
                    }

                }

                int reply = ftpClient.getReplyCode();
                // FTPReply stores a set of constants for FTP reply codes.
                if (!FTPReply.isPositiveCompletion(reply)) {
                    ftpClient.disconnect();
                }

                ftpClient.setFileType(FTP.BINARY_FILE_TYPE);

            } catch (Exception e) {
                log.error(e.getMessage(), e);
            }
            return ftpClient;
        }
    });

    FTPClient ftpClient = new FTPClient();
    try {
        // wait up to 100 seconds to acquire the connection, otherwise give up
        ftpClient = future.get(100, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
        log.info("FTP client Conn wait thread terminated!");
    } catch (InterruptedException e) {
        log.error(e.getMessage(), e);
    } catch (ExecutionException e) {
        log.error(e.getMessage(), e);
    }

    ftpclientConnThreadPool.shutdownNow();
    return ftpClient;

}

From source file: org.apache.flink.runtime.blob.BlobServerGetTest.java

/**
 * [FLINK-6020] Tests that concurrent get operations don't concurrently access the BlobStore to
 * download a blob.
 *
 * @param jobId
 *       job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *       whether the BLOB should become permanent or transient
 */
private void testConcurrentGetOperations(@Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    final BlobStore blobStore = mock(BlobStore.class);

    final int numberConcurrentGetOperations = 3;
    final List<CompletableFuture<File>> getOperations = new ArrayList<>(numberConcurrentGetOperations);

    final byte[] data = { 1, 2, 3, 4, 99, 42 };

    doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            File targetFile = (File) invocation.getArguments()[2];

            FileUtils.writeByteArrayToFile(targetFile, data);

            return null;
        }
    }).when(blobStore).get(any(JobID.class), any(BlobKey.class), any(File.class));

    final ExecutorService executor = Executors.newFixedThreadPool(numberConcurrentGetOperations);

    try (final BlobServer server = new BlobServer(config, blobStore)) {

        server.start();

        // upload data first
        final BlobKey blobKey = put(server, jobId, data, blobType);

        // now try accessing it concurrently (only HA mode will be able to retrieve it from HA store!)
        if (blobType == PERMANENT_BLOB) {
            // remove local copy so that a transfer from HA store takes place
            assertTrue(server.getStorageLocation(jobId, blobKey).delete());
        }
        for (int i = 0; i < numberConcurrentGetOperations; i++) {
            CompletableFuture<File> getOperation = CompletableFuture.supplyAsync(() -> {
                try {
                    File file = get(server, jobId, blobKey);
                    // check that we have read the right data
                    validateGetAndClose(new FileInputStream(file), data);
                    return file;
                } catch (IOException e) {
                    throw new CompletionException(
                            new FlinkException("Could not read blob for key " + blobKey + '.', e));
                }
            }, executor);

            getOperations.add(getOperation);
        }

        CompletableFuture<Collection<File>> filesFuture = FutureUtils.combineAll(getOperations);
        filesFuture.get();
    } finally {
        executor.shutdownNow();
    }
}

From source file: org.structr.common.SystemTest.java

@Test
public void testConstraintsConcurrently() {

    /*
     * This test concurrently creates 1000 nodes in
     * batches of 10, with 10 threads simultaneously.
     */

    try (final Tx tx = app.tx()) {

        app.create(SchemaNode.class, new NodeAttribute(SchemaNode.name, "Item"), new NodeAttribute(
                SchemaNode.schemaProperties,
                Arrays.asList(app.create(SchemaProperty.class, new NodeAttribute(SchemaProperty.name, "name"),
                        new NodeAttribute(SchemaProperty.propertyType, "String"),
                        new NodeAttribute(SchemaProperty.unique, true),
                        new NodeAttribute(SchemaProperty.indexed, true)))));

        tx.success();

    } catch (FrameworkException ex) {
        fail("Error creating schema node");
    }

    final Class itemType = StructrApp.getConfiguration().getNodeEntityClass("Item");

    assertNotNull("Error creating schema node", itemType);

    final Runnable worker = new Runnable() {

        @Override
        public void run() {

            int i = 0;

            while (i < 1000) {

                try (final Tx tx = app.tx()) {

                    for (int j = 0; j < 10 && i < 1000; j++) {

                        app.create(itemType, "Item" + StringUtils.leftPad(Integer.toString(i++), 5, "0"));
                    }

                    tx.success();

                } catch (FrameworkException expected) {
                }
            }
        }
    };

    final ExecutorService service = Executors.newFixedThreadPool(10);
    final List<Future> futures = new LinkedList<>();

    for (int i = 0; i < 10; i++) {

        futures.add(service.submit(worker));
    }

    // wait for result of async. operations
    for (final Future future : futures) {

        try {
            future.get();

        } catch (Throwable t) {

            logger.warn("", t);
            fail("Unexpected exception");
        }
    }

    try (final Tx tx = app.tx()) {

        final List<NodeInterface> items = app.nodeQuery(itemType).sort(AbstractNode.name).getAsList();
        int i = 0;

        assertEquals("Invalid concurrent constraint test result", 1000, items.size());

        for (final NodeInterface item : items) {

            assertEquals("Invalid name detected", "Item" + StringUtils.leftPad(Integer.toString(i++), 5, "0"),
                    item.getName());
        }

        tx.success();

    } catch (FrameworkException ex) {
        fail("Unexpected exception");
    }

    service.shutdownNow();
}

From source file: com.netflix.curator.framework.recipes.locks.TestReaper.java

private void testSimulationWithLocks(String namespace) throws Exception {
    final int LOCK_CLIENTS = 10;
    final int ITERATIONS = 250;
    final int MAX_WAIT_MS = 10;

    ExecutorService service = Executors.newFixedThreadPool(LOCK_CLIENTS);
    ExecutorCompletionService<Object> completionService = new ExecutorCompletionService<Object>(service);

    Timing timing = new Timing();
    Reaper reaper = null;
    final CuratorFramework client = makeClient(timing, namespace);
    try {
        client.start();

        reaper = new Reaper(client, MAX_WAIT_MS / 2);
        reaper.start();
        reaper.addPath("/a/b");

        for (int i = 0; i < LOCK_CLIENTS; ++i) {
            completionService.submit(new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    final InterProcessMutex lock = new InterProcessMutex(client, "/a/b");
                    for (int i = 0; i < ITERATIONS; ++i) {
                        lock.acquire();
                        try {
                            Thread.sleep((int) (Math.random() * MAX_WAIT_MS));
                        } finally {
                            lock.release();
                        }
                    }
                    return null;
                }
            });
        }

        for (int i = 0; i < LOCK_CLIENTS; ++i) {
            completionService.take().get();
        }

        Thread.sleep(timing.session());
        timing.sleepABit();

        Stat stat = client.checkExists().forPath("/a/b");
        Assert.assertNull("Child qty: " + ((stat != null) ? stat.getNumChildren() : 0), stat);
    } finally {
        service.shutdownNow();
        IOUtils.closeQuietly(reaper);
        IOUtils.closeQuietly(client);
    }
}

From source file: info.archinnov.achilles.embedded.ServerStarter.java

private void start(final TypedMap parameters) {
    if (isAlreadyRunning()) {
        log.debug("Cassandra is already running, not starting new one");
        return;
    }

    final String triggersDir = createTriggersFolder();

    log.info(" Random embedded Cassandra RPC port/Thrift port = {}",
            parameters.<Integer>getTyped(CASSANDRA_THRIFT_PORT));
    log.info(" Random embedded Cassandra Native port/CQL port = {}",
            parameters.<Integer>getTyped(CASSANDRA_CQL_PORT));
    log.info(" Random embedded Cassandra Storage port = {}",
            parameters.<Integer>getTyped(CASSANDRA_STORAGE_PORT));
    log.info(" Random embedded Cassandra Storage SSL port = {}",
            parameters.<Integer>getTyped(CASSANDRA_STORAGE_SSL_PORT));
    log.info(" Random embedded Cassandra Remote JMX port = {}",
            System.getProperty("com.sun.management.jmxremote.port", "null"));
    log.info(" Embedded Cassandra triggers directory = {}", triggersDir);

    log.info("Starting Cassandra...");

    System.setProperty("cassandra.triggers_dir", triggersDir);
    System.setProperty("cassandra-foreground", "true");
    System.setProperty("cassandra.embedded.concurrent.reads",
            parameters.getTypedOr(CASSANDRA_CONCURRENT_READS, 32).toString());
    System.setProperty("cassandra.embedded.concurrent.writes",
            parameters.getTypedOr(CASSANDRA_CONCURRENT_WRITES, 32).toString());
    System.setProperty("cassandra-foreground", "true");

    final boolean useUnsafeCassandra = parameters.getTyped(USE_UNSAFE_CASSANDRA_DAEMON);

    if (useUnsafeCassandra) {
        System.setProperty("cassandra-num-tokens", "1");
    }

    System.setProperty("cassandra.config.loader", "info.archinnov.achilles.embedded.AchillesCassandraConfig");

    final CountDownLatch startupLatch = new CountDownLatch(1);
    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final AtomicReference<CassandraDaemon> daemonRef = new AtomicReference<>();
    executor.execute(() -> {
        if (useUnsafeCassandra) {
            LOGGER.warn(
                    "******* WARNING, starting unsafe embedded Cassandra deamon. This should be only used for unit testing or development and not for production !");
        }

        CassandraDaemon cassandraDaemon = useUnsafeCassandra ? new AchillesCassandraDaemon()
                : new CassandraDaemon();

        cassandraDaemon.completeSetup();
        cassandraDaemon.activate();
        daemonRef.getAndSet(cassandraDaemon);
        startupLatch.countDown();
    });

    try {
        startupLatch.await(30, SECONDS);
    } catch (InterruptedException e) {
        log.error("Timeout starting Cassandra embedded", e);
        throw new IllegalStateException("Timeout starting Cassandra embedded", e);
    }

    // Generate an OrderedShutdownHook to shutdown all connections from java clients before closing the server
    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            log.info("Calling stop on Embedded Cassandra server");
            daemonRef.get().stop();

            log.info("Calling shutdown on all Cluster instances");
            // First call shutdown on all registered Java driver Cluster instances
            orderedShutdownHook.callShutDown();

            log.info("Shutting down embedded Cassandra server");
            // Then shutdown the server
            executor.shutdownNow();
        }
    });
}

From source file: org.apache.druid.data.input.impl.prefetch.PrefetchableTextFilesFirehoseFactory.java

@Override
public Firehose connect(StringInputRowParser firehoseParser, @Nullable File temporaryDirectory)
        throws IOException {
    if (objects == null) {
        objects = ImmutableList.copyOf(Preconditions.checkNotNull(initObjects(), "objects"));
    }

    if (cacheManager.isEnabled() || prefetchConfig.getMaxFetchCapacityBytes() > 0) {
        Preconditions.checkNotNull(temporaryDirectory, "temporaryDirectory");
        Preconditions.checkArgument(temporaryDirectory.exists(), "temporaryDirectory[%s] does not exist",
                temporaryDirectory);
        Preconditions.checkArgument(temporaryDirectory.isDirectory(),
                "temporaryDirectory[%s] is not a directory", temporaryDirectory);
    }

    LOG.info("Create a new firehose for [%d] objects", objects.size());

    // fetchExecutor is responsible for background data fetching
    final ExecutorService fetchExecutor = Execs.singleThreaded("firehose_fetch_%d");
    final FileFetcher<T> fetcher = new FileFetcher<T>(cacheManager, objects, fetchExecutor, temporaryDirectory,
            prefetchConfig, new ObjectOpenFunction<T>() {
                @Override
                public InputStream open(T object) throws IOException {
                    return openObjectStream(object);
                }

                @Override
                public InputStream open(T object, long start) throws IOException {
                    return openObjectStream(object, start);
                }
            }, getRetryCondition(), getMaxFetchRetry());

    return new FileIteratingFirehose(new Iterator<LineIterator>() {
        @Override
        public boolean hasNext() {
            return fetcher.hasNext();
        }

        @Override
        public LineIterator next() {
            if (!hasNext()) {
                throw new NoSuchElementException();
            }

            final OpenedObject<T> openedObject = fetcher.next();
            try {
                return new ResourceCloseableLineIterator(new InputStreamReader(
                        wrapObjectStream(openedObject.getObject(), openedObject.getObjectStream()),
                        StandardCharsets.UTF_8), openedObject.getResourceCloser());
            } catch (IOException e) {
                try {
                    openedObject.getResourceCloser().close();
                } catch (Throwable t) {
                    e.addSuppressed(t);
                }
                throw new RuntimeException(e);
            }
        }
    }, firehoseParser, () -> {
        fetchExecutor.shutdownNow();
        try {
            Preconditions.checkState(
                    fetchExecutor.awaitTermination(prefetchConfig.getFetchTimeout(), TimeUnit.MILLISECONDS));
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new ISE("Failed to shutdown fetch executor during close");
        }
    });
}

From source file: io.druid.query.lookup.LookupReferencesManager.java

private void startLookups(final List<LookupBean> lookupBeanList) {
    final ImmutableMap.Builder<String, LookupExtractorFactoryContainer> builder = ImmutableMap.builder();
    final ExecutorService executorService = Execs.multiThreaded(lookupConfig.getNumLookupLoadingThreads(),
            "LookupReferencesManager-Startup-%s");
    final CompletionService<Map.Entry<String, LookupExtractorFactoryContainer>> completionService = new ExecutorCompletionService<>(
            executorService);
    final List<LookupBean> remainingLookups = new ArrayList<>(lookupBeanList);
    try {
        LOG.info("Starting lookup loading process");
        for (int i = 0; i < lookupConfig.getLookupStartRetries() && !remainingLookups.isEmpty(); i++) {
            LOG.info("Round of attempts #%d, [%d] lookups", i + 1, remainingLookups.size());
            final Map<String, LookupExtractorFactoryContainer> successfulLookups = startLookups(
                    remainingLookups, completionService);
            builder.putAll(successfulLookups);
            remainingLookups.removeIf(l -> successfulLookups.containsKey(l.getName()));
        }
        if (!remainingLookups.isEmpty()) {
            LOG.warn("Failed to start the following lookups after [%d] attempts: [%s]",
                    lookupConfig.getLookupStartRetries(), remainingLookups);
        }
        stateRef.set(new LookupUpdateState(builder.build(), ImmutableList.of(), ImmutableList.of()));
    } catch (InterruptedException | RuntimeException e) {
        LOG.error(e, "Failed to finish lookup load process.");
    } finally {
        executorService.shutdownNow();
    }
}

From source file: com.amazonaws.services.kinesis.clientlibrary.lib.worker.WorkerTest.java

private void runAndTestWorker(List<Shard> shardList, int threadPoolSize, List<KinesisClientLease> initialLeases,
        boolean callProcessRecordsForEmptyRecordList, int numberOfRecordsPerShard) throws Exception {
    File file = KinesisLocalFileDataCreator.generateTempDataFile(shardList, numberOfRecordsPerShard,
            "unitTestWT001");
    IKinesisProxy fileBasedProxy = new KinesisLocalFileProxy(file.getAbsolutePath());

    Semaphore recordCounter = new Semaphore(0);
    ShardSequenceVerifier shardSequenceVerifier = new ShardSequenceVerifier(shardList);
    TestStreamletFactory recordProcessorFactory = new TestStreamletFactory(recordCounter,
            shardSequenceVerifier);

    ExecutorService executorService = Executors.newFixedThreadPool(threadPoolSize);

    WorkerThread workerThread = runWorker(shardList, initialLeases, callProcessRecordsForEmptyRecordList,
            failoverTimeMillis, numberOfRecordsPerShard, fileBasedProxy, recordProcessorFactory,
            executorService, nullMetricsFactory);

    // TestStreamlet will release the semaphore once for every record it processes
    recordCounter.acquire(numberOfRecordsPerShard * shardList.size());

    // Wait a bit to allow the worker to spin against the end of the stream.
    Thread.sleep(500L);

    testWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList,
            numberOfRecordsPerShard, fileBasedProxy, recordProcessorFactory);

    workerThread.getWorker().shutdown();
    executorService.shutdownNow();
    file.delete();
}

From source file: fr.bmartel.speedtest.SpeedTestTask.java

/**
 * write and flush mSocket.
 *
 * @param data payload to write
 * @return error status (-1 for error)
 * @throws IOException mSocket io exception
 */
private int writeFlushSocket(final byte[] data) throws IOException {

    final ExecutorService executor = Executors.newSingleThreadExecutor();

    @SuppressWarnings("unchecked")
    final Future<Integer> future = executor.submit(new Callable() {

        /**
         * execute sequential write/flush task.
         *
         * @return status
         */
        public Integer call() {
            try {
                mSocket.getOutputStream().write(data);
                mSocket.getOutputStream().flush();
            } catch (IOException e) {
                return -1;
            }
            return 0;
        }
    });
    int status;
    try {
        status = future.get(mSocketInterface.getSocketTimeout(), TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
        future.cancel(true);
        status = -1;
    } catch (InterruptedException | ExecutionException e) {
        status = -1;
    }
    executor.shutdownNow();
    return status;
}

From source file: org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.java

/**
 * Create Hive splits based on CombineFileSplit.
 */
@Override
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
    PerfLogger perfLogger = PerfLogger.getPerfLogger();
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.GET_SPLITS);
    init(job);

    ArrayList<InputSplit> result = new ArrayList<InputSplit>();

    Path[] paths = getInputPaths(job);

    List<Path> nonCombinablePaths = new ArrayList<Path>(paths.length / 2);
    List<Path> combinablePaths = new ArrayList<Path>(paths.length / 2);

    int numThreads = Math.min(MAX_CHECK_NONCOMBINABLE_THREAD_NUM,
            (int) Math.ceil((double) paths.length / DEFAULT_NUM_PATH_PER_THREAD));
    int numPathPerThread = (int) Math.ceil((double) paths.length / numThreads);

    // This check is necessary because for Spark branch, the result array from
    // getInputPaths() above could be empty, and therefore numThreads could be 0.
    // In that case, Executors.newFixedThreadPool will fail.
    if (numThreads > 0) {
        LOG.info("Total number of paths: " + paths.length + ", launching " + numThreads
                + " threads to check non-combinable ones.");
        ExecutorService executor = Executors.newFixedThreadPool(numThreads);
        List<Future<Set<Integer>>> futureList = new ArrayList<Future<Set<Integer>>>(numThreads);
        try {
            for (int i = 0; i < numThreads; i++) {
                int start = i * numPathPerThread;
                int length = i != numThreads - 1 ? numPathPerThread : paths.length - start;
                futureList.add(executor.submit(new CheckNonCombinablePathCallable(paths, start, length, job)));
            }
            Set<Integer> nonCombinablePathIndices = new HashSet<Integer>();
            for (Future<Set<Integer>> future : futureList) {
                nonCombinablePathIndices.addAll(future.get());
            }
            for (int i = 0; i < paths.length; i++) {
                if (nonCombinablePathIndices.contains(i)) {
                    nonCombinablePaths.add(paths[i]);
                } else {
                    combinablePaths.add(paths[i]);
                }
            }
        } catch (Exception e) {
            LOG.error("Error checking non-combinable path", e);
            perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.GET_SPLITS);
            throw new IOException(e);
        } finally {
            executor.shutdownNow();
        }
    }

    // Store the previous value for the path specification
    String oldPaths = job.get(HiveConf.ConfVars.HADOOPMAPREDINPUTDIR.varname);
    if (LOG.isDebugEnabled()) {
        LOG.debug("The received input paths are: [" + oldPaths + "] against the property "
                + HiveConf.ConfVars.HADOOPMAPREDINPUTDIR.varname);
    }

    // Process the normal splits
    if (nonCombinablePaths.size() > 0) {
        FileInputFormat.setInputPaths(job, nonCombinablePaths.toArray(new Path[nonCombinablePaths.size()]));
        InputSplit[] splits = super.getSplits(job, numSplits);
        for (InputSplit split : splits) {
            result.add(split);
        }
    }

    // Process the combine splits
    if (combinablePaths.size() > 0) {
        FileInputFormat.setInputPaths(job, combinablePaths.toArray(new Path[combinablePaths.size()]));
        Map<String, PartitionDesc> pathToPartitionInfo = this.pathToPartitionInfo != null
                ? this.pathToPartitionInfo
                : Utilities.getMapWork(job).getPathToPartitionInfo();
        InputSplit[] splits = getCombineSplits(job, numSplits, pathToPartitionInfo);
        for (InputSplit split : splits) {
            result.add(split);
        }
    }

    // Restore the old path information back
    // This is just to prevent incompatibilities with previous versions Hive
    // if some application depends on the original value being set.
    if (oldPaths != null) {
        job.set(HiveConf.ConfVars.HADOOPMAPREDINPUTDIR.varname, oldPaths);
    }

    // Clear work from the ThreadLocal after splits are generated, in case the thread is reused in the pool.
    Utilities.clearWorkMapForConf(job);

    LOG.info("Number of all splits " + result.size());
    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.GET_SPLITS);
    return result.toArray(new InputSplit[result.size()]);
}