Example usage for java.util.concurrent ThreadPoolExecutor allowCoreThreadTimeOut

Introduction

On this page you can find example usage for java.util.concurrent ThreadPoolExecutor allowCoreThreadTimeOut.

Prototype

boolean allowCoreThreadTimeOut

Document

If false (default), core threads stay alive even when idle. If true, core threads use keepAliveTime to time out waiting for work.
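
The field is controlled through the setter allowCoreThreadTimeOut(boolean), which every example below invokes. As a minimal, self-contained sketch (the class name, pool size, and keep-alive value here are illustrative only, not taken from any of the examples), enabling the flag lets an otherwise fixed-size pool shrink to zero threads while idle:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class CoreThreadTimeOutExample {
    public static void main(String[] args) throws InterruptedException {
        // A fixed-size pool: core == max == 4 threads, 30 second keep-alive.
        ThreadPoolExecutor executor = new ThreadPoolExecutor(4, 4, 30L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());

        // The keep-alive time must be greater than zero before enabling this;
        // otherwise allowCoreThreadTimeOut(true) throws IllegalArgumentException.
        executor.allowCoreThreadTimeOut(true);

        executor.submit(() -> System.out.println("ran on " + Thread.currentThread().getName()));

        // With the flag enabled, the 4 core threads terminate after 30 idle seconds
        // instead of staying alive for the life of the pool.
        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.MINUTES);
    }
}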

Usage

From source file:com.kenshoo.freemarker.services.FreeMarkerService.java

@PostConstruct
public void postConstruct() {
    int actualMaxQueueLength = maxQueueLength != null ? maxQueueLength
            : Math.max(MIN_DEFAULT_MAX_QUEUE_LENGTH,
                    (int) (MAX_DEFAULT_MAX_QUEUE_LENGTH_MILLISECONDS / maxTemplateExecutionTime));
    ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(maxThreads, maxThreads,
            THREAD_KEEP_ALIVE_TIME, TimeUnit.MILLISECONDS,
            new BlockingArrayQueue<Runnable>(actualMaxQueueLength));
    threadPoolExecutor.allowCoreThreadTimeOut(true);
    templateExecutor = threadPoolExecutor;
}

From source file:org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService.java

private void addExecutorForVolume(final File volume) {
    ThreadFactory threadFactory = new ThreadFactory() {
        int counter = 0;

        @Override
        public Thread newThread(Runnable r) {
            int thisIndex;
            synchronized (this) {
                thisIndex = counter++;
            }
            Thread t = new Thread(threadGroup, r);
            t.setName("Async disk worker #" + thisIndex + " for volume " + volume);
            return t;
        }
    };

    ThreadPoolExecutor executor = new ThreadPoolExecutor(CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME,
            THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), threadFactory);

    // This can reduce the number of running threads
    executor.allowCoreThreadTimeOut(true);
    executors.put(volume, executor);
}

From source file:org.apache.accumulo.core.file.rfile.MultiThreadedRFileTest.java

@SuppressFBWarnings(value = "INFORMATION_EXPOSURE_THROUGH_AN_ERROR_MESSAGE", justification = "information put into error message is safe and used for testing")
@Test
public void testMultipleReaders() throws IOException {
    final List<Throwable> threadExceptions = Collections.synchronizedList(new ArrayList<Throwable>());
    Map<String, MutableInt> messages = new HashMap<>();
    Map<String, String> stackTrace = new HashMap<>();

    final TestRFile trfBase = new TestRFile(conf);

    writeData(trfBase);

    trfBase.openReader();

    try {

        validate(trfBase);

        final TestRFile trfBaseCopy = trfBase.deepCopy();

        validate(trfBaseCopy);

        // now start up multiple RFile deepcopies
        int maxThreads = 10;
        String name = "MultiThreadedRFileTestThread";
        ThreadPoolExecutor pool = new ThreadPoolExecutor(maxThreads + 1, maxThreads + 1, 5 * 60,
                TimeUnit.SECONDS, new LinkedBlockingQueue<>(), new NamingThreadFactory(name));
        pool.allowCoreThreadTimeOut(true);
        try {
            Runnable runnable = () -> {
                try {
                    TestRFile trf = trfBase;
                    synchronized (trfBaseCopy) {
                        trf = trfBaseCopy.deepCopy();
                    }
                    validate(trf);
                } catch (Throwable t) {
                    threadExceptions.add(t);
                }
            };
            for (int i = 0; i < maxThreads; i++) {
                pool.submit(runnable);
            }
        } finally {
            pool.shutdown();
            try {
                pool.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }

        for (Throwable t : threadExceptions) {
            String msg = t.getClass() + " : " + t.getMessage();
            if (!messages.containsKey(msg)) {
                messages.put(msg, new MutableInt(1));
            } else {
                messages.get(msg).increment();
            }
            StringWriter string = new StringWriter();
            PrintWriter writer = new PrintWriter(string);
            t.printStackTrace(writer);
            writer.flush();
            stackTrace.put(msg, string.getBuffer().toString());
        }
    } finally {
        trfBase.closeReader();
        trfBase.close();
    }

    for (String message : messages.keySet()) {
        LOG.error(messages.get(message) + ": " + message);
        LOG.error(stackTrace.get(message));
    }

    assertTrue(threadExceptions.isEmpty());
}

From source file:com.revetkn.ios.analyzer.ArtworkAnalyzer.java

/**
 * @return a backing thread pool used for concurrent execution of image processing tasks.
 */
protected ExecutorService createExecutorService() {
    int coreThreadCount = getRuntime().availableProcessors();
    int maximumThreadCount = coreThreadCount;
    int unusedThreadTerminationTimeoutInSeconds = 5;

    ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(coreThreadCount, maximumThreadCount,
            unusedThreadTerminationTimeoutInSeconds, SECONDS, new LinkedBlockingQueue<Runnable>());

    threadPoolExecutor.allowCoreThreadTimeOut(true);

    return threadPoolExecutor;
}

From source file:org.apache.hadoop.hbase.replication.regionserver.RegionReplicaReplicationEndpoint.java

/**
 * Returns a thread pool for the RPCs to region replicas. Similar to
 * Connection's thread pool.
 */
private ExecutorService getDefaultThreadPool(Configuration conf) {
    int maxThreads = conf.getInt("hbase.region.replica.replication.threads.max", 256);
    int coreThreads = conf.getInt("hbase.region.replica.replication.threads.core", 16);
    if (maxThreads == 0) {
        maxThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    if (coreThreads == 0) {
        coreThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    long keepAliveTime = conf.getLong("hbase.region.replica.replication.threads.keepalivetime", 60);
    LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>(maxThreads * conf
            .getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS,
            workQueue, Threads.newDaemonThreadFactory(this.getClass().getSimpleName() + "-rpc-shared-"));
    tpe.allowCoreThreadTimeOut(true);
    return tpe;
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer.java

protected ThreadPoolExecutor createNewThreadPoolService(Configuration conf) {
    int nThreads = conf.getInt(YarnConfiguration.RM_DELEGATION_TOKEN_RENEWER_THREAD_COUNT,
            YarnConfiguration.DEFAULT_RM_DELEGATION_TOKEN_RENEWER_THREAD_COUNT);

    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat("DelegationTokenRenewer #%d").build();
    ThreadPoolExecutor pool = new ThreadPoolExecutor(nThreads, nThreads, 3L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());
    pool.setThreadFactory(tf);
    pool.allowCoreThreadTimeOut(true);
    return pool;
}

From source file:com.splicemachine.derby.stream.control.ControlDataSet.java

@Override
public DataSet<V> union(DataSet<V> dataSet) {
    ThreadPoolExecutor tpe = null;
    try {

        ThreadFactory factory = new ThreadFactoryBuilder().setNameFormat("union-begin-query-%d")
                .setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
                    @Override
                    public void uncaughtException(Thread t, Throwable e) {
                        e.printStackTrace();
                    }
                }).build();
        tpe = new ThreadPoolExecutor(2, 2, 60, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(), factory,
                new ThreadPoolExecutor.CallerRunsPolicy());
        tpe.allowCoreThreadTimeOut(false);
        tpe.prestartAllCoreThreads();
        Future<Iterator<V>> leftSideFuture = tpe.submit(new NonLazy(iterator));
        Future<Iterator<V>> rightSideFuture = tpe.submit(new NonLazy(((ControlDataSet<V>) dataSet).iterator));

        return new ControlDataSet<>(Iterators.concat(leftSideFuture.get(), rightSideFuture.get()));
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (tpe != null)
            tpe.shutdown();
    }
}

From source file:org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.java

private ExecutorService createExecutorService() {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(),
            new ThreadFactoryBuilder().setNameFormat("LoadIncrementalHFiles-%1$d").build());
    pool.allowCoreThreadTimeOut(true);
    return pool;
}

From source file:org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexProviderService.java

private ExecutorService createExecutor() {
    ThreadPoolExecutor executor = new ThreadPoolExecutor(0, 5, 60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), new ThreadFactory() {
                private final AtomicInteger counter = new AtomicInteger();
                private final Thread.UncaughtExceptionHandler handler = new Thread.UncaughtExceptionHandler() {
                    @Override
                    public void uncaughtException(Thread t, Throwable e) {
                        log.warn("Error occurred in asynchronous processing ", e);
                    }
                };

                @Override
                public Thread newThread(@Nonnull Runnable r) {
                    Thread thread = new Thread(r, createName());
                    thread.setDaemon(true);
                    thread.setPriority(Thread.MIN_PRIORITY);
                    thread.setUncaughtExceptionHandler(handler);
                    return thread;
                }

                private String createName() {
                    return "oak-lucene-" + counter.getAndIncrement();
                }
            });
    executor.setKeepAliveTime(1, TimeUnit.MINUTES);
    executor.allowCoreThreadTimeOut(true);
    return executor;
}

From source file:org.apache.hadoop.hbase.client.ConnectionImplementation.java

private ExecutorService getThreadPool(int maxThreads, int coreThreads, String nameHint,
        BlockingQueue<Runnable> passedWorkQueue) {
    // shared HTable thread executor not yet initialized
    if (maxThreads == 0) {
        maxThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    if (coreThreads == 0) {
        coreThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
    BlockingQueue<Runnable> workQueue = passedWorkQueue;
    if (workQueue == null) {
        workQueue = new LinkedBlockingQueue<Runnable>(maxThreads * conf.getInt(
                HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
    }
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS,
            workQueue, Threads.newDaemonThreadFactory(toString() + nameHint));
    tpe.allowCoreThreadTimeOut(true);
    return tpe;
}