Example usage for the java.util.concurrent.ThreadPoolExecutor constructor

Introduction

On this page you can find usage examples for the java.util.concurrent.ThreadPoolExecutor constructor, drawn from open-source projects.

Prototype

public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
        BlockingQueue<Runnable> workQueue, RejectedExecutionHandler handler) 

Document

Creates a new ThreadPoolExecutor with the given initial parameters and the default thread factory (Executors#defaultThreadFactory).
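
As a quick orientation before the project examples below, here is a minimal, self-contained sketch of this constructor. The pool sizes, queue capacity, and class name are arbitrary choices for illustration:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ThreadPoolExecutorDemo {
    public static void main(String[] args) throws InterruptedException {
        // 2 core threads, up to 4 threads total, idle non-core threads reclaimed
        // after 30 seconds, a bounded queue of 100 tasks, and CallerRunsPolicy
        // so that a saturated pool pushes work back onto the submitting thread.
        ThreadPoolExecutor executor = new ThreadPoolExecutor(2, 4, 30L, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(100),
                new ThreadPoolExecutor.CallerRunsPolicy());
        executor.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        executor.shutdown();
        executor.awaitTermination(10, TimeUnit.SECONDS);
    }
}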

Usage

From source file:org.apache.hadoop.hbase.replication.regionserver.RegionReplicaReplicationEndpoint.java

/**
 * Returns a thread pool for the RPCs to region replicas. Similar to
 * Connection's thread pool.
 */
private ExecutorService getDefaultThreadPool(Configuration conf) {
    int maxThreads = conf.getInt("hbase.region.replica.replication.threads.max", 256);
    int coreThreads = conf.getInt("hbase.region.replica.replication.threads.core", 16);
    if (maxThreads == 0) {
        maxThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    if (coreThreads == 0) {
        coreThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    long keepAliveTime = conf.getLong("hbase.region.replica.replication.threads.keepalivetime", 60);
    LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>(maxThreads * conf
            .getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS,
            workQueue, Threads.newDaemonThreadFactory(this.getClass().getSimpleName() + "-rpc-shared-"));
    tpe.allowCoreThreadTimeOut(true);
    return tpe;
}
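
Note the allowCoreThreadTimeOut(true) call above: by default only threads beyond corePoolSize honor the keep-alive timeout, so without it the core threads would stay alive forever. With it, an idle pool can shrink all the way to zero threads. A condensed sketch of the same pattern, with illustrative sizes:

ThreadPoolExecutor tpe = new ThreadPoolExecutor(16, 256, 60L, TimeUnit.SECONDS,
        new LinkedBlockingQueue<Runnable>());
// Let even the 16 core threads expire after 60 seconds of idleness,
// so the pool releases all of its threads when there is no work.
tpe.allowCoreThreadTimeOut(true);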

From source file:mondrian.olap.Util.java

/**
 * Creates an {@link ExecutorService} object backed by a thread pool.
 * @param maximumPoolSize Maximum number of concurrent
 * threads.
 * @param corePoolSize Minimum number of concurrent
 * threads to maintain in the pool, even if they are
 * idle.
 * @param keepAliveTime Time, in seconds, for which to
 * keep alive unused threads.
 * @param name The name of the threads.
 * @param rejectionPolicy The rejection policy to enforce.
 * @return A preconfigured executor service.
 */
public static ExecutorService getExecutorService(int maximumPoolSize, int corePoolSize, long keepAliveTime,
        final String name, RejectedExecutionHandler rejectionPolicy) {
    if (Util.PreJdk16) {
        // On JDK1.5, if you specify corePoolSize=0, nothing gets executed.
        // Bummer.
        corePoolSize = Math.max(corePoolSize, 1);
    }

    // We must create a factory where the threads
    // have the right name and are marked as daemon threads.
    final ThreadFactory factory = new ThreadFactory() {
        private final AtomicInteger counter = new AtomicInteger(0);

        public Thread newThread(Runnable r) {
            final Thread t = Executors.defaultThreadFactory().newThread(r);
            t.setDaemon(true);
            t.setName(name + '_' + counter.incrementAndGet());
            return t;
        }
    };

    // Ok, create the executor
    final ThreadPoolExecutor executor = new ThreadPoolExecutor(corePoolSize,
            maximumPoolSize > 0 ? maximumPoolSize : Integer.MAX_VALUE, keepAliveTime, TimeUnit.SECONDS,
            // we use a sync queue. any other type of queue
            // will prevent the tasks from running concurrently
            // because the executors API requires blocking queues.
            // Important to pass true here. This makes the
            // order of tasks deterministic.
            // TODO Write a non-blocking queue which implements
            // the blocking queue API so we can pass that to the
            // executor.
            new SynchronousQueue<Runnable>(true), factory);

    // Set the rejection policy if required.
    if (rejectionPolicy != null) {
        executor.setRejectedExecutionHandler(rejectionPolicy);
    }

    // Done
    return executor;
}
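
The SynchronousQueue above buffers nothing: each execute() must hand its task directly to a waiting thread or trigger creation of a new one, and once maximumPoolSize is reached further submissions are rejected. The fairness flag (true) makes handoffs FIFO. A small self-contained demonstration of the rejection behavior, with arbitrary pool sizes:

import java.util.concurrent.*;

public class HandoffDemo {
    public static void main(String[] args) {
        ThreadPoolExecutor tpe = new ThreadPoolExecutor(0, 1, 60L, TimeUnit.SECONDS,
                new SynchronousQueue<Runnable>(true), Executors.defaultThreadFactory());
        // First task: no idle worker is waiting, but the pool is below
        // maximumPoolSize, so a new thread is created to run it.
        tpe.execute(() -> {
            try { Thread.sleep(5_000); } catch (InterruptedException ignored) { }
        });
        try {
            // Second task: the only worker is busy and the queue cannot buffer,
            // so the default AbortPolicy rejects the submission.
            tpe.execute(() -> { });
        } catch (RejectedExecutionException e) {
            System.out.println("rejected: pool saturated");
        }
        tpe.shutdownNow();
    }
}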

From source file:org.apache.hadoop.hbase.PerformanceEvaluation2.java

private void doMultipleClients(final Test cmd, final List<TableSplit> splits, final int nthread)
        throws IOException {

    BlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(nthread);
    final ThreadPoolExecutor services = new ThreadPoolExecutor(nthread, nthread, 10, TimeUnit.SECONDS, queue,
            new ThreadPoolExecutor.CallerRunsPolicy());
    for (final TableSplit ts : splits) {
        services.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    long startTime = System.currentTimeMillis();
                    runOneClient(cmd, ts);
                    long elapsedTime = System.currentTimeMillis() - startTime;

                    LOG.info("Finished " + Thread.currentThread().getName() + " in " + elapsedTime + "ms for "
                            + cmd.rows.get() + " rows and " + cmd.kvs.get() + " cols");

                    totalRowCount.add(cmd.rows.get());
                    totalKVCount.add(cmd.kvs.get());
                } catch (Exception e) {
                    e.printStackTrace();
                    throw new RuntimeException(e);
                }
            }
        });
    }

    services.shutdown();
    try {
        services.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}
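
The bounded queue plus CallerRunsPolicy above gives the submission loop natural back-pressure: once all nthread workers are busy and the queue is full, the next submission runs on the submitting thread, slowing the loop down instead of failing. A minimal sketch of that effect, with arbitrary sizes:

ThreadPoolExecutor services = new ThreadPoolExecutor(2, 2, 10L, TimeUnit.SECONDS,
        new LinkedBlockingQueue<Runnable>(2),
        new ThreadPoolExecutor.CallerRunsPolicy());
for (int i = 0; i < 10; i++) {
    // When both workers are busy and two tasks are queued, this call
    // executes the task inline on the submitting (main) thread.
    services.execute(() -> System.out.println("on " + Thread.currentThread().getName()));
}
services.shutdown();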

From source file:org.codice.ddf.commands.catalog.DumpCommand.java

@Override
protected Object executeWithSubject() throws Exception {
    final File dumpDir = new File(dirPath);

    if (!dumpDir.exists()) {
        printErrorMessage("Directory [" + dirPath + "] must exist.");
        console.println("If the directory does indeed exist, try putting the path in quotes.");
        return null;
    }

    if (!dumpDir.isDirectory()) {
        printErrorMessage("Path [" + dirPath + "] must be a directory.");
        return null;
    }

    if (!DEFAULT_TRANSFORMER_ID.matches(transformerId)) {
        transformers = getTransformers();
        if (transformers == null) {
            console.println(transformerId + " is an invalid metacard transformer.");
            return null;
        }
    }

    CatalogFacade catalog = getCatalog();
    FilterBuilder builder = getFilterBuilder();

    Filter createdFilter = null;
    if ((createdAfter != null) && (createdBefore != null)) {
        DateTime createStartDateTime = DateTime.parse(createdAfter);
        DateTime createEndDateTime = DateTime.parse(createdBefore);
        createdFilter = builder.attribute(Metacard.CREATED).is().during().dates(createStartDateTime.toDate(),
                createEndDateTime.toDate());
    } else if (createdAfter != null) {
        DateTime createStartDateTime = DateTime.parse(createdAfter);
        createdFilter = builder.attribute(Metacard.CREATED).is().after().date(createStartDateTime.toDate());
    } else if (createdBefore != null) {
        DateTime createEndDateTime = DateTime.parse(createdBefore);
        createdFilter = builder.attribute(Metacard.CREATED).is().before().date(createEndDateTime.toDate());
    }

    Filter modifiedFilter = null;
    if ((modifiedAfter != null) && (modifiedBefore != null)) {
        DateTime modifiedStartDateTime = DateTime.parse(modifiedAfter);
        DateTime modifiedEndDateTime = DateTime.parse(modifiedBefore);
        modifiedFilter = builder.attribute(Metacard.MODIFIED).is().during()
                .dates(modifiedStartDateTime.toDate(), modifiedEndDateTime.toDate());
    } else if (modifiedAfter != null) {
        DateTime modifiedStartDateTime = DateTime.parse(modifiedAfter);
        modifiedFilter = builder.attribute(Metacard.MODIFIED).is().after().date(modifiedStartDateTime.toDate());
    } else if (modifiedBefore != null) {
        DateTime modifiedEndDateTime = DateTime.parse(modifiedBefore);
        modifiedFilter = builder.attribute(Metacard.MODIFIED).is().before().date(modifiedEndDateTime.toDate());
    }

    Filter filter = null;
    if ((createdFilter != null) && (modifiedFilter != null)) {
        // Filter by both created and modified dates
        filter = builder.allOf(createdFilter, modifiedFilter);
    } else if (createdFilter != null) {
        // Only filter by created date
        filter = createdFilter;
    } else if (modifiedFilter != null) {
        // Only filter by modified date
        filter = modifiedFilter;
    } else {
        // Don't filter by date range
        filter = builder.attribute(Metacard.ID).is().like().text(WILDCARD);
    }

    if (cqlFilter != null) {
        filter = CQL.toFilter(cqlFilter);
    }

    QueryImpl query = new QueryImpl(filter);
    query.setRequestsTotalResultsCount(false);
    query.setPageSize(pageSize);

    Map<String, Serializable> props = new HashMap<String, Serializable>();
    // Avoid caching all results while dumping with native query mode
    props.put("mode", "native");

    final AtomicLong resultCount = new AtomicLong(0);
    long start = System.currentTimeMillis();

    SourceResponse response = catalog.query(new QueryRequestImpl(query, props));

    BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<Runnable>(multithreaded);
    RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.CallerRunsPolicy();
    final ExecutorService executorService = new ThreadPoolExecutor(multithreaded, multithreaded, 0L,
            TimeUnit.MILLISECONDS, blockingQueue, rejectedExecutionHandler);

    while (response.getResults().size() > 0) {
        response = catalog.query(new QueryRequestImpl(query, props));

        if (multithreaded > 1) {
            final List<Result> results = new ArrayList<Result>(response.getResults());
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    boolean transformationFailed = false;
                    for (final Result result : results) {
                        Metacard metacard = result.getMetacard();
                        try {
                            exportMetacard(dumpDir, metacard);
                        } catch (IOException | CatalogTransformerException e) {
                            transformationFailed = true;
                            LOGGER.debug("Failed to dump metacard {}", metacard.getId(), e);
                            executorService.shutdownNow();
                        }
                        printStatus(resultCount.incrementAndGet());
                    }
                    if (transformationFailed) {
                        LOGGER.error(
                                "One or more metacards failed to transform. Enable debug log for more details.");
                    }
                }
            });
        } else {
            for (final Result result : response.getResults()) {
                Metacard metacard = result.getMetacard();
                exportMetacard(dumpDir, metacard);
                printStatus(resultCount.incrementAndGet());
            }
        }

        if (response.getResults().size() < pageSize || pageSize == -1) {
            break;
        }

        if (pageSize > 0) {
            query.setStartIndex(query.getStartIndex() + pageSize);
        }
    }

    executorService.shutdown();

    while (!executorService.isTerminated()) {
        try {
            TimeUnit.MILLISECONDS.sleep(100);
        } catch (InterruptedException e) {
            // ignore
        }
    }

    long end = System.currentTimeMillis();
    String elapsedTime = timeFormatter.print(new Period(start, end).withMillis(0));
    console.printf(" %d file(s) dumped in %s\t%n", resultCount.get(), elapsedTime);
    LOGGER.info("{} file(s) dumped in {}", resultCount.get(), elapsedTime);
    console.println();

    return null;
}
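
The sleep-poll loop near the end of this method can be written more directly with awaitTermination, which blocks until the pool has drained or the timeout elapses. A sketch of the equivalent shutdown sequence:

executorService.shutdown();
try {
    // Blocks until all dump tasks finish; the timeout is effectively "forever".
    executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
} catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // preserve the interrupt instead of ignoring it
}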

From source file:org.apache.bookkeeper.common.util.OrderedExecutor.java

protected ThreadPoolExecutor createSingleThreadExecutor(ThreadFactory factory) {
    BlockingQueue<Runnable> queue;
    if (enableBusyWait) {
        // Use queue with busy-wait polling strategy
        queue = new BlockingMpscQueue<>(maxTasksInQueue > 0 ? maxTasksInQueue : DEFAULT_MAX_ARRAY_QUEUE_SIZE);
    } else {
        // By default, use regular JDK LinkedBlockingQueue
        queue = new LinkedBlockingQueue<>();
    }
    return new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, queue, factory);
}
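
With corePoolSize and maximumPoolSize both set to 1, this is the same shape of pool that Executors.newSingleThreadExecutor(factory) builds internally; constructing ThreadPoolExecutor directly keeps the concrete type, which is what lets this method swap in a custom queue. A minimal sketch of the resulting serial execution:

ThreadPoolExecutor single = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
        new LinkedBlockingQueue<Runnable>(), Executors.defaultThreadFactory());
single.execute(() -> System.out.println("task 1"));
single.execute(() -> System.out.println("task 2")); // runs after task 1, on the same thread
single.shutdown();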

From source file:org.apache.hadoop.hbase.client.HTable.java

public static ThreadPoolExecutor getDefaultExecutor(Configuration conf) {
    int maxThreads = conf.getInt("hbase.htable.threads.max", Integer.MAX_VALUE);
    if (maxThreads == 0) {
        maxThreads = 1; // is there a better default?
    }
    long keepAliveTime = conf.getLong("hbase.htable.threads.keepalivetime", 60);

    // Using the "direct handoff" approach, new threads are created only when
    // necessary, and the pool can grow unbounded. That could be bad, but in HCM
    // we create only as many Runnables as there are region servers, so the pool
    // also scales as new region servers are added.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("htable"));
    pool.allowCoreThreadTimeOut(true);
    return pool;
}
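
Because maximumPoolSize here is effectively unbounded, the SynchronousQueue handoff never rejects: each submission either reuses an idle thread or starts a new one, much like Executors.newCachedThreadPool. A sketch of how a burst of tasks drives thread creation, with an arbitrary burst size:

ThreadPoolExecutor pool = new ThreadPoolExecutor(1, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS,
        new SynchronousQueue<Runnable>(), Executors.defaultThreadFactory());
pool.allowCoreThreadTimeOut(true);
for (int i = 0; i < 8; i++) {
    pool.execute(() -> {
        try { Thread.sleep(1_000); } catch (InterruptedException ignored) { }
    });
}
// All 8 tasks run concurrently, so 8 threads were created for the burst.
System.out.println("pool size after burst: " + pool.getPoolSize());
pool.shutdown();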

From source file:org.apache.tez.dag.app.launcher.ContainerLauncherImpl.java

@Override
public void serviceStart() {
    cmProxy = new ContainerManagementProtocolProxy(getConfig());

    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat("ContainerLauncher #%d").setDaemon(true)
            .build();

    // Start with a default core-pool size of 10 and change it dynamically.
    launcherPool = new ThreadPoolExecutor(INITIAL_POOL_SIZE, Integer.MAX_VALUE, 1, TimeUnit.HOURS,
            new LinkedBlockingQueue<Runnable>(), tf);
    eventHandlingThread = new Thread() {
        @Override
        public void run() {
            NMCommunicatorEvent event = null;
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    LOG.error("Returning, interrupted : " + e);
                    return;
                }
                int poolSize = launcherPool.getCorePoolSize();

                // See if we need to bump up the pool size, but only if we
                // haven't reached the maximum limit yet.
                if (poolSize != limitOnPoolSize) {

                    // nodes where containers will run at *this* point of time. This is
                    // *not* the cluster size and doesn't need to be.
                    int numNodes = context.getAllNodes().size();
                    int idealPoolSize = Math.min(limitOnPoolSize, numNodes);

                    if (poolSize < idealPoolSize) {
                        // Bump up the pool size to idealPoolSize + INITIAL_POOL_SIZE; the
                        // latter is just a buffer so we are not always increasing the
                        // pool-size
                        int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize + INITIAL_POOL_SIZE);
                        LOG.info("Setting ContainerLauncher pool size to " + newPoolSize
                                + " as number-of-nodes to talk to is " + numNodes);
                        launcherPool.setCorePoolSize(newPoolSize);
                    }
                }

                // the events from the queue are handled in parallel
                // using a thread pool
                launcherPool.execute(createEventProcessor(event));

                // TODO: Group launching of multiple containers to a single
                // NodeManager into a single connection
            }
        }
    };
    eventHandlingThread.setName("ContainerLauncher Event Handler");
    eventHandlingThread.start();
}
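
The dynamic resizing above relies on two ThreadPoolExecutor properties: the core size can be changed at runtime with setCorePoolSize, and with an unbounded LinkedBlockingQueue the pool never grows past its core size on its own (tasks queue instead). Raising the core size is therefore the only way this pool gains parallelism. A condensed sketch, assuming an initial size of 10:

ThreadPoolExecutor launcherPool = new ThreadPoolExecutor(10, Integer.MAX_VALUE, 1L, TimeUnit.HOURS,
        new LinkedBlockingQueue<Runnable>());
// With an unbounded queue, offer() always succeeds, so no thread beyond the
// core size is ever created and the maximumPoolSize above is effectively unused.
launcherPool.setCorePoolSize(25); // grow the pool as more nodes come online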

From source file:com.spotify.heroic.HeroicCore.java

/**
 * Setup a fixed thread pool executor that correctly handles unhandled exceptions.
 *
 * @param threads Number of threads to configure.
 */
private ExecutorService setupExecutor(final int threads) {
    return new ThreadPoolExecutor(threads, threads, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(),
            new ThreadFactoryBuilder().setNameFormat("heroic-core-%d")
                    .setUncaughtExceptionHandler(uncaughtExceptionHandler).build()) {
        @Override
        protected void afterExecute(Runnable r, Throwable t) {
            super.afterExecute(r, t);

            if (t == null && (r instanceof Future<?>)) {
                try {
                    ((Future<?>) r).get();
                } catch (CancellationException e) {
                    t = e;
                } catch (ExecutionException e) {
                    t = e.getCause();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }

            if (t != null) {
                if (log.isErrorEnabled()) {
                    log.error("Unhandled exception caught in core executor", t);
                    log.error("Exiting (code=2)");
                } else {
                    System.err.println("Unhandled exception caught in core executor");
                    System.err.println("Exiting (code=2)");
                    t.printStackTrace(System.err);
                }

                System.exit(2);
            }
        }
    };
}
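
The afterExecute override above is needed because submit() wraps each task in a FutureTask, which captures any throwable until Future.get() is called; without unwrapping it, an exception thrown inside a submitted task never reaches the uncaught-exception handler. A minimal demonstration of the difference on a plain executor:

ExecutorService plain = Executors.newSingleThreadExecutor();
// Wrapped in a FutureTask: the exception is captured, and nothing is logged.
plain.submit((Runnable) () -> { throw new RuntimeException("silently captured"); });
// Raw Runnable via execute(): the exception propagates to the worker thread's
// uncaught-exception handler and is printed to stderr.
plain.execute(() -> { throw new RuntimeException("reaches the handler"); });
plain.shutdown();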

From source file:com.alibaba.wasp.fserver.FServer.java

/**
 * Starts a FServer at the default location
 *
 * @param conf the server configuration
 * @throws java.io.IOException
 * @throws InterruptedException
 */
public FServer(Configuration conf) throws IOException, InterruptedException {
    this.conf = conf;
    this.isOnline = false;
    // Set how many times to retry talking to another server over FConnection.
    FConnectionManager.setServerSideFConnectionRetries(this.conf, LOG);

    // Config'ed params
    this.msgInterval = conf.getInt("wasp.fserver.msginterval", 3 * 1000);

    this.sleeper = new Sleeper(this.msgInterval, this);

    this.numEntityGroupsToReport = conf.getInt("wasp.fserver.numentitygroupstoreport", 10);

    this.rpcTimeout = conf.getInt(FConstants.WASP_RPC_TIMEOUT_KEY, FConstants.DEFAULT_WASP_RPC_TIMEOUT);

    this.abortRequested = false;
    this.stopped = false;
    this.actionManager = new StorageActionManager(conf);

    // Server to handle client requests.
    String hostname = Strings
            .domainNamePointerToHostName(DNS.getDefaultHost(conf.get("wasp.fserver.dns.interface", "default"),
                    conf.get("wasp.fserver.dns.nameserver", "default")));
    int port = conf.getInt(FConstants.FSERVER_PORT, FConstants.DEFAULT_FSERVER_PORT);
    // Creation of a HSA will force a resolve.
    InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
    if (initialIsa.getAddress() == null) {
        throw new IllegalArgumentException("Failed resolve of " + initialIsa);
    }

    this.rpcServer = WaspRPC.getServer(FServer.class, this,
            new Class<?>[] { ClientProtocol.class, AdminProtocol.class, WaspRPCErrorHandler.class,
                    OnlineEntityGroups.class },
            initialIsa.getHostName(), // BindAddress is
            // IP we got for
            // this server.
            initialIsa.getPort(), conf);
    // Set our address.
    this.isa = this.rpcServer.getListenerAddress();

    this.leases = new Leases(conf.getInt(FConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));

    this.startcode = System.currentTimeMillis();

    int maxThreads = conf.getInt("wasp.transaction.threads.max", 150);

    this.pool = new ThreadPoolExecutor(1, maxThreads, 60, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
            new DaemonThreadFactory("thread factory"));
    ((ThreadPoolExecutor) this.pool).allowCoreThreadTimeOut(true);

    this.scannerLeaseTimeoutPeriod = conf.getInt(FConstants.WASP_CLIENT_SCANNER_TIMEOUT_PERIOD,
            FConstants.DEFAULT_WASP_CLIENT_SCANNER_TIMEOUT_PERIOD);

    this.driver = new BaseDriver(this);
    this.splitThread = new SplitThread(this);
    this.globalEntityGroup = new GlobalEntityGroup(this);
}

From source file:org.alfresco.repo.batch.BatchProcessor.java

/**
 * Invokes the worker for each entry in the collection, managing transactions and collating success / failure
 * information.
 * 
 * @param worker
 *            the worker
 * @param splitTxns
 *            Can the modifications to Alfresco be split across multiple transactions for maximum performance? If
 *            <code>true</code>, worker invocations are isolated in separate transactions in batches for
 *            increased performance. If <code>false</code>, all invocations are performed in the current
 *            transaction. This is required if calling synchronously (e.g. in response to an authentication event in
 *            the same transaction).
 * @return the number of invocations
 */
@SuppressWarnings("serial")
public int process(final BatchProcessWorker<T> worker, final boolean splitTxns) {
    int count = workProvider.getTotalEstimatedWorkSize();
    synchronized (this) {
        this.startTime = new Date();
        if (this.logger.isInfoEnabled()) {
            if (count >= 0) {
                this.logger.info(getProcessName() + ": Commencing batch of " + count + " entries");
            } else {
                this.logger.info(getProcessName() + ": Commencing batch");

            }
        }
    }

    // Create a thread pool executor with the specified number of threads and a finite blocking queue of jobs
    ExecutorService executorService = splitTxns && this.workerThreads > 1
            ? new ThreadPoolExecutor(this.workerThreads, this.workerThreads, 0L, TimeUnit.MILLISECONDS,
                    new ArrayBlockingQueue<Runnable>(this.workerThreads * this.batchSize * 10) {
                        // Add blocking behaviour to work queue
                        @Override
                        public boolean offer(Runnable o) {
                            try {
                                put(o);
                            } catch (InterruptedException e) {
                                return false;
                            }
                            return true;
                        }

                    }, threadFactory)
            : null;
    try {
        Iterator<T> iterator = new WorkProviderIterator<T>(this.workProvider);
        int id = 0;
        List<T> batch = new ArrayList<T>(this.batchSize);
        while (iterator.hasNext()) {
            batch.add(iterator.next());
            boolean hasNext = iterator.hasNext();
            if (batch.size() >= this.batchSize || !hasNext) {
                final TxnCallback callback = new TxnCallback(id++, worker, batch, splitTxns);
                if (hasNext) {
                    batch = new ArrayList<T>(this.batchSize);
                }

                if (executorService == null) {
                    callback.run();
                } else {
                    executorService.execute(callback);
                }
            }
        }
        return count;
    } finally {
        if (executorService != null) {
            executorService.shutdown();
            try {
                executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the interrupt status
            }
        }
        synchronized (this) {
            reportProgress(true);
            this.endTime = new Date();
            if (this.logger.isInfoEnabled()) {
                if (count >= 0) {
                    this.logger.info(getProcessName() + ": Completed batch of " + count + " entries");
                } else {
                    this.logger.info(getProcessName() + ": Completed batch");

                }
            }
            if (this.totalErrors > 0 && this.logger.isErrorEnabled()) {
                this.logger.error(
                        getProcessName() + ": " + this.totalErrors
                                + " error(s) detected. Last error from entry \"" + this.lastErrorEntryId + "\"",
                        this.lastError);
            }
        }
    }
}
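
The anonymous ArrayBlockingQueue above overrides offer() to delegate to put(), a common trick for making execute() block when the queue is full: ThreadPoolExecutor only ever calls offer(), which normally fails fast and triggers the rejection handler. A standalone sketch of the same idea, with an arbitrary capacity:

BlockingQueue<Runnable> blockingOfferQueue = new ArrayBlockingQueue<Runnable>(100) {
    @Override
    public boolean offer(Runnable r) {
        try {
            put(r); // block the submitting thread until a slot frees up
            return true;
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return false; // falls through to the pool's rejection handler
        }
    }
};
// Safe here only because corePoolSize == maximumPoolSize: a blocking offer()
// would otherwise keep the pool from ever growing past its core size.
ExecutorService pool = new ThreadPoolExecutor(4, 4, 0L, TimeUnit.MILLISECONDS,
        blockingOfferQueue, Executors.defaultThreadFactory());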