Example usage for java.util.concurrent.ThreadPoolExecutor#ThreadPoolExecutor

Introduction

On this page you can find example usage for the java.util.concurrent.ThreadPoolExecutor constructor.

Prototype

public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
        BlockingQueue<Runnable> workQueue, RejectedExecutionHandler handler) 

Document

Creates a new ThreadPoolExecutor with the given initial parameters and the default thread factory (Executors#defaultThreadFactory).
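
A minimal sketch of constructing an executor with this overload; the pool sizes, keep-alive, and queue capacity below are illustrative values, not recommendations:

import java.util.concurrent.*;

// Bounded pool: 4 core threads, up to 8 total, idle extra threads retired
// after 60 seconds; when queue and pool are saturated, the submitting thread
// runs the task itself (CallerRunsPolicy is the RejectedExecutionHandler argument).
ThreadPoolExecutor executor = new ThreadPoolExecutor(4, 8, 60L, TimeUnit.SECONDS,
        new LinkedBlockingQueue<Runnable>(100), new ThreadPoolExecutor.CallerRunsPolicy());
executor.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
executor.shutdown();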

Usage

From source file: org.apache.hadoop.fs.s3r.S3RFileSystem.java

/** Called after a new FileSystem instance is constructed.
 * @param name a uri whose authority section names the host, port, etc.
 *   for this FileSystem
 * @param conf the configuration
 */
public void initialize(URI name, Configuration conf) throws IOException {
    super.initialize(name, conf);

    uri = URI.create(name.getScheme() + "://" + name.getAuthority());
    workingDir = new Path("/user", System.getProperty("user.name")).makeQualified(this.uri,
            this.getWorkingDirectory());

    // Try to get our credentials or just connect anonymously
    String accessKey = conf.get(ACCESS_KEY, null);
    String secretKey = conf.get(SECRET_KEY, null);

    String userInfo = name.getUserInfo();
    if (userInfo != null) {
        int index = userInfo.indexOf(':');
        if (index != -1) {
            accessKey = userInfo.substring(0, index);
            secretKey = userInfo.substring(index + 1);
        } else {
            accessKey = userInfo;
        }
    }

    AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(
            new BasicAWSCredentialsProvider(accessKey, secretKey), new InstanceProfileCredentialsProvider(),
            new AnonymousAWSCredentialsProvider());

    bucket = name.getHost();

    ClientConfiguration awsConf = new ClientConfiguration();
    awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS, DEFAULT_MAXIMUM_CONNECTIONS));
    boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS);
    awsConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);
    awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES, DEFAULT_MAX_ERROR_RETRIES));
    awsConf.setConnectionTimeout(conf.getInt(ESTABLISH_TIMEOUT, DEFAULT_ESTABLISH_TIMEOUT));
    awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT));

    String proxyHost = conf.getTrimmed(PROXY_HOST, "");
    int proxyPort = conf.getInt(PROXY_PORT, -1);
    if (!proxyHost.isEmpty()) {
        awsConf.setProxyHost(proxyHost);
        if (proxyPort >= 0) {
            awsConf.setProxyPort(proxyPort);
        } else {
            if (secureConnections) {
                LOG.warn("Proxy host set without port. Using HTTPS default 443");
                awsConf.setProxyPort(443);
            } else {
                LOG.warn("Proxy host set without port. Using HTTP default 80");
                awsConf.setProxyPort(80);
            }
        }
        String proxyUsername = conf.getTrimmed(PROXY_USERNAME);
        String proxyPassword = conf.getTrimmed(PROXY_PASSWORD);
        if ((proxyUsername == null) != (proxyPassword == null)) {
            String msg = "Proxy error: " + PROXY_USERNAME + " or " + PROXY_PASSWORD + " set without the other.";
            LOG.error(msg);
            throw new IllegalArgumentException(msg);
        }
        awsConf.setProxyUsername(proxyUsername);
        awsConf.setProxyPassword(proxyPassword);
        awsConf.setProxyDomain(conf.getTrimmed(PROXY_DOMAIN));
        awsConf.setProxyWorkstation(conf.getTrimmed(PROXY_WORKSTATION));
        if (LOG.isDebugEnabled()) {
            LOG.debug(
                    "Using proxy server {}:{} as user {} with password {} on " + "domain {} as workstation {}",
                    awsConf.getProxyHost(), awsConf.getProxyPort(), String.valueOf(awsConf.getProxyUsername()),
                    awsConf.getProxyPassword(), awsConf.getProxyDomain(), awsConf.getProxyWorkstation());
        }
    } else if (proxyPort >= 0) {
        String msg = "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
        LOG.error(msg);
        throw new IllegalArgumentException(msg);
    }

    s3 = new AmazonS3Client(credentials, awsConf);
    String endPoint = conf.getTrimmed(ENDPOINT, "");
    if (!endPoint.isEmpty()) {
        try {
            s3.setEndpoint(endPoint);
        } catch (IllegalArgumentException e) {
            String msg = "Incorrect endpoint: " + e.getMessage();
            LOG.error(msg);
            throw new IllegalArgumentException(msg, e);
        }
    }

    maxKeys = conf.getInt(MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
    partSize = conf.getLong(MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
    multiPartThreshold = conf.getInt(MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD);

    if (partSize < 5 * 1024 * 1024) {
        LOG.error(MULTIPART_SIZE + " must be at least 5 MB");
        partSize = 5 * 1024 * 1024;
    }

    if (multiPartThreshold < 5 * 1024 * 1024) {
        LOG.error(MIN_MULTIPART_THRESHOLD + " must be at least 5 MB");
        multiPartThreshold = 5 * 1024 * 1024;
    }

    int maxThreads = conf.getInt(MAX_THREADS, DEFAULT_MAX_THREADS);
    int coreThreads = conf.getInt(CORE_THREADS, DEFAULT_CORE_THREADS);
    if (maxThreads == 0) {
        maxThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    if (coreThreads == 0) {
        coreThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    long keepAliveTime = conf.getLong(KEEPALIVE_TIME, DEFAULT_KEEPALIVE_TIME);
    LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(
            maxThreads * conf.getInt(MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS));
    threadPoolExecutor = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS,
            workQueue, newDaemonThreadFactory("s3a-transfer-shared-"));
    threadPoolExecutor.allowCoreThreadTimeOut(true);

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMinimumUploadPartSize(partSize);
    transferConfiguration.setMultipartUploadThreshold(multiPartThreshold);

    transfers = new TransferManager(s3, threadPoolExecutor);
    transfers.setConfiguration(transferConfiguration);

    String cannedACLName = conf.get(CANNED_ACL, DEFAULT_CANNED_ACL);
    if (!cannedACLName.isEmpty()) {
        cannedACL = CannedAccessControlList.valueOf(cannedACLName);
    } else {
        cannedACL = null;
    }

    if (!s3.doesBucketExist(bucket)) {
        throw new IOException("Bucket " + bucket + " does not exist");
    }

    boolean purgeExistingMultipart = conf.getBoolean(PURGE_EXISTING_MULTIPART,
            DEFAULT_PURGE_EXISTING_MULTIPART);
    long purgeExistingMultipartAge = conf.getLong(PURGE_EXISTING_MULTIPART_AGE,
            DEFAULT_PURGE_EXISTING_MULTIPART_AGE);

    if (purgeExistingMultipart) {
        Date purgeBefore = new Date(new Date().getTime() - purgeExistingMultipartAge * 1000);

        transfers.abortMultipartUploads(bucket, purgeBefore);
    }

    serverSideEncryptionAlgorithm = conf.get(SERVER_SIDE_ENCRYPTION_ALGORITHM);

    setConf(conf);
}
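
Note the allowCoreThreadTimeOut(true) call above: it lets even core threads exit after keepAliveTime, so the transfer pool shrinks to nothing when idle. A minimal standalone sketch of that pattern (sizes are illustrative):

ThreadPoolExecutor pool = new ThreadPoolExecutor(8, 8, 60L, TimeUnit.SECONDS,
        new LinkedBlockingQueue<Runnable>(256));
// With this flag set, idle core threads also terminate after 60 seconds;
// keepAliveTime must be greater than zero or the call throws.
pool.allowCoreThreadTimeOut(true);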

From source file: com.jkoolcloud.tnt4j.streams.inputs.TNTInputStream.java

/**
 * Creates a thread pool executor service for a given number of threads with a bounded task queue - queue size is
 * 2x{@code threadsQty}. When the queue is full, new tasks are offered to it using the defined offer timeout. If a
 * task cannot be put into the queue within this time, it is skipped and a warning is logged. Memory use therefore
 * does not grow drastically when consumers cannot keep pace with the producers filling the queue, since producers
 * are forced to synchronize with consumers.
 *
 * @param threadsQty
 *            the number of threads in the pool
 * @param offerTimeout
 *            how long to wait before giving up on offering task to queue
 *
 * @return the newly created thread pool executor
 *
 * @see ThreadPoolExecutor#ThreadPoolExecutor(int, int, long, TimeUnit, BlockingQueue, ThreadFactory)
 */
private ExecutorService getBoundedExecutorService(int threadsQty, final int offerTimeout) {
    StreamsThreadFactory stf = new StreamsThreadFactory("StreamBoundedExecutorThread-"); // NON-NLS
    stf.addThreadFactoryListener(new StreamsThreadFactoryListener());

    ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadsQty, threadsQty, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(threadsQty * 2), stf);

    tpe.setRejectedExecutionHandler(new RejectedExecutionHandler() {
        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            try {
                boolean added = executor.getQueue().offer(r, offerTimeout, TimeUnit.SECONDS);
                if (!added) {
                    logger().log(OpLevel.WARNING,
                            StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                                    "TNTInputStream.tasks.buffer.limit"),
                            offerTimeout);
                    notifyStreamTaskRejected(r);
                }
            } catch (InterruptedException exc) {
                halt(true);
            }
        }
    });

    return tpe;
}
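
A hedged usage sketch of the pattern above, assumed to run inside the same stream class: because the rejection handler re-offers with a timeout, a burst of submissions slows the producer down instead of failing fast (processActivityItem is a hypothetical stand-in):

ExecutorService streamExecutor = getBoundedExecutorService(4, 30); // 4 threads, 30s offer timeout
for (int i = 0; i < 100; i++) {
    final int id = i;
    // Once 8 tasks (2 x 4) are queued, execute() is rejected and the handler
    // blocks for up to 30 seconds trying to re-offer before dropping the task.
    streamExecutor.execute(() -> processActivityItem(id));
}
streamExecutor.shutdown();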

From source file: com.linkedin.pinot.integration.tests.HybridClusterScanComparisonIntegrationTest.java

protected void runTestLoop(Callable<Object> testMethod, boolean useMultipleThreads) throws Exception {
    // Clean up the Kafka topic
    // TODO jfim: Re-enable this once PINOT-2598 is fixed
    // purgeKafkaTopicAndResetRealtimeTable();

    List<Pair<File, File>> enabledRealtimeSegments = new ArrayList<>();

    // Sort the realtime segments based on their segment name so they get added from earliest to latest
    TreeMap<File, File> sortedRealtimeSegments = new TreeMap<File, File>(new Comparator<File>() {
        @Override
        public int compare(File o1, File o2) {
            return _realtimeAvroToSegmentMap.get(o1).getName()
                    .compareTo(_realtimeAvroToSegmentMap.get(o2).getName());
        }
    });
    sortedRealtimeSegments.putAll(_realtimeAvroToSegmentMap);

    for (File avroFile : sortedRealtimeSegments.keySet()) {
        enabledRealtimeSegments.add(Pair.of(avroFile, sortedRealtimeSegments.get(avroFile)));

        if (useMultipleThreads) {
            _queryExecutor = new ThreadPoolExecutor(4, 4, 5, TimeUnit.SECONDS,
                    new ArrayBlockingQueue<Runnable>(50), new ThreadPoolExecutor.CallerRunsPolicy());
        }

        // Push avro for the new segment
        LOGGER.info("Pushing Avro file {} into Kafka", avroFile);
        pushAvroIntoKafka(Collections.singletonList(avroFile), KafkaStarterUtils.DEFAULT_KAFKA_BROKER,
                KAFKA_TOPIC);

        // Configure the scan based comparator to use the distinct union of the offline and realtime segments
        configureScanBasedComparator(enabledRealtimeSegments);

        QueryResponse queryResponse = _scanBasedQueryProcessor.processQuery("select count(*) from mytable");

        int expectedRecordCount = queryResponse.getNumDocsScanned();
        waitForRecordCountToStabilizeToExpectedCount(expectedRecordCount,
                System.currentTimeMillis() + getStabilizationTimeMs());

        // Run the actual tests
        LOGGER.info("Running queries");
        testMethod.call();

        if (useMultipleThreads) {
            if (_nQueriesRead == -1) {
                _queryExecutor.shutdown();
                _queryExecutor.awaitTermination(5, TimeUnit.MINUTES);
            } else {
                int totalQueries = _failedQueries.get() + _successfulQueries.get();
                while (totalQueries < _nQueriesRead) {
                    LOGGER.info("Completed " + totalQueries + " out of " + _nQueriesRead + " - waiting");
                    Uninterruptibles.sleepUninterruptibly(20, TimeUnit.SECONDS);
                    totalQueries = _failedQueries.get() + _successfulQueries.get();
                }
                if (totalQueries > _nQueriesRead) {
                    throw new RuntimeException("Executed " + totalQueries + " more than " + _nQueriesRead);
                }
                _queryExecutor.shutdown();
            }
        }
        int totalQueries = _failedQueries.get() + _successfulQueries.get();
        doDisplayStatus(totalQueries);

        // Release resources
        _scanBasedQueryProcessor.close();
        _compareStatusFileWriter.write("Status after push of " + avroFile + ":" + System.currentTimeMillis()
                + ":Executed " + _nQueriesRead + " queries, " + _failedQueries + " failures,"
                + _emptyResults.get() + " empty results\n");
    }
}
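
The CallerRunsPolicy above gives the test natural throttling: once the 50-slot queue and all four workers are busy, the submitting thread executes the query itself, pausing further submissions. A minimal sketch of the effect (runQuery is a hypothetical stand-in):

ThreadPoolExecutor qe = new ThreadPoolExecutor(4, 4, 5, TimeUnit.SECONDS,
        new ArrayBlockingQueue<Runnable>(50), new ThreadPoolExecutor.CallerRunsPolicy());
// Under saturation this runs on the calling thread instead of being rejected.
qe.execute(() -> runQuery());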

From source file: org.apache.hadoop.hbase.thrift.ThriftServerRunner.java

ExecutorService createExecutor(BlockingQueue<Runnable> callQueue, int workerThreads) {
    ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
    tfb.setDaemon(true);
    tfb.setNameFormat("thrift-worker-%d");
    return new ThreadPoolExecutor(workerThreads, workerThreads, Long.MAX_VALUE, TimeUnit.SECONDS, callQueue,
            tfb.build());
}
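
With corePoolSize equal to maximumPoolSize and an effectively infinite keep-alive, this is a fixed-size pool draining whatever call queue the server hands it. A minimal sketch of calling it (the queue bound here is an assumption for illustration):

BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(1000);
// 16 daemon workers named thrift-worker-0..15 consuming the shared queue.
ExecutorService workers = createExecutor(callQueue, 16);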

From source file: org.schedulesdirect.grabber.Grabber.java

private ThreadPoolExecutor createThreadPoolExecutor() {
    return new ThreadPoolExecutor(0, globalOpts.getMaxThreads(), 10, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(), new ThreadPoolExecutor.CallerRunsPolicy()) {
        @Override
        protected void afterExecute(Runnable r, Throwable t) {
            //super.afterExecute(r, t);
            if (t != null) {
                Logger log = Logger.getLogger(r.getClass());
                log.error("Task failed!", t);
                if (!(r instanceof LogoTask))
                    failedTask = true;
            }
        }
    };
}
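
Note that afterExecute receives a non-null Throwable only for tasks started with execute(); tasks passed to submit() are wrapped in a FutureTask that captures the exception, so t is null there and the failure surfaces only through the returned Future. A minimal sketch of the difference:

ThreadPoolExecutor pool = createThreadPoolExecutor();
pool.execute(() -> { throw new RuntimeException("seen by afterExecute"); });
// submit() wraps the task, so afterExecute gets t == null here and the
// exception is only observable via f.get().
Future<?> f = pool.submit((Runnable) () -> { throw new RuntimeException("only in f.get()"); });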

From source file: org.apache.drill.exec.client.DrillClient.java

public synchronized void connect(String connect, Properties props) throws RpcException {
    if (connected) {
        return;
    }

    final List<DrillbitEndpoint> endpoints = new ArrayList<>();
    if (isDirectConnection) {
        // Populate the endpoints list with all the drillbit information provided in the connection string
        endpoints.addAll(parseAndVerifyEndpoints(props.getProperty("drillbit"),
                config.getString(ExecConstants.INITIAL_USER_PORT)));
    } else {
        if (ownsZkConnection) {
            try {
                this.clusterCoordinator = new ZKClusterCoordinator(this.config, connect);
                this.clusterCoordinator.start(10000);
            } catch (Exception e) {
                throw new RpcException("Failure setting up ZK for client.", e);
            }
        }
        endpoints.addAll(clusterCoordinator.getAvailableEndpoints());
        // Make sure we have at least one endpoint in the list
        checkState(!endpoints.isEmpty(), "No active Drillbit endpoint found from ZooKeeper");
    }

    // shuffle the collection then get the first endpoint
    Collections.shuffle(endpoints);
    final DrillbitEndpoint endpoint = endpoints.get(0);

    if (props != null) {
        final UserProperties.Builder upBuilder = UserProperties.newBuilder();
        for (final String key : props.stringPropertyNames()) {
            upBuilder.addProperties(Property.newBuilder().setKey(key).setValue(props.getProperty(key)));
        }

        this.props = upBuilder.build();
    }

    eventLoopGroup = createEventLoop(config.getInt(ExecConstants.CLIENT_RPC_THREADS), "Client-");
    executor = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(), new NamedThreadFactory("drill-client-executor-")) {
        @Override
        protected void afterExecute(final Runnable r, final Throwable t) {
            if (t != null) {
                logger.error("{}.run() leaked an exception.", r.getClass().getName(), t);
            }
            super.afterExecute(r, t);
        }
    };
    client = new UserClient(clientName, config, supportComplexTypes, allocator, eventLoopGroup, executor);
    logger.debug("Connecting to server {}:{}", endpoint.getAddress(), endpoint.getUserPort());
    connect(endpoint);
    connected = true;
}
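
The (0, Integer.MAX_VALUE, 60s, SynchronousQueue) configuration is the same shape Executors.newCachedThreadPool(ThreadFactory) produces; the anonymous subclass exists only to add the afterExecute logging hook. An equivalent without the hook, for comparison:

// Same pool semantics, minus the leaked-exception logging.
ExecutorService cached = Executors.newCachedThreadPool(new NamedThreadFactory("drill-client-executor-"));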

From source file: org.apache.hadoop.hbase.client.ConnectionImplementation.java

private ExecutorService getThreadPool(int maxThreads, int coreThreads, String nameHint,
        BlockingQueue<Runnable> passedWorkQueue) {
    // shared HTable thread executor not yet initialized
    if (maxThreads == 0) {
        maxThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    if (coreThreads == 0) {
        coreThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
    BlockingQueue<Runnable> workQueue = passedWorkQueue;
    if (workQueue == null) {
        workQueue = new LinkedBlockingQueue<Runnable>(maxThreads * conf.getInt(
                HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
    }
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS,
            workQueue, Threads.newDaemonThreadFactory(toString() + nameHint));
    tpe.allowCoreThreadTimeOut(true);
    return tpe;
}

From source file: com.bosscs.spark.commons.utils.Utils.java

/**
 * Returns an instance of ThreadPoolExecutor using a bounded queue and blocking when the worker queue is full.
 * @param nThreads thread pool size
 * @param queueSize workers queue size
 * @return thread pool executor
 */
public static ExecutorService newBlockingFixedThreadPoolExecutor(int nThreads, int queueSize) {
    BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<>(queueSize);
    RejectedExecutionHandler blockingRejectedExecutionHandler = new RejectedExecutionHandler() {
        @Override
        public void rejectedExecution(Runnable task, ThreadPoolExecutor executor) {
            try {
                executor.getQueue().put(task);
            } catch (InterruptedException e) {
                // Swallowing the interrupt would hide it from the caller;
                // restore the flag so submitting code can observe it.
                Thread.currentThread().interrupt();
            }
        }

    };

    return new ThreadPoolExecutor(nThreads, nThreads, 0L, TimeUnit.MILLISECONDS, blockingQueue,
            blockingRejectedExecutionHandler);
}
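
A brief usage sketch: because the rejection handler re-queues with a blocking put, submitters stall rather than fail once all workers and queue slots are busy (process is a hypothetical stand-in):

ExecutorService pool = Utils.newBlockingFixedThreadPoolExecutor(4, 16);
for (int i = 0; i < 1000; i++) {
    final int id = i;
    pool.execute(() -> process(id)); // blocks the caller while 16 tasks are already queued
}
pool.shutdown();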

From source file: org.gtdfree.ApplicationHelper.java

public static synchronized void executeInBackground(Runnable r) {

    if (backgroundExecutor == null) {
        backgroundExecutor = new ThreadPoolExecutor(0, 1, 1, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(), new ThreadFactory() {

                    @Override
                    public Thread newThread(Runnable r) {
                        Thread t = new Thread(r);
                        t.setName("BackgroundExecutor"); //$NON-NLS-1$
                        t.setPriority(Thread.MIN_PRIORITY);
                        t.setDaemon(false);
                        return t;
                    }
                });
    }

    backgroundExecutor.execute(r);

}
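
With corePoolSize 0 and maximumPoolSize 1, this acts as a lazy single-thread executor: the lone worker is created only when a task arrives (ThreadPoolExecutor starts a thread whenever the pool is empty and the queue is not) and exits after one idle second. A minimal sketch of the same shape:

// At most one worker, created on demand, retired after 1s of idleness.
ExecutorService background = new ThreadPoolExecutor(0, 1, 1L, TimeUnit.SECONDS,
        new LinkedBlockingQueue<Runnable>());
background.execute(() -> System.out.println("runs on a freshly created worker"));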

From source file: com.sentaroh.android.TaskAutomation.TaskManager.java

static final public void buildTaskExecThreadPool(final EnvironmentParms envParms,
        final TaskManagerParms taskMgrParms, final CommonUtilities util) {
    if (taskMgrParms.taskExecutorThreadPool != null)
        removeTaskExecThreadPool(envParms, taskMgrParms, util);
    SynchronousQueue<Runnable> slq = new SynchronousQueue<Runnable>();
    RejectedExecutionHandler rh = new RejectedExecutionHandler() {
        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            util.addDebugMsg(1, "W", "Task executor reject handler entered.");
            startTaskOutsideThreadPool(taskMgrParms, envParms, util, (TaskExecutor) r);
        }
    };
    taskMgrParms.taskExecutorThreadPool = new ThreadPoolExecutor(envParms.settingTaskExecThreadPoolCount + 2,
            envParms.settingTaskExecThreadPoolCount + 2, 10, TimeUnit.SECONDS, slq, rh);
    for (int i = 0; i < envParms.settingTaskExecThreadPoolCount + 2; i++) {
        final int num = i + 1;
        Runnable rt = new Runnable() {
            @Override
            public void run() {
                Thread.currentThread().setPriority(THREAD_PRIORITY_TASK_EXEC);
                Thread.currentThread().setName("TaskExec-" + num);
            }
        };
        taskMgrParms.taskExecutorThreadPool.execute(rt);
    }
    taskMgrParms.taskExecutorThreadPool.prestartAllCoreThreads();
    util.addDebugMsg(1, "I", "Task executor thread pool was created.");
}
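
The dummy runnables above exist only to name each worker and set its priority as the pool spins up. A custom ThreadFactory would achieve the same without synthetic tasks; a sketch of that alternative, using the seven-argument constructor that takes both a factory and a handler:

ThreadFactory tf = new ThreadFactory() {
    private final AtomicInteger num = new AtomicInteger(1); // java.util.concurrent.atomic
    @Override
    public Thread newThread(Runnable r) {
        Thread t = new Thread(r, "TaskExec-" + num.getAndIncrement());
        t.setPriority(THREAD_PRIORITY_TASK_EXEC);
        return t;
    }
};
taskMgrParms.taskExecutorThreadPool = new ThreadPoolExecutor(envParms.settingTaskExecThreadPoolCount + 2,
        envParms.settingTaskExecThreadPoolCount + 2, 10, TimeUnit.SECONDS, slq, tf, rh);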