Example usage for the java.util.concurrent ThreadPoolExecutor constructor

Introduction

This page collects example usages of the java.util.concurrent.ThreadPoolExecutor constructor.

Prototype

public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
        BlockingQueue<Runnable> workQueue, RejectedExecutionHandler handler) 

Document

Creates a new ThreadPoolExecutor with the given initial parameters and the default thread factory (Executors.defaultThreadFactory()).
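
A minimal, self-contained sketch of this overload (class and task names below are illustrative, not taken from any of the projects that follow): a small pool with a bounded queue and an explicit rejection policy, the same shape most of the examples below use.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PoolExample {
    public static void main(String[] args) throws InterruptedException {
        // 2 core threads, up to 4 in total; surplus threads are reclaimed after 30s idle.
        // The queue holds at most 8 tasks; once it is full and all 4 threads are busy,
        // CallerRunsPolicy makes the submitting thread run the task itself.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 4, 30, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(8), new ThreadPoolExecutor.CallerRunsPolicy());

        for (int i = 0; i < 20; i++) {
            final int taskId = i;
            pool.execute(new Runnable() {
                @Override
                public void run() {
                    System.out.println("task " + taskId + " on " + Thread.currentThread().getName());
                }
            });
        }

        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
    }
}

Note that threads beyond corePoolSize are created only when the queue is full, not merely because tasks are waiting.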

Usage

From source file:com.datasalt.pangool.solr.BatchWriter.java

public BatchWriter(EmbeddedSolrServer solr, int batchSize, TaskID tid, int writerThreads, int queueSize) {
    this.solr = solr;
    this.writerThreads = writerThreads;
    this.queueSize = queueSize;
    taskId = tid;

    // we need to obtain the settings before the constructor
    batchPool = new ThreadPoolExecutor(writerThreads, writerThreads, 5, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(queueSize), new ThreadPoolExecutor.CallerRunsPolicy());
    this.batchToWrite = new ArrayList<SolrInputDocument>(batchSize);
}
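
Here the pool is pinned at writerThreads (core size equals maximum size) with a queue bounded at queueSize. When the queue fills, CallerRunsPolicy makes the submitting thread execute the batch itself, throttling producers instead of dropping work.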

From source file:gov.nrel.bacnet.consumer.BACnet.java

private void initialize(Config config) throws IOException {
    LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(1000);
    RejectedExecutionHandler rejectedExec = new RejectedExecHandler();
    // general-purpose executor pool, sized from the config
    execSvc = Executors.newFixedThreadPool(config.getNumThreads());
    // dedicated fixed pool (20 threads, 120s keep-alive) for databus recording
    recorderSvc = new ThreadPoolExecutor(20, 20, 120, TimeUnit.SECONDS, queue, rejectedExec);
    schedSvc = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(config.getNumThreads());
    exec = new OurExecutor(schedSvc, execSvc, recorderSvc);
    String devname = config.getNetworkDevice();
    int device_id = config.getDeviceId();
    NetworkInterface networkinterface = null;

    try {
        networkinterface = java.net.NetworkInterface.getByName(devname);
    } catch (Exception ex) {
        System.out.println("Unable to open device: " + devname);
        System.exit(-1);
    }

    if (networkinterface == null) {
        System.out.println("Unable to open device: " + devname);
        System.exit(-1);
    }

    List<InterfaceAddress> addresses = networkinterface.getInterfaceAddresses();

    String sbroadcast = null;
    String saddress = null;
    //InterfaceAddress ifaceaddr = null;

    for (InterfaceAddress address : addresses) {
        logger.fine("Evaluating address: " + address.toString());
        if (address.getAddress().getAddress().length == 4) {
            logger.info("Address is ipv4, selecting: " + address.toString());
            sbroadcast = address.getBroadcast().toString().substring(1);
            saddress = address.getAddress().toString().substring(1);
            //ifaceaddr = address;
            break;
        } else {
            logger.info("Address is not ipv4, not selecting: " + address.toString());
        }
    }

    logger.info("Binding to: " + saddress + " " + sbroadcast);

    localDevice = new LocalDevice(device_id, sbroadcast);
    localDevice.setPort(LocalDevice.DEFAULT_PORT);
    localDevice.setTimeout(localDevice.getTimeout() * 3);
    localDevice.setSegTimeout(localDevice.getSegTimeout() * 3);
    try {
        localDevice.initialize();
        localDevice.setRetries(0); //don't retry as it seems to really be a waste.
    } catch (IOException e) {
        e.printStackTrace();
        return;
    }

    if (config.getSlaveDeviceEnabled()) {
        slaveDeviceTimer = new Timer();
        slaveDeviceTimer.schedule(new gov.nrel.bacnet.SlaveDevice(localDevice, config), 1000,
                config.getSlaveDeviceUpdateInterval() * 1000);
    }

    int counter = 0;

    String username = config.getDatabusUserName();
    String key = config.getDatabusKey();

    logger.info("user=" + username + " key=" + key);

    DatabusSender sender = null;

    if (config.getDatabusEnabled()) {
        sender = new DatabusSender(username, key, execSvc, config.getDatabusUrl(), config.getDatabusPort(),
                true);
    }
    logger.info("databus sender: " + sender);
    writer = new DatabusDataWriter(new DataPointWriter(sender));
    logger.info("databus writer" + writer);
}

From source file:org.deeplearning4j.models.word2vec.Word2Vec.java

/**
 * Train the model.
 */
public void fit() throws IOException {
    boolean loaded = buildVocab();
    //save vocab after building
    if (!loaded && saveVocab)
        vocab().saveVocab();
    if (stopWords == null)
        readStopWords();

    log.info("Training word2vec multithreaded");

    if (sentenceIter != null)
        sentenceIter.reset();
    if (docIter != null)
        docIter.reset();

    int[] docs = vectorizer.index().allDocs();

    if (docs.length < 1) {
        vectorizer.fit();
    }

    docs = vectorizer.index().allDocs();
    if (docs.length < 1) {
        throw new IllegalStateException("No documents found");
    }

    totalWords = vectorizer.numWordsEncountered();
    if (totalWords < 1)
        throw new IllegalStateException("Unable to train, total words less than 1");

    totalWords *= numIterations;

    log.info("Processing sentences...");

    AtomicLong numWordsSoFar = new AtomicLong(0);
    final AtomicLong nextRandom = new AtomicLong(5);
    ExecutorService exec = new ThreadPoolExecutor(Runtime.getRuntime().availableProcessors(),
            Runtime.getRuntime().availableProcessors(), 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(), new RejectedExecutionHandler() {
                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                    executor.submit(r);
                }
            });

    final Queue<List<VocabWord>> batch2 = new ConcurrentLinkedDeque<>();
    vectorizer.index().eachDoc(new Function<List<VocabWord>, Void>() {
        @Override
        public Void apply(List<VocabWord> input) {
            List<VocabWord> batch = new ArrayList<>();
            addWords(input, nextRandom, batch);
            if (!batch.isEmpty()) {
                batch2.add(batch);
            }

            return null;
        }
    }, exec);

    exec.shutdown();
    try {
        exec.awaitTermination(1, TimeUnit.DAYS);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    ActorSystem actorSystem = ActorSystem.create();

    for (int i = 0; i < numIterations; i++)
        doIteration(batch2, numWordsSoFar, nextRandom, actorSystem);
    actorSystem.shutdown();

}
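
Since the work queue here is unbounded, rejection can only occur once the executor has been shut down; the sleep-and-resubmit handler is therefore best read as a safety net for stragglers rather than a backpressure mechanism.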

From source file:org.wso2.carbon.device.mgt.input.adapter.http.HTTPEventAdapter.java

@Override
public void init(InputEventAdapterListener eventAdaptorListener) throws InputEventAdapterException {
    this.eventAdaptorListener = eventAdaptorListener;

    // Create a ThreadPoolExecutor if one has not been assigned yet
    if (executorService == null) {
        int minThread;
        int maxThread;
        long defaultKeepAliveTime;
        int jobQueueSize;

        // Use global properties when available; otherwise fall back to the default constants
        if (globalProperties.get(HTTPEventAdapterConstants.ADAPTER_MIN_THREAD_POOL_SIZE_NAME) != null) {
            minThread = Integer.parseInt(
                    globalProperties.get(HTTPEventAdapterConstants.ADAPTER_MIN_THREAD_POOL_SIZE_NAME));
        } else {
            minThread = HTTPEventAdapterConstants.ADAPTER_MIN_THREAD_POOL_SIZE;
        }

        if (globalProperties.get(HTTPEventAdapterConstants.ADAPTER_MAX_THREAD_POOL_SIZE_NAME) != null) {
            maxThread = Integer.parseInt(
                    globalProperties.get(HTTPEventAdapterConstants.ADAPTER_MAX_THREAD_POOL_SIZE_NAME));
        } else {
            maxThread = HTTPEventAdapterConstants.ADAPTER_MAX_THREAD_POOL_SIZE;
        }

        if (globalProperties.get(HTTPEventAdapterConstants.ADAPTER_KEEP_ALIVE_TIME_NAME) != null) {
            defaultKeepAliveTime = Integer
                    .parseInt(globalProperties.get(HTTPEventAdapterConstants.ADAPTER_KEEP_ALIVE_TIME_NAME));
        } else {
            defaultKeepAliveTime = HTTPEventAdapterConstants.DEFAULT_KEEP_ALIVE_TIME_IN_MILLS;
        }

        if (globalProperties.get(HTTPEventAdapterConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE_NAME) != null) {
            jobQueueSize = Integer.parseInt(
                    globalProperties.get(HTTPEventAdapterConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE_NAME));
        } else {
            jobQueueSize = HTTPEventAdapterConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE;
        }

        RejectedExecutionHandler rejectedExecutionHandler = new RejectedExecutionHandler() {
            @Override
            public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                try {
                    executor.getQueue().put(r);
                } catch (InterruptedException e) {
                    log.error("Exception while adding event to executor queue : " + e.getMessage(), e);
                }
            }

        };

        executorService = new ThreadPoolExecutor(minThread, maxThread, defaultKeepAliveTime,
                TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(jobQueueSize),
                rejectedExecutionHandler);

    }
}
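
The handler here blocks on executor.getQueue().put(r) until queue space frees up, converting rejection into backpressure on the caller. A known caveat of this idiom is that put() bypasses the executor's shutdown check, so a task enqueued during shutdown may never run.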

From source file:org.apache.hadoop.hbase.replication.regionserver.HFileReplicator.java

public HFileReplicator(Configuration sourceClusterConf, String sourceBaseNamespaceDirPath,
        String sourceHFileArchiveDirPath, Map<String, List<Pair<byte[], List<String>>>> tableQueueMap,
        Configuration conf, Connection connection) throws IOException {
    this.sourceClusterConf = sourceClusterConf;
    this.sourceBaseNamespaceDirPath = sourceBaseNamespaceDirPath;
    this.sourceHFileArchiveDirPath = sourceHFileArchiveDirPath;
    this.bulkLoadHFileMap = tableQueueMap;
    this.conf = conf;
    this.connection = connection;

    userProvider = UserProvider.instantiate(conf);
    fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
    this.hbaseStagingDir = conf.get("hbase.bulkload.staging.dir");
    this.maxCopyThreads = this.conf.getInt(REPLICATION_BULKLOAD_COPY_MAXTHREADS_KEY,
            REPLICATION_BULKLOAD_COPY_MAXTHREADS_DEFAULT);
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("HFileReplicationCallable-%1$d");
    this.exec = new ThreadPoolExecutor(maxCopyThreads, maxCopyThreads, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), builder.build());
    this.exec.allowCoreThreadTimeOut(true);
    this.copiesPerThread = conf.getInt(REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_KEY,
            REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_DEFAULT);

    sinkFs = FileSystem.get(conf);
}
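
Because the LinkedBlockingQueue is unbounded, maximumPoolSize never takes effect and the pool stays at maxCopyThreads; allowCoreThreadTimeOut(true) then lets even the core threads exit after the 60-second keep-alive when replication is idle.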

From source file:org.apache.hadoop.hbase.util.TestHBaseFsckOneRS.java

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
            MasterSyncObserver.class.getName());

    conf.setInt("hbase.regionserver.handler.count", 2);
    conf.setInt("hbase.regionserver.metahandler.count", 30);

    conf.setInt("hbase.htable.threads.max", POOL_SIZE);
    conf.setInt("hbase.hconnection.threads.max", 2 * POOL_SIZE);
    conf.setInt("hbase.hbck.close.timeout", 2 * REGION_ONLINE_TIMEOUT);
    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 8 * REGION_ONLINE_TIMEOUT);
    TEST_UTIL.startMiniCluster(1);

    tableExecutorService = new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("testhbck"));

    hbfsckExecutorService = new ScheduledThreadPoolExecutor(POOL_SIZE);

    AssignmentManager assignmentManager = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
    regionStates = assignmentManager.getRegionStates();

    connection = (ClusterConnection) TEST_UTIL.getConnection();

    admin = connection.getAdmin();
    admin.setBalancerRunning(false, true);

    TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
    TEST_UTIL.waitUntilAllRegionsAssigned(TableName.NAMESPACE_TABLE_NAME);
}
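
A SynchronousQueue buffers nothing: every submission must hand off directly to a thread, so this pool grows from 1 toward POOL_SIZE under load instead of queueing, and the daemon thread factory keeps the test JVM free to exit.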

From source file:com.jkoolcloud.tnt4j.streams.custom.dirStream.DirStreamingManager.java

private void initialize() {
    executorService = new ThreadPoolExecutor(CORE_TREAD_POOL_SIZE, MAX_TREAD_POOL_SIZE, KEEP_ALIVE_TIME,
            TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(MAX_TREAD_POOL_SIZE * 2),
            new TNTInputStream.StreamsThreadFactory("DirStreamingManagerExecutorThread-")); // NON-NLS

    executorService.setRejectedExecutionHandler(new RejectedExecutionHandler() {
        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            try {
                boolean added = executor.getQueue().offer(r, offerTimeout, TimeUnit.SECONDS);
                if (!added) {
                    LOGGER.log(OpLevel.WARNING,
                            StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                                    "TNTInputStream.tasks.buffer.limit"),
                            offerTimeout);
                    notifyStreamingJobRejected(r);
                }
            } catch (InterruptedException exc) {
                LOGGER.log(OpLevel.WARNING,
                        StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                                "DirStreamingManager.job.offer.interrupted"),
                        ((StreamingJob) r).getJobId(), exc);
            }
        }
    });

    dirWatchdog = new DirWatchdog(dirPath, DirWatchdog.getDefaultFilter(fileWildcardName));
    dirWatchdog.addObserverListener(new FileAlterationListenerAdaptor() {
        @Override
        public void onFileCreate(File file) {
            handleJobConfigCreate(file);
        }

        @Override
        public void onFileChange(File file) {
            handleJobConfigChange(file);
        }

        @Override
        public void onFileDelete(File file) {
            handleJobConfigRemoval(file);
        }
    });

    LOGGER.log(OpLevel.DEBUG, StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
            "DirStreamingManager.dir.monitoring.started"), dirPath, fileWildcardName);
}
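
Instead of blocking indefinitely, this handler waits up to offerTimeout seconds for queue space and then logs a warning and notifies listeners that the job was rejected: a middle ground between a blocking put() and outright rejection.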

From source file:org.codice.ddf.commands.catalog.ReplicationCommand.java

@Override
protected Object executeWithSubject() throws Exception {
    final CatalogFacade catalog = getCatalog();

    final CatalogFacade framework = new Framework(getService(CatalogFramework.class));
    Set<String> sourceIds = framework.getSourceIds();

    while (true) {
        if (StringUtils.isBlank(sourceId) || !sourceIds.contains(sourceId)) {
            console.println("Please enter the Source ID you would like to replicate:");
            for (String id : sourceIds) {
                console.println("\t" + id);
            }
        } else {
            break;
        }
        sourceId = getInput("ID:  ");
    }

    if (batchSize > MAX_BATCH_SIZE || batchSize < 1) {
        console.println("Batch Size must be between 1 and 1000.");
        return null;
    }

    start = System.currentTimeMillis();

    final Filter filter = (cqlFilter != null) ? CQL.toFilter(cqlFilter)
            : getFilter(getFilterStartTime(start), start, Metacard.EFFECTIVE);

    QueryImpl query = new QueryImpl(filter);
    query.setRequestsTotalResultsCount(true);
    query.setPageSize(batchSize);
    query.setSortBy(new SortByImpl(Metacard.EFFECTIVE, SortOrder.DESCENDING));
    QueryRequest queryRequest = new QueryRequestImpl(query, Arrays.asList(sourceId));
    SourceResponse response;
    try {
        response = framework.query(queryRequest);
    } catch (Exception e) {
        printErrorMessage("Error occurred while querying the Federated Source.\n" + e.getMessage());
        return null;
    }

    final long totalHits = response.getHits();
    final long totalPossible;
    if (totalHits == 0) {
        console.println("No records were found to replicate.");
        return null;
    }

    // If the maxMetacards is set, restrict the totalPossible to the number of maxMetacards
    if (maxMetacards > 0 && maxMetacards <= totalHits) {
        totalPossible = maxMetacards;
    } else {
        totalPossible = totalHits;
    }

    console.println("Starting replication for " + totalPossible + " Records");

    if (multithreaded > 1 && totalPossible > batchSize) {
        BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<>(multithreaded);
        RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.CallerRunsPolicy();
        final ExecutorService executorService = new ThreadPoolExecutor(multithreaded, multithreaded, 0L,
                TimeUnit.MILLISECONDS, blockingQueue, rejectedExecutionHandler);
        console.printf("Running %d threads during replication.%n", multithreaded);

        do {
            LOGGER.debug("In loop at iteration {}", queryIndex.get());
            final int startIndex = queryIndex.get();
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    int count = queryAndIngest(framework, catalog, startIndex, filter);
                    printProgressAndFlush(start, totalPossible, ingestCount.addAndGet(count));
                }
            });
        } while (queryIndex.addAndGet(batchSize) <= totalPossible);
        executorService.shutdown();

        while (!executorService.isTerminated()) {
            try {
                TimeUnit.SECONDS.sleep(1);
            } catch (InterruptedException e) {
                // ignore
            }
        }
    } else {
        do {
            int count = queryAndIngest(framework, catalog, queryIndex.get(), filter);
            printProgressAndFlush(start, totalPossible, ingestCount.addAndGet(count));
        } while (queryIndex.addAndGet(batchSize) <= totalPossible);
    }

    console.println();
    long end = System.currentTimeMillis();
    String completed = String.format(
            " %d record(s) replicated; %d record(s) failed; completed in %3.3f seconds.", ingestCount.get(),
            failedCount.get(), (end - start) / MS_PER_SECOND);
    LOGGER.info("Replication Complete: {}", completed);
    console.println(completed);

    if (StringUtils.isNotBlank(failedDir)) {
        writeFailedMetacards(failedMetacards);
    }

    return null;
}

From source file:org.apache.http.contrib.benchmark.HttpBenchmark.java

private void execute() {

    prepare();

    ThreadPoolExecutor workerPool = new ThreadPoolExecutor(threads, threads, 5, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), new ThreadFactory() {

                public Thread newThread(Runnable r) {
                    return new Thread(r, "ClientPool");
                }

            });
    workerPool.prestartAllCoreThreads();

    BenchmarkWorker[] workers = new BenchmarkWorker[threads];
    for (int i = 0; i < threads; i++) {
        workers[i] = new BenchmarkWorker(params, verbosity, request[i], host, requests, keepAlive);
        workerPool.execute(workers[i]);
    }

    while (workerPool.getCompletedTaskCount() < threads) {
        Thread.yield();
        try {
            Thread.sleep(1000);
        } catch (InterruptedException ignore) {
        }
    }

    workerPool.shutdown();
    ResultProcessor.printResults(workers, host, url.toString(), contentLength);
}
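
prestartAllCoreThreads() creates all worker threads up front rather than lazily on first submission, so thread startup cost stays out of the measured benchmark window.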

From source file:org.flowable.engine.impl.asyncexecutor.DefaultAsyncJobExecutor.java

protected void initAsyncJobExecutionThreadPool() {
    if (threadPoolQueue == null) {
        log.info("Creating thread pool queue of size {}", queueSize);
        threadPoolQueue = new ArrayBlockingQueue<Runnable>(queueSize);
    }

    if (executorService == null) {
        log.info("Creating executor service with corePoolSize {}, maxPoolSize {} and keepAliveTime {}",
                corePoolSize, maxPoolSize, keepAliveTime);

        BasicThreadFactory threadFactory = new BasicThreadFactory.Builder()
                .namingPattern("flowable-async-job-executor-thread-%d").build();
        executorService = new ThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTime,
                TimeUnit.MILLISECONDS, threadPoolQueue, threadFactory);
    }

    if (unlockOwnedJobs) {
        unlockOwnedJobs();
    }
}