Example usage for java.util.concurrent Executors newFixedThreadPool

Introduction

This page lists example usages of java.util.concurrent Executors newFixedThreadPool, drawn from open-source projects.

Prototype

public static ExecutorService newFixedThreadPool(int nThreads) 

Document

Creates a thread pool that reuses a fixed number of threads operating off a shared unbounded queue.
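
Before the real-world usages below, here is a minimal, self-contained sketch (the pool size and task count are arbitrary illustration values): three worker threads service ten submitted tasks, and the surplus tasks wait in the pool's unbounded queue until a thread becomes free.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class FixedThreadPoolExample {
    public static void main(String[] args) throws InterruptedException {
        // Fixed pool of 3 threads; tasks beyond 3 wait in the shared unbounded queue
        ExecutorService pool = Executors.newFixedThreadPool(3);

        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            pool.submit(() -> System.out
                    .println("task " + taskId + " ran on " + Thread.currentThread().getName()));
        }

        pool.shutdown(); // stop accepting new tasks; already-queued tasks still run
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}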

Usage

From source file:com.walmart.gatling.commons.ScriptExecutor.java

@Override
public void onReceive(Object message) {
    log.debug("Script worker received task: {}", message);
    if (message instanceof Master.Job) {
        Cancellable abortLoop = getContext().system().scheduler().schedule(Duration.Zero(),
                Duration.create(60, TimeUnit.SECONDS), () -> {
                    Master.Job job = (Master.Job) message;
                    runCancelJob(job);
                }, getContext().system().dispatcher());
        ActorRef sender = getSender();
        ExecutorService pool = Executors.newFixedThreadPool(1);
        ExecutionContextExecutorService ctx = ExecutionContexts.fromExecutorService(pool);
        Future<Object> f = future(() -> runJob(message), ctx);
        f.onSuccess(new OnSuccess<Object>() {
            @Override
            public void onSuccess(Object result) throws Throwable {
                log.info("Notify Worker job status {}", result);
                sender.tell(result, getSelf());
                abortLoop.cancel();
            }
        }, ctx);
        f.onFailure(new OnFailure() {
            @Override
            public void onFailure(Throwable throwable) throws Throwable {
                log.error(throwable.toString());
                abortLoop.cancel();
                unhandled(message);
            }
        }, ctx);
        //getSender().tell(runJob(message));
    } else if (message instanceof Master.FileJob) {
        Master.FileJob fileJob = (Master.FileJob) message;
        try {
            if (fileJob.content != null) {
                FileUtils.touch(
                        new File(agentConfig.getJob().getPath(), fileJob.uploadFileRequest.getFileName()));
                FileUtils.writeStringToFile(
                        new File(agentConfig.getJob().getPath(), fileJob.uploadFileRequest.getFileName()),
                        fileJob.content);
                getSender().tell(new Worker.FileUploadComplete(fileJob.uploadFileRequest, HostUtils.lookupIp()),
                        getSelf());
            } else if (fileJob.remotePath != null) {
                FileUtils.touch(
                        new File(agentConfig.getJob().getPath(), fileJob.uploadFileRequest.getFileName()));
                FileUtils.copyURLToFile(new URL(fileJob.remotePath),
                        new File(agentConfig.getJob().getPath(), fileJob.uploadFileRequest.getFileName()));
                getSender().tell(new Worker.FileUploadComplete(fileJob.uploadFileRequest, HostUtils.lookupIp()),
                        getSelf());
            }
        } catch (IOException e) {
            e.printStackTrace();
            throw new RuntimeException(e);
        }
    }
}

From source file:com.netflix.spinnaker.echo.pubsub.amazon.SQSSubscriberProvider.java

@PostConstruct
public void start() {
    if (properties == null) {
        return;
    }

    ExecutorService executorService = Executors.newFixedThreadPool(properties.getSubscriptions().size());

    List<PubsubSubscriber> subscribers = new ArrayList<>();

    properties.getSubscriptions().forEach((AmazonPubsubProperties.AmazonPubsubSubscription subscription) -> {
        log.info("Bootstrapping SQS for SNS topic: {}", subscription.getTopicARN());
        if (subscription.getTemplatePath() != null && !subscription.getTemplatePath().equals("")) {
            log.info("Using template: {} for subscription: {}", subscription.getTemplatePath(),
                    subscription.getName());
        }

        ARN queueArn = new ARN(subscription.getQueueARN());

        SQSSubscriber worker = new SQSSubscriber(objectMapper, subscription, pubsubMessageHandler,
                AmazonSNSClientBuilder.standard().withCredentials(awsCredentialsProvider)
                        .withClientConfiguration(new ClientConfiguration()).withRegion(queueArn.getRegion())
                        .build(),
                AmazonSQSClientBuilder.standard().withCredentials(awsCredentialsProvider)
                        .withClientConfiguration(new ClientConfiguration()).withRegion(queueArn.getRegion())
                        .build(),
                () -> enabled.get(), registry);

        try {
            executorService.submit(worker);
            subscribers.add(worker);
            log.debug("Created worker for subscription: {}", subscription.getName());
        } catch (RejectedExecutionException e) {
            log.error("Could not start " + worker.getWorkerName(), e);
        }
    });
    pubsubSubscribers.putAll(subscribers);
}

From source file:at.ac.ait.ubicity.fileloader.FileLoader.java

/**
 * Load the given file into Cassandra, skipping any lines that were already ingested.
 *
 * @param _fileInfo A FileInformation object holding usage information about the file to load: line count already ingested, last access time, and so on
 * @param _keySpace Cassandra key space into which to ingest
 * @param _host Cassandra host / server
 * @param _batchSize MutationBatch size
 * @throws Exception Shouldn't happen, although the Disruptor may throw an Exception under duress
 */
@SuppressWarnings("unchecked")
public final static void load(final FileInformation _fileInfo, final String _keySpace, final String _host,
        final int _batchSize) throws Exception {

    if (!cassandraInitialized) {
        keySpace = AstyanaxInitializer.doInit("Test Cluster", _host, _keySpace);
        cassandraInitialized = true;
    }

    LongTimeStampSorter tsSorter = new LongTimeStampSorter();
    Thread tTSSorter = new Thread(tsSorter);
    tTSSorter.setPriority(Thread.MAX_PRIORITY - 1);
    tTSSorter.setName("long timestamp sorter ");
    tTSSorter.start();
    //get the log id from the file's URI
    final String log_id = _fileInfo.getURI().toString();

    final MutationBatch batch = keySpace.prepareMutationBatch();

    logger.info("got keyspace " + keySpace.getKeyspaceName() + " from Astyanax initializer");

    final LineIterator onLines = FileUtils.lineIterator(new File(_fileInfo.getURI()));

    final ExecutorService exec = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors() * 2);

    ColumnFamily crawl_stats = null;

    AggregationJob aggregationJob = new AggregationJob(keySpace, crawl_stats);
    Thread tAggJob = new Thread(aggregationJob);
    tAggJob.setName("Monitrix loader / aggregation job ");
    tAggJob.setPriority(Thread.MIN_PRIORITY + 1);
    tAggJob.start();
    logger.info("[FILELOADER] started aggregation job, ring buffer running");

    final Disruptor<SingleLogLineAsString> disruptor = new Disruptor(SingleLogLineAsString.EVENT_FACTORY,
            (int) Math.pow(TWO, 17), exec);
    SingleLogLineAsStringEventHandler.batch = batch;
    SingleLogLineAsStringEventHandler.keySpace = keySpace;
    SingleLogLineAsStringEventHandler.batchSize = _batchSize;
    SingleLogLineAsStringEventHandler.LOG_ID = log_id;
    SingleLogLineAsStringEventHandler.tsSorter = tsSorter;
    SingleLogLineAsStringEventHandler.aggregationJob = aggregationJob;

    //The EventHandler contains the actual logic for ingesting
    final EventHandler<SingleLogLineAsString> handler = new SingleLogLineAsStringEventHandler();

    disruptor.handleEventsWith(handler);

    //the aggregation job is already in place; start the disruptor
    final RingBuffer<SingleLogLineAsString> rb = disruptor.start();

    int _lineCount = 0;
    long _start, _lapse;
    _start = System.nanoTime();

    int _linesAlreadyProcessed = _fileInfo.getLineCount();

    //cycle through the lines already processed
    while (_lineCount < _linesAlreadyProcessed) {
        onLines.nextLine();
        _lineCount++;
    }

    //now get down to the work we actually must do, and fill the ring buffer
    logger.info("begin proccessing of file " + _fileInfo.getURI() + " @line #" + _lineCount);
    while (onLines.hasNext()) {

        final long _seq = rb.next();
        final SingleLogLineAsString event = rb.get(_seq);
        event.setValue(onLines.nextLine());
        rb.publish(_seq);
        _lineCount++;
    }
    _lapse = System.nanoTime() - _start;
    logger.info("ended proccessing of file " + _fileInfo.getURI() + " @line #" + _lineCount);

    //stop, waiting for last threads still busy to finish their work
    disruptor.shutdown();

    //update the file info, this will  land in the cache
    _fileInfo.setLineCount(_lineCount);
    _fileInfo.setLastAccess(System.currentTimeMillis());
    int _usageCount = _fileInfo.getUsageCount();
    _fileInfo.setUsageCount(_usageCount + 1);

    //make sure we release resources
    onLines.close();

    logger.info(
            "handled " + (_lineCount - _linesAlreadyProcessed) + " log lines in " + _lapse + " nanoseconds");

    //now go to aggregation step
    SortedSet<Long> timeStamps = new TreeSet(tsSorter.timeStamps);

    long _minTs = timeStamps.first();
    long _maxTs = timeStamps.last();
    logger.info("**** min TimeStamp = " + _minTs);
    logger.info("**** max TimeStamp = " + _maxTs);

    StatsTableActualizer.update(_fileInfo.getURI().toString(), _minTs, _maxTs, _lineCount);

    //        AggregationJob aggJob = new AggregationJob( keySpace, _host, _batchSize );
    //        Thread tAgg = new Thread( aggJob );
    //        tAgg.setName( "aggregation job " );
    //        tAgg.setPriority( Thread.MAX_PRIORITY - 1 );
    //        tAgg.start();

}

From source file:br.prof.salesfilho.oci.view.console.Main.java

public void extractFeatures() {

    if (this.propertySource.containsProperty("inputDir") && this.propertySource.containsProperty("outputDir")) {

        //Create a thread pool shared by the two feature extractors
        ExecutorService executor = Executors.newFixedThreadPool(2);

        BodyWomanFeatureExtractorExecutor e1 = new BodyWomanFeatureExtractorExecutor(true);
        e1.setInputDir(this.propertySource.getProperty("inputDir").toString());
        e1.setOutputDir(this.propertySource.getProperty("outputDir").toString());
        e1.setKernelSize(Double.valueOf(this.propertySource.getProperty("kernelsize").toString()));
        e1.setDatabaseName(this.propertySource.getProperty("databaseName").toString());

        executor.execute(e1);

        BodyWomanFeatureExtractorExecutor e2 = new BodyWomanFeatureExtractorExecutor(false);

        e2.setInputDir(this.propertySource.getProperty("inputDir").toString());
        e2.setOutputDir(this.propertySource.getProperty("outputDir").toString());
        e2.setKernelSize(Double.valueOf(this.propertySource.getProperty("kernelsize").toString()));
        e2.setDatabaseName(this.propertySource.getProperty("databaseName").toString());

        executor.execute(e2);

        //Wait for both extractors to finish
        executor.shutdown();
        try {
            executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
        }
        File databaseFile = new File(e1.getDatabaseName());
        bodyWomanDescriptorService.openDatabase(databaseFile);
        bodyWomanDescriptorService.add(e1.getBodyWomanDescriptor());
        bodyWomanDescriptorService.add(e2.getBodyWomanDescriptor());
        bodyWomanDescriptorService.save(databaseFile);

    } else {
        usage();
    }
}

From source file:com.devicehive.shim.config.server.KafkaRpcServerConfig.java

@Bean
public ExecutorService workerExecutor() {
    return Executors.newFixedThreadPool(workerThreads);
}

From source file:com.normalexception.app.rx8club.handler.AvatarLoader.java

/**
 * Constructor
 * @param context   Source context
 */
public AvatarLoader(Context context) {
    fileCache = new FileCache(context);
    executorService = Executors.newFixedThreadPool(5);
}

From source file:com.ibm.stocator.fs.swift.SwiftOutputStream.java

/**
 * Default constructor
 *
 * @param account JOSS account object
 * @param url URL connection
 * @param contentType content type
 * @param metadata input metadata
 * @param connectionManager SwiftConnectionManager
 * @throws IOException if error
 */
public SwiftOutputStream(JossAccount account, URL url, final String contentType, Map<String, String> metadata,
        SwiftConnectionManager connectionManager) throws IOException {
    mUrl = url;
    totalWritten = 0;
    mAccount = account;
    client = connectionManager.createHttpConnection();
    request = new HttpPut(mUrl.toString());
    request.addHeader("X-Auth-Token", account.getAuthToken());
    if (metadata != null && !metadata.isEmpty()) {
        for (Map.Entry<String, String> entry : metadata.entrySet()) {
            request.addHeader("X-Object-Meta-" + entry.getKey(), entry.getValue());
        }
    }

    PipedOutputStream out = new PipedOutputStream();
    final PipedInputStream in = new PipedInputStream();
    out.connect(in);
    execService = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    mOutputStream = out;
    Callable<Void> task = new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            InputStreamEntity entity = new InputStreamEntity(in, -1);
            entity.setChunked(true);
            entity.setContentType(contentType);
            request.setEntity(entity);

            LOG.debug("HTTP PUT request {}", mUrl.toString());
            HttpResponse response = client.execute(request);
            int responseCode = response.getStatusLine().getStatusCode();
            LOG.debug("HTTP PUT response {}. Response code {}", mUrl.toString(), responseCode);
            if (responseCode == 401) { // Unauthorized error
                mAccount.authenticate();
                request.removeHeaders("X-Auth-Token");
                request.addHeader("X-Auth-Token", mAccount.getAuthToken());
                LOG.warn("Token recreated for {}.  Retry request", mUrl.toString());
                response = client.execute(request);
                responseCode = response.getStatusLine().getStatusCode();
            }
            if (responseCode >= 400) { // Code may have changed from retrying
                throw new IOException("HTTP Error: " + responseCode + " Reason: "
                        + response.getStatusLine().getReasonPhrase());
            }

            return null;
        }
    };
    futureTask = execService.submit(task);
}

From source file:com.kurento.test.player.ParallelPlayerIT.java

private void testParallelPlay(String url, int statusCode, String contentType, boolean interrupt,
        String[] expectedHandlerFlow)
        throws ClientProtocolException, IOException, InterruptedException, ExecutionException {
    ExecutorService execute = Executors.newFixedThreadPool(nThreads);
    Collection<Future<?>> futures = new LinkedList<Future<?>>();

    // Perform nThreads calls
    for (int i = 0; i < nThreads; i++) {
        futures.add(execute.submit(
                new PlayerTst(url, getServerPort(), statusCode, contentType, interrupt, expectedHandlerFlow)));
    }

    // Wait for all threads to be terminated
    for (Future<?> future : futures) {
        future.get();
    }
}

From source file:net.solarnetwork.node.io.rxtx.SerialPortSupport.java

/**
 * Constructor.
 * 
 * @param serialPort
 *        the SerialPort to use
 * @param maxWait
 *        the maximum number of milliseconds to wait when waiting to read
 *        data
 */
public SerialPortSupport(SerialPort serialPort, long maxWait) {
    this.serialPort = serialPort;
    this.maxWait = maxWait;
    if (maxWait > 0) {
        executor = Executors.newFixedThreadPool(1);
    } else {
        executor = null;
    }
}

From source file:com.espertech.esper.multithread.TestMTDeterminismInsertInto.java

private void tryMultiInsertGroup(int numThreads, int numStatements, int numEvents) throws Exception {
    Configuration config = SupportConfigFactory.getConfiguration();
    // Uncommenting the line below would make every test in this class fail,
    // since insert-into dispatch order would no longer be preserved:
    // config.getEngineDefaults().getThreading().setInsertIntoDispatchPreserveOrder(false);

    EPServiceProvider engine = EPServiceProviderManager.getDefaultProvider(config);
    engine.initialize();

    // setup statements
    EPStatement[] insertIntoStmts = new EPStatement[numStatements];
    for (int i = 0; i < numStatements; i++) {
        insertIntoStmts[i] = engine.getEPAdministrator().createEPL("insert into MyStream select " + i
                + " as ident,count(*) as cnt from " + SupportBean.class.getName());
    }
    EPStatement stmtInsertTwo = engine.getEPAdministrator()
            .createEPL("select ident, sum(cnt) as mysum from MyStream group by ident");
    SupportUpdateListener listener = new SupportUpdateListener();
    stmtInsertTwo.addListener(listener);

    // execute
    ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);
    Future future[] = new Future[numThreads];
    ReentrantReadWriteLock sharedStartLock = new ReentrantReadWriteLock();
    sharedStartLock.writeLock().lock();
    for (int i = 0; i < numThreads; i++) {
        future[i] = threadPool.submit(
                new SendEventRWLockCallable(i, sharedStartLock, engine, new GeneratorIterator(numEvents)));
    }
    Thread.sleep(100);
    sharedStartLock.writeLock().unlock();

    threadPool.shutdown();
    threadPool.awaitTermination(10, TimeUnit.SECONDS);

    for (int i = 0; i < numThreads; i++) {
        assertTrue((Boolean) future[i].get());
    }

    // assert result
    EventBean newEvents[] = listener.getNewDataListFlattened();
    ArrayList resultsPerIdent[] = new ArrayList[numStatements];
    for (EventBean theEvent : newEvents) {
        int ident = (Integer) theEvent.get("ident");
        if (resultsPerIdent[ident] == null) {
            resultsPerIdent[ident] = new ArrayList();
        }
        long mysum = (Long) theEvent.get("mysum");
        resultsPerIdent[ident].add(mysum);
    }

    for (int statement = 0; statement < numStatements; statement++) {
        for (int i = 0; i < numEvents - 1; i++) {
            long expected = total(i + 1);
            assertEquals(expected, resultsPerIdent[statement].get(i));
        }
    }

    // destroy
    for (int i = 0; i < numStatements; i++) {
        insertIntoStmts[i].destroy();
    }
    stmtInsertTwo.destroy();
}