Example usage for java.util.concurrent ExecutorService execute

Introduction

On this page you can find usage examples for java.util.concurrent ExecutorService execute, collected from open-source projects.

Prototype

void execute(Runnable command);

Document

Executes the given command at some time in the future. The command may execute in a new thread, in a pooled thread, or in the calling thread, at the discretion of the Executor implementation.
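
Before the project examples below, here is a minimal, self-contained sketch of the typical execute() lifecycle (pool size and task bodies are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ExecuteExample {
    public static void main(String[] args) throws InterruptedException {
        // A fixed pool of four worker threads; execute() queues tasks for them.
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            // execute() is fire-and-forget: it returns no Future (use submit() for that).
            pool.execute(() -> System.out
                    .println("Task " + taskId + " on " + Thread.currentThread().getName()));
        }
        pool.shutdown(); // stop accepting new tasks; already-queued tasks still run
        pool.awaitTermination(10, TimeUnit.SECONDS); // wait for queued tasks to finish
    }
}

Because execute() offers no handle for results or exceptions, the examples below pair it with atomic counters, CountDownLatch, or shutdown()-based draining to observe completion.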

Usage

From source file:stroom.index.server.BenchmarkIndex.java

@Override
public void run() {
    init();

    final long batchStartTime = System.currentTimeMillis();

    final IndexShardWriterImpl[] writers = new IndexShardWriterImpl[indexShards.length];
    for (int i = 0; i < writers.length; i++) {
        final IndexShard indexShard = indexShards[i];
        writers[i] = new IndexShardWriterImpl(indexShardService, indexFields, indexShard.getIndex(),
                indexShard);
        writers[i].setRamBufferSizeMB(ramBufferMbSize);
        writers[i].open(true);
    }
    final AtomicLong atomicLong = new AtomicLong();

    final long indexStartTime = System.currentTimeMillis();

    final ExecutorService threadPoolExecutor = Executors.newFixedThreadPool(jobSize);
    for (int i = 0; i < jobSize; i++) {
        final Runnable r = () -> {
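            // Workers atomically claim document ids until docCount is reached.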
            long myId;
            while ((myId = atomicLong.incrementAndGet()) < docCount) {
                try {
                    final int idx = (int) (myId % writers.length);
                    writers[idx].addDocument(getDocument(myId));
                } catch (final Exception e) {
                    e.printStackTrace();
                }
            }
        };
        threadPoolExecutor.execute(r);
    }

    threadPoolExecutor.shutdown();

    // Wait for termination.
    while (!threadPoolExecutor.isTerminated()) {
        // Wait 1 second.
        ThreadUtil.sleep(1000);

        final long docsSoFar = atomicLong.get();
        final long secondsSoFar = (System.currentTimeMillis() - batchStartTime) / 1000;

        for (int i = 0; i < writers.length; i++) {
            final IndexShardWriterImpl impl = writers[i];
            final IndexShard indexShard = indexShards[i];

            if (secondsSoFar > 0) {
                final long docsPerSecond = docsSoFar / secondsSoFar;
                impl.sync();
                LOGGER.info("run() - " + StringUtils.rightPad(ModelStringUtil.formatCsv(docsSoFar), 10)
                        + " doc ps " + ModelStringUtil.formatCsv(docsPerSecond) + " ("
                        + indexShard.getFileSizeString() + ")");
            }
            if (nextCommit != null && docsSoFar > nextCommit) {
                impl.flush();
                nextCommit = ((docsSoFar / commitCount) * commitCount) + commitCount;
                LOGGER.info("run() - commit " + docsSoFar + " next commit is " + nextCommit);
            }
        }
    }
    final long indexEndTime = System.currentTimeMillis();
    // Clamp to one second to avoid division by zero on sub-second runs.
    final long secondsSoFar = Math.max(1, (System.currentTimeMillis() - batchStartTime) / 1000);
    final long docsPerSecond = atomicLong.get() / secondsSoFar;

    for (final IndexShardWriter writer : writers) {
        writer.close();
    }

    final long batchEndTime = System.currentTimeMillis();

    LOGGER.info("runWrite() - Complete");
    LOGGER.info("=====================");
    LOGGER.info("");
    LOGGER.info("Using Args");
    LOGGER.info("==========");
    LoggerPrintStream traceStream = LoggerPrintStream.create(LOGGER, false);
    traceArguments(traceStream);
    traceStream.close();
    LOGGER.info("");
    LOGGER.info("Stats");
    LOGGER.info("=====");

    LOGGER.info("Open Time  " + toMsNiceString(indexStartTime - batchStartTime));
    LOGGER.info("Index Time " + toMsNiceString(indexEndTime - indexStartTime));
    LOGGER.info("Close Time " + toMsNiceString(batchEndTime - indexEndTime));
    LOGGER.info("Total Time " + toMsNiceString(batchEndTime - batchStartTime));
    LOGGER.info("");
    LOGGER.info("Final Docs PS " + ModelStringUtil.formatCsv(docsPerSecond));

    traceStream = LoggerPrintStream.create(LOGGER, false);
    for (int i = 0; i < writers.length; i++) {
        LOGGER.info("");
        final IndexShardWriterImpl impl = writers[i];
        LOGGER.info("Writer " + StringUtils.leftPad(String.valueOf(i), 2));
        LOGGER.info("=========");
        impl.trace(traceStream);
    }
    traceStream.close();

    LOGGER.info("");
    LOGGER.info("Search");
    LOGGER.info("=====");

    try {
        final IndexShardSearcherImpl[] reader = new IndexShardSearcherImpl[indexShards.length];
        final IndexReader[] readers = new IndexReader[indexShards.length];
        for (int i = 0; i < reader.length; i++) {
            reader[i] = new IndexShardSearcherImpl(indexShards[i]);
            reader[i].open();
            readers[i] = reader[i].getReader();
        }

        for (final String arg : docArgs) {
            doSearchOnField(readers, arg);
        }

        doSearchOnField(readers, "multifield");
        doSearchOnField(readers, "dupfield");

        LOGGER.info("=====");

        for (int i = 0; i < reader.length; i++) {
            reader[i].close();
        }

    } catch (final Exception ex) {
        ex.printStackTrace();
    }

}
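
A note on the wait loop above: polling isTerminated() with a one-second sleep doubles as a per-second progress reporter. When no periodic reporting is needed, awaitTermination() is the more direct way to drain a pool; a minimal sketch (the one-hour timeout is arbitrary):

threadPoolExecutor.shutdown();
// Block until the queued work finishes, or give up after the timeout.
if (!threadPoolExecutor.awaitTermination(1, TimeUnit.HOURS)) {
    threadPoolExecutor.shutdownNow(); // interrupt any stragglers
}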

From source file:com.chicm.cmraft.core.NodeConnectionManager.java

public void collectVote(long term, long lastLogIndex, long lastLogTerm) {
    int nServers = getRemoteServers().size();
    if (nServers <= 0) {
        return;
    }
    ExecutorService executor = Executors.newFixedThreadPool(nServers, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName(getRaftNode().getName() + "-AsyncRpcCaller" + (byte) System.currentTimeMillis());
            return t;
        }
    });

    for (ServerInfo server : getRemoteServers()) {
        NodeConnection conn = connections.get(server);
        LOG.debug(getRaftNode().getName() + ": SENDING COLLECTVOTE Request TO: " + server);
        // The worker is a plain Runnable; execute() schedules it on one of the pool's threads.
        executor.execute(new AsynchronousVoteWorker(getRaftNode(), conn, getRaftNode().getServerInfo(),
                term, lastLogIndex, lastLogTerm));
    }
}

From source file:jcuda.jcublas.kernel.TestMatrixOperations.java

@Test
public void testMultipleThreads() throws InterruptedException {
    int numThreads = 10;
    final INDArray array = Nd4j.rand(300, 300);
    final INDArray expected = array.dup().mmul(array).mmul(array).div(array).div(array);
    final AtomicInteger correct = new AtomicInteger();
    final CountDownLatch latch = new CountDownLatch(numThreads);
    System.out.println("Running on " + ContextHolder.getInstance().deviceNum());
    ExecutorService executors = ExecutorServiceProvider.getExecutorService();

    for (int x = 0; x < numThreads; x++) {
        executors.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    int total = 10;
                    int right = 0;
                    for (int x = 0; x < total; x++) {
                        StopWatch watch = new StopWatch();
                        watch.start();
                        INDArray actual = array.dup().mmul(array).mmul(array).div(array).div(array);
                        watch.stop();
                        if (expected.equals(actual))
                            right++;
                    }

                    if (total == right)
                        correct.incrementAndGet();
                } finally {
                    latch.countDown();
                }

            }
        });
    }

    latch.await();

    assertEquals(numThreads, correct.get());

}
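
Because execute() returns no Future, the test above uses a CountDownLatch to learn when every task has finished. The same pattern in miniature (pool, nTasks, and doWork() are illustrative placeholders):

final CountDownLatch done = new CountDownLatch(nTasks);
for (int i = 0; i < nTasks; i++) {
    pool.execute(() -> {
        try {
            doWork(); // hypothetical task body
        } finally {
            done.countDown(); // count down even if the task throws
        }
    });
}
done.await(); // blocks until every task has counted down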

From source file:org.springframework.amqp.rabbit.core.RabbitTemplatePublisherCallbacksIntegrationTests.java

private void testPublisherConfirmCloseConcurrency(final int closeAfter) throws Exception {
    ConnectionFactory mockConnectionFactory = mock(ConnectionFactory.class);
    Connection mockConnection = mock(Connection.class);
    Channel mockChannel1 = mock(Channel.class);
    final AtomicLong seq1 = new AtomicLong();
    doAnswer(invocation -> seq1.incrementAndGet()).when(mockChannel1).getNextPublishSeqNo();

    Channel mockChannel2 = mock(Channel.class);
    when(mockChannel2.isOpen()).thenReturn(true);
    final AtomicLong seq2 = new AtomicLong();
    doAnswer(invocation -> seq2.incrementAndGet()).when(mockChannel2).getNextPublishSeqNo();

    when(mockConnectionFactory.newConnection(any(ExecutorService.class), anyString()))
            .thenReturn(mockConnection);
    when(mockConnection.isOpen()).thenReturn(true);
    when(mockConnection.createChannel()).thenReturn(mockChannel1, mockChannel2);

    CachingConnectionFactory ccf = new CachingConnectionFactory(mockConnectionFactory);
    ccf.setPublisherConfirms(true);
    final RabbitTemplate template = new RabbitTemplate(ccf);

    final CountDownLatch confirmed = new CountDownLatch(1);
    template.setConfirmCallback((correlationData, ack, cause) -> confirmed.countDown());
    ExecutorService exec = Executors.newSingleThreadExecutor();
    final AtomicInteger sent = new AtomicInteger();
    doAnswer(invocation -> sent.incrementAndGet() < closeAfter).when(mockChannel1).isOpen();
    final CountDownLatch sentAll = new CountDownLatch(1);
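    // Send on a background thread so the test thread can await the latches below.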
    exec.execute(() -> {
        for (int i = 0; i < 1000; i++) {
            try {
                template.convertAndSend(ROUTE, (Object) "message", new CorrelationData("abc"));
            } catch (AmqpException e) {
                // Expected once the mocked channel reports closed; keep sending.
            }
        }
        sentAll.countDown();
    });
    assertTrue(sentAll.await(10, TimeUnit.SECONDS));
    assertTrue(confirmed.await(10, TimeUnit.SECONDS));
}

From source file:net.ychron.unirestins.test.http.UnirestInstTest.java

private void makeParallelRequests() throws InterruptedException {
    ExecutorService newFixedThreadPool = Executors.newFixedThreadPool(10);
    final AtomicInteger counter = new AtomicInteger(0);
    for (int i = 0; i < 200; i++) {
        newFixedThreadPool.execute(new Runnable() {
            public void run() {
                try {
                    unirestInst.get("http://httpbin.org/get").queryString("index", counter.incrementAndGet())
                            .asJson();
                } catch (UnirestException e) {
                    throw new RuntimeException(e);
                }
            }
        });
    }

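    // Drain the pool: stop accepting new tasks, then wait for in-flight requests to finish.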
    newFixedThreadPool.shutdown();
    newFixedThreadPool.awaitTermination(10, TimeUnit.MINUTES);
}

From source file:gridool.db.partitioning.phihash.csv.grace.CsvGraceHashPartitioningTask.java

private final void invokeShuffle(@Nonnull final ExecutorService shuffleExecPool,
        @Nonnull final ArrayQueue<String> queue, final int bucket) {
    assert (kernel != null);
    final String[] lines = queue.toArray(String.class);
    final String fileName = csvFileName;
    if (isFirstShuffle) {
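        // The first shuffle runs synchronously on the calling thread; later shuffles go to the pool.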
        PartitioningJobConf conf = new PartitioningJobConf(lines, fileName, true, primaryForeignKeys, jobConf,
                bucket);
        runShuffleJob(kernel, conf, assignMap, outputMap, deploymentGroup);
        this.isFirstShuffle = false;
    } else {
        shuffleExecPool.execute(new Runnable() {
            public void run() {
                PartitioningJobConf conf = new PartitioningJobConf(lines, fileName, false, primaryForeignKeys,
                        jobConf, bucket);
                runShuffleJob(kernel, conf, assignMap, outputMap, deploymentGroup);
            }
        });
    }
}

From source file:com.google.api.ads.adwords.jaxws.extensions.processors.onmemory.ReportProcessorOnMemory.java

/**
 * Downloads all the files from the API and processes all the rows, saving the
 * data to the configured database.
 * 
 * @param builder
 *            the session builder.
 * @param reportType
 *            the report type.
 * @param dateRangeType
 *            the date range type.
 * @param dateStart
 *            the start date.
 * @param dateEnd
 *            the ending date.
 * @param acountIdList
 *            the account IDs.
 * @param properties
 *            the properties resource.
 */
private <R extends Report> void downloadAndProcess(String userId, String mccAccountId,
        AdWordsSession.Builder builder, ReportDefinitionReportType reportType,
        ReportDefinitionDateRangeType dateRangeType, String dateStart, String dateEnd, Set<Long> acountIdList,
        Properties properties) {

    // Download Reports to local files and Generate Report objects
    LOGGER.info("\n\n ** Generating: " + reportType.name() + " **");
    LOGGER.info(" Processing reports...");

    ReportDefinition reportDefinition = getReportDefinition(reportType, dateRangeType, dateStart, dateEnd,
            properties);

    @SuppressWarnings("unchecked")
    Class<R> reportBeanClass = (Class<R>) this.csvReportEntitiesMapping.getReportBeanClass(reportType);

    final CountDownLatch latch = new CountDownLatch(acountIdList.size());
    ExecutorService executorService = Executors.newFixedThreadPool(numberOfReportProcessors);

    Stopwatch stopwatch = Stopwatch.createStarted();

    for (Long accountId : acountIdList) {
        LOGGER.trace(".");
        try {

            ModifiedCsvToBean<R> csvToBean = new ModifiedCsvToBean<R>();
            MappingStrategy<R> mappingStrategy = new AnnotationBasedMappingStrategy<R>(reportBeanClass);

            LOGGER.debug("Parsing account: " + accountId);

            RunnableProcessorOnMemory<R> runnableProcessor = new RunnableProcessorOnMemory<R>(accountId, builder,
                    reportDefinition, csvToBean, mappingStrategy, dateRangeType, dateStart, dateEnd,
                    mccAccountId, persister, reportRowsSetSize);

            runnableProcessor.setLatch(latch);
            executorService.execute(runnableProcessor);

        } catch (Exception e) {
            LOGGER.error("Ignoring account (Error when processing): " + accountId);
            e.printStackTrace();
        }
    }

    try {
        latch.await();
    } catch (InterruptedException e) {
        LOGGER.error(e.getMessage());
        e.printStackTrace();
    }
    executorService.shutdown();

    stopwatch.stop();
    LOGGER.info("*** Finished processing all reports in " + (stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000)
            + " seconds ***\n");
}

From source file:org.trnltk.apps.morphology.contextless.parser.CachingMorphologicParserApp.java

@App("Parse sample TBMM Journal w/o bulk parse")
public void parseTbmmJournal_b0241h_noBulkParse() throws Exception {
    final File tokenizedFile = new File("core/src/test/resources/tokenizer/tbmm_b0241h_tokenized.txt");
    final List<String> lines = Files.readLines(tokenizedFile, Charsets.UTF_8);
    final LinkedList<String> words = new LinkedList<String>();
    final HashSet<String> uniqueWords = new HashSet<String>();
    for (String line : lines) {
        final ArrayList<String> strings = Lists
                .newArrayList(Splitter.on(" ").trimResults().omitEmptyStrings().split(line));
        words.addAll(strings);
        uniqueWords.addAll(strings);
    }

    final int initialL1CacheSize = uniqueWords.size();
    final int maxL1CacheSize = initialL1CacheSize;

    final MorphologicParserCache l1Cache = new LRUMorphologicParserCache(NUMBER_OF_THREADS, initialL1CacheSize,
            maxL1CacheSize);

    final ExecutorService pool = Executors.newFixedThreadPool(NUMBER_OF_THREADS);

    final MorphologicParser[] parsers = new MorphologicParser[NUMBER_OF_THREADS];
    for (int i = 0; i < parsers.length; i++) {
        parsers[i] = new CachingMorphologicParser(new TwoLevelMorphologicParserCache(BULK_SIZE, l1Cache),
                contextlessMorphologicParser, true);
    }

    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    for (int i = 0; i < words.size(); i++) {
        final MorphologicParser parser = parsers[i % NUMBER_OF_THREADS];
        final String word = words.get(i);
        final int wordIndex = i;
        pool.execute(new SingleParseCommand(parser, word, wordIndex, false));
    }

    pool.shutdown();
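    // Poll for completion; awaitTermination() returns false each time the 500 ms timeout elapses.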
    while (!pool.isTerminated()) {
        System.out.println("Waiting pool to be terminated!");
        pool.awaitTermination(500, TimeUnit.MILLISECONDS);
    }

    stopWatch.stop();

    System.out.println("Total time :" + stopWatch.toString());
    System.out.println("Nr of tokens : " + words.size());
    System.out.println("Avg time : " + (stopWatch.getTime() * 1.0d) / (words.size() * 1.0d) + " ms");
}

From source file:org.kurento.test.base.BrowserTest.java

public void syncTimeForOcr(final W[] webpages, final String[] videoTagsId, final String[] peerConnectionsId)
        throws InterruptedException {
    int webpagesLength = webpages.length;
    int videoTagsLength = videoTagsId.length;
    if (webpagesLength != videoTagsLength) {
        throw new KurentoException("The size of webpage arrays (" + webpagesLength
                + ") must be the same as videoTags (" + videoTagsLength + ")");
    }

    final ExecutorService service = Executors.newFixedThreadPool(webpagesLength);
    final CountDownLatch latch = new CountDownLatch(webpagesLength);

    for (int i = 0; i < webpagesLength; i++) {
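        // Copy the loop index into an effectively final local so the anonymous Runnable can capture it.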
        final int j = i;
        service.execute(new Runnable() {
            @Override
            public void run() {
                webpages[j].syncTimeForOcr(videoTagsId[j], peerConnectionsId[j]);
                latch.countDown();
            }
        });
    }
    latch.await();
    service.shutdown();
}

From source file:net.fenyo.mail4hotspot.dns.DnsListener.java

public void run() {
    final ExecutorService pool = Executors.newCachedThreadPool();
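    // A cached pool creates threads on demand and reuses idle ones; suited to bursty, short-lived handlers.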

    try {
        socket = new DatagramSocket(DNSPORT);
    } catch (final SocketException ex) {
        ex.printStackTrace();
        log.error("can not start DNS service");
        return;
    }

    do {
        final DatagramPacket query = new DatagramPacket(new byte[DATAGRAMMAXSIZE], DATAGRAMMAXSIZE);
        try {
            socket.receive(query);
            pool.execute(new Handler(query));
        } catch (IOException ex) {
            log.error(ex);
        }
    } while (!thread.isInterrupted());

    try {
        log.info("waiting for executor tasks to terminate");
        pool.shutdown(); // awaitTermination() alone does not initiate shutdown
        pool.awaitTermination(120, TimeUnit.SECONDS);
    } catch (InterruptedException ex) {
        log.error(ex);
    }
}