Example usage for java.util.concurrent ExecutorService submit

Introduction

This page lists example usages of the java.util.concurrent ExecutorService.submit method, collected from open source projects.

Prototype

Future<?> submit(Runnable task);

Document

Submits a Runnable task for execution and returns a Future representing that task. The Future's get method will return null upon the task's successful completion.
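
A minimal, self-contained sketch of this overload: because the Runnable produces no value, the returned Future's get() blocks until the task finishes and then yields null. The class and task names are illustrative.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitRunnableExample {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Runnable task = () -> System.out.println("task ran");
        // submit(Runnable) wraps the task in a Future
        Future<?> future = executor.submit(task);
        System.out.println("result = " + future.get()); // prints "result = null"
        executor.shutdown();
    }
}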

Usage

From source file:com.btoddb.fastpersitentqueue.JournalMgrIT.java

@Test
public void testThreading() throws IOException, ExecutionException {
    final int numEntries = 10000;
    final int numPushers = 3;
    int numPoppers = 3;

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final ConcurrentLinkedQueue<FpqEntry> events = new ConcurrentLinkedQueue<FpqEntry>();
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    mgr.setMaxJournalFileSize(1000);
    mgr.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future<?>> futures = new HashSet<Future<?>>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future<?> future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = idGen.incrementAndGet();
                        FpqEntry entry = mgr.append(new FpqEntry(x, new byte[100]));
                        events.offer(entry);
                        pushSum.addAndGet(x);
                        if (x % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future<?> future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !events.isEmpty()) {
                    try {
                        FpqEntry entry;
                        while (null != (entry = events.poll())) {
                            if (entry.getId() % 500 == 0) {
                                System.out.println("popped ID = " + entry.getId());
                            }
                            popSum.addAndGet(entry.getId());
                            numPops.incrementAndGet();
                            mgr.reportTake(entry);
                            Thread.sleep(popRand.nextInt(5));
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future<?> f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // clear the interrupt status and retry waiting on the futures
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(popSum.get(), is(pushSum.get()));
    assertThat(mgr.getJournalIdMap().entrySet(), hasSize(1));
    assertThat(FileUtils.listFiles(theDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE), hasSize(1));
}
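
The test above waits by calling get() on every collected Future, retrying on interruption; get() is also what surfaces exceptions thrown inside the submitted tasks, as ExecutionException. A shorter sketch of the same submit-then-await idea, using invokeAll to block for all tasks at once; the tasks here are placeholders, not the test's pushers and poppers.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class AwaitAllExample {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        List<Callable<Object>> tasks = new ArrayList<Callable<Object>>();
        for (int i = 0; i < 4; i++) {
            final int id = i;
            Runnable r = () -> System.out.println("task " + id);
            // Executors.callable adapts a Runnable to a Callable<Object>
            tasks.add(Executors.callable(r));
        }
        // invokeAll blocks until every task has completed
        for (Future<Object> f : pool.invokeAll(tasks)) {
            f.get(); // rethrows a task failure as ExecutionException
        }
        pool.shutdown();
    }
}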

From source file:at.salzburgresearch.stanbol.enhancer.nlp.talismane.impl.TestTalismaneAnalyser.java

@Test
public void testConcurrentAnalyses() throws IOException, InterruptedException, ExecutionException {
    // warm up
    log.info("Start concurrent analyses test");
    log.info("  ... warm up");
    for (Entry<String, Blob> example : examples.entrySet()) {
        analyzer.analyse(example.getValue());
    }

    //performance test
    long start = System.currentTimeMillis();
    int concurrentRequests = 3;
    ExecutorService executor = Executors.newFixedThreadPool(concurrentRequests);
    int iterations = 100;
    log.info("  ... start test with {} iterations", iterations);
    List<Future<?>> tasks = new ArrayList<Future<?>>(iterations);
    long[] times = new long[iterations];
    Iterator<Blob> texts = examples.values().iterator();
    for (int i = 0; i < iterations; i++) {
        if (!texts.hasNext()) {
            texts = examples.values().iterator();
        }
        tasks.add(executor.submit(new AnalyzerRequest(i, times, analyzer, texts.next())));
    }
    for (Future<?> task : tasks) { //wait for completion of all tasks
        task.get();
    }
    long duration = System.currentTimeMillis() - start;
    log.info("Processed {} texts", iterations);
    log.info("  > time       : {}ms", duration);
    log.info("  > average    : {}ms", (duration) / (double) iterations);
    long sumTime = 0;
    for (int i = 0; i < times.length; i++) {
        sumTime = sumTime + times[i];
    }
    log.info("  > processing : {}ms", sumTime);
    float concurrency = sumTime / (float) duration;
    log.info("  > concurrency: {} / {}%", concurrency, concurrency * 100 / concurrentRequests);
}

From source file:cn.ctyun.amazonaws.services.s3.transfer.internal.UploadMonitor.java

/**
 * Constructs a new upload watcher, which immediately submits itself to the
 * thread pool.
 *
 * @param manager
 *            The {@link TransferManager} that owns this upload.
 * @param transfer
 *            The transfer being processed.
 * @param threadPool
 *            The {@link ExecutorService} to which we should submit new
 *            tasks.
 * @param multipartUploadCallable
 *            The callable responsible for processing the upload asynchronously
 * @param putObjectRequest
 *            The original putObject request
 * @param progressListenerChain
 *            A chain of listeners that wish to be notified of upload
 *            progress
 */
public UploadMonitor(TransferManager manager, UploadImpl transfer, ExecutorService threadPool,
        UploadCallable multipartUploadCallable, PutObjectRequest putObjectRequest,
        ProgressListenerChain progressListenerChain) {

    this.s3 = manager.getAmazonS3Client();
    this.configuration = manager.getConfiguration();

    this.multipartUploadCallable = multipartUploadCallable;
    this.threadPool = threadPool;
    this.putObjectRequest = putObjectRequest;
    this.progressListenerChain = progressListenerChain;
    this.transfer = transfer;

    setNextFuture(threadPool.submit(this));
}
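
threadPool.submit(this) resolves to the Callable overload here, since UploadMonitor itself implements Callable, and each run of the monitor can re-submit itself to keep polling the transfer. A minimal sketch of that self-submitting pattern; every name in it is illustrative, not the SDK's.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Each run either declares the work done or reschedules itself.
public class PollingMonitor implements Callable<Boolean> {
    private final ExecutorService threadPool;
    private volatile Future<Boolean> nextFuture;
    private volatile int attempts = 0;

    public PollingMonitor(ExecutorService threadPool) {
        this.threadPool = threadPool;
        this.nextFuture = threadPool.submit(this); // submit immediately, as above
    }

    @Override
    public Boolean call() {
        if (++attempts >= 3) {                 // stand-in for "transfer done"
            return Boolean.TRUE;
        }
        nextFuture = threadPool.submit(this);  // not done yet: poll again
        return Boolean.FALSE;
    }

    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        PollingMonitor monitor = new PollingMonitor(pool);
        while (!monitor.nextFuture.get()) {
            // each get() waits for one poll; loop until a poll reports done
        }
        System.out.println("finished after " + monitor.attempts + " polls");
        pool.shutdown();
    }
}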

From source file:com.bittorrent.mpetazzoni.common.Torrent.java

private static String hashFiles(List<File> files) throws InterruptedException, IOException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(Torrent.PIECE_LENGTH);
    List<Future<String>> results = new LinkedList<Future<String>>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        logger.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(),
                threads, (int) (Math.ceil((double) file.length() / Torrent.PIECE_LENGTH)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    logger.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / Torrent.PIECE_LENGTH));
    logger.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}
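
Note that this example exercises the submit(Callable) overload rather than the submit(Runnable) one documented above: each CallableChunkHasher submission yields a Future<String> that later delivers the piece's hash. A self-contained sketch of that overload; the SHA-1 hashing of a fixed byte array is illustrative.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitCallableExample {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // submit(Callable<T>) returns a Future<T> holding the computed value
        Future<String> digest = executor.submit(() -> {
            MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
            byte[] hash = sha1.digest("piece-data".getBytes(StandardCharsets.UTF_8));
            StringBuilder hex = new StringBuilder();
            for (byte b : hash) {
                hex.append(String.format("%02x", b));
            }
            return hex.toString();
        });
        System.out.println("piece hash = " + digest.get()); // blocks for the result
        executor.shutdown();
    }
}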

From source file:com.isoftstone.crawl.template.crawlstate.CrawlState.java

/**
 * Runs a full crawl for the given seed folder.
 *
 * @param folderName the name of the seed folder to crawl
 * @param isDeploy   whether to run the deploy (cluster) scripts instead of the local ones
 */
public String crawlFull(final String folderName, boolean isDeploy) {
    String rootFolder = Config.getValue(WebtoolConstants.FOLDER_NAME_ROOT);
    String shDir;
    String crawlDir = Config.getValue(WebtoolConstants.KEY_NUTCH_CRAWLDIR);
    String solrURL = Config.getValue(WebtoolConstants.KEY_NUTCH_SOLR_URL);
    String depth = "3";
    final String dispatchName = folderName + WebtoolConstants.DISPATCH_REIDIS_POSTFIX_NORMAL;
    final DispatchVo dispatchVo = RedisOperator.getDispatchResult(dispatchName,
            Constants.DISPATCH_REDIS_DBINDEX);
    boolean userProxy = dispatchVo.isUserProxy();

    if (isDeploy) {
        shDir = Config.getValue(WebtoolConstants.KEY_NUTCH_DEPLOY_NORMAL_SHDIR);
        if (userProxy) {
            shDir = Config.getValue(WebtoolConstants.KEY_NUTCH_DEPLOY_NORMAL_PROXY_SHDIR);
        }
    } else {
        shDir = Config.getValue(WebtoolConstants.KEY_NUTCH_LOCAL_NORMAL_SHDIR);
        if (userProxy) {
            shDir = Config.getValue(WebtoolConstants.KEY_NUTCH_LOCAL_NORMAL_PROXY_SHDIR);
        }
    }

    String folderNameSeed = dispatchName.substring(0, dispatchName.lastIndexOf("_"));
    String folderNameData = folderNameSeed.substring(0, folderNameSeed.lastIndexOf("_"));
    String seedFolder = rootFolder + File.separator + folderNameSeed;
    if (isDeploy) {
        seedFolder = Config.getValue(WebtoolConstants.KEY_HDFS_ROOT_PREFIX) + folderNameSeed;
    }

    List<Seed> seedList = dispatchVo.getSeed();
    final List<String> seedStrs = new ArrayList<String>();
    for (Iterator<Seed> it = seedList.iterator(); it.hasNext();) {
        Seed seed = it.next();
        if ("true".equals(seed.getIsEnabled())) {
            seedStrs.add(seed.getUrl());
        }
    }
    contentToTxt4CrawlerAgain(folderName, seedStrs, "true");

    dispatchVo.setStatus(WebtoolConstants.DISPATCH_STATIS_RUNNING);
    RedisOperator.setDispatchResult(dispatchVo, dispatchName, Constants.DISPATCH_REDIS_DBINDEX);

    String command = shDir + " " + seedFolder + " " + crawlDir + folderNameData + "_data" + " " + solrURL + " "
            + depth;
    LOG.info("Crawl command: " + command);
    CrawlToolResource.putSeedsFolder(folderNameSeed, "local");
    final RunManager runManager = getRunmanager(command);

    String resultMsg = "";
    ExecutorService es = Executors.newSingleThreadExecutor();
    Future<String> result = es.submit(new Callable<String>() {
        public String call() throws Exception {
            // runs on the executor thread
            String tpResult = "";
            LOG.info("runManager.ip: " + runManager.getHostIp());
            LOG.info("runManager.command: " + runManager.getCommand());
            tpResult = ShellUtils.execCmd(runManager);
            LOG.info("Command finished: " + runManager.getCommand());
            contentToTxt4CrawlerAgain(folderName, seedStrs, "false");
            dispatchVo.setStatus(WebtoolConstants.DISPATCH_STATIS_COMPLETE);
            RedisOperator.setDispatchResult(dispatchVo, dispatchName, Constants.DISPATCH_REDIS_DBINDEX);
            return tpResult;
        }
    });
    try {
        resultMsg = result.get();
    } catch (Exception e) {
        // the shell command failed or the wait was interrupted
        LOG.error("crawlFull execution failed", e);
    }

    return resultMsg;
}
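
Two details of this example are worth noting: because result.get() is called immediately after submit, the calling thread blocks until the shell command completes, so the single-thread executor isolates the command's execution without adding any parallelism; and es is never shut down, so its non-daemon worker thread outlives the call.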

From source file:com.opentransport.rdfmapper.nmbs.ScrapeTrip.java

private void requestJsons(Map trainDelays) {
    String trainName;
    Iterator iterator = trainDelays.entrySet().iterator();

    ExecutorService pool = Executors.newFixedThreadPool(NUMBER_OF_CONNECTIONS_TO_IRAIL_API);
    while (iterator.hasNext()) {
        Map.Entry mapEntry = (Map.Entry) iterator.next();
        trainName = returnCorrectTrainFormat((String) mapEntry.getKey());
        url = "https://api.irail.be/vehicle/?id=BE.NMBS." + trainName + "&format=json";
        System.out.println("HTTP GET - " + url);
        countConnections++;
        pool.submit(new DownloadDelayedTrains(trainName, url));
    }
    pool.shutdown();

    try {
        pool.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
        // all tasks have now finished (unless an exception was thrown above)
    } catch (InterruptedException ex) {
        Logger.getLogger(ScrapeTrip.class.getName()).log(Level.SEVERE, null, ex);
        errorWriter.writeError(ex.toString());
    }
}
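
shutdown() only stops the pool from accepting new submissions; it is awaitTermination that actually blocks until the queued downloads finish. A compact sketch of that shutdown idiom with a bounded timeout instead of Long.MAX_VALUE; the task bodies and timeout value are illustrative.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 10; i++) {
            final int id = i;
            pool.submit(() -> System.out.println("request " + id));
        }
        pool.shutdown(); // no new tasks accepted; queued tasks still run
        // wait a bounded time, then force-cancel whatever is left
        if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
            pool.shutdownNow();
        }
    }
}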

From source file:edu.duke.cabig.c3pr.webservice.integration.C3PREmbeddedTomcatTestBase.java

/**
 * Stops the embedded Tomcat container, bounding the shutdown with a timeout.
 */
private void stopContainer() {
    try {
        if (container != null) {
            logger.info("Stopping Tomcat...");
            // stopping Tomcat may block, so we need to do it in another
            // thread and join.
            final ExecutorService executor = Executors.newSingleThreadExecutor();
            try {
                Future future = executor.submit(new Runnable() {
                    public void run() {
                        try {
                            container.stop();
                            container = null;
                            logger.info("Tomcat has been stopped.");
                        } catch (LifecycleException e) {
                            logger.severe(ExceptionUtils.getFullStackTrace(e));
                        }
                    }
                });
                future.get(TOMCAT_SHUTDOWN_TIMEOUT, TimeUnit.SECONDS);
            } finally {
                executor.shutdownNow();
            }
        }
    } catch (Exception e) {
        logger.severe(ExceptionUtils.getFullStackTrace(e));
    }

}
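
Running a potentially blocking call on a single-thread executor and bounding it with future.get(timeout, unit) is the key idiom here; a TimeoutException from that get() lands in the outer catch block above. A minimal sketch of the same idiom, with a sleep standing in for the blocking operation.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class TimeoutExample {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<?> future = executor.submit(() -> {
            try {
                Thread.sleep(5000); // stand-in for a call that may block
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        try {
            future.get(1, TimeUnit.SECONDS); // give up after one second
        } catch (TimeoutException e) {
            System.out.println("operation did not finish in time");
        } catch (InterruptedException | ExecutionException e) {
            e.printStackTrace();
        } finally {
            executor.shutdownNow(); // interrupts the still-running task
        }
    }
}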

From source file:com.github.c77.base_driver.KobukiBaseDevice.java

public KobukiBaseDevice(UsbSerialDriver driver) {
    serialDriver = driver;
    try {
        serialDriver.open();
        serialDriver.setParameters(115200, UsbSerialDriver.DATABITS_8, UsbSerialDriver.STOPBITS_1,
                UsbSerialDriver.PARITY_NONE);
    } catch (IOException e) {
        log.info("Error setting up device: " + e.getMessage(), e);
        e.printStackTrace();
        try {
            serialDriver.close();
        } catch (IOException e1) {
            e1.printStackTrace();
        }
        serialDriver = null;
    }

    final ExecutorService executorService = Executors.newSingleThreadExecutor();

    SerialInputOutputManager serialInputOutputManager;

    final SerialInputOutputManager.Listener listener = new SerialInputOutputManager.Listener() {
        @Override
        public void onRunError(Exception e) {
            // no-op: errors from the serial I/O thread are ignored here
        }

        @Override
        public void onNewData(final byte[] data) {
            KobukiBaseDevice.this.updateReceivedData(data);
        }
    };

    serialInputOutputManager = new SerialInputOutputManager(serialDriver, listener);
    executorService.submit(serialInputOutputManager);
}
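
SerialInputOutputManager is a Runnable whose run() loops over the serial port, so submitting it dedicates the single-thread executor's one thread to device I/O for the lifetime of the loop. A generic sketch of handing a long-running loop to submit(); the idle sleep stands in for a blocking device read.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

public class IoLoop implements Runnable {
    private final AtomicBoolean running = new AtomicBoolean(true);

    @Override
    public void run() {
        while (running.get()) {
            try {
                Thread.sleep(10); // a real loop would block on a device read
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
            }
        }
    }

    public void stop() {
        running.set(false);
    }

    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        IoLoop loop = new IoLoop();
        executor.submit(loop); // occupies the executor's only thread
        Thread.sleep(100);
        loop.stop();           // let run() return so the pool can terminate
        executor.shutdown();
    }
}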

From source file:jp.aegif.nemaki.cmis.service.impl.AclServiceImpl.java

private void clearCachesRecursively(ExecutorService executorService, CallContext callContext,
        final String repositoryId, Content content, boolean executeOnParent) {

    //Call threads for recursive applyAcl
    if (content.isFolder()) {
        List<Content> children = contentService.getChildren(repositoryId, content.getId());
        if (CollectionUtils.isEmpty(children)) {
            return;
        }

        if (executeOnParent) {
            executorService.submit(new ClearCacheTask(repositoryId, content.getId()));
        }
        for (Content child : children) {
            if (contentService.getAclInheritedWithDefault(repositoryId, child)) {
                executorService.submit(
                        new ClearCachesRecursivelyTask(executorService, callContext, repositoryId, child));
            }
        }
    } else {
        executorService.submit(new ClearCacheTask(repositoryId, content.getId()));
    }
}

From source file:jp.aegif.nemaki.cmis.service.impl.AclServiceImpl.java

private void writeChangeEventsRecursively(ExecutorService executorService, CallContext callContext,
        final String repositoryId, Content content, boolean executeOnParent) {

    //Call threads for recursive applyAcl
    if (content.isFolder()) {
        List<Content> children = contentService.getChildren(repositoryId, content.getId());
        if (CollectionUtils.isEmpty(children)) {
            return;
        }

        if (executeOnParent) {
            executorService.submit(new ClearCacheTask(repositoryId, content.getId()));
        }
        for (Content child : children) {
            if (contentService.getAclInheritedWithDefault(repositoryId, child)) {
                executorService.submit(new WriteChangeEventsRecursivelyTask(executorService, callContext,
                        repositoryId, child));
            }
        }
    } else {
        executorService.submit(new WriteChangeEventTask(callContext, repositoryId, content));
    }
}
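
Both recursive methods above fan out by submitting one task per child and return without waiting on those tasks' Futures. That detail matters: in a bounded pool, a task that blocks on its children's Futures can deadlock once every worker thread is occupied by a waiting parent. A minimal sketch of the same fire-and-forget recursive fan-out; the tree shape and counts are illustrative.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class RecursiveSubmitExample {
    // visit a node, then submit (but never await) one task per child
    static void visit(ExecutorService pool, int depth, AtomicInteger visited) {
        visited.incrementAndGet();
        if (depth == 0) {
            return;
        }
        for (int i = 0; i < 2; i++) {
            pool.submit(() -> visit(pool, depth - 1, visited));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        AtomicInteger visited = new AtomicInteger();
        visit(pool, 3, visited);
        // crude quiescence wait; a real implementation would track
        // outstanding tasks (e.g. with a Phaser) instead of sleeping
        Thread.sleep(500);
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
        System.out.println("visited " + visited.get() + " nodes"); // 15 for this tree
    }
}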