Example usage for java.util.concurrent ExecutorService submit

Introduction

This page collects example usages of java.util.concurrent ExecutorService.submit.

Prototype

Future<?> submit(Runnable task);

Document

Submits a Runnable task for execution and returns a Future representing that task. The Future's get method will return null upon successful completion.
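
A minimal, self-contained sketch of the pattern (the class name and task body are illustrative, not taken from the examples below):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitRunnableSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        try {
            // submit(Runnable) returns a Future<?>; get() blocks until the
            // task completes and then yields null.
            Future<?> future = executor.submit(() -> System.out.println("task running"));
            future.get();
        } finally {
            executor.shutdown(); // let the worker thread exit
        }
    }
}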

Usage

From source file:com.github.c77.base_driver.HuskyBaseDevice.java

public HuskyBaseDevice(UsbSerialDriver driver) {
    serialDriver = driver;
    try {
        serialDriver.open();
        serialDriver.setParameters(115200, UsbSerialDriver.DATABITS_8, UsbSerialDriver.STOPBITS_1,
                UsbSerialDriver.PARITY_NONE);
    } catch (IOException e) {
        log.info("Error setting up device: " + e.getMessage(), e);
        e.printStackTrace();
        try {
            serialDriver.close();
        } catch (IOException e1) {
            e1.printStackTrace();
        }
        serialDriver = null;
    }

    final ExecutorService executorService = Executors.newSingleThreadExecutor();

    SerialInputOutputManager serialInputOutputManager;

    final SerialInputOutputManager.Listener listener = new SerialInputOutputManager.Listener() {
        @Override
        public void onRunError(Exception e) {
        }

        @Override
        public void onNewData(final byte[] data) {
        }
    };

    serialInputOutputManager = new SerialInputOutputManager(serialDriver, listener);
    executorService.submit(serialInputOutputManager);
}
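
Note that the Future<?> returned by submit is discarded here, so the serial I/O loop cannot be cancelled through it later. A hedged sketch of keeping the handle (the variable name is illustrative; the serial library may also provide its own stop mechanism):

    Future<?> ioManagerFuture = executorService.submit(serialInputOutputManager);
    // ... later, when closing the device:
    ioManagerFuture.cancel(true); // request interruption of the I/O thread
    executorService.shutdown();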

From source file:com.isoftstone.crawl.template.crawlstate.CrawlState.java

/**
 * Re-parses previously crawled data and re-indexes it into Solr.
 *
 * @param folderNameSeed the seed folder name used to derive the data folder
 * @param isDeploy       whether to run the deploy or the local reparse jar
 * @param isNomal        false for incremental crawls, which use a different data folder suffix
 */
public String reParse(String folderNameSeed, boolean isDeploy, boolean isNomal) {
    String nutch_reparse;
    String solrURL = Config.getValue(WebtoolConstants.KEY_NUTCH_SOLR_URL);
    String crawlDir = Config.getValue(WebtoolConstants.KEY_NUTCH_CRAWLDIR);
    String folderNameData = folderNameSeed.substring(0, folderNameSeed.lastIndexOf("_"));

    //-- Choose the deploy or local reparse jar path.
    if (isDeploy) {
        nutch_reparse = Config.getValue(WebtoolConstants.KEY_NUTCH_REPARSE_DEPLOY);
    } else {
        nutch_reparse = Config.getValue(WebtoolConstants.KEY_NUTCH_REPARSE_LOCAL);
    }

    //-- For incremental crawls, adjust the data folder name.
    if (!isNomal) {
        folderNameData = folderNameData.substring(0, folderNameData.lastIndexOf("_")) + "_"
                + WebtoolConstants.INCREMENT_FILENAME_SIGN;
    }

    String data_folder = crawlDir + folderNameData + "_data";

    LOG.info("ParseAndIndex: nutch_root: " + nutch_reparse);
    LOG.info("ParseAndIndex: data_folder: " + data_folder);

    String command = "java -jar /reparseAndIndex.jar " + nutch_reparse + " " + data_folder + " " + solrURL
            + " true";
    LOG.info("ParseAndIndex: command:" + command);
    final RunManager runManager = getRunmanager(command);

    String resultMsg = "";
    ExecutorService es = Executors.newSingleThreadExecutor();
    Future<String> result = es.submit(new Callable<String>() {
        public String call() throws Exception {
            // the other thread
            return ShellUtils.execCmd(runManager);
        }
    });
    try {
        resultMsg = result.get();
    } catch (Exception e) {
        LOG.error("reParse command failed", e);
    }

    return resultMsg;
}
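
This example uses the submit(Callable) overload, which returns a typed Future<String>. A variant sketch with a bounded wait, so a stuck shell command cannot block the caller forever (the timeout value is illustrative; requires java.util.concurrent.TimeUnit and TimeoutException):

    ExecutorService es = Executors.newSingleThreadExecutor();
    Future<String> result = es.submit(() -> ShellUtils.execCmd(runManager));
    try {
        resultMsg = result.get(10, TimeUnit.MINUTES); // fail fast instead of hanging
    } catch (TimeoutException e) {
        result.cancel(true); // interrupt the command thread
    } catch (Exception e) {
        // execution failed or the wait was interrupted
    } finally {
        es.shutdown();
    }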

From source file:com.yahoo.gondola.container.ZookeeperRegistryClientTest.java

@Test
public void testWaitForClusterComplete() throws Exception {
    // 0. A three nodes shard, two server joins
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    Future<Boolean> result;
    registryClient.register(SITE_1_HOST_3_CLUSTERS, new InetSocketAddress(1234), URI.create("http://foo.com"));
    registryClient.register(SITE_1_HOST_2_CLUSTERS, new InetSocketAddress(1235), URI.create("http://foo.com"));

    // 1. The waitForClusterComplete call should time out after 1 second and return false
    Callable<Boolean> awaitCall = () -> registryClient.waitForClusterComplete(1000);

    result = executorService.submit(awaitCall);
    assertEquals(result.get(), Boolean.FALSE);

    // 2. The request should block until the next node joins, then return true
    result = executorService.submit(awaitCall);
    registryClient.register(SITE_1_HOST_1_CLUSTER, new InetSocketAddress(1236), URI.create("http://foo.com"));
    assertEquals(result.get(), Boolean.TRUE);

    // 3. The request should succeed immediately, since all nodes are now in the shard.
    result = executorService.submit(awaitCall);
    assertEquals(result.get(), Boolean.TRUE);
}
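
In tests like this, an unconditional Future.get() can hang the whole suite if the awaited condition never arrives. A small sketch of the bounded variant (the timeout value is illustrative; requires java.util.concurrent.TimeUnit):

    assertEquals(result.get(5, TimeUnit.SECONDS), Boolean.TRUE);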

From source file:de.undercouch.citeproc.TestSuiteRunner.java

/**
 * Runs tests.
 * @param f either a compiled test file (.json) to run or a directory
 * containing compiled test files
 * @param runnerType the type of the script runner that will be used
 * to execute all JavaScript code
 * @throws IOException if a file could not be loaded
 */
public void runTests(File f, RunnerType runnerType) throws IOException {
    ScriptRunnerFactory.setRunnerType(runnerType);
    {
        ScriptRunner sr = ScriptRunnerFactory.createRunner();
        System.out.println("Using script runner: " + sr.getName() + " " + sr.getVersion());
    }

    //find test files
    File[] testFiles;
    if (f.isDirectory()) {
        testFiles = f.listFiles(new FilenameFilter() {
            @Override
            public boolean accept(File dir, String name) {
                return name.endsWith(".json");
            }
        });
    } else {
        testFiles = new File[] { f };
    }

    AnsiConsole.systemInstall();
    try {
        long start = System.currentTimeMillis();
        int count = testFiles.length;
        int success = 0;

        ExecutorService executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());

        //submit a job for each test file
        List<Future<Boolean>> fus = new ArrayList<Future<Boolean>>();
        for (File fi : testFiles) {
            fus.add(executor.submit(new TestCallable(fi)));
        }

        //receive results
        try {
            for (Future<Boolean> fu : fus) {
                if (fu.get()) {
                    ++success;
                }
            }
        } catch (Exception e) {
            //should never happen
            throw new RuntimeException(e);
        }

        executor.shutdown();

        //output total time
        long end = System.currentTimeMillis();
        double time = (end - start) / 1000.0;
        System.out.println("Successfully executed " + success + " of " + count + " tests.");
        System.out.println(String.format(Locale.ENGLISH, "Total time: %.3f secs", time));
    } finally {
        AnsiConsole.systemUninstall();
    }
}
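
The submit-in-a-loop / collect-in-a-loop pattern above can also be written with ExecutorService.invokeAll, which submits the whole batch and blocks until every task has completed. A sketch, assuming (as the submit call above implies) that TestCallable implements Callable<Boolean>; invokeAll and Future.get declare checked exceptions that would need to be handled or declared:

    List<Callable<Boolean>> jobs = new ArrayList<Callable<Boolean>>();
    for (File fi : testFiles) {
        jobs.add(new TestCallable(fi));
    }
    int success = 0;
    for (Future<Boolean> fu : executor.invokeAll(jobs)) {
        if (fu.get()) { // every Future is already complete here
            ++success;
        }
    }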

From source file:com.jivesoftware.os.amza.service.replication.http.HttpRowsTaker.java

public HttpRowsTaker(String name, AmzaStats amzaStats, TenantAwareHttpClient<String> ringClient,
        ObjectMapper mapper, AmzaInterner amzaInterner, ExecutorService queueExecutor,
        ExecutorService flushExecutor) {
    this.name = name;
    this.amzaStats = amzaStats;
    this.ringClient = ringClient;
    this.mapper = mapper;
    this.streamingTakesConsumer = new StreamingTakesConsumer(amzaInterner);
    this.flushExecutor = flushExecutor;

    //TODO lifecycle
    queueExecutor.submit(() -> {
        while (true) {
            try {
                long currentVersion = flushVersion.get();
                for (Entry<RingHost, Ackable> entry : hostQueue.entrySet()) {
                    Ackable ackable = entry.getValue();
                    if (ackable.running.compareAndSet(false, true)) {
                        flushQueues(entry.getKey(), ackable, currentVersion);
                    }
                }
                synchronized (flushVersion) {
                    if (currentVersion == flushVersion.get()) {
                        flushVersion.wait();
                    }
                }
            } catch (Throwable t) {
                LOG.error("HttpRowsTaker failure", t);
            }
        }
    });
}
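
The flush loop is submitted as a fire-and-forget lambda and, as the //TODO lifecycle note hints, there is no way to stop it; note also that the catch (Throwable t) would swallow the interrupt raised by flushVersion.wait(). A hedged sketch of a stoppable variant (the local name is illustrative):

    Future<?> flushLoop = queueExecutor.submit(() -> {
        while (!Thread.currentThread().isInterrupted()) {
            // ... flush work as above, letting InterruptedException end the loop
        }
    });
    // during shutdown:
    flushLoop.cancel(true); // interrupts flushVersion.wait() and stops the loop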

From source file:com.isoftstone.crawl.template.crawlstate.CrawlState.java

/**
 * Runs an incremental crawl for the given folder and indexes the result into Solr.
 *
 * @param folderName the crawl folder name
 * @param isDeploy   whether to use the deploy or the local crawl script
 */
public String crawlIncrement(String folderName, boolean isDeploy) {
    String rootFolder = Config.getValue(WebtoolConstants.FOLDER_NAME_ROOT);
    String shDir;
    String crawlDir = Config.getValue(WebtoolConstants.KEY_NUTCH_CRAWLDIR);
    String solrURL = Config.getValue(WebtoolConstants.KEY_NUTCH_SOLR_URL);
    String depth = "2";
    String dispatchName = folderName + WebtoolConstants.DISPATCH_REIDIS_POSTFIX_INCREMENT;
    DispatchVo dispatchVo = RedisOperator.getDispatchResult(dispatchName, Constants.DISPATCH_REDIS_DBINDEX);
    boolean userProxy = dispatchVo.isUserProxy();

    //-- Choose the crawl shell script.
    if (isDeploy) {
        shDir = Config.getValue(WebtoolConstants.KEY_NUTCH_DEPLOY_INCREMENT_SHDIR);
        if (userProxy) {
            shDir = Config.getValue(WebtoolConstants.KEY_NUTCH_DEPLOY_INCREMENT_PROXY_SHDIR);
        }
    } else {
        shDir = Config.getValue(WebtoolConstants.KEY_NUTCH_LOCAL_INCREMENT_SHDIR);
        if (userProxy) {
            shDir = Config.getValue(WebtoolConstants.KEY_NUTCH_LOCAL_INCREMENT_PROXY_SHDIR);
        }
    }

    String folderNameSeed = dispatchName.substring(0, dispatchName.lastIndexOf("_"));
    String folderNameData = folderNameSeed.substring(0, folderNameSeed.lastIndexOf("_"));
    String[] folderNameStrs = folderNameSeed.split("_");
    folderNameSeed = folderNameStrs[0] + "_" + folderNameStrs[1] + "_"
            + WebtoolConstants.INCREMENT_FILENAME_SIGN + "_" + folderNameStrs[2];
    folderNameData = folderNameData.substring(0, folderNameData.lastIndexOf("_")) + "_"
            + WebtoolConstants.INCREMENT_FILENAME_SIGN;
    String seedFolder = rootFolder + File.separator + folderNameSeed;
    String command = shDir + " " + seedFolder + " " + crawlDir + folderNameData + "_data" + " " + solrURL + " "
            + depth;
    final RunManager runManager = getRunmanager(command);
    LOG.info("??:" + command);
    CrawlToolResource.putSeedsFolder(folderNameSeed, "local");

    String resultMsg = "";
    ExecutorService es = Executors.newSingleThreadExecutor();
    Future<String> result = es.submit(new Callable<String>() {
        public String call() throws Exception {
            // the other thread
            return ShellUtils.execCmd(runManager);
        }
    });
    try {
        resultMsg = result.get();
    } catch (Exception e) {
        LOG.error("crawlIncrement command failed", e);
    }
    return resultMsg;
}

From source file:gdsc.smlm.ij.plugins.MedianFilter.java

public void run(ImageProcessor ip) {
    long start = System.currentTimeMillis();

    ImageStack stack = imp.getImageStack();

    final int width = stack.getWidth();
    final int height = stack.getHeight();
    size = width * height;
    float[][] imageStack = new float[stack.getSize()][];
    float[] mean = new float[imageStack.length];

    // Get the mean for each frame and normalise the data using the mean
    ExecutorService threadPool = Executors.newFixedThreadPool(Prefs.getThreads());
    List<Future<?>> futures = new LinkedList<Future<?>>();

    counter = 0;
    IJ.showStatus("Calculating means...");
    for (int n = 1; n <= stack.getSize(); n++) {
        futures.add(threadPool.submit(new ImageNormaliser(stack, imageStack, mean, n)));
    }

    // Finish processing data
    Utils.waitForCompletion(futures);

    futures = new LinkedList<Future<?>>();

    counter = 0;
    IJ.showStatus("Calculating medians...");
    for (int i = 0; i < size; i += blockSize) {
        futures.add(
                threadPool.submit(new ImageGenerator(imageStack, mean, i, FastMath.min(i + blockSize, size))));
    }

    // Finish processing data
    Utils.waitForCompletion(futures);

    if (Utils.isInterrupted())
        return;

    if (subtract) {
        counter = 0;
        IJ.showStatus("Subtracting medians...");
        for (int n = 1; n <= stack.getSize(); n++) {
            futures.add(threadPool.submit(new ImageFilter(stack, imageStack, n)));
        }

        // Finish processing data
        Utils.waitForCompletion(futures);
    }

    // Update the image
    ImageStack outputStack = new ImageStack(stack.getWidth(), stack.getHeight(), stack.getSize());
    for (int n = 1; n <= stack.getSize(); n++) {
        outputStack.setPixels(imageStack[n - 1], n);
    }

    imp.setStack(outputStack);
    imp.updateAndDraw();

    IJ.showTime(imp, start, "Completed");
    long milliseconds = System.currentTimeMillis() - start;
    Utils.log(TITLE + " : Radius %d, Interval %d, Block size %d = %s, %s / frame", radius, interval, blockSize,
            Utils.timeToString(milliseconds), Utils.timeToString((double) milliseconds / imp.getStackSize()));
}

From source file:org.kurento.test.grid.GridHandler.java

public void runParallel(List<GridNode> nodeList, Runnable myFunc)
        throws InterruptedException, ExecutionException {
    ExecutorService exec = Executors.newFixedThreadPool(nodes.size());
    List<Future<?>> results = new ArrayList<>();
    for (int i = 0; i < nodes.size(); i++) {
        results.add(exec.submit(myFunc));
    }
    for (Future<?> r : results) {
        r.get();
    }
}
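
Two details are worth noting: the pool is sized from the nodes field rather than the nodeList parameter, and the executor is never shut down, so every call leaks a thread pool. A sketch of the same body with an orderly shutdown (keeping the original nodes.size() sizing):

    ExecutorService exec = Executors.newFixedThreadPool(nodes.size());
    try {
        List<Future<?>> results = new ArrayList<>();
        for (int i = 0; i < nodes.size(); i++) {
            results.add(exec.submit(myFunc));
        }
        for (Future<?> r : results) {
            r.get(); // rethrows any node failure as ExecutionException
        }
    } finally {
        exec.shutdown(); // release the pool threads
    }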

From source file:com.turn.ttorrent.common.Torrent.java

private static String hashFiles(List<File> files, int pieceLength)
        throws InterruptedException, IOException, NoSuchAlgorithmException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(pieceLength);
    List<Future<String>> results = new LinkedList<Future<String>>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        logger.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(),
                threads, (int) (Math.ceil((double) file.length() / pieceLength)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    logger.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / pieceLength));
    logger.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}
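
The sleep-based wait for termination works, but awaitTermination expresses the same intent directly and avoids polling every 10 ms. A sketch (the bound is illustrative; the enclosing method already declares InterruptedException):

    executor.shutdown();
    if (!executor.awaitTermination(1, TimeUnit.HOURS)) {
        executor.shutdownNow(); // give up and interrupt any remaining hashers
    }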

From source file:com.googlecode.icegem.cacheutils.regioncomparator.CompareTool.java

public void execute(String[] args, boolean debugEnabled, boolean quiet) {
    AdminDistributedSystem adminDs = AdminDistributedSystemFactory
            .getDistributedSystem(AdminDistributedSystemFactory.defineDistributedSystem());
    adminDs.connect();

    parseCommandLineArguments(args);

    List<Pool> poolList = new ArrayList<Pool>();
    if (serversOption != null && serversOption.length() > 0)
        for (String serverOption : serversOption.split(",")) {
            String serverHost = serverOption.substring(0, serverOption.indexOf("["));
            String serverPort = serverOption.substring(serverOption.indexOf("[") + 1,
                    serverOption.indexOf("]"));
            poolList.add(PoolManager.createFactory().addServer(serverHost, Integer.parseInt(serverPort))
                    .create("poolTo" + serverHost + serverPort));
        }
    if (locatorsProperties != null && !locatorsProperties.isEmpty())
        for (Object poolOption : locatorsProperties.keySet()) {
            String locator = (String) locatorsProperties.get(poolOption);
            String serverHost = locator.substring(0, locator.indexOf("["));
            String serverPort = locator.substring(locator.indexOf("[") + 1, locator.indexOf("]"));
            poolList.add(PoolManager.createFactory().addLocator(serverHost, Integer.parseInt(serverPort)) //todo: check when we have two identical locators options: exception a pool name already exist
                    .create("poolTo" + serverHost + serverPort));
        }

    //todo: insert checking that each cluster contains region and one's type is equal (Partitioned, Replicated)

    boolean partitioned = false; //todo: insert CLI usage  + throw exception if real region has another type

    List<ServerLocation> serverFromPool = new ArrayList<ServerLocation>();
    List<Pool> emptyPools = new ArrayList<Pool>(); //contains pool with no available servers
    for (Pool pool : poolList) {
        List<ServerLocation> allServers = null;
        if (!pool.getLocators().isEmpty())
            allServers = ((AutoConnectionSourceImpl) ((PoolImpl) pool).getConnectionSource()).findAllServers(); //todo: ConnectionError if locator doesn't exist
        else if (!pool.getServers().isEmpty())
            allServers = Arrays
                    .asList((((PoolImpl) pool).getConnectionSource()).findServer(Collections.emptySet()));

        if (allServers != null)
            serverFromPool.addAll(allServers);
        else {
            log.info("not found servers on locator {}", pool);
            emptyPools.add(pool);
        }
    }
    poolList.removeAll(emptyPools);

    if (serverFromPool.size() == 0) {
        log.info("no servers available");
        return;
    }

    printServerLocationDetails(serverFromPool);

    //source for comparison //todo: if this node doesn't contain region! it's problem
    Pool sourcePool;
    if (!partitioned) {
        int randomServerLocation = new Random().nextInt(serverFromPool.size());
        sourcePool = PoolManager.createFactory()
                .addServer(serverFromPool.get(randomServerLocation).getHostName(),
                        serverFromPool.get(randomServerLocation).getPort())
                .create("target");
    } else {
        sourcePool = poolList.get(0);
        poolList.remove(0);
    }

    FunctionService.registerFunction(new RegionInfoFunction());
    ResultCollector regionInfoResult = FunctionService.onServers(sourcePool).withArgs(regionName)
            .execute(new RegionInfoFunction());

    Map regionInfo = (HashMap) ((ArrayList) regionInfoResult.getResult()).get(0);
    System.out.println("region info: " + regionInfo);

    int totalNumBuckets = (Integer) regionInfo.get("totalNumBuckets");
    System.out.println("total keys' batch counts is " + totalNumBuckets);
    KeyExtractor keyExtractor = new KeyExtractor(regionName, sourcePool, partitioned, totalNumBuckets);

    Map<String, Map<String, Set>> clusterDifference = new HashMap<String, Map<String, Set>>(); //key: memberId; value: absent keys and differing values

    List<PoolResult> taskResults = new ArrayList<PoolResult>();
    List<Future<PoolResult>> collectTasks = new ArrayList<Future<PoolResult>>(poolList.size());
    ExecutorService executorService = Executors.newFixedThreadPool(poolList.size());
    while (keyExtractor.hasKeys()) {
        Set keys = keyExtractor.getNextKeysBatch();
        System.out.println("keys to check: " + keys);
        for (Pool nextPool : poolList)
            collectTasks.add(executorService.submit(new CollectorTask(keys, nextPool, regionName)));
        System.out.println("active tasks: " + collectTasks.size());
        try {
            for (Future<PoolResult> futureTask : collectTasks) {
                taskResults.add(futureTask.get());
            }
        } catch (InterruptedException ie) {
            ie.printStackTrace();
        } catch (ExecutionException ee) {
            ee.printStackTrace();
        }
        collectTasks.clear();

        System.out.println("compare contents..");
        //getting source contents
        Map sourceData = new HashMap();

        //getting source map
        FutureTask<PoolResult> ft = new FutureTask<PoolResult>(new CollectorTask(keys, sourcePool, regionName));
        ft.run();
        try {
            PoolResult rc = ft.get();
            List poolResult = (List) rc.getResultCollector().getResult();
            for (Object singleResult : poolResult) {
                sourceData.putAll((Map) ((HashMap) singleResult).get("map"));
            }
        } catch (Exception e) {
            throw new RuntimeException("error getting key-hash from pool: " + sourcePool, e);
        }
        //todo: aggregate members' data from one cluster

        System.out.println("source data is: " + sourceData);
        for (PoolResult taskResultFromPool : taskResults) {
            List poolResult = (ArrayList) taskResultFromPool.getResultCollector().getResult();
            if (!partitioned) {
                for (Object resultFromMember : poolResult) {
                    Map result = (HashMap) resultFromMember;
                    String memberId = (String) result.get("memberId");
                    if (regionInfo.get("id").equals(result.get("memberId"))) //for replicated region
                        continue;
                    Map<String, Set> aggregationInfo = compareAndAggregate(sourceData,
                            (HashMap) result.get("map"));
                    System.out.println("result of comparing is: " + aggregationInfo);
                    if (!clusterDifference.containsKey(memberId)) {
                        aggregationInfo.put("absentKeys", new HashSet());
                        clusterDifference.put(memberId, aggregationInfo);
                    } else {
                        Map<String, Set> difference = clusterDifference.get(memberId);
                        difference.get("absentKeys").addAll((Set) result.get("absentKeys"));
                        difference.get("diffValues").addAll(aggregationInfo.get("diffValues"));
                        clusterDifference.put(memberId, difference);
                    }
                }
            } else {
                Map targetData = new HashMap();
                Set absentKeysFromPool = new HashSet();

                //aggregate data from different members with partition region
                for (Object resultFromMember : poolResult) {
                    targetData.putAll((Map) ((HashMap) resultFromMember).get("map"));
                    absentKeysFromPool.addAll((Set) ((HashMap) resultFromMember).get("absentKeys"));
                }

                Map<String, Set> aggregationInfo = compareAndAggregate(sourceData, targetData);
                System.out.println("result of comparing is: " + aggregationInfo);
                String keyForPartitionRegionType = taskResultFromPool.getPool().toString();
                if (!clusterDifference.containsKey(keyForPartitionRegionType)) {
                    clusterDifference.put(keyForPartitionRegionType, aggregationInfo);
                } else {
                    Map<String, Set> difference = clusterDifference.get(keyForPartitionRegionType);
                    difference.get("absentKeys").addAll(aggregationInfo.get("absentKeys"));
                    difference.get("diffValues").addAll(aggregationInfo.get("diffValues"));
                    clusterDifference.put(keyForPartitionRegionType, difference);
                }
            }
        }

        taskResults.clear();
    }

    System.out.println("____________________________");
    System.out.println("difference: ");
    System.out.println(clusterDifference);
    executorService.shutdown();
    adminDs.disconnect();
}