Example usage for java.util.concurrent ExecutorService submit

List of usage examples for java.util.concurrent ExecutorService submit

Introduction

On this page you can find example usages of java.util.concurrent ExecutorService.submit.

Prototype

Future<?> submit(Runnable task);

Document

Submits a Runnable task for execution and returns a Future representing that task.
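
For orientation before the project excerpts below, here is a minimal, self-contained sketch (not taken from any of the listed sources) of the submit(Runnable) prototype above, together with the related submit(Callable<T>) overload that several of the examples use:

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class SubmitExample {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();

        // submit(Runnable) returns a Future<?>; get() yields null once the task has completed
        Future<?> done = pool.submit(() -> System.out.println("running a Runnable"));
        done.get();

        // The related submit(Callable<T>) overload, used by several examples below,
        // returns a Future<T> carrying the task's result
        Callable<Integer> task = () -> 6 * 7;
        Future<Integer> answer = pool.submit(task);
        System.out.println("result = " + answer.get());

        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}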

Usage

From source file: com.espertech.esper.multithread.dispatchmodel.TestMTDispatch.java

private void trySend(int numThreads, int numCount, int ratioDoubleAdd,
        UpdateDispatchViewModel updateDispatchView, DispatchService dispatchService) throws Exception {
    // execute
    ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);
    Future<?>[] future = new Future<?>[numThreads];
    DispatchCallable[] callables = new DispatchCallable[numThreads];
    DispatchProducer producer = new DispatchProducer(updateDispatchView);
    for (int i = 0; i < numThreads; i++) {
        callables[i] = new DispatchCallable(producer, i, numCount, ratioDoubleAdd, updateDispatchView,
                dispatchService);
        future[i] = threadPool.submit(callables[i]);
    }

    threadPool.shutdown();
    threadPool.awaitTermination(10, TimeUnit.SECONDS);

    for (int i = 0; i < numThreads; i++) {
        assertTrue((Boolean) future[i].get());
    }
}

From source file: fr.inria.lille.repair.nopol.NoPol.java

/**
 * Method used as proxy for runNopolProcessor to handle timeout
 */
private List<Patch> executeNopolProcessor(final List<TestResult> tests, final SourceLocation sourceLocation,
        final SpoonedClass spoonCl, final NopolProcessor nopolProcessor) {
    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final Future nopolExecution = executor.submit(new Callable() {
        @Override
        public Object call() throws Exception {
            return runNopolProcessor(tests, sourceLocation, spoonCl, nopolProcessor);
        }
    });
    try {
        executor.shutdown();
        return (List) nopolExecution.get(nopolContext.getMaxTimeEachTypeOfFixInMinutes(), TimeUnit.MINUTES);
    } catch (ExecutionException exception) {
        LoggerFactory.getLogger(this.getClass()).error("Error ExecutionException " + exception.toString());
        return Collections.emptyList();
    } catch (InterruptedException exception) {
        LoggerFactory.getLogger(this.getClass()).error("Repair interrupted");
        return Collections.emptyList();
    } catch (TimeoutException exception) {
        LoggerFactory.getLogger(this.getClass()).error("Timeout: execution time > "
                + nopolContext.getMaxTimeEachTypeOfFixInMinutes() + " " + TimeUnit.MINUTES, exception);
        return Collections.emptyList();
    }
}

From source file: com.github.kubernetes.java.client.live.KubernetesApiClientLiveTest.java

@Test
public void testCreateReplicationController() throws Exception {
    if (log.isDebugEnabled()) {
        log.debug("Creating a Replication Controller: " + contr);
    }
    getClient().createReplicationController(contr);
    assertNotNull(getClient().getReplicationController(contr.getId()));

    ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<PodList> future = executor.submit(new Callable<PodList>() {
        public PodList call() throws Exception {
            PodList pods;
            do {
                log.info("Waiting for Pods to be ready");
                Thread.sleep(1000);
                pods = getClient()
                        .getSelectedPods(ImmutableMap.of("name", "kubernetes-test-controller-selector"));
                if (pods.isEmpty()) {
                    continue;
                }

                StateInfo info = pods.get(0).getCurrentState().getInfo("kubernetes-test");
                if ((info != null) && info.getState("waiting") != null) {
                    throw new RuntimeException("Pod is waiting due to " + info.getState("waiting"));
                }
            } while (pods.isEmpty() || !FluentIterable.from(pods).allMatch(new Predicate<Pod>() {
                public boolean apply(Pod pod) {
                    return "Running".equals(pod.getCurrentState().getStatus());
                }
            }));
            return pods;
        }
    });

    PodList pods;
    try {
        pods = future.get(90, TimeUnit.SECONDS);
    } finally {
        executor.shutdownNow();
    }
    for (Pod pod : pods) {
        assertNotNull(pod.getCurrentState().getInfo("kubernetes-test").getState("running"));
        assertNotNull(pod.getCurrentState().getNetInfo().getState("running"));
    }

    // test recreation using same id
    try {
        getClient().createReplicationController(contr);
        fail("Should have thrown exception");
    } catch (Exception e) {
        // ignore
    }
    assertNotNull(getClient().getReplicationController(contr.getId()));

    PodList podList = getClient().getSelectedPods(contr.getLabels());
    assertNotNull(podList);
    assertNotNull(podList.getItems());
    assertEquals(contr.getDesiredState().getReplicas(), podList.getItems().size());
}

From source file: com.chess.genesis.net.SyncClient.java

private void sync_recent(final JSONObject json) {
    try {
        final JSONArray ids = json.getJSONArray("gameids");
        final ExecutorService pool = Executors.newCachedThreadPool();

        for (int i = 0, len = ids.length(); i < len; i++) {
            if (error)
                return;
            final NetworkClient nc = new NetworkClient(context, handle);
            nc.game_status(ids.getString(i));
            pool.submit(nc);

            lock++;
        }
        // Save sync time
        final PrefEdit pref = new PrefEdit(context);
        pref.putLong(R.array.pf_lastgamesync, json.getLong("time"));
        pref.commit();
    } catch (final JSONException e) {
        throw new RuntimeException(e.getMessage(), e);
    }
}

From source file: ok.MyService2.java

@Override
protected Task<BlockingQueue> createTask() {
    final Task<BlockingQueue> task;
    task = new Task<BlockingQueue>() {

        @Override
        protected BlockingQueue call() throws Exception {
            BlockingQueue result = new LinkedBlockingQueue<String>();

            PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager();
            cm.setMaxTotal(100);

            CloseableHttpClient httpclient = HttpClients.custom().setConnectionManager(cm).build();
            try {
                ExecutorService executor = Executors.newFixedThreadPool(sites.size());
                List<Future<String>> results = new ArrayList<Future<String>>();
                for (int i = 0; i < sites.size(); i++) {
                    HttpGet httpget = new HttpGet(sites.get(i));
                    Callable worker = new MyCallable(httpclient, httpget);
                    Future<String> res = executor.submit(worker);
                    results.add(res);
                    // String url = hostList[i];
                    //   Runnable worker = new MyRunnable(url);
                    //   executor.execute(worker);
                    //   executor.submit(null);

                }
                executor.shutdown();
                // Wait until all threads are finished
                //                   while (!executor.isTerminated()) {
                //
                //                   }
                for (Future<String> element : results) {
                    result.add(element.get());
                }
                System.out.println("\nFinished all threads");

            } finally {
                httpclient.close();
            }
            return result;
        }

    };
    return task;
}

From source file: com.jivesoftware.os.amza.service.storage.binary.BinaryRowReaderWriterTest.java

@Test(enabled = false)
public void testConcurrency() throws Exception {
    MemoryBackedWALFiler walFiler = new MemoryBackedWALFiler(
            new MultiAutoGrowingByteBufferBackedFiler(32, 1_024 * 1_024, new HeapByteBufferFactory()));
    IoStats ioStats = new IoStats();
    BinaryRowReader binaryRowReader = new BinaryRowReader(walFiler);
    BinaryRowWriter binaryRowWriter = new BinaryRowWriter(walFiler);

    ExecutorService executors = Executors.newFixedThreadPool(9);
    AtomicBoolean running = new AtomicBoolean(true);
    AtomicLong scanned = new AtomicLong();
    List<Future<?>> futures = Lists.newArrayList();
    for (int i = 0; i < 8; i++) {
        futures.add(executors.submit(() -> {
            try {
                while (running.get()) {
                    binaryRowReader.scan(ioStats, 0, false, (rowFP, rowTxId, rowType, row) -> {
                        scanned.incrementAndGet();
                        return true;
                    });
                }
                return true;
            } catch (Throwable t) {
                t.printStackTrace();
                throw t;
            }
        }));
    }
    futures.add(executors.submit(() -> {
        try {
            for (int i = 0; i < 1_000_000; i++) {
                byte[] row = UIO.intBytes(i);
                binaryRowWriter.write(ioStats, i, RowType.primary, 1, 16, stream -> stream.stream(row),
                        stream -> true,
                        (txId, prefix, key, value, valueTimestamp, valueTombstoned, valueVersion, fp) -> true,
                        false, false);
                if (i % 10_000 == 0) {
                    System.out.println("Finished i:" + i + " scanned:" + scanned.get());
                }
            }
        } finally {
            running.set(false);
        }
        return null;
    }));

    for (Future<?> future : futures) {
        future.get();
    }
}

From source file: com.netflix.priam.resources.BackupServlet.java

/**
 * Convert SSTable2Json and search for given key
 */
public void checkSSTablesForKey(String rowkey, String keyspace, String cf, String fileExtension,
        String jsonFilePath) throws Exception {
    try {
        logger.info("Starting SSTable2Json conversion ...");
        // Setting timeout to 10 minutes
        long TIMEOUT_PERIOD = 10L;
        String unixCmd = formulateCommandToRun(rowkey, keyspace, cf, fileExtension, jsonFilePath);

        String[] cmd = { "/bin/sh", "-c", unixCmd.toString() };
        final Process p = Runtime.getRuntime().exec(cmd);

        Callable<Integer> callable = new Callable<Integer>() {
            @Override
            public Integer call() throws Exception {
                int returnCode = p.waitFor();
                return returnCode;
            }
        };

        ExecutorService exeService = Executors.newSingleThreadExecutor();
        try {
            Future<Integer> future = exeService.submit(callable);
            int returnVal = future.get(TIMEOUT_PERIOD, TimeUnit.MINUTES);
            if (returnVal == 0)
                logger.info("Finished SSTable2Json conversion and search.");
            else
                logger.error("Error occurred during SSTable2Json conversion and search.");
        } catch (TimeoutException e) {
            logger.error(ExceptionUtils.getFullStackTrace(e));
            throw e;
        } finally {
            p.destroy();
            exeService.shutdown();
        }

    } catch (IOException e) {
        logger.error(ExceptionUtils.getFullStackTrace(e));
    }
}

From source file: net.kungfoo.grizzly.proxy.impl.ProxyAdapter.java

/**
 * {@inheritDoc}
 */
public void service(Request request, Response response) throws Exception {
    String uri = request.unparsedURI().toString();

    final MessageBytes method = request.method();
    logURIAndMethod(uri, method);

    if (maxForwards(request, response, method))
        return;

    String targetHost = request.serverName().toString();
    int targetPort = request.getServerPort();

    ProxyProcessingInfo proxyTask = new ProxyProcessingInfo();

    // TODO: think of it.
    synchronized (proxyTask) {

        // from connected

        // Initialize connection state
        proxyTask.setTarget(new HttpHost(targetHost, targetPort));
        proxyTask.setRequest(convert(method.getString(), uri, request));
        proxyTask.setOriginalRequest(request);
        Runnable completion = (Runnable) request.getAttribute(CALLBACK_KEY);
        proxyTask.setCompletion(completion);
        proxyTask.setResponse(response);

        InetSocketAddress address = new InetSocketAddress(targetHost, targetPort);

        if (!IOReactorStatus.ACTIVE.equals(connectingIOReactor.getStatus())) {
            System.err.println("Connecting reactor not running.");
            response.setStatus(500);
            response.setMessage("Internal Booo");
            // complete request.
            ExecutorService executorService = Executors.newFixedThreadPool(1, new ThreadFactory() {
                @Override
                public Thread newThread(Runnable r) {
                    return new Thread(r, "EmergencyService"); //To change body of implemented methods use File | Settings | File Templates.
                }
            });
            executorService.submit(completion);
            return;
        } else {
            connectingIOReactor.connect(address, null, proxyTask, null);
        }

        // from requestReceived
        try {
            System.out.println(request + " [client->proxy] >> " + request.unparsedURI().toString());

            // Update connection state
            proxyTask.setClientState(ConnState.REQUEST_RECEIVED);

            if (request.getContentLength() != 0) {
                proxyTask.setClientState(ConnState.REQUEST_BODY_DONE);
            }
            // See if the client expects a 100-Continue
            if (isExpectContinue(request)) {
                response.setStatus(HttpStatus.SC_CONTINUE);
                response.sendHeaders();
            }
        } catch (IOException ignore) {
            System.out.println("err " + ignore.getMessage());
        }
    }

    // handle "Via", TODO: should go after we have headers from target server.
    response.setHeader(Via.name(), request.protocol() + " antares");// TODO hostname, and Via from response

}

From source file: com.splicemachine.stream.StreamableRDDTest.java

@Test
public void testConcurrentQueries() throws StandardException, ExecutionException, InterruptedException {
    final StreamListener<ExecRow> sl1 = new StreamListener<>();
    final StreamListener<ExecRow> sl2 = new StreamListener<>();
    final StreamListener<ExecRow> sl3 = new StreamListener<>();
    HostAndPort hostAndPort = server.getHostAndPort();
    server.register(sl1);
    server.register(sl2);
    server.register(sl3);

    List<Tuple2<ExecRow, ExecRow>> manyRows = new ArrayList<>();
    for (int i = 0; i < 100000; ++i) {
        manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
    }

    JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContext().parallelizePairs(manyRows, 12);
    final StreamableRDD srdd1 = new StreamableRDD(rdd.values(), sl1.getUuid(), hostAndPort.getHostText(),
            hostAndPort.getPort());
    final StreamableRDD srdd2 = new StreamableRDD(rdd.values().map(new Function<ExecRow, ExecRow>() {
        @Override
        public ExecRow call(ExecRow o) throws Exception {
            o.getColumn(1).setValue(0);
            return o;
        }
    }), sl2.getUuid(), hostAndPort.getHostText(), hostAndPort.getPort());
    final StreamableRDD srdd3 = new StreamableRDD(rdd.values(), sl3.getUuid(), hostAndPort.getHostText(),
            hostAndPort.getPort());
    for (final StreamableRDD srdd : Arrays.asList(srdd1, srdd2, srdd3)) {
        new Thread() {
            @Override
            public void run() {
                try {
                    srdd.submit();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }

            }
        }.start();
    }
    // We collect the results asynchronously into memory so that we can iterate over all three at the same
    // time. Otherwise, tasks for the third RDD might be blocked by tasks of the other RDDs, and since we
    // would not yet be consuming elements from the other iterators, those tasks could never become unblocked.
    ExecutorService executor = Executors.newFixedThreadPool(3);
    Future<List<ExecRow>> future1 = executor.submit(new Callable<List<ExecRow>>() {
        @Override
        public List<ExecRow> call() throws Exception {
            return IteratorUtils.toList(sl1.getIterator());
        }
    });
    Future<List<ExecRow>> future2 = executor.submit(new Callable<List<ExecRow>>() {
        @Override
        public List<ExecRow> call() throws Exception {
            return IteratorUtils.toList(sl2.getIterator());
        }
    });
    Future<List<ExecRow>> future3 = executor.submit(new Callable<List<ExecRow>>() {
        @Override
        public List<ExecRow> call() throws Exception {
            return IteratorUtils.toList(sl3.getIterator());
        }
    });
    Iterator<ExecRow> it1 = future1.get().iterator();
    Iterator<ExecRow> it2 = future2.get().iterator();
    Iterator<ExecRow> it3 = future3.get().iterator();
    int count = 0;
    while (it1.hasNext()) {
        ExecRow r1 = it1.next();
        ExecRow r2 = it2.next();
        ExecRow r3 = it3.next();
        count++;
        assertNotNull(r1);
        assertNotNull(r2);
        assertNotNull(r3);
        assertEquals(0, r2.getColumn(1).getInt());
        assertEquals(r1.getColumn(1), r3.getColumn(1));
        assertEquals(r1.getColumn(2), r2.getColumn(2));
    }
    assertEquals(100000, count);
}

From source file: com.splout.db.integration.TestMultiThreadedQueryAndDeploy.java

@Test
@Ignore // Causes some non-deterministic problems, to be analyzed
public void test() throws Throwable {
    FileUtils.deleteDirectory(new File(TMP_FOLDER));
    new File(TMP_FOLDER).mkdirs();

    createSploutEnsemble(N_QNODES, N_DNODES);
    String[] qNodeAddresses = new String[N_QNODES];
    for (int i = 0; i < N_QNODES; i++) {
        qNodeAddresses[i] = getqNodes().get(i).getAddress();
    }

    final SploutClient client = new SploutClient(qNodeAddresses);
    final Tablespace testTablespace = createTestTablespace(N_DNODES);
    final Random random = new Random(SEED);
    final AtomicBoolean failed = new AtomicBoolean(false);
    final AtomicInteger iteration = new AtomicInteger(0);
    final Set<Integer> iterationsSeen = new HashSet<Integer>();

    deployIteration(0, random, client, testTablespace);

    for (QNode qnode : getqNodes()) {
        // Make sure all QNodes are aware of the first deploy.
        // There might be some delay as they have to receive notifications via Hazelcast etc.
        long waitedSoFar = 0;
        QueryStatus status = null;
        SploutClient perQNodeClient = new SploutClient(qnode.getAddress());
        do {
            status = perQNodeClient.query(TABLESPACE, "0", "SELECT * FROM " + TABLE + ";", null);
            Thread.sleep(100);
            waitedSoFar += 100;
            if (waitedSoFar > 5000) {
                throw new AssertionError("Waiting too much on a test condition");
            }
        } while (status == null || status.getError() != null);
        log.info("QNode [" + qnode.getAddress() + "] is ready to serve deploy 0.");
    }

    try {
        // Business logic here
        ExecutorService service = Executors.newFixedThreadPool(N_THREADS);

        // These threads will continuously perform queries and check that the results are consistent.
        // They will also count how many deploys have happened since the beginning.
        for (int i = 0; i < N_THREADS; i++) {
            service.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        while (true) {
                            int randomDNode = Math.abs(random.nextInt()) % N_DNODES;
                            QueryStatus status = client.query(TABLESPACE, (randomDNode * 10) + "",
                                    "SELECT * FROM " + TABLE + ";", null);
                            log.info("Query status -> " + status);
                            assertEquals(1, status.getResult().size());
                            Map<String, Object> jsonResult = (Map<String, Object>) status.getResult().get(0);
                            Integer seenIteration = (Integer) jsonResult.get("iteration");
                            synchronized (iterationsSeen) {
                                iterationsSeen.add(seenIteration);
                            }
                            assertTrue(seenIteration <= iteration.get());
                            assertEquals(randomDNode, jsonResult.get("dnode"));
                            Thread.sleep(100);
                        }
                    } catch (InterruptedException ie) {
                        // Bye bye
                        log.info("Bye bye!");
                    } catch (Throwable e) {
                        e.printStackTrace();
                        failed.set(true);
                    }
                }
            });
        }

        final SploutConfiguration config = SploutConfiguration.getTestConfig();
        final int iterationsToPerform = config.getInt(QNodeProperties.VERSIONS_PER_TABLESPACE) + 5;
        for (int i = 0; i < iterationsToPerform; i++) {
            iteration.incrementAndGet();
            log.info("Deploy iteration: " + iteration.get());
            deployIteration(iteration.get(), random, client, testTablespace);

            new TestUtils.NotWaitingForeverCondition() {
                @Override
                public boolean endCondition() {
                    synchronized (iterationsSeen) {
                        return iterationsSeen.size() == (iteration.get() + 1);
                    }
                }
            }.waitAtMost(5000);
        }

        assertEquals(false, failed.get());

        service.shutdownNow(); // will interrupt all threads
        while (!service.isTerminated()) {
            Thread.sleep(100);
        }

        CoordinationStructures coord = TestUtils.getCoordinationStructures(config);
        assertNotNull(coord.getCopyVersionsBeingServed().get(TABLESPACE));

        // Assert that there is only MAX_VERSIONS versions of the tablespace (due to old version cleanup)
        new TestUtils.NotWaitingForeverCondition() {

            @Override
            public boolean endCondition() {
                QNodeHandler handler = (QNodeHandler) qNodes.get(0).getHandler();
                int seenVersions = 0;
                for (Map.Entry<TablespaceVersion, Tablespace> tablespaceVersion : handler.getContext()
                        .getTablespaceVersionsMap().entrySet()) {
                    if (tablespaceVersion.getKey().getTablespace().equals(TABLESPACE)) {
                        seenVersions++;
                    }
                }
                return seenVersions <= config.getInt(QNodeProperties.VERSIONS_PER_TABLESPACE);
            }
        }.waitAtMost(5000);
    } finally {
        closeSploutEnsemble();
        FileUtils.deleteDirectory(new File(TMP_FOLDER));
    }
}