Example usage for java.util.concurrent Executors newFixedThreadPool

List of usage examples for java.util.concurrent Executors newFixedThreadPool

Introduction

On this page you can find example usages of java.util.concurrent Executors newFixedThreadPool.

Prototype

public static ExecutorService newFixedThreadPool(int nThreads) 

Document

Creates a thread pool that reuses a fixed number of threads operating off a shared unbounded queue.
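
Before the full examples below, a minimal sketch of the typical pattern: create the pool, submit work, then shut it down and wait for completion. The pool size (4) and task count (10) are arbitrary illustration values, not taken from any example on this page.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class FixedThreadPoolSketch {
    public static void main(String[] args) throws InterruptedException {
        // Fixed number of worker threads, backed by a shared unbounded queue
        ExecutorService pool = Executors.newFixedThreadPool(4);

        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            pool.execute(() -> System.out
                    .println("Task " + taskId + " ran on " + Thread.currentThread().getName()));
        }

        // Stop accepting new tasks, then wait for the submitted ones to finish
        pool.shutdown();
        if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
            pool.shutdownNow();
        }
    }
}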

Usage

From source file:com.damon.rocketmq.example.benchmark.Producer.java

public static void main(String[] args) throws MQClientException, UnsupportedEncodingException {

    Options options = ServerUtil.buildCommandlineOptions(new Options());
    CommandLine commandLine = ServerUtil.parseCmdLine("benchmarkProducer", args,
            buildCommandlineOptions(options), new PosixParser());
    if (null == commandLine) {
        System.exit(-1);
    }

    final String topic = commandLine.hasOption('t') ? commandLine.getOptionValue('t').trim() : "BenchmarkTest";
    final int threadCount = commandLine.hasOption('w') ? Integer.parseInt(commandLine.getOptionValue('w')) : 64;
    final int messageSize = commandLine.hasOption('s') ? Integer.parseInt(commandLine.getOptionValue('s'))
            : 128;
    final boolean keyEnable = commandLine.hasOption('k')
            && Boolean.parseBoolean(commandLine.getOptionValue('k'));

    System.out.printf("topic %s threadCount %d messageSize %d keyEnable %s%n", topic, threadCount, messageSize,
            keyEnable);

    final Logger log = ClientLogger.getLog();

    final Message msg = buildMessage(messageSize, topic);

    final ExecutorService sendThreadPool = Executors.newFixedThreadPool(threadCount);

    final StatsBenchmarkProducer statsBenchmark = new StatsBenchmarkProducer();

    final Timer timer = new Timer("BenchmarkTimerThread", true);

    final LinkedList<Long[]> snapshotList = new LinkedList<Long[]>();

    timer.scheduleAtFixedRate(new TimerTask() {
        @Override
        public void run() {
            snapshotList.addLast(statsBenchmark.createSnapshot());
            if (snapshotList.size() > 10) {
                snapshotList.removeFirst();
            }
        }
    }, 1000, 1000);

    timer.scheduleAtFixedRate(new TimerTask() {
        private void printStats() {
            if (snapshotList.size() >= 10) {
                Long[] begin = snapshotList.getFirst();
                Long[] end = snapshotList.getLast();

                final long sendTps = (long) (((end[3] - begin[3]) / (double) (end[0] - begin[0])) * 1000L);
                final double averageRT = (end[5] - begin[5]) / (double) (end[3] - begin[3]);

                System.out.printf(
                        "Send TPS: %d Max RT: %d Average RT: %7.3f Send Failed: %d Response Failed: %d%n",
                        sendTps, statsBenchmark.getSendMessageMaxRT().get(), averageRT, end[2], end[4]);
            }
        }

        @Override
        public void run() {
            try {
                this.printStats();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }, 10000, 10000);

    final DefaultMQProducer producer = new DefaultMQProducer("benchmark_producer");
    producer.setInstanceName(Long.toString(System.currentTimeMillis()));

    if (commandLine.hasOption('n')) {
        String ns = commandLine.getOptionValue('n');
        producer.setNamesrvAddr(ns);
    }

    producer.setCompressMsgBodyOverHowmuch(Integer.MAX_VALUE);

    producer.start();

    for (int i = 0; i < threadCount; i++) {
        sendThreadPool.execute(new Runnable() {
            @Override
            public void run() {
                while (true) {
                    try {
                        final long beginTimestamp = System.currentTimeMillis();
                        if (keyEnable) {
                            msg.setKeys(String.valueOf(beginTimestamp / 1000));
                        }
                        producer.send(msg);
                        statsBenchmark.getSendRequestSuccessCount().incrementAndGet();
                        statsBenchmark.getReceiveResponseSuccessCount().incrementAndGet();
                        final long currentRT = System.currentTimeMillis() - beginTimestamp;
                        statsBenchmark.getSendMessageSuccessTimeTotal().addAndGet(currentRT);
                        long prevMaxRT = statsBenchmark.getSendMessageMaxRT().get();
                        while (currentRT > prevMaxRT) {
                            boolean updated = statsBenchmark.getSendMessageMaxRT().compareAndSet(prevMaxRT,
                                    currentRT);
                            if (updated)
                                break;

                            prevMaxRT = statsBenchmark.getSendMessageMaxRT().get();
                        }
                    } catch (RemotingException e) {
                        statsBenchmark.getSendRequestFailedCount().incrementAndGet();
                        log.error("[BENCHMARK_PRODUCER] Send Exception", e);

                        try {
                            Thread.sleep(3000);
                        } catch (InterruptedException ignored) {
                        }
                    } catch (InterruptedException e) {
                        statsBenchmark.getSendRequestFailedCount().incrementAndGet();
                        try {
                            Thread.sleep(3000);
                        } catch (InterruptedException e1) {
                        }
                    } catch (MQClientException e) {
                        statsBenchmark.getSendRequestFailedCount().incrementAndGet();
                        log.error("[BENCHMARK_PRODUCER] Send Exception", e);
                    } catch (MQBrokerException e) {
                        statsBenchmark.getReceiveResponseFailedCount().incrementAndGet();
                        log.error("[BENCHMARK_PRODUCER] Send Exception", e);
                        try {
                            Thread.sleep(3000);
                        } catch (InterruptedException ignored) {
                        }
                    }
                }
            }
        });
    }
}

From source file:accumulo.AccumuloStuff.java

public static void main(String[] args) throws Exception {
    File tmp = new File(System.getProperty("user.dir") + "/target/mac-test");
    if (tmp.exists()) {
        FileUtils.deleteDirectory(tmp);
    }
    tmp.mkdirs();
    String passwd = "password";

    MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(tmp, passwd);
    cfg.setNumTservers(1);
    //    cfg.useMiniDFS(true);

    final MiniAccumuloClusterImpl cluster = cfg.build();
    setCoreSite(cluster);
    cluster.start();

    ExecutorService svc = Executors.newFixedThreadPool(2);

    try {
        Connector conn = cluster.getConnector("root", passwd);
        String table = "table";
        conn.tableOperations().create(table);

        final BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
        final AtomicBoolean flushed = new AtomicBoolean(false);

        Runnable writer = new Runnable() {
            @Override
            public void run() {
                try {
                    Mutation m = new Mutation("row");
                    m.put("colf", "colq", "value");
                    bw.addMutation(m);
                    bw.flush();
                    flushed.set(true);
                } catch (Exception e) {
                    log.error("Got exception trying to flush mutation", e);
                }

                log.info("Exiting batchwriter thread");
            }
        };

        Runnable restarter = new Runnable() {
            @Override
            public void run() {
                try {
                    for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
                        cluster.killProcess(ServerType.TABLET_SERVER, proc);
                    }
                    cluster.exec(TabletServer.class);
                } catch (Exception e) {
                    log.error("Caught exception restarting tabletserver", e);
                }
                log.info("Exiting restart thread");
            }
        };

        svc.execute(writer);
        svc.execute(restarter);

        log.info("Waiting for shutdown");
        svc.shutdown();
        if (!svc.awaitTermination(120, TimeUnit.SECONDS)) {
            log.info("Timeout on shutdown exceeded");
            svc.shutdownNow();
        } else {
            log.info("Cleanly shutdown");
            log.info("Threadpool is terminated? " + svc.isTerminated());
        }

        if (flushed.get()) {
            log.info("****** BatchWriter was flushed *********");
        } else {
            log.info("****** BatchWriter was NOT flushed *********");
        }

        bw.close();

        log.info("Got record {}", Iterables.getOnlyElement(conn.createScanner(table, Authorizations.EMPTY)));
    } finally {
        cluster.stop();
    }
}

From source file:com.rk.grid.federation.FederatedCluster.java

/**
 * @param args
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
    int port = Integer.parseInt(args[0]);
    String clusterName = args[1];
    String masterBrokerServiceName = args[2];
    int masterPort = Integer.parseInt(args[3]);
    String masterHost = args[4];

    IBroker<Object> masterBroker = null;
    for (int i = 0; i < 100; i++) {
        try {
            masterBroker = getConnection(masterBrokerServiceName, masterPort, masterHost);
            if (masterBroker != null)
                break;
        } catch (RemoteLookupFailureException e) {
            if (i % 100 == 0)
                System.out.println("Sleeping....");
        }
        Thread.sleep(100);
    }

    if (masterBroker == null)
        throw new RuntimeException("Unable to find master broker " + masterBrokerServiceName);

    BrokerInfo brokerInfo = masterBroker.getBrokerInfo();
    GridConfig gridConfig = brokerInfo.getConfig();
    List<String> jvmNodeParams = masterBroker.getBrokerInfo().getJvmNodeParams();
    GridExecutorService cluster = new GridExecutorService(port, jvmNodeParams, gridConfig, clusterName);
    cluster.getBroker().unPause();

    final TaskExecutor taskExecutor = new TaskExecutor(cluster);

    final IRemoteResultsHandler<Object> callback = masterBroker.getCallback();
    IWorkQueue<Object> workQueue = masterBroker.getWorkQueue();

    ExecutorService pool = Executors.newFixedThreadPool(3);

    masterBroker.unPause();

    while (!Thread.currentThread().isInterrupted()) {
        final IExecutable<?> executable = workQueue.take();

        if (executable == null)
            continue;

        if (executable.equals(IExecutable.POISON)) {
            break;
        }

        Callable<Object> callable = new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                Future<ITaskResult<?>> future = taskExecutor.submit(executable);
                ITaskResult<?> iResult = future.get();

                String uid = executable.getUID();
                try {
                    callback.accept(new RemoteResult<Object>(iResult, uid));
                } catch (Throwable t) {
                    t.printStackTrace();
                    try {
                        callback.accept(new RemoteResult<Object>(
                                new RemoteExecutorException("Error execution remote task '" + uid + "'", t),
                                uid));
                    } catch (RemoteException e) {
                        throw new RuntimeException(e);
                    }
                }
                return null;
            }

        };

        pool.submit(callable);
    }
    pool.shutdown();
    taskExecutor.shutdown();
    System.out.println("Finished...!");
}

From source file:com.fjn.helper.frameworkex.apache.commons.pool.connectionPool.ConnectionManager.java

public static void main(String[] args) {
    final ConnectionManager mgr = new ConnectionManager();
    mgr.connFactory = new ConnectionFactory();
    mgr.connFactory.setDriverClass("com.mysql.jdbc.Driver");
    mgr.connFactory.setPassword("mysql");
    mgr.connFactory.setUsername("mysql");
    mgr.connFactory.setUrl("url:localhost:3306"); // database URL

    mgr.initConnectionPool(1000, 50, 5, 1000 * 60);
    mgr.pool = mgr.connPoolFactory.createPool();

    final AtomicInteger count = new AtomicInteger(0);

    int threadNum = Runtime.getRuntime().availableProcessors();
    ExecutorService client = Executors.newFixedThreadPool(threadNum);
    for (int i = 0; i < threadNum; i++) {
        client.submit(new Runnable() {
            @Override
            public void run() {
                while (count.get() < 100) {
                    try {
                        Thread.sleep(500);
                    } catch (InterruptedException e1) {
                        e1.printStackTrace();
                    }
                    Connection connection = null;

                    try {
                        connection = (Connection) mgr.pool.borrowObject();
                        try {

                            int value = count.incrementAndGet();
                            if (value < 100) {
                                String threadName = Thread.currentThread().getName();

                                int activeNum = mgr.pool.getNumActive();
                                int idleNum = mgr.pool.getNumIdle();
                                String content = "ThreadName: " + threadName + "\t SQL: "
                                        + "insert into tableA ( ct ) values ('" + value + "'); \t activeNum="
                                        + activeNum + "\t idleNum=" + idleNum;
                                System.out.println(content);
                            }

                        } catch (Exception e) {
                            mgr.pool.invalidateObject(connection);
                            connection = null;
                        } finally {
                            // make sure the object is returned to the pool
                            if (null != connection) {
                                mgr.pool.returnObject(connection);
                            }
                        }
                    } catch (Exception e) {
                        // failed to borrow an object
                    }

                }
            }
        });
    }
}

From source file:ParallelizedMatrixProduct.java

public static void main(String args[]) throws Exception {

    System.setSecurityManager(new YesSecurityManager());

    double[][] matrix1 = new double[MATRIX_SIZE][MATRIX_SIZE];
    double[][] matrix2 = new double[MATRIX_SIZE][MATRIX_SIZE];

    for (int i = 0; i < MATRIX_SIZE; ++i)
        for (int j = 0; j < MATRIX_SIZE; ++j) {
            matrix1[i][j] = Math.round(Math.random() * MATRIX_ELEMENT_MAX_VALUE);
            matrix2[i][j] = Math.round(Math.random() * MATRIX_ELEMENT_MAX_VALUE);
        }

    ExecutorService exec = Executors.newFixedThreadPool(THREAD_POOL_SIZE);
    Future<Double>[][] futures = new Future[MATRIX_SIZE][MATRIX_SIZE];
    for (int i = 0; i < MATRIX_SIZE; ++i) {
        for (int j = 0; j < MATRIX_SIZE; ++j) {
            final double[] v1 = getRow(matrix1, i);
            final double[] v2 = getColumn(matrix2, j);

            if (i % 2 == 0) {
                futures[i][j] = exec.submit(new Callable<Double>() {
                    public Double call() {

                        RPFSessionInfo.get().put("USER", "USER FOR " + Thread.currentThread().getName());
                        RServices rp = null;
                        int replayCounter = NBR_REPLAY_ON_FAILURE;

                        while (replayCounter >= 0) {

                            try {

                                rp = (RServices) org.kchine.rpf.ServantProviderFactory.getFactory()
                                        .getServantProvider().borrowServantProxy();

                                rp.putAndAssign(new RNumeric(v1), "rv1");
                                rp.putAndAssign(new RNumeric(v2), "rv2");
                                RMatrix res = ((RMatrix) rp.getObject("rv1%*%rv2"));

                                return ((RNumeric) res.getValue()).getValue()[0];

                            } catch (TimeoutException e) {
                                e.printStackTrace();
                                return null;
                            } catch (RemoteException re) {
                                re.printStackTrace();
                                --replayCounter;

                            } finally {

                                try {
                                    if (rp != null) {
                                        ServantProviderFactory.getFactory().getServantProvider()
                                                .returnServantProxy(rp);
                                        log.info("<" + Thread.currentThread().getName()
                                                + "> returned resource : " + rp.getServantName());
                                    }
                                } catch (Exception e) {
                                    e.printStackTrace();
                                }

                            }

                        }

                        return null;

                    }
                });
            } else {
                futures[i][j] = exec.submit(new Callable<Double>() {
                    public Double call() {

                        try {
                            return vecprod(v1, v2);
                        } finally {
                            log.info("<" + Thread.currentThread().getName() + "> Java task ended successfully");
                        }
                    }
                });
            }
        }

    }

    while (true) {
        if (countDone(futures) == (MATRIX_SIZE * MATRIX_SIZE))
            break;
        try {
            Thread.sleep(20);
        } catch (Exception e) {
        }
    }

    log.info(" done --  product matrix -->");

    Double[][] matrix1_x_matrix2 = new Double[MATRIX_SIZE][MATRIX_SIZE];
    for (int i = 0; i < MATRIX_SIZE; ++i)
        for (int j = 0; j < MATRIX_SIZE; ++j)
            matrix1_x_matrix2[i][j] = futures[i][j].get();

    System.out.println(showMatrix(matrix1, "M1"));
    System.out.println(showMatrix(matrix2, "M2"));
    System.out.println(showMatrix(matrix1_x_matrix2, "M1 x M2"));

    System.exit(0);
}

From source file:com.linkedin.pinotdruidbenchmark.DruidThroughput.java

@SuppressWarnings("InfiniteLoopStatement")
public static void main(String[] args) throws Exception {
    if (args.length != 3 && args.length != 4) {
        System.err.println(
                "3 or 4 arguments required: QUERY_DIR, RESOURCE_URL, NUM_CLIENTS, TEST_TIME (seconds).");
        return;
    }

    File queryDir = new File(args[0]);
    String resourceUrl = args[1];
    final int numClients = Integer.parseInt(args[2]);
    final long endTime;
    if (args.length == 3) {
        endTime = Long.MAX_VALUE;
    } else {
        endTime = System.currentTimeMillis() + Integer.parseInt(args[3]) * MILLIS_PER_SECOND;
    }

    File[] queryFiles = queryDir.listFiles();
    assert queryFiles != null;
    Arrays.sort(queryFiles);

    final int numQueries = queryFiles.length;
    final HttpPost[] httpPosts = new HttpPost[numQueries];
    for (int i = 0; i < numQueries; i++) {
        HttpPost httpPost = new HttpPost(resourceUrl);
        httpPost.addHeader("content-type", "application/json");
        StringBuilder stringBuilder = new StringBuilder();
        try (BufferedReader bufferedReader = new BufferedReader(new FileReader(queryFiles[i]))) {
            int length;
            while ((length = bufferedReader.read(CHAR_BUFFER)) > 0) {
                stringBuilder.append(new String(CHAR_BUFFER, 0, length));
            }
        }
        String query = stringBuilder.toString();
        httpPost.setEntity(new StringEntity(query));
        httpPosts[i] = httpPost;
    }

    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicLong totalResponseTime = new AtomicLong(0L);
    final ExecutorService executorService = Executors.newFixedThreadPool(numClients);

    for (int i = 0; i < numClients; i++) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
                    while (System.currentTimeMillis() < endTime) {
                        long startTime = System.currentTimeMillis();
                        CloseableHttpResponse httpResponse = httpClient
                                .execute(httpPosts[RANDOM.nextInt(numQueries)]);
                        httpResponse.close();
                        long responseTime = System.currentTimeMillis() - startTime;
                        counter.getAndIncrement();
                        totalResponseTime.getAndAdd(responseTime);
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        });
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    while (System.currentTimeMillis() < endTime) {
        Thread.sleep(REPORT_INTERVAL_MILLIS);
        double timePassedSeconds = ((double) (System.currentTimeMillis() - startTime)) / MILLIS_PER_SECOND;
        int count = counter.get();
        double avgResponseTime = ((double) totalResponseTime.get()) / count;
        System.out.println("Time Passed: " + timePassedSeconds + "s, Query Executed: " + count + ", QPS: "
                + count / timePassedSeconds + ", Avg Response Time: " + avgResponseTime + "ms");
    }
}

From source file:com.mycompany.asyncreq.MainApp.java

public static void main(String[] args) throws InterruptedException, ExecutionException, IOException {
    URIBuilder builder = new URIBuilder();
    builder.setScheme("http").setHost("comtrade.un.org").setPath("/api/get").setParameter("max", "50000")
            .setParameter("type", "C").setParameter("freq", "M").setParameter("px", "HS")
            .setParameter("ps", "2014").setParameter("r", "804").setParameter("p", "112")
            .setParameter("rg", "All").setParameter("cc", "All").setParameter("fmt", "json");
    URI requestURL = null;
    try {
        requestURL = builder.build();
    } catch (URISyntaxException use) {
    }

    ExecutorService threadpool = Executors.newFixedThreadPool(2);
    Async async = Async.newInstance().use(threadpool);
    final Request request = Request.Get(requestURL);

    try {
        Future<Content> future = async.execute(request, new FutureCallback<Content>() {
            @Override
            public void failed(final Exception e) {
                System.out.println(e.getMessage() + ": " + request);
            }

            @Override
            public void completed(final Content content) {
                System.out.println("Request completed: " + request);
                System.out.println("Response:\n" + content.asString());
            }

            @Override
            public void cancelled() {
            }
        });
    } catch (Exception e) {
        System.out.println("Job threw exception: " + e.getCause());
    }

}

From source file:fr.tpt.s3.mcdag.scheduling.Main.java

public static void main(String[] args) throws IOException, InterruptedException {

    /* Command line options */
    Options options = new Options();

    Option input = new Option("i", "input", true, "MC-DAG XML Models");
    input.setRequired(true);
    input.setArgs(Option.UNLIMITED_VALUES); // accept an unlimited number of input model files (one scheduling thread per file)
    options.addOption(input);

    Option outSched = new Option("os", "out-scheduler", false, "Write the scheduling tables into a file.");
    outSched.setRequired(false);
    options.addOption(outSched);

    Option outPrism = new Option("op", "out-prism", false, "Write PRISM model into a file.");
    outPrism.setRequired(false);
    options.addOption(outPrism);

    Option jobs = new Option("j", "jobs", true, "Number of threads to be launched.");
    jobs.setRequired(false);
    options.addOption(jobs);

    Option debugOpt = new Option("d", "debug", false, "Enabling debug.");
    debugOpt.setRequired(false);
    options.addOption(debugOpt);

    Option preemptOpt = new Option("p", "preempt", false, "Count for preemptions.");
    preemptOpt.setRequired(false);
    options.addOption(preemptOpt);

    CommandLineParser parser = new DefaultParser();
    HelpFormatter formatter = new HelpFormatter();
    CommandLine cmd;

    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        System.err.println(e.getMessage());
        formatter.printHelp("MC-DAG framework", options);

        System.exit(1);
        return;
    }

    String inputFilePath[] = cmd.getOptionValues("input");
    boolean bOutSched = cmd.hasOption("out-scheduler");
    boolean bOutPrism = cmd.hasOption("out-prism");
    boolean debug = cmd.hasOption("debug");
    boolean preempt = cmd.hasOption("preempt");
    boolean levels = cmd.hasOption("n-levels");
    int nbFiles = inputFilePath.length;

    int nbJobs = 1;
    if (cmd.hasOption("jobs"))
        nbJobs = Integer.parseInt(cmd.getOptionValue("jobs"));

    if (debug)
        System.out.println("[DEBUG] Launching " + inputFilePath.length + " thread(s).");

    int i_files = 0;
    ExecutorService executor = Executors.newFixedThreadPool(nbJobs);

    /* Launch threads to solve allocation */
    while (i_files != nbFiles) {
        SchedulingThread ft = new SchedulingThread(inputFilePath[i_files], bOutSched, bOutPrism, debug,
                preempt);

        ft.setLevels(levels);
        executor.execute(ft);
        i_files++;
    }

    executor.shutdown();
    executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    System.out.println("[FRAMEWORK Main] DONE");
}

From source file:org.tommy.stationery.moracle.core.client.load.StompWebSocketLoadTestClient.java

public static void main(String[] args) throws Exception {

    // Modify host and port below to match wherever StompWebSocketServer.java is running!!
    // When StompWebSocketServer starts, it prints the selected available port.

    String host = "localhost";
    if (args.length > 0) {
        host = args[0];
    }

    int port = 59984;
    if (args.length > 1) {
        port = Integer.valueOf(args[1]);
    }

    String url = "http://" + host + ":" + port + "/home";
    logger.debug("Sending warm-up HTTP request to " + url);
    HttpStatus status = new RestTemplate().getForEntity(url, Void.class).getStatusCode();
    Assert.state(status == HttpStatus.OK);

    final CountDownLatch connectLatch = new CountDownLatch(NUMBER_OF_USERS);
    final CountDownLatch subscribeLatch = new CountDownLatch(NUMBER_OF_USERS);
    final CountDownLatch messageLatch = new CountDownLatch(NUMBER_OF_USERS);
    final CountDownLatch disconnectLatch = new CountDownLatch(NUMBER_OF_USERS);

    final AtomicReference<Throwable> failure = new AtomicReference<Throwable>();

    Executor executor = Executors.newFixedThreadPool(THREAD_POOL_SIZE);
    org.eclipse.jetty.websocket.client.WebSocketClient jettyClient = new WebSocketClient(executor);
    JettyWebSocketClient webSocketClient = new JettyWebSocketClient(jettyClient);
    webSocketClient.start();

    HttpClient jettyHttpClient = new HttpClient();
    jettyHttpClient.setMaxConnectionsPerDestination(1000);
    jettyHttpClient.setExecutor(new QueuedThreadPool(1000));
    jettyHttpClient.start();

    List<Transport> transports = new ArrayList<>();
    transports.add(new WebSocketTransport(webSocketClient));
    transports.add(new JettyXhrTransport(jettyHttpClient));

    SockJsClient sockJsClient = new SockJsClient(transports);

    try {
        URI uri = new URI("ws://" + host + ":" + port + "/stomp");
        WebSocketStompClient stompClient = new WebSocketStompClient(uri, null, sockJsClient);
        stompClient.setMessageConverter(new StringMessageConverter());

        logger.debug("Connecting and subscribing " + NUMBER_OF_USERS + " users ");
        StopWatch stopWatch = new StopWatch("STOMP Broker Relay WebSocket Load Tests");
        stopWatch.start();

        List<ConsumerStompMessageHandler> consumers = new ArrayList<>();
        for (int i = 0; i < NUMBER_OF_USERS; i++) {
            consumers.add(new ConsumerStompMessageHandler(BROADCAST_MESSAGE_COUNT, connectLatch, subscribeLatch,
                    messageLatch, disconnectLatch, failure));
            stompClient.connect(consumers.get(i));
        }

        if (failure.get() != null) {
            throw new AssertionError("Test failed", failure.get());
        }
        if (!connectLatch.await(5000, TimeUnit.MILLISECONDS)) {
            logger.info("Not all users connected, remaining: " + connectLatch.getCount());
        }
        if (!subscribeLatch.await(5000, TimeUnit.MILLISECONDS)) {
            logger.info("Not all users subscribed, remaining: " + subscribeLatch.getCount());
        }

        stopWatch.stop();
        logger.debug("Finished: " + stopWatch.getLastTaskTimeMillis() + " millis");

        logger.debug("Broadcasting " + BROADCAST_MESSAGE_COUNT + " messages to " + NUMBER_OF_USERS + " users ");
        stopWatch.start();

        ProducerStompMessageHandler producer = new ProducerStompMessageHandler(BROADCAST_MESSAGE_COUNT,
                failure);
        stompClient.connect(producer);

        if (failure.get() != null) {
            throw new AssertionError("Test failed", failure.get());
        }
        if (!messageLatch.await(1 * 60 * 1000, TimeUnit.MILLISECONDS)) {
            for (ConsumerStompMessageHandler consumer : consumers) {
                if (consumer.messageCount.get() < consumer.expectedMessageCount) {
                    logger.debug(consumer);
                }
            }
        }
        if (!messageLatch.await(1 * 60 * 1000, TimeUnit.MILLISECONDS)) {
            logger.info("Not all handlers received every message, remaining: " + messageLatch.getCount());
        }

        producer.session.disconnect();
        if (!disconnectLatch.await(5000, TimeUnit.MILLISECONDS)) {
            logger.info("Not all disconnects completed, remaining: " + disconnectLatch.getCount());
        }

        stopWatch.stop();
        logger.debug("Finished: " + stopWatch.getLastTaskTimeMillis() + " millis");

        System.out.println("\nPress any key to exit...");
        System.in.read();
    } catch (Throwable t) {
        t.printStackTrace();
    } finally {
        webSocketClient.stop();
        jettyHttpClient.stop();
    }

    logger.debug("Exiting");
    System.exit(0);
}

From source file:cloudworker.RemoteWorker.java

public static void main(String[] args) throws Exception {
    //Command interpreter
    CommandLineInterface cmd = new CommandLineInterface(args);
    final int poolSize = Integer.parseInt(cmd.getOptionValue("s"));
    long idle_time = Long.parseLong(cmd.getOptionValue("i")); //idle time = 60 sec

    init();
    System.out.println("Initialized one remote worker.\n");

    //Create thread pool
    ExecutorService threadPool = Executors.newFixedThreadPool(poolSize);
    BlockingExecutor blockingPool = new BlockingExecutor(threadPool, poolSize);

    //Get queue url
    GetQueueUrlResult urlResult = sqs.getQueueUrl("JobQueue");
    String jobQueueUrl = urlResult.getQueueUrl();

    // Receive messages
    //System.out.println("Receiving messages from JobQueue.\n");

    //...Check idle state
    boolean terminate = false;
    boolean startClock = true;
    long start_time = 0, end_time;

    JSONParser parser = new JSONParser();
    Runtime runtime = Runtime.getRuntime();
    String task_id = null;
    boolean runAnimoto = false;

    while (!terminate || idle_time == 0) {
        while (getQueueSize(sqs, jobQueueUrl) > 0) {

            //Batch retrieving messages
            ReceiveMessageRequest receiveMessageRequest = new ReceiveMessageRequest().withQueueUrl(jobQueueUrl)
                    .withMaxNumberOfMessages(10);

            List<Message> messages = sqs.receiveMessage(receiveMessageRequest).getMessages();

            for (Message message : messages) {
                //System.out.println("  Message");
                //                  System.out.println("    MessageId:     " + message.getMessageId());
                //                  System.out.println("    ReceiptHandle: " + message.getReceiptHandle());
                //                  System.out.println("    MD5OfBody:     " + message.getMD5OfBody());
                //System.out.println("    Body:          " + message.getBody());

                //Get task
                String messageBody = message.getBody();
                JSONObject json = (JSONObject) parser.parse(messageBody);

                task_id = json.get("task_id").toString();
                String task = json.get("task").toString();

                try {
                    //Check duplicate task
                    dynamoDB.addTask(task_id, task);

                    //Execute task, will be blocked if no more thread is currently available 
                    blockingPool.submitTask(new Animoto(task_id, task, sqs));

                    // Delete the message
                    String messageReceiptHandle = message.getReceiptHandle();
                    sqs.deleteMessage(new DeleteMessageRequest(jobQueueUrl, messageReceiptHandle));
                } catch (ConditionalCheckFailedException ccf) {
                    //DO something...
                }

            }

            startClock = true;

        }

        //Start clock to measure idle time
        if (startClock) {
            startClock = false;
            start_time = System.currentTimeMillis();
        } else {
            end_time = System.currentTimeMillis();
            long elapsed_time = (end_time - start_time) / 1000;
            if (elapsed_time > idle_time) {
                terminate = true;
            }
        }
    }

    //System.out.println();

    threadPool.shutdown();
    // Wait until all threads are finished
    while (!threadPool.isTerminated()) {

    }

    //Terminate running instance
    cleanUpInstance();

}