Example usage for java.util.concurrent Executors newFixedThreadPool

Introduction

This page collects example usages of java.util.concurrent.Executors.newFixedThreadPool from open-source projects.

Prototype

public static ExecutorService newFixedThreadPool(int nThreads) 

Document

Creates a thread pool that reuses a fixed number of threads operating off a shared unbounded queue.
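
For orientation, here is a minimal, self-contained sketch of the typical submit-and-collect pattern; the class name and tasks are illustrative and not taken from any of the projects below:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FixedPoolSketch {
    public static void main(String[] args) throws Exception {
        // Four worker threads; additional tasks wait in the shared unbounded queue.
        ExecutorService pool = Executors.newFixedThreadPool(4);
        List<Future<Integer>> results = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            final int n = i;
            results.add(pool.submit(() -> n * n)); // Callable<Integer>
        }
        for (Future<Integer> f : results) {
            System.out.println(f.get()); // blocks until the corresponding task completes
        }
        pool.shutdown(); // no new tasks accepted; already-queued tasks still run
    }
}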

Usage

From source file:org.auraframework.integration.test.http.AuraResourceServletLoggingHttpTest.java

/**
 * Test added for W-2792895.
 * Since the cache logs a message on each miss, this also partially verifies W-2105858.
 */
@Test
public void testConcurrentGetRequests() throws Exception {
    // I tried obtainGetMethod(url) followed by perform(HttpGet), but
    // our default httpClient uses BasicClientConnectionManager, which doesn't work well with multiple threads,
    // so let's use PoolingHttpClientConnectionManager instead.
    PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager();
    // Increase the max total connections to 200 -- just a big number
    cm.setMaxTotal(200);
    // Increase the default max connections per route to 20 -- again, just a big number
    cm.setDefaultMaxPerRoute(20);
    CloseableHttpClient httpClient = HttpClients.custom().setConnectionManager(cm).build();

    String modeAndContext = getSimpleContext(Format.JS, false);
    String url = "/l/" + AuraTextUtil.urlencode(modeAndContext) + "/app.js";

    int numOfRequest = 5;
    List<Request> requests = new ArrayList<>();
    for (int i = 1; i <= numOfRequest; i++) {
        requests.add(new Request(httpClient, url));
    }

    ExecutorService executor = Executors.newFixedThreadPool(numOfRequest);
    List<Future<Integer>> responses = new ArrayList<>();
    for (Request request : requests) {
        responses.add(executor.submit(request));
    }
    }
    for (Future<Integer> response : responses) {
        response.get();
    }

    int counter = 0;
    String message;
    List<LoggingEvent> logs = appender.getLog();
    for (LoggingEvent le : logs) {
        message = le.getMessage().toString();
        if (message.contains("StringsCache")) {
            counter++;
            assertTrue("get unexpected logging message for cache miss:" + message,
                    message.contains("cache miss for key: JS:DEV:"));
        }
    }
    // Running this test right after the server starts produces one cache miss; on later runs
    // the entry is already cached, so there should be no miss.
    assertTrue("we should have no more than one cache miss, instead we have " + counter, counter <= 1);
}

From source file:com.hortonworks.hbase.BufferedMutatorExample.java

@Override
public int run(String[] args) throws InterruptedException, ExecutionException, TimeoutException {

    /** a callback invoked when an asynchronous write fails. */
    final BufferedMutator.ExceptionListener listener = new BufferedMutator.ExceptionListener() {
        @Override
        public void onException(RetriesExhaustedWithDetailsException e, BufferedMutator mutator) {
            for (int i = 0; i < e.getNumExceptions(); i++) {
                LOG.info("Failed to send put " + e.getRow(i) + ".");
            }
        }
    };
    BufferedMutatorParams params = new BufferedMutatorParams(TABLE).listener(listener);

    //
    // step 1: create a single Connection and a BufferedMutator, shared by all worker threads.
    //
    Configuration conf = new Configuration();
    // Configure ZooKeeper before creating the Connection; setting these properties after
    // ConnectionFactory.createConnection(conf) has no effect on the already-created connection.
    conf.set("hbase.zookeeper.quorum", "sandbox.hortonworks.com");
    conf.set("hbase.zookeeper.property.clientPort", "2181");
    conf.set("zookeeper.znode.parent", "/hbase-unsecure");

    //        conf.set("hbase.zookeeper.quorum", "jetmaster2.jetnetname.artem.com,jetslave5.jetnetname.artem.com,jetslave1.jetnetname.artem.com");
    //        conf.set("hbase.zookeeper.property.clientPort", "2181");
    //        conf.set("zookeeper.znode.parent", "/hbase-unsecure");

    try (final Connection conn = ConnectionFactory.createConnection(conf);
            final BufferedMutator mutator = conn.getBufferedMutator(params)) {

        /** worker pool that operates on BufferedTable instances */
        final ExecutorService workerPool = Executors.newFixedThreadPool(POOL_SIZE);
        List<Future<Void>> futures = new ArrayList<>(TASK_COUNT);

        for (int i = 0; i < TASK_COUNT; i++) {
            futures.add(workerPool.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    //
                    // step 2: each worker sends edits to the shared BufferedMutator instance. They all use
                    // the same backing buffer, the same callback "listener", and the same RPC executor pool.
                    //
                    Put p = new Put(Bytes.toBytes("someRow"));
                    p.addColumn(FAMILY, Bytes.toBytes("someQualifier"), Bytes.toBytes("some value"));
                    mutator.mutate(p);
                    // do work... maybe you want to call mutator.flush() after many edits to ensure all of
                    // this worker's edits are sent before exiting the Callable
                    return null;
                }
            }));
        }

        //
        // step 3: clean up the worker pool, shut down.
        //
        for (Future<Void> f : futures) {
            f.get(5, TimeUnit.MINUTES);
        }
        workerPool.shutdown();
    } catch (IOException e) {
        // exception while creating/destroying Connection or BufferedMutator
        LOG.info("exception while creating/destroying Connection or BufferedMutator", e);
    } // BufferedMutator.close() ensures all work is flushed. The custom listener may be
      // invoked from here.
    return 0;
}

From source file:com.netflix.spinnaker.front50.config.AzureStorageConfig.java

@Bean
public ServiceAccountDAO serviceAccountDAO(AzureStorageService storageService, Registry registry) {
    return new DefaultServiceAccountDAO(storageService, Schedulers.from(Executors.newFixedThreadPool(20)),
            30000, registry);
}
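
Schedulers.from adapts the fixed pool into an RxJava Scheduler, so the DAO's asynchronous work runs on those 20 threads. Below is a rough sketch of that adapter in isolation, assuming RxJava 1.x is on the classpath; the observable and values are purely illustrative:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import rx.Observable;
import rx.Scheduler;
import rx.schedulers.Schedulers;

public class SchedulerSketch {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(20);
        // Adapt the pool to an RxJava Scheduler; subscribed work runs on the 20 pool threads.
        Scheduler scheduler = Schedulers.from(pool);
        Observable.just("refresh").subscribeOn(scheduler).subscribe(System.out::println);
        pool.shutdown(); // already-scheduled work still completes after shutdown
    }
}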

From source file:com.legstar.host.server.PoolingTest.java

/**
 * Address a request to an invalid address.
 * @throws Exception if test fails
 */
public void testScheduleWorkInvalidAddress() throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(CLIENT_THREADS);
    WorkManager wm = new WorkManagerImpl(executor);
    EngineHandler engHandler = new EngineHandler(getPoolingEngineConfig());
    engHandler.init();

    LegStarAddress address = new LegStarAddress("ThereIsNoSuchMainframe");
    address.setHostUserID(HOST_USERID);
    address.setHostPassword(HOST_PASSWORD);

    LegStarRequest request = new LegStarRequest("Request01", address, getLsfileaeRequestMessage());
    Client client = new Client(engHandler.getEngine(), "Client01", request);
    wm.schedule(client, new ClientListener());

    Thread.sleep(5000L);
    engHandler.stop();
    executor.shutdownNow();
    assertEquals(
            "com.legstar.pool.manager.ConnectionPoolException:"
                    + " No host endpoints matches Address=[hostEndpoint=ThereIsNoSuchMainframe,"
                    + "hostCharset=null," + "hostUserID=P390," + "hostTraceMode=false]",
            request.getException().getMessage());

}

From source file:com.opentransport.rdfmapper.nmbs.ScrapeTrip.java

private void requestJsons(Map trainDelays) {
    String trainName;
    Iterator iterator = trainDelays.entrySet().iterator();

    ExecutorService pool = Executors.newFixedThreadPool(NUMBER_OF_CONNECTIONS_TO_IRAIL_API);
    while (iterator.hasNext()) {
        Map.Entry mapEntry = (Map.Entry) iterator.next();
        trainName = returnCorrectTrainFormat((String) mapEntry.getKey());
        url = "https://api.irail.be/vehicle/?id=BE.NMBS." + trainName + "&format=json";
        System.out.println("HTTP GET - " + url);
        countConnections++;
        pool.submit(new DownloadDelayedTrains(trainName, url));
    }
    pool.shutdown();

    try {
        pool.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
        // all tasks have now finished (unless an exception was thrown above)
    } catch (InterruptedException ex) {
        Logger.getLogger(ScrapeTrip.class.getName()).log(Level.SEVERE, null, ex);
        errorWriter.writeError(ex.toString());
    }
}
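
The awaitTermination call above effectively waits forever. Where a bounded wait is preferable, the two-phase shutdown pattern described in the ExecutorService Javadoc is a common alternative; the 60-second timeouts below are illustrative:

static void shutdownAndAwaitTermination(ExecutorService pool) {
    pool.shutdown(); // stop accepting new tasks
    try {
        // Wait for running and queued tasks to finish.
        if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // interrupt currently executing tasks
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                System.err.println("Pool did not terminate");
            }
        }
    } catch (InterruptedException ie) {
        pool.shutdownNow();
        Thread.currentThread().interrupt(); // preserve the interrupt status
    }
}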

From source file:com.splout.db.integration.TestMultiThreadedFailover.java

@Test
public void test() throws Throwable {
    FileUtils.deleteDirectory(new File(TMP_FOLDER));
    new File(TMP_FOLDER).mkdirs();

    createSploutEnsemble(N_QNODES, N_DNODES);
    String[] qNodeAddresses = new String[N_QNODES];
    for (int i = 0; i < N_QNODES; i++) {
        qNodeAddresses[i] = getqNodes().get(i).getAddress();
    }

    final SploutClient client = new SploutClient(qNodeAddresses);
    final Tablespace testTablespace = createTestTablespace(N_DNODES);
    final Random random = new Random(SEED);
    final AtomicBoolean failed = new AtomicBoolean(false);

    deployIteration(0, random, client, testTablespace);

    for (QNode qnode : getqNodes()) {
        // Make sure all QNodes are aware of the first deploy.
        // There might be some delay as they have to receive notifications via
        // Hazelcast, etc.
        long waitedSoFar = 0;
        QueryStatus status = null;
        SploutClient perQNodeClient = new SploutClient(qnode.getAddress());
        do {
            status = perQNodeClient.query(TABLESPACE, "0", "SELECT * FROM " + TABLE + ";", null);
            Thread.sleep(100);
            waitedSoFar += 100;
            if (waitedSoFar > 5000) {
                throw new AssertionError("Waiting too much on a test condition");
            }
        } while (status == null || status.getError() != null);
        log.info("QNode [" + qnode.getAddress() + "] is ready to serve deploy 0.");
    }

    try {
        // Business logic here
        ExecutorService service = Executors.newFixedThreadPool(N_THREADS);

        // This is the "mother-fucker" thread.
        // It will bring DNodes down on purpose.
        // And then bring them up again.
        service.submit(new Runnable() {

            @Override
            public void run() {

                while (true) {
                    try {
                        Thread.sleep(1000);
                        log.info("Time to kill some DNode...");
                        int whichOne = (int) (Math.random() * getdNodes().size());
                        getdNodes().get(whichOne).testCommand(TestCommands.SHUTDOWN.toString());
                        Thread.sleep(1000);
                        log.info("Time to bring the DNode back to life...");
                        getdNodes().get(whichOne).testCommand(TestCommands.RESTART.toString());
                    } catch (InterruptedException e) {
                        log.info("MFT - Bye bye!");
                    } catch (DNodeException e) {
                        failed.set(true);
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    } catch (TException e) {
                        failed.set(true);
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    }
                }
            }

        });

        // These threads will continuously perform queries and check that the
        // results are consistent.
        for (int i = 0; i < N_THREADS; i++) {
            service.submit(new Runnable() {
                @SuppressWarnings("unchecked")
                @Override
                public void run() {
                    try {
                        while (true) {
                            int randomDNode = Math.abs(random.nextInt()) % N_DNODES;
                            QueryStatus status = client.query(TABLESPACE, ((randomDNode * 10) - 1) + "",
                                    "SELECT * FROM " + TABLE + ";", null);
                            log.info("Query status -> " + status);
                            assertEquals(1, status.getResult().size());
                            Map<String, Object> jsonResult = (Map<String, Object>) status.getResult().get(0);
                            assertEquals(randomDNode, jsonResult.get("dnode"));
                            Thread.sleep(100);
                        }
                    } catch (InterruptedException ie) {
                        // Bye bye
                        log.info("Bye bye!");
                    } catch (Throwable e) {
                        e.printStackTrace();
                        failed.set(true);
                    }
                }
            });
        }

        Thread.sleep(15000);

        assertEquals(false, failed.get());

    } finally {
        closeSploutEnsemble();
        FileUtils.deleteDirectory(new File(TMP_FOLDER));
    }
}

From source file:io.druid.indexing.overlord.ForkingTaskRunner.java

@Inject
public ForkingTaskRunner(ForkingTaskRunnerConfig config, TaskConfig taskConfig, WorkerConfig workerConfig,
        Properties props, TaskLogPusher taskLogPusher, ObjectMapper jsonMapper, @Self DruidNode node) {
    this.config = config;
    this.taskConfig = taskConfig;
    this.props = props;
    this.taskLogPusher = taskLogPusher;
    this.jsonMapper = jsonMapper;
    this.node = node;
    this.portFinder = new PortFinder(config.getStartPort());

    this.exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(workerConfig.getCapacity()));
}
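
MoreExecutors.listeningDecorator wraps the fixed pool so that submit returns Guava ListenableFutures, which allows the task runner to attach completion callbacks. A small sketch of that wrapping on its own, assuming a reasonably recent Guava version; the task and callback are illustrative:

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public class ListeningPoolSketch {
    public static void main(String[] args) {
        ListeningExecutorService exec =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
        ListenableFuture<String> future = exec.submit(new Callable<String>() {
            @Override
            public String call() {
                return "task finished";
            }
        });
        // directExecutor runs the callback on the thread that completes the future.
        Futures.addCallback(future, new FutureCallback<String>() {
            @Override
            public void onSuccess(String result) {
                System.out.println(result);
            }

            @Override
            public void onFailure(Throwable t) {
                t.printStackTrace();
            }
        }, MoreExecutors.directExecutor());
        exec.shutdown();
    }
}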

From source file:com.geeker.door.imgcache.ImageDownloader.java

public ImageDownloader(Context context) {
    mImageSDCacher = ImageSDCacher.getImageSDCacher();
    mContext = context;
    mScreen = ScreenUtil.getScreenPix(context);
    mExecutorService = Executors.newFixedThreadPool(5);
}

From source file:org.apache.streams.facebook.provider.FacebookProvider.java

@Override
public void prepare(Object configurationObject) {
    this.datums = Queues.newLinkedBlockingQueue();
    this.isComplete = new AtomicBoolean(false);
    this.executor = Executors.newFixedThreadPool(1);
}

From source file:com.smartmarmot.orabbix.Orabbixmon.java

@Override
public void run() {
    try {
        Configurator cfg = null;
        try {
            cfg = new Configurator(configFile);
        } catch (Exception e) {
            SmartLogger.logThis(Level.ERROR, "Error while creating configurator with " + configFile + " " + e);
        }
        RuntimeMXBean rmxb = ManagementFactory.getRuntimeMXBean();
        String pid = rmxb.getName();
        SmartLogger.logThis(Level.INFO,
                Constants.PROJECT_NAME + " started with pid:" + pid.split("@")[0].toString());
        // System.out.print("pid: "+pid.split("@")[0].toString());
        String pidfile = cfg.getPidFile();
        try {
            Utility.writePid(pid.split("@")[0].toString(), pidfile);
        } catch (Exception e) {
            SmartLogger.logThis(Level.ERROR, "Error while trying to write pidfile " + e);
        }

        Locale.setDefault(Locale.US);

        DBConn[] myDBConn = cfg.getConnections();

        if (myDBConn == null) {
            SmartLogger.logThis(Level.ERROR, "ERROR on main - Connections is null");
            throw new Exception("ERROR on main - Connections is null");

        } else if (myDBConn.length == 0) {
            SmartLogger.logThis(Level.ERROR, "ERROR on main - Connections is empty");
            throw new Exception("ERROR on main - Connections is empty");
        }

        /**
         * retrieve maxThread
         */
        Integer maxThread = 0;
        try {
            maxThread = cfg.getMaxThread();
        } catch (Exception e) {
            SmartLogger.logThis(Level.WARN,
                    "MaxThread not defined; using calculated maxThread = " + myDBConn.length * 3);
        }
        if (maxThread == null)
            maxThread = 0;
        if (maxThread == 0) {
            maxThread = myDBConn.length * 3;
        }

        ExecutorService executor = Executors.newFixedThreadPool(maxThread.intValue());
        /**
         * populate qbox
         */
        Hashtable<String, Querybox> qbox = new Hashtable<String, Querybox>();
        for (int i = 0; i < myDBConn.length; i++) {
            Querybox qboxtmp = Configurator.buildQueryBoxbyDBName(myDBConn[i].getName());
            qbox.put(myDBConn[i].getName(), qboxtmp);
        } // for (int i = 0; i < myDBConn.length; i++) {

        cfg = null;
        /**
         * daemon begin here
         */
        while (running) {
            /**
             * instantiate a new configurator
             */
            Configurator c = new Configurator(configFile);

            /*
             * here we rebuild the DB list
             */
            if (!c.isEqualsDBList(myDBConn)) {

                // rebuild connections DBConn[]

                myDBConn = c.rebuildDBList(myDBConn);
                for (int i = 1; i < myDBConn.length; i++) {
                    if (!qbox.containsKey(myDBConn[i].getName())) {
                        Querybox qboxtmp = Configurator.buildQueryBoxbyDBName(myDBConn[i].getName());
                        qbox.put(myDBConn[i].getName(), qboxtmp);
                    }
                }
            } // if (!c.isEqualsDBList(myDBConn)) {

            /*
             * ready to run query
             */

            for (int i = 0; i < myDBConn.length; i++) {
                Querybox actqb = qbox.get(myDBConn[i].getName());
                actqb.refresh();
                Query[] q = actqb.getQueries();

                SharedPoolDataSource spds = myDBConn[i].getSPDS();

                Hashtable<String, Integer> zabbixServers = c.getZabbixServers();
                SmartLogger.logThis(Level.DEBUG, "Ready to run DBJob for dbname ->" + myDBConn[i].getName());
                Runnable runner = new DBJob(spds, q, Constants.QUERY_LIST, zabbixServers,
                        myDBConn[i].getName());
                executor.execute(runner);

            } // for (int i = 0; i < myDBConn.length; i++) {
            Thread.sleep(60 * 1000);
            SmartLogger.logThis(Level.DEBUG, "Waking up Goood Morning");
        }
    } catch (Exception e1) {
        // TODO Auto-generated catch block
        System.out.println("Stopping");
        e1.printStackTrace();
        stopped = true;
    }

}