Example usage for java.util.concurrent Executors newCachedThreadPool

Introduction

This page collects usage examples for the java.util.concurrent method Executors.newCachedThreadPool().

Prototype

public static ExecutorService newCachedThreadPool() 

Document

Creates a thread pool that creates new threads as needed, but will reuse previously constructed threads when they are available.
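
As a minimal sketch before the real-world examples below (the class name, task count, and timeout are illustrative assumptions, not taken from any of the sources that follow): threads are created on demand, idle threads are reused, and the pool is drained with shutdown() followed by awaitTermination().

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class CachedPoolSketch {
    public static void main(String[] args) throws InterruptedException {
        // Threads are created as needed; an idle thread is kept alive for
        // 60 seconds (the default) and reused for subsequent tasks.
        ExecutorService executor = Executors.newCachedThreadPool();

        for (int i = 0; i < 5; i++) {
            final int taskId = i;
            executor.submit(() -> System.out.println(
                    "Task " + taskId + " ran on " + Thread.currentThread().getName()));
        }

        // Stop accepting new tasks, then wait for submitted tasks to finish.
        executor.shutdown();
        executor.awaitTermination(10, TimeUnit.SECONDS);
    }
}

Note that a cached pool is unbounded: it creates as many threads as there are concurrently submitted tasks, so it is suited to many short-lived tasks rather than a few long-running ones.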

Usage

From source file:com.linkedin.pinot.integration.tests.HybridClusterIntegrationTest.java

@BeforeClass
public void setUp() throws Exception {
    // Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_tarDir);

    // Start Zk, Kafka and Pinot
    startHybridCluster();

    // Unpack the Avro files
    TarGzCompressionUtils.unTar(new File(TestUtils.getFileFromResourceUrl(OfflineClusterIntegrationTest.class
            .getClassLoader().getResource("On_Time_On_Time_Performance_2014_100k_subset_nonulls.tar.gz"))),
            _tmpDir);

    _tmpDir.mkdirs();

    final List<File> avroFiles = getAllAvroFiles();

    File schemaFile = getSchemaFile();
    schema = Schema.fromFile(schemaFile);
    addSchema(schemaFile, schema.getSchemaName());
    final List<String> invertedIndexColumns = makeInvertedIndexColumns();
    final String sortedColumn = makeSortedColumn();

    // Create Pinot table
    addHybridTable("mytable", "DaysSinceEpoch", "daysSinceEpoch", KafkaStarterUtils.DEFAULT_ZK_STR, KAFKA_TOPIC,
            schema.getSchemaName(), TENANT_NAME, TENANT_NAME, avroFiles.get(0), sortedColumn,
            invertedIndexColumns, null);
    LOGGER.info("Running with Sorted column=" + sortedColumn + " and inverted index columns = "
            + invertedIndexColumns);

    // Create a subset of the first 8 segments (for offline) and the last 6 segments (for realtime)
    final List<File> offlineAvroFiles = getOfflineAvroFiles(avroFiles);
    final List<File> realtimeAvroFiles = getRealtimeAvroFiles(avroFiles);

    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);

    // Create segments from Avro data
    LOGGER.info("Creating offline segments from avro files " + offlineAvroFiles);
    buildSegmentsFromAvro(offlineAvroFiles, executor, 0, _segmentDir, _tarDir, "mytable", false, null);

    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);

    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator to count the number of segments that are uploaded and unlock the latch once 12 segments are online
    final CountDownLatch latch = new CountDownLatch(1);
    HelixManager manager = HelixManagerFactory.getZKHelixManager(getHelixClusterName(), "test_instance",
            InstanceType.SPECTATOR, ZkStarter.DEFAULT_ZK_STR);
    manager.connect();
    manager.addExternalViewChangeListener(new ExternalViewChangeListener() {
        @Override
        public void onExternalViewChange(List<ExternalView> externalViewList,
                NotificationContext changeContext) {
            for (ExternalView externalView : externalViewList) {
                if (externalView.getId().contains("mytable")) {

                    Set<String> partitionSet = externalView.getPartitionSet();
                    if (partitionSet.size() == offlineSegmentCount) {
                        int onlinePartitionCount = 0;

                        for (String partitionId : partitionSet) {
                            Map<String, String> partitionStateMap = externalView.getStateMap(partitionId);
                            if (partitionStateMap.containsValue("ONLINE")) {
                                onlinePartitionCount++;
                            }
                        }

                        if (onlinePartitionCount == offlineSegmentCount) {
                            System.out.println("Got " + offlineSegmentCount
                                    + " online tables, unlatching the main thread");
                            latch.countDown();
                        }
                    }
                }
            }
        }
    });

    // Upload the segments
    int i = 0;
    for (String segmentName : _tarDir.list()) {
        System.out.println("Uploading segment " + (i++) + " : " + segmentName);
        File file = new File(_tarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file),
                file.length());
    }

    // Wait for all offline segments to be online
    latch.await();

    // Load realtime data into Kafka
    LOGGER.info("Pushing data from realtime avro files " + realtimeAvroFiles);
    pushAvroIntoKafka(realtimeAvroFiles, KafkaStarterUtils.DEFAULT_KAFKA_BROKER, KAFKA_TOPIC);

    // Wait until the Pinot event count matches the number of events in the Avro files
    int pinotRecordCount, h2RecordCount;
    long timeInFiveMinutes = System.currentTimeMillis() + 5 * 60 * 1000L;

    Statement statement = _connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    statement.execute("select count(*) from mytable");
    ResultSet rs = statement.getResultSet();
    rs.first();
    h2RecordCount = rs.getInt(1);
    rs.close();

    waitForRecordCountToStabilizeToExpectedCount(h2RecordCount, timeInFiveMinutes);
}

From source file:com.chess.genesis.net.SyncClient.java

private void sync_recent(final JSONObject json) {
    try {
        final JSONArray ids = json.getJSONArray("gameids");
        final ExecutorService pool = Executors.newCachedThreadPool();

        for (int i = 0, len = ids.length(); i < len; i++) {
            if (error)
                return;
            final NetworkClient nc = new NetworkClient(context, handle);
            nc.game_status(ids.getString(i));
            pool.submit(nc);

            lock++;
        }
        // Save sync time
        final PrefEdit pref = new PrefEdit(context);
        pref.putLong(R.array.pf_lastgamesync, json.getLong("time"));
        pref.commit();
    } catch (final JSONException e) {
        throw new RuntimeException(e.getMessage(), e);
    }
}

From source file:com.amazonaws.services.kinesis.multilang.MessageWriterTest.java

@Test
public void objectMapperFails() throws JsonProcessingException, InterruptedException, ExecutionException {
    ObjectMapper mapper = Mockito.mock(ObjectMapper.class);
    Mockito.doThrow(JsonProcessingException.class).when(mapper).writeValueAsString(Mockito.any(Message.class));
    messageWriter = new MessageWriter().initialize(stream, shardId, mapper, Executors.newCachedThreadPool());

    try {
        messageWriter.writeShutdownMessage(ShutdownReason.ZOMBIE);
        Assert.fail("The mapper failed so no write method should be able to succeed.");
    } catch (Exception e) {
        // Note that this is different than the stream failing. The stream is expected to fail, so we handle it
        // gracefully, but the JSON mapping should always succeed.
    }

}

From source file:com.google.apphosting.vmruntime.VmApiProxyDelegate.java

VmApiProxyDelegate(HttpClient httpclient) {
    this.defaultTimeoutMs = DEFAULT_RPC_TIMEOUT_MS;
    this.executor = Executors.newCachedThreadPool();
    this.httpclient = httpclient;
    this.monitorThread = new IdleConnectionMonitorThread(httpclient.getConnectionManager());
    this.monitorThread.start();
}

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphore.java

@Test
public void testMaxPerSession() throws Exception {
    final int CLIENT_QTY = 10;
    final int LOOP_QTY = 100;
    final Random random = new Random();
    final int SESSION_MAX = random.nextInt(75) + 25;

    List<Future<Object>> futures = Lists.newArrayList();
    ExecutorService service = Executors.newCachedThreadPool();
    final Counter counter = new Counter();
    final AtomicInteger available = new AtomicInteger(SESSION_MAX);
    for (int i = 0; i < CLIENT_QTY; ++i) {
        futures.add(service.submit(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
                        new RetryOneTime(1));
                client.start();
                try {
                    InterProcessSemaphoreV2 semaphore = new InterProcessSemaphoreV2(client, "/test",
                            SESSION_MAX);

                    for (int i = 0; i < LOOP_QTY; ++i) {
                        long start = System.currentTimeMillis();
                        int thisQty;
                        synchronized (available) {
                            if ((System.currentTimeMillis() - start) > 10000) {
                                throw new TimeoutException();
                            }
                            while (available.get() == 0) {
                                available.wait(10000);
                            }

                            thisQty = (available.get() > 1) ? (random.nextInt(available.get()) + 1) : 1;

                            available.addAndGet(-1 * thisQty);
                            Assert.assertTrue(available.get() >= 0);
                        }
                        Collection<Lease> leases = semaphore.acquire(thisQty, 10, TimeUnit.SECONDS);
                        Assert.assertNotNull(leases);
                        try {
                            synchronized (counter) {
                                counter.currentCount += thisQty;
                                if (counter.currentCount > counter.maxCount) {
                                    counter.maxCount = counter.currentCount;
                                }
                            }
                            Thread.sleep(random.nextInt(25));
                        } finally {
                            synchronized (counter) {
                                counter.currentCount -= thisQty;
                            }
                            semaphore.returnAll(leases);
                            synchronized (available) {
                                available.addAndGet(thisQty);
                                available.notifyAll();
                            }
                        }
                    }
                } finally {
                    client.close();
                }
                return null;
            }
        }));
    }

    for (Future<Object> f : futures) {
        f.get();
    }

    synchronized (counter) {
        Assert.assertTrue(counter.currentCount == 0);
        Assert.assertTrue(counter.maxCount > 0);
        Assert.assertTrue(counter.maxCount <= SESSION_MAX);
        System.out.println(counter.maxCount);
    }
}

From source file:org.xserver.bootstrap.XServerBootstrap.java

public void initSystem() {
    XServerBootstrap xServerBootstrap = (XServerBootstrap) SpringUtil.getBean(XServerBootstrap.class);

    ServerBootstrap serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(
            Executors.newCachedThreadPool(), Executors.newCachedThreadPool()));

    serverBootstrap.setOption("tcpNoDelay", true);

    serverBootstrap.setPipelineFactory(xServerBootstrap.getXServerHttpFactory());

    serverBootstrap.bind(new InetSocketAddress(xServerHttpConfig.getPort()));
    logger.info("XServer bind port {}.", xServerHttpConfig.getPort());
}

From source file:com.ning.http.client.providers.NettyAsyncHttpProvider.java

public NettyAsyncHttpProvider(AsyncHttpClientConfig config) {
    // TODO: Should we expose the Executors.
    bootstrap = new ClientBootstrap(
            new NioClientSocketChannelFactory(Executors.newCachedThreadPool(), config.executorService()));
    this.config = config;
}

From source file:no.dusken.aranea.web.spring.ChainedController.java

/**
 * Spawns multiple threads, one for each controller in the list of
 * controllers, and within each thread, delegates to the controller's
 * handleRequest() method. Once all the threads are complete, the
 * ModelAndView objects returned from each of the handleRequest() methods
 * are merged into a single view. The view name for the model is set to the
 * specified view name. If an exception is thrown by any of the controllers
 * in the chain, this exception is propagated up from the handleRequest()
 * method of the ChainedController.
 *
 * @param request  the HttpServletRequest object.
 * @param response the HttpServletResponse object.
 * @return a merged ModelAndView object.
 * @throws Exception if one is thrown from the controllers in the chain.
 */
@SuppressWarnings("unchecked")
private ModelAndView handleRequestParallely(HttpServletRequest request, HttpServletResponse response)
        throws Exception {
    ExecutorService service = Executors.newCachedThreadPool();
    int numberOfControllers = controllers.size();
    CallableController[] callables = new CallableController[numberOfControllers];
    Future<ModelAndView>[] futures = new Future[numberOfControllers];
    for (int i = 0; i < numberOfControllers; i++) {
        callables[i] = new CallableController(controllers.get(i), request, response);
        futures[i] = service.submit(callables[i]);
    }
    ModelAndView mergedModel = new ModelAndView();
    for (Future<ModelAndView> future : futures) {
        ModelAndView model = future.get();
        if (model != null) {
            mergedModel.addAllObjects(model.getModel());
        }
    }
    if (StringUtils.isNotEmpty(this.viewName)) {
        mergedModel.setViewName(this.viewName);
    }
    return mergedModel;
}

From source file:edu.umass.cs.gnsserver.httpserver.GNSHttpServer.java

/**
 * Tries to start the HTTP server on the given port.
 *
 * @param port the port to bind to
 * @return true if the server was started
 */
public boolean tryPort(int port) {
    try {
        InetSocketAddress addr = new InetSocketAddress(port);
        httpServer = HttpServer.create(addr, 0);

        httpServer.createContext("/", new EchoHttpHandler());
        httpServer.createContext("/" + GNS_PATH, new DefaultHttpHandler());
        httpServer.setExecutor(Executors.newCachedThreadPool());
        httpServer.start();
        // Need to do this for the places where we expose the insecure http service to the user
        requestHandler.setHttpServerPort(port);
        LOGGER.log(Level.INFO, "HTTP server is listening on port {0}", port);
        return true;
    } catch (IOException e) {
        LOGGER.log(Level.FINE, "HTTP server failed to start on port {0} due to {1}",
                new Object[] { port, e.getMessage() });
        return false;
    }
}

From source file:com.mxhero.plugin.cloudstorage.onedrive.api.OneDrive.java

/**
 * Redeems an app-only token for a daemon application.
 *
 * @param redeemDaemonRequest the redeem daemon request
 * @return the redeemed access token
 * @throws AuthenticationException the authentication exception
 */
public static String redeemDaemon(RedeemDaemonRequest redeemDaemonRequest) throws AuthenticationException {
    ExecutorService service = Executors.newCachedThreadPool();
    AuthenticationResult authenticationResult = null;
    String authority = String.format(ApiEnviroment.tokenDaemonBaseUrl.getValue(),
            redeemDaemonRequest.getTenantId());
    logger.debug("Trying to get App Only token for {}", redeemDaemonRequest);
    try {
        AuthenticationContext authenticationContext = new AuthenticationContext(authority, false, service);
        String filePkcs12 = ApiEnviroment.fileUrlPkcs12Certificate.getValue();
        if (StringUtils.isNotEmpty(redeemDaemonRequest.getFileUrlPkcs12Certificate())) {
            filePkcs12 = redeemDaemonRequest.getFileUrlPkcs12Certificate();
        }

        String filePkcs12Secret = ApiEnviroment.pkcs12CertificateSecret.getValue();
        if (StringUtils.isNotEmpty(redeemDaemonRequest.getCertificateSecret())) {
            filePkcs12Secret = redeemDaemonRequest.getCertificateSecret();
        }

        Validate.notEmpty(filePkcs12,
                "Pkcs12 Key file path must be provided or configured. You can set it on environment var 'ONEDRIVE_DAEMON_PKCS12_FILE_URL' or through Java System Property 'onedrive.daemon.pkcs12.file.url'");
        Validate.notEmpty(filePkcs12Secret,
                "Pkcs12 Secret Key file must be provided or configured. You can set it on environment var 'ONEDRIVE_DAEMON_PKCS12_FILE_SECRET' or through Java System Property 'onedrive.daemon.pkcs12.file.secret'");

        InputStream pkcs12Certificate = new FileInputStream(filePkcs12);
        AsymmetricKeyCredential credential = AsymmetricKeyCredential.create(redeemDaemonRequest.getClientId(),
                pkcs12Certificate, filePkcs12Secret);

        Future<AuthenticationResult> future = authenticationContext
                .acquireToken(redeemDaemonRequest.getResourceSharepointId(), credential, null);
        authenticationResult = future.get(10, TimeUnit.SECONDS);
        logger.debug("Token retrieved {}",
                ToStringBuilder.reflectionToString(authenticationResult, ToStringStyle.SHORT_PREFIX_STYLE));
        return authenticationResult.getAccessToken();
    } catch (Exception e) {
        logger.error("Error trying to get new App Only Token", e);
        throw new AuthenticationException(
                String.format("Error trying to get new App Only Token for tenantId %s and sharepointUri %s",
                        redeemDaemonRequest.getTenantId(), redeemDaemonRequest.getResourceSharepointId()));
    } finally {
        service.shutdown();
    }

}