Example usage for java.util.concurrent Executors newSingleThreadExecutor

List of usage examples for java.util.concurrent Executors newSingleThreadExecutor

Introduction

On this page you can find example usage for java.util.concurrent Executors newSingleThreadExecutor.

Prototype

public static ExecutorService newSingleThreadExecutor() 

Document

Creates an Executor that uses a single worker thread operating off an unbounded queue.
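
For quick reference, here is a minimal, self-contained sketch of the typical lifecycle (the class name and the trivial task are illustrative only, not taken from the examples below): create the executor, submit work, redeem the Future, then shut the executor down.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class SingleThreadExecutorSketch {
    public static void main(String[] args) throws Exception {
        // Tasks submitted here run one at a time, in submission order,
        // on a single worker thread backed by an unbounded queue.
        ExecutorService executor = Executors.newSingleThreadExecutor();
        try {
            Future<String> future = executor.submit(() -> "done");
            // Block for the result, giving up after a timeout.
            System.out.println(future.get(10, TimeUnit.SECONDS));
        } finally {
            // Stop accepting new tasks, then wait for queued work to finish.
            executor.shutdown();
            executor.awaitTermination(1, TimeUnit.MINUTES);
        }
    }
}

Most of the examples below follow the same pattern: either the executor is handed to a component that owns it, or the caller submits a Callable/Runnable, redeems the resulting Future, and finally calls shutdown() or shutdownNow().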

Usage

From source file:com.linkedin.pinot.integration.tests.HybridClusterScanComparisonIntegrationTest.java

@Override
@BeforeClass
public void setUp() throws Exception {
    //Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_offlineSegmentDir);
    ensureDirectoryExistsAndIsEmpty(_realtimeSegmentDir);
    ensureDirectoryExistsAndIsEmpty(_offlineTarDir);
    ensureDirectoryExistsAndIsEmpty(_realtimeTarDir);
    ensureDirectoryExistsAndIsEmpty(_unpackedSegments);

    // Start Zk, Kafka and Pinot
    startHybridCluster();

    extractAvroIfNeeded();

    int avroFileCount = getAvroFileCount();
    Preconditions.checkArgument(3 <= avroFileCount, "Need at least three Avro files for this test");

    setSegmentCount(avroFileCount);
    setOfflineSegmentCount(2);
    setRealtimeSegmentCount(avroFileCount - 1);

    final List<File> avroFiles = getAllAvroFiles();

    _schemaFile = getSchemaFile();
    _schema = Schema.fromFile(_schemaFile);

    // Create Pinot table
    setUpTable("mytable", getTimeColumnName(), getTimeColumnType(), KafkaStarterUtils.DEFAULT_ZK_STR,
            KAFKA_TOPIC, _schemaFile, avroFiles.get(0), getSortedColumn(), invertedIndexColumns);

    final List<File> offlineAvroFiles = getOfflineAvroFiles(avroFiles);
    final List<File> realtimeAvroFiles = getRealtimeAvroFiles(avroFiles);

    // Create segments from Avro data
    ExecutorService executor;
    if (_createSegmentsInParallel) {
        executor = Executors.newCachedThreadPool();
    } else {
        executor = Executors.newSingleThreadExecutor();
    }
    Future<Map<File, File>> offlineAvroToSegmentMapFuture = buildSegmentsFromAvro(offlineAvroFiles, executor, 0,
            _offlineSegmentDir, _offlineTarDir, "mytable", false, _schema);
    Future<Map<File, File>> realtimeAvroToSegmentMapFuture = buildSegmentsFromAvro(realtimeAvroFiles, executor,
            0, _realtimeSegmentDir, _realtimeTarDir, "mytable", false, _schema);

    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);

    // Redeem futures
    _offlineAvroToSegmentMap = offlineAvroToSegmentMapFuture.get();
    _realtimeAvroToSegmentMap = realtimeAvroToSegmentMapFuture.get();

    LOGGER.info("Offline avro to segment map: {}", _offlineAvroToSegmentMap);
    LOGGER.info("Realtime avro to segment map: {}", _realtimeAvroToSegmentMap);

    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator to count uploaded segments and unlock the latch once all offline segments are online
    final CountDownLatch latch = setupSegmentCountCountDownLatch("mytable", getOfflineSegmentCount());

    // Upload the offline segments
    int i = 0;
    for (String segmentName : _offlineTarDir.list()) {
        i++;
        LOGGER.info("Uploading segment {} : {}", i, segmentName);
        File file = new File(_offlineTarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file),
                file.length());
    }

    // Wait for all offline segments to be online
    latch.await();

    _compareStatusFileWriter = getLogWriter();
    _scanRspFileWriter = getScanRspRecordFileWriter();
    _compareStatusFileWriter.write("Start time:" + System.currentTimeMillis() + "\n");
    _compareStatusFileWriter.flush();
    startTimeMs = System.currentTimeMillis();
    LOGGER.info("Setup completed");
}

From source file:com.facebook.react.views.slider.ReactSlider.java

private void getImageFromUri(final String uri, final PictureSubscriber subscriber) {
    ImageRequest imageRequest = ImageRequestBuilder.newBuilderWithSource(Uri.parse(uri))
            .setAutoRotateEnabled(true).build();
    DataSource<CloseableReference<CloseableImage>> dataSource = Fresco.getImagePipeline()
            .fetchDecodedImage(imageRequest, null);
    Executor executor = Executors.newSingleThreadExecutor();
    dataSource.subscribe(new BaseBitmapDataSubscriber() {
        @Override
        public void onNewResultImpl(@Nullable Bitmap bitmap) {
            if (bitmap != null) {
                subscriber.doBitmap(bitmap);
            }
        }

        @Override
        public void onFailureImpl(DataSource dataSource) {
            Log.e("ReactSlider", String.format("onFailureImpl:uri-> %s is error", uri));
        }
    }, executor);
}

From source file:org.jboss.as.test.clustering.cluster.web.ClusteredWebSimpleTestCase.java

private void abstractGracefulServe(URL baseURL1, boolean undeployOnly) throws Exception {

    final DefaultHttpClient client = HttpClientUtils.relaxedCookieHttpClient();
    String url1 = baseURL1.toString() + "simple";

    // Make sure a normal request will succeed
    HttpResponse response = client.execute(new HttpGet(url1));
    Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
    response.getEntity().getContent().close();

    // Send a long request - in parallel
    String longRunningUrl = url1 + "?" + SimpleServlet.REQUEST_DURATION_PARAM + "=" + REQUEST_DURATION;
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<HttpResponse> future = executor.submit(new RequestTask(client, longRunningUrl));

    // Make sure long request has started
    Thread.sleep(1000);

    if (undeployOnly) {
        // Undeploy the app only.
        undeploy(DEPLOYMENT_1);
    } else {
        // Shutdown server.
        stop(CONTAINER_1);
    }

    // Get result of long request
    // This request should succeed since it was initiated before server shutdown
    try {
        response = future.get();
        Assert.assertEquals("Request should succeed since it initiated before undeply or shutdown.",
                HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
        response.getEntity().getContent().close();
    } catch (ExecutionException e) {
        e.printStackTrace(System.err);
        Assert.fail(e.getCause().getMessage());
    }

    if (undeployOnly) {
        // If we are only undeploying, then subsequent requests should return 404.
        response = client.execute(new HttpGet(url1));
        Assert.assertEquals("If we are only undeploying, then subsequent requests should return 404.",
                HttpServletResponse.SC_NOT_FOUND, response.getStatusLine().getStatusCode());
        response.getEntity().getContent().close();
    }
}

From source file:io.fabric8.agent.DownloadManagerTest.java

/**
 * Prepares a DownloadManager for the test.
 *
 * @param remoteRepo   the remote Maven repository to configure
 * @param settingsFile the name of the Maven settings file under karafHome
 * @param props        additional pax-url-mvn properties, may be null
 * @return a DownloadManager backed by a single-thread executor
 * @throws IOException if the Maven settings cannot be read
 */
private DownloadManager createDownloadManager(String remoteRepo, String settingsFile, Properties props)
        throws IOException {
    File mavenSettings = new File(karafHome, settingsFile);
    Properties properties = new Properties();
    if (props != null) {
        properties.putAll(props);
    }
    properties.setProperty("org.ops4j.pax.url.mvn.localRepository", systemRepoUri);
    properties.setProperty("org.ops4j.pax.url.mvn.repositories", remoteRepo);
    properties.setProperty("org.ops4j.pax.url.mvn.defaultRepositories", systemRepoUri);
    PropertiesPropertyResolver propertyResolver = new PropertiesPropertyResolver(properties);
    MavenConfigurationImpl mavenConfiguration = new MavenConfigurationImpl(propertyResolver,
            "org.ops4j.pax.url.mvn");
    mavenConfiguration.setSettings(new MavenSettingsImpl(mavenSettings.toURI().toURL()));
    return new DownloadManager(mavenConfiguration, Executors.newSingleThreadExecutor());
}

From source file:com.github.kubernetes.java.client.live.KubernetesApiClientLiveTest.java

@Test
public void testCreatePod() throws Exception {
    log.info("Testing Pods ....");

    if (log.isDebugEnabled()) {
        log.debug("Creating a Pod " + pod);
    }
    Pod createPod = getClient().createPod(pod);
    assertEquals(pod.getId(), createPod.getId());
    assertNotNull(getClient().getPod(pod.getId()));
    assertEquals("Waiting", createPod.getCurrentState().getStatus());

    ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<Pod> future = executor.submit(new Callable<Pod>() {
        public Pod call() throws Exception {
            Pod newPod;
            do {
                log.info("Waiting for Pod to be ready: " + pod.getId());
                Thread.sleep(1000);
                newPod = getClient().getPod(pod.getId());
                StateInfo info = newPod.getCurrentState().getInfo("master");
                if (info.getState("waiting") != null) {
                    throw new RuntimeException("Pod is waiting due to " + info.getState("waiting"));
                }
            } while (!"Running".equals(newPod.getCurrentState().getStatus()));
            return newPod;
        }
    });

    try {
        createPod = future.get(90, TimeUnit.SECONDS);
    } finally {
        executor.shutdownNow();
    }
    assertNotNull(createPod.getCurrentState().getInfo("master").getState("running"));
    assertNotNull(createPod.getCurrentState().getNetInfo().getState("running"));

    // test recreation from same id
    try {
        getClient().createPod(pod);
        fail("Should have thrown exception");
    } catch (Exception e) {
        // ignore
    }
    assertNotNull(getClient().getPod(pod.getId()));
}

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessMutexBase.java

@Test
public void testReentrant2Threads() throws Exception {
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    client.start();
    try {
        waitLatchForBar = new CountDownLatch(1);
        countLatchForBar = new CountDownLatch(1);

        final InterProcessLock mutex = makeLock(client);
        Executors.newSingleThreadExecutor().submit(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                Assert.assertTrue(countLatchForBar.await(10, TimeUnit.SECONDS));
                try {
                    mutex.acquire(10, TimeUnit.SECONDS);
                    Assert.fail();
                } catch (Exception e) {
                    // correct
                } finally {
                    waitLatchForBar.countDown();
                }
                return null;
            }
        });

        foo(mutex);
        Assert.assertFalse(mutex.isAcquiredInThisProcess());
    } finally {
        client.close();
    }
}

From source file:org.metaeffekt.dcc.agent.DccAgentTest.java

private static void startAgent() {

    agent = new DccAgent(port);

    ExecutorService executorService = Executors.newSingleThreadExecutor();

    final long begin = System.currentTimeMillis();

    executorService.execute(new Runnable() {

        @Override
        public void run() {
            try {
                agent.start();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    });

    LOG.info("Waiting for agent to start");
    while (!agent.isStarted()) {

        if (System.currentTimeMillis() - begin > STARTUP_TIMEOUT) {
            throw new IllegalStateException("Failed to start agent. Timeout expired.");
        }

        try {
            Thread.sleep(500);
            LOG.info(".");
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
    LOG.info("");
    LOG.info("Agent is started");
}

From source file:org.jboss.as.test.clustering.cluster.web.DistributableTestCase.java

private void testGracefulServe(URL baseURL, Lifecycle lifecycle)
        throws URISyntaxException, IOException, InterruptedException {

    try (CloseableHttpClient client = TestHttpClientUtils.promiscuousCookieHttpClient()) {
        URI uri = SimpleServlet.createURI(baseURL);

        // Make sure a normal request will succeed
        HttpResponse response = client.execute(new HttpGet(uri));
        try {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
        } finally {
            HttpClientUtils.closeQuietly(response);
        }

        // Send a long request - in parallel
        URI longRunningURI = SimpleServlet.createURI(baseURL, REQUEST_DURATION);
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<HttpResponse> future = executor.submit(new RequestTask(client, longRunningURI));

        // Make sure long request has started
        Thread.sleep(1000);

        lifecycle.stop(NODE_1);

        // Get result of long request
        // This request should succeed since it was initiated before server shutdown
        try {
            response = future.get();
            try {
                Assert.assertEquals("Request should succeed since it initiated before undeply or shutdown.",
                        HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            } finally {
                HttpClientUtils.closeQuietly(response);
            }
        } catch (ExecutionException e) {
            e.printStackTrace(System.err);
            Assert.fail(e.getCause().getMessage());
        }
    }
}

From source file:com.sap.research.connectivity.gw.GWOperationsUtils.java

public String getMetadataString(String url, String user, String pass, String host, String port, int timeOut)
        throws Exception {
    String returnString = "";

    try {
        String execArgs[] = new String[] { "java", "-jar",
                System.getProperty("user.home") + SEPARATOR + "appToRetrieveOdataMetadata.jar", url, user, pass,
                host, port };

        final Process theProcess = Runtime.getRuntime().exec(execArgs);

        Callable<String> call = new Callable<String>() {
            public String call() throws Exception {
                String returnString = "";
                try {
                    BufferedReader inStream = new BufferedReader(
                            new InputStreamReader(theProcess.getInputStream()));
                    returnString = IOUtils.toString(inStream);
                    IOUtils.closeQuietly(inStream);
                    //if (theProcess.exitValue() != 0)
                    theProcess.waitFor();
                } catch (InterruptedException e) {
                    throw new TimeoutException();
                    //log.severe("The call to the Gateway Service was interrupted.");
                }
                return returnString;
            }
        };

        final ExecutorService theExecutor = Executors.newSingleThreadExecutor();
        Future<String> futureResultOfCall = theExecutor.submit(call);
        try {
            returnString = futureResultOfCall.get(timeOut, TimeUnit.SECONDS);
        } catch (TimeoutException ex) {
            throw new TimeoutException(
                    "The Gateway Service call timed out. Please try again or check your settings.");
        } catch (ExecutionException ex) {
            throw new RuntimeException("The Gateway Service call did not complete due to an execution error. "
                    + ex.getCause().getLocalizedMessage());
        } finally {
            theExecutor.shutdownNow();
        }
    } catch (InterruptedException ex) {
        throw new InterruptedException(
                "The Gateway Service call did not complete due to an unexpected interruption.");
    } catch (IOException e) {
        throw new IOException("Error when retrieving metadata from the Gateway Service.");
    }

    return returnString;
}