Example usage for java.util.concurrent ExecutorService awaitTermination

List of usage examples for java.util.concurrent ExecutorService awaitTermination

Introduction

On this page you can find example usage for java.util.concurrent ExecutorService awaitTermination.

Prototype

boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

Document

Blocks until all tasks have completed execution after a shutdown request, or the timeout occurs, or the current thread is interrupted, whichever happens first.
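
A typical call site pairs shutdown() with awaitTermination() and escalates to shutdownNow() if the pool does not drain in time. The following is a minimal sketch of the two-phase shutdown pattern recommended by the ExecutorService Javadoc; the 60-second timeouts are illustrative.

void shutdownAndAwaitTermination(ExecutorService pool) {
    pool.shutdown(); // stop accepting new tasks; queued tasks still run
    try {
        // wait for running and queued tasks to complete
        if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // interrupt still-running tasks
            // wait again, now for tasks to respond to interruption
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                System.err.println("Pool did not terminate");
            }
        }
    } catch (InterruptedException ie) {
        pool.shutdownNow(); // re-cancel if the current thread was interrupted
        Thread.currentThread().interrupt(); // preserve the interrupt status
    }
}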

Usage

From source file:org.apache.hadoop.hbase.client.ConnectionImplementation.java

private void shutdownBatchPool(ExecutorService pool) {
    pool.shutdown();
    try {
        if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
            pool.shutdownNow();
        }
    } catch (InterruptedException e) {
        pool.shutdownNow();
    }
}
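
Note that this variant swallows the InterruptedException without calling Thread.currentThread().interrupt(), so callers cannot observe that the shutdown wait was interrupted; the sketch in the introduction above additionally restores the interrupt status.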

From source file:org.muehleisen.hannes.taxiapp.TaxiRoute.java

@Override
public void run() {
    log.info(this.getClass().getSimpleName() + " starting...");

    BlockingQueue<Runnable> taskQueue = new LinkedBlockingDeque<Runnable>(100);
    ExecutorService ex = new ThreadPoolExecutor(Runtime.getRuntime().availableProcessors(),
            Runtime.getRuntime().availableProcessors(), Integer.MAX_VALUE, TimeUnit.DAYS, taskQueue,
            new ThreadPoolExecutor.DiscardPolicy());
    // create rainbow table for driver lookup
    log.info("Creating driver license rainbow table.");
    RouteLogEntry.initLrt();

    // bring up routing service
    log.info("Bringing up OTP Graph Service from '" + graph + "'.");
    GraphServiceImpl graphService = new GraphServiceImpl();
    graphService.setPath(graph);
    graphService.startup();
    ps = new RetryingPathServiceImpl(graphService, new EarliestArrivalSPTService());

    // read taxi files
    log.info("Reading taxi files from '" + taxilog + "'.");
    Collection<File> files = FileUtils.listFiles(new File(taxilog), new SuffixFileFilter(".csv.zip"),
            TrueFileFilter.INSTANCE);
    for (File f : files) {
        log.info("Reading '" + f + "'.");
        try {
            ZipInputStream z = new ZipInputStream(new FileInputStream(f));
            z.getNextEntry(); // ZIP files have many entries; in this case, only one
            BufferedReader r = new BufferedReader(new InputStreamReader(z));
            r.readLine(); // header
            String line = null;
            while ((line = r.readLine()) != null) {
                RouteLogEntry rle = new RouteLogEntry(line);
                if (!rle.hasGeo()) {
                    continue;
                }
                while (taskQueue.remainingCapacity() < 1) {
                    Thread.sleep(100);
                }
                ex.submit(new RouteTask(rle));
            }
            r.close();
            z.close();
        } catch (Exception e) {
            log.error("Failed to read taxi file from '" + taxilog + "'.", e);
        }
    }
    ex.shutdown();
    try {
        ex.awaitTermination(Integer.MAX_VALUE, TimeUnit.DAYS);
    } catch (InterruptedException e) {
        // ...
    }
    log.info(deliveries);
}
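
awaitTermination(Integer.MAX_VALUE, TimeUnit.DAYS) is an effectively unbounded wait. Where periodic feedback is preferable, a hedged alternative (a sketch, not taken from the source above, reusing the example's ex and log) is to poll with a shorter timeout and report between polls:

    ex.shutdown();
    try {
        // wait in one-minute slices so progress can be logged between polls
        while (!ex.awaitTermination(1, TimeUnit.MINUTES)) {
            log.info("Still waiting for route tasks to finish...");
        }
    } catch (InterruptedException e) {
        ex.shutdownNow(); // give up and interrupt the remaining tasks
        Thread.currentThread().interrupt();
    }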

From source file:com.fluidops.iwb.HTMLProvider.HTMLProvider.java

/**
 * HINT: The gather(List<Statement> res) method collects the statements
 * extracted by the provider. Use the following guidelines:
 *
 * 1.) Make sure to have a clear documentation, structure, and
 * modularization. Use helper methods wherever possible to increase
 * readability of the method.
 * 
 * 2.) Whenever there is a need to create statements, use the helper methods
 * in {@link ProviderUtils}. This class helps you in generating "safe" URIs,
 * replacing invalid characters etc. It also offers common functionality for
 * filtering statements, e.g. removing statements containing null values.
 * 
 * 3.) Re-use existing ontologies! The {@link Vocabulary} class provides a
 * mix of vocabulary from common ontologies and can be easily extended. You
 * should not define URIs inside the provider itself, except these URIs are
 * absolutely provider-specific.
 * 
 * 4.) Concerning exception handling, it is best practice to throw
 * exceptions whenever the provider run cannot be finished in a regular way.
 * Since these exception will be propagated to the UI, it is recommended to
 * catch Exceptions locally first, log them, and wrap them into
 * (Runtime)Exceptions with a human-readable description. When logging
 * exceptions, the log level "warn" is appropriate.
 */
@Override
public void gather(List<Statement> res) throws Exception {
    URL registryUrl = new URL(config.location);
    HttpURLConnection registryConnection = (HttpURLConnection) registryUrl.openConnection();
    registryConnection.setRequestMethod("GET");

    // //////////////////////////////////////////////////////////////////////
    // STEP 1
    logger.info("Retrieving packages from CKAN...");

    if (registryConnection.getResponseCode() != HttpURLConnection.HTTP_OK) {
        String msg = "Connection with the registry could not be established. ("
                + registryConnection.getResponseCode() + ", " + registryConnection.getResponseMessage() + ")";
        logger.warn(msg);
        throw new RuntimeException(msg); // propagate to UI
    }

    String siteContent = GenUtil.readUrl(registryConnection.getInputStream());

    JSONObject groupAsJson = null;
    JSONArray packageListJsonArray = null;
    try {
        groupAsJson = new JSONObject(new JSONTokener(siteContent));
        packageListJsonArray = groupAsJson.getJSONArray("packages");
    } catch (JSONException e) {
        String msg = "Returned content " + siteContent
                + " is not valid JSON. Check if the registry URL is valid.";
        logger.warn(msg);
        throw new RuntimeException(msg); // propagate to UI
    }

    logger.info("-> found " + packageListJsonArray.length() + " packages");

    // //////////////////////////////////////////////////////////////////////
    // STEP 2
    logger.info("Registering LOD catalog in metadata repository");

    /**
     * HINT: the method createStatement allows to create statements if
     * subject, predicate and object are all known; use this method instead
     * of opening a value factory
     */
    res.add(ProviderUtils.createStatement(CKANVocabulary.CKAN_CATALOG, RDF.TYPE, Vocabulary.DCAT.CATALOG));
    res.add(ProviderUtils.createStatement(CKANVocabulary.CKAN_CATALOG, RDFS.LABEL,
            CKANVocabulary.CKAN_CATALOG_LABEL));

    logger.info("-> done");

    // //////////////////////////////////////////////////////////////////////
    // STEP 3
    logger.info("Extracting metdata for the individual data sets listed in CKAN");

    /**
     * HINT: Set up an Apache HTTP client with a manager for multiple
     * threads; as a general guideline, use parallelization whenever
     * crawling web sources!
     */
    MultiThreadedHttpConnectionManager connectionManager = new MultiThreadedHttpConnectionManager();
    HttpClient client = new HttpClient(connectionManager);
    ExecutorService pool = Executors.newFixedThreadPool(10);

    // we store the data in a temporary memory store, which allows us
    // to perform transformation on the result set
    Repository repository = null;
    RepositoryConnection connection = null;
    try {
        // initialize repository and connection
        repository = new SailRepository(new MemoryStore());
        repository.initialize();
        connection = repository.getConnection();

        // Fire up a thread for every package
        logger.info("-> Fire up threads for the individual packages...");
        for (int i = 0; i < packageListJsonArray.length(); i++) {
            // we use the JSON representation to get a base URI to resolve
            // relative URIs in the XML later on (and as a fallback solution)
            String host = "http://www.ckan.net/package/" + packageListJsonArray.get(i).toString();
            String baseUri = findBaseUri(
                    "http://www.ckan.net/api/rest/package/" + packageListJsonArray.get(i).toString());
            baseUri = (baseUri == null) ? host : baseUri;
            pool.execute(new MetadataReader(client, host, baseUri, CKANVocabulary.CKAN_CATALOG, connection));
        }

        logger.info("-> Waiting for all tasks to complete (" + packageListJsonArray.length()
                + "tasks/data sources)...");
        pool.shutdown();
        pool.awaitTermination(4, TimeUnit.HOURS);

        /**
         * Now the extraction has finished, all statements are available in
         * our temporary repository. We apply some conversions and
         * transformations to align the extracted statements with our target
         * ontology.
         *
         * NOTE: this code is /NOT/ best practice; we should eventually extend
         * ProviderUtils to deal with at least lightweight transformations
         * (such as changing property names) or realize such tasks using an
         * integrated mapping framework.
         */

        // Extraction from temporary repository, phase 1:
        logger.info(
                "-> Extract dcterms:title AS rdfs:label, dcterms:contributor AS dcterms:creator, and dcterms:rights AS dcterms:license");
        String mappingQuery = mappingQuery();
        GraphQuery mappingGraphQuery = connection.prepareGraphQuery(QueryLanguage.SPARQL, mappingQuery);
        GraphQueryResult result = mappingGraphQuery.evaluate();

        logger.info("-> Appending extracted result to statement list");
        ProviderUtils.appendGraphQueryResultToListAndClose(result, res);

        // Label the distribution nodes
        logger.info("-> Generate labels for distributions");
        String labelDistributionQuery = labelDistributionQuery();
        GraphQuery labelDistributionGraphQuery = connection.prepareGraphQuery(QueryLanguage.SPARQL,
                labelDistributionQuery);
        GraphQueryResult result2 = labelDistributionGraphQuery.evaluate();

        logger.info("-> Appending extracted result to statement list");
        ProviderUtils.appendGraphQueryResultToListAndClose(result2, res);

        // Extraction from temporary repository, phase 2:
        logger.info("-> Deleting previously extracted triples and additional, not required information...");
        String deleteQuery = deleteQuery();
        Update deleteGraphQuery = connection.prepareUpdate(QueryLanguage.SPARQL, deleteQuery);
        deleteGraphQuery.execute();

        // Extraction from temporary repository, phase 3:
        logger.info("-> Deleting dcat:distribution and dcat:accessUrl information from"
                + "temp repository for which format information is missing...");
        String cleanDistQuery = cleanDistQuery();
        Update cleanupGraphQuery = connection.prepareUpdate(QueryLanguage.SPARQL, cleanDistQuery);
        cleanupGraphQuery.execute();

        logger.info("-> Appending remaining statements to result...");
        connection.getStatements(null, null, null, false).addTo(res);

        logger.info("Provider run finished successfully");
    } catch (Exception e) {
        logger.warn(e.getMessage());
        throw new RuntimeException(e);
    } finally {
        if (connection != null)
            connection.close();
        if (repository != null)
            repository.shutDown();
    }

    // finally, make sure there are no statements containing null in any
    // position (we did not take special care when creating statements)
    logger.info("-> cleaning up null statements");
    res = ProviderUtils.filterNullStatements(res);
}

From source file:com.enigmastation.ml.bayes.CorpusTest.java

@Test(groups = { "fulltest" })
public void testCorpus() throws URISyntaxException, IOException, InterruptedException {
    final Classifier classifier = new FisherClassifierImpl();
    ExecutorService service = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    // first we expand the test dataset
    URL resource = this.getClass().getResource("/src/test/resources/publiccorpus");
    File resourceFile = new File(resource.toURI());
    String[] dataFileNames = resourceFile.list(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return name.endsWith(".bz2");
        }
    });

    List<String> directories = new ArrayList<String>();
    final List<File> trainingFiles = new ArrayList<>();

    for (String fileName : dataFileNames) {
        directories.add(expandFile(fileName));
    }
    // collect every name, plus mark to delete on exit
    for (String inputDirectory : directories) {
        URL url = this.getClass().getResource(inputDirectory);
        File[] dataFiles = new File(url.toURI()).listFiles();
        for (File f : dataFiles) {
            handleFiles(f, trainingFiles);
        }
    }
    long startTime = System.currentTimeMillis();
    final int[] counter = { 0 };
    final int[] marker = { 0 };
    // now let's walk through a training cycle
    for (final File file : trainingFiles) {
        service.submit(new Runnable() {
            @Override
            public void run() {
                if ((++marker[0]) % 100 == 0) {
                    System.out.println("Progress training: " + marker[0] + " of " + trainingFiles.size());
                }
                if (counter[0] > 2) {
                    try {
                        trainWith(classifier, file);
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }
                counter[0] = (counter[0] + 1) % 10;
            }
        });
    }
    service.shutdown();
    service.awaitTermination(2, TimeUnit.HOURS);
    service = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    long endTime = System.currentTimeMillis();
    System.out.printf("Training took %d ms%n", (endTime - startTime));
    startTime = System.currentTimeMillis();
    marker[0] = 0;
    // now test against the training data
    for (final File file : trainingFiles) {
        service.submit(new Runnable() {
            public void run() {
                if ((++marker[0]) % 100 == 0) {
                    System.out.println("Progress evaluating: " + marker[0] + " of " + trainingFiles.size());
                }
                if (counter[0] < 3) {
                    try {
                        classify(classifier, file);
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }
                counter[0] = (counter[0] + 1) % 10;
            }
        });
    }
    service.shutdown();
    service.awaitTermination(2, TimeUnit.HOURS);
    endTime = System.currentTimeMillis();
    System.out.printf("Training accuracy: %d tests, %f%% accuracy%n", tests, (hits * 100.0) / tests);
    System.out.printf("Training took %d ms%n", (endTime - startTime));
}
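
Two details are worth noting here: the counter and marker arrays are mutated from multiple pool threads without synchronization, so the progress and sampling counts are only approximate, and the boolean result of awaitTermination(2, TimeUnit.HOURS) is discarded, so a run that exceeds the timeout would proceed to the accuracy report with tasks still outstanding.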

From source file:org.sakaiproject.contentreview.turnitin.oc.ContentReviewServiceTurnitinOC.java

public void processQueue() {
    log.info("Processing Turnitin OC submission queue");
    // Create new session object to ensure permissions are carried correctly to each new thread
    final Session session = sessionManager.getCurrentSession();
    ExecutorService executor = Executors.newFixedThreadPool(2);
    executor.execute(new Runnable() {
        @Override
        public void run() {
            sessionManager.setCurrentSession(session);
            processUnsubmitted();
        }
    });
    executor.execute(new Runnable() {
        @Override
        public void run() {
            sessionManager.setCurrentSession(session);
            checkForReport();
        }
    });
    executor.shutdown();
    // wait:
    try {
        if (!executor.awaitTermination(30, TimeUnit.MINUTES)) {
            log.error("ContentReviewServiceTurnitinOC.processQueue: time out waiting for executor to complete");
        }
    } catch (InterruptedException e) {
        log.error(e.getMessage(), e);
    }
}

From source file:org.wso2.appserver.integration.lazy.loading.artifacts.WebApplicationGhostDeploymentTestCase.java

@Test(groups = "wso2.as.lazy.loading", description = "Send concurrent requests when tenant context is not loaded."
        + " All requests should get the expected output", dependsOnMethods = "testTenantUnloadInIdleTimeAfterWebAPPUsageInGhostDeployment", enabled = false)
public void testConcurrentWebAPPInvocationsWhenTenantContextNotLoadedInGhostDeployment() throws Exception {
    serverManager.restartGracefully();
    assertFalse(getTenantStatus(tenantDomain1).isTenantContextLoaded(),
            "Tenant context is  loaded before access. Tenant name: " + tenantDomain1);
    ExecutorService executorService = Executors.newFixedThreadPool(CONCURRENT_THREAD_COUNT);
    log.info("Concurrent invocation Start");
    log.info("Expected Response Data:" + WEB_APP1_RESPONSE);
    for (int i = 0; i < CONCURRENT_THREAD_COUNT; i++) {
        final int requestId = i;
        executorService.execute(new Runnable() {

            public void run() {
                HttpResponse httpResponse = null;
                try {
                    httpResponse = HttpURLConnectionClient.sendGetRequest(tenant1WebApp1URL, null);
                } catch (IOException e) {
                    log.error("Error  when sending a  get request  for :" + tenant1WebApp1URL, e);
                }
                synchronized (this) {
                    String responseDetailedInfo;
                    String responseData;
                    if (httpResponse != null) {
                        responseDetailedInfo = "Request ID " + requestId + "Response Data :"
                                + httpResponse.getData() + "\tResponse Code:" + httpResponse.getResponseCode();
                        responseData = httpResponse.getData();
                    } else {
                        responseDetailedInfo = "Request ID " + requestId
                                + "Response Data : NULL Object return from " + "HttpURLConnectionClient";
                        responseData = "NULL Object return";
                    }
                    responseDataList.add(responseData);
                    log.info(responseDetailedInfo);
                    responseDetailedInfoList.add(responseDetailedInfo);
                }
            }

        });
    }
    executorService.shutdown();
    executorService.awaitTermination(5, TimeUnit.MINUTES);
    log.info("Concurrent invocation End");

    int correctResponseCount = 0;
    for (String responseData : responseDataList) {
        if (WEB_APP1_RESPONSE.equals(responseData)) {
            correctResponseCount += 1;
        }
    }
    StringBuilder allDetailResponseStringBuffer = new StringBuilder();
    allDetailResponseStringBuffer.append("\n");

    for (String responseInfo : responseDetailedInfoList) {
        allDetailResponseStringBuffer.append(responseInfo);
        allDetailResponseStringBuffer.append("\n");
    }
    String allDetailResponse = allDetailResponseStringBuffer.toString();
    WebAppStatusBean webAppStatusTenant1WebApp1 = getWebAppStatus(tenantDomain1, WEB_APP_FILE_NAME1);
    assertTrue(webAppStatusTenant1WebApp1.getTenantStatus().isTenantContextLoaded(),
            " Tenant Context is" + " not loaded. Tenant:" + tenantDomain1);
    assertTrue(webAppStatusTenant1WebApp1.isWebAppStarted(),
            "Web-App: " + WEB_APP_FILE_NAME1 + " is not" + " started in Tenant:" + tenantDomain1);
    assertFalse(webAppStatusTenant1WebApp1.isWebAppGhost(), "Web-App: " + WEB_APP_FILE_NAME1 + " is in "
            + "ghost mode after invoking in Tenant:" + tenantDomain1);
    assertEquals(correctResponseCount, CONCURRENT_THREAD_COUNT,
            allDetailResponse + "Not all the concurrent requests got the correct response.");
}

From source file:com.wavemaker.tools.apidocs.tools.spring.SpringSwaggerParserTest.java

@Test
public void testMultiThread3() throws InterruptedException {
    ExecutorService service = Executors.newFixedThreadPool(4);
    List<Class<?>> controllerClasses = new ArrayList<>();
    controllerClasses.add(VacationController.class);
    controllerClasses.add(com.wavemaker.tools.apidocs.tools.spring.controller2.VacationController.class);
    final Pattern namePattern = Pattern.compile("(\\w)*.(\\w*)$");
    for (int i = 0; i < 5; i++) {
        for (final Class<?> controllerClass : controllerClasses) {
            final int finalI = i;
            service.execute(new Runnable() {
                public void run() {
                    Swagger swagger;
                    try {
                        swagger = runForSingleClass(controllerClass);
                    } catch (SwaggerParserException e) {
                        throw new RuntimeException("Exception while parsing class:" + controllerClass.getName(),
                                e);
                    }
                    Assert.assertNotNull(swagger);
                    assertEquals(1, swagger.getTags().size());
                    assertEquals(controllerClass.getName(), swagger.getTags().get(0).getFullyQualifiedName());
                    try {
                        String name = controllerClass.getName();
                        Matcher nameMatcher = namePattern.matcher(name);
                        if (nameMatcher.find()) {
                            name = nameMatcher.group(0);
                        }
                        name = name.replace('.', '_');

                        writeToFile(swagger, "mul_package_class_" + name + "_" + finalI + ".json");
                    } catch (IOException e) {
                        throw new RuntimeException("Error while writing to file", e);
                    }
                }
            });
        }
    }

    service.shutdown();
    service.awaitTermination(10, TimeUnit.SECONDS);
}
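
The boolean result of awaitTermination(10, TimeUnit.SECONDS) is discarded here; if the ten-second budget is exceeded, the test method returns while worker threads are still parsing and writing files. Note also that assertion failures inside tasks submitted via execute() do not fail the test directly, since they surface on the pool threads.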

From source file:org.apache.reef.io.network.NetworkConnectionServiceTest.java

/**
 * NetworkService messaging rate benchmark.
 */
@Test
public void testMessagingNetworkConnServiceRateDisjoint() throws Exception {

    Assume.assumeFalse("Use log level INFO to run benchmarking", LOG.isLoggable(Level.FINEST));

    LOG.log(Level.FINEST, name.getMethodName());

    final BlockingQueue<Object> barrier = new LinkedBlockingQueue<>();

    final int numThreads = 4;
    final int size = 2000;
    final int numMessages = 300000 / (Math.max(1, size / 512));
    final int totalNumMessages = numMessages * numThreads;
    final String message = StringUtils.repeat('1', size);

    final ExecutorService e = Executors.newCachedThreadPool();
    for (int t = 0; t < numThreads; t++) {
        final int tt = t;

        e.submit(new Runnable() {
            public void run() {
                try (final NetworkMessagingTestService messagingTestService = new NetworkMessagingTestService(
                        localAddress)) {
                    final Monitor monitor = new Monitor();
                    final Codec<String> codec = new StringCodec();

                    messagingTestService.registerTestConnectionFactory(groupCommClientId, numMessages, monitor,
                            codec);
                    try (final Connection<String> conn = messagingTestService
                            .getConnectionFromSenderToReceiver(groupCommClientId)) {
                        try {
                            conn.open();
                            for (int count = 0; count < numMessages; ++count) {
                                // send messages to the receiver.
                                conn.write(message);
                            }
                            monitor.mwait();
                        } catch (final NetworkException e) {
                            e.printStackTrace();
                            throw new RuntimeException(e);
                        }
                    }
                } catch (final Exception e) {
                    throw new RuntimeException(e);
                }
            }
        });
    }

    // start and time
    final long start = System.currentTimeMillis();
    final Object ignore = new Object();
    for (int i = 0; i < numThreads; i++) {
        barrier.add(ignore);
    }
    e.shutdown();
    e.awaitTermination(100, TimeUnit.SECONDS);
    final long end = System.currentTimeMillis();
    final double runtime = ((double) end - start) / 1000;
    LOG.log(Level.INFO, "size: " + size + "; messages/s: " + totalNumMessages / runtime
            + " bandwidth(bytes/s): " + ((double) totalNumMessages * 2 * size) / runtime); // x2 for unicode chars
}

From source file:org.wso2.appserver.integration.lazy.loading.artifacts.WebApplicationGhostDeploymentTestCase.java

@Test(groups = "wso2.as.lazy.loading", description = "Send concurrent requests when tenant context is loaded"
        + " but the Web-App is in ghost form. All requests should get the expected output", dependsOnMethods = "testConcurrentWebAPPInvocationsWhenTenantContextNotLoadedInGhostDeployment", enabled = false)
public void testConcurrentWebAPPInvocationsWhenTenantContextLoadedInGhostDeployment() throws Exception {
    // This test method is disabled because of CARBON-15270
    serverManager.restartGracefully();
    responseDataList.clear();
    responseDetailedInfoList.clear();
    assertFalse(getTenantStatus(tenantDomain1).isTenantContextLoaded(),
            "Tenant context is  loaded before access. Tenant name: " + tenantDomain1);

    HttpResponse httpResponseApp2 = HttpURLConnectionClient.sendGetRequest(tenant1WebApp2URL, null);
    assertTrue(httpResponseApp2.getData().contains(WEB_APP2_RESPONSE),
            "Invocation of Web-App fail :" + tenant1WebApp2URL);
    assertTrue(getTenantStatus(tenantDomain1).isTenantContextLoaded(),
            "Tenant context is  not loaded after access. Tenant name: " + tenantDomain1);

    WebAppStatusBean webAppStatusTenant1WebApp2 = getWebAppStatus(tenantDomain1, WEB_APP_FILE_NAME2);
    assertTrue(webAppStatusTenant1WebApp2.isWebAppStarted(),
            "Web-App: " + WEB_APP_FILE_NAME2 + " is not started in Tenant:" + tenantDomain1);
    assertFalse(webAppStatusTenant1WebApp2.isWebAppGhost(),
            "Web-App: " + WEB_APP_FILE_NAME2 + " is in ghost mode after invoking in Tenant:" + tenantDomain1);

    WebAppStatusBean webAppStatusTenant1WebApp1 = getWebAppStatus(tenantDomain1, WEB_APP_FILE_NAME1);
    assertTrue(webAppStatusTenant1WebApp1.isWebAppStarted(),
            "Web-App: " + WEB_APP_FILE_NAME1 + " is not started in Tenant:" + tenantDomain1);
    assertTrue(webAppStatusTenant1WebApp1.isWebAppGhost(), "Web-App: " + WEB_APP_FILE_NAME1
            + " is not in ghost mode before invoking in Tenant:" + tenantDomain1);

    ExecutorService executorService = Executors.newFixedThreadPool(CONCURRENT_THREAD_COUNT);
    log.info("Concurrent invocation Start");
    log.info("Expected Response Data:" + WEB_APP1_RESPONSE);
    for (int i = 0; i < CONCURRENT_THREAD_COUNT; i++) {
        final int requestId = i;
        executorService.execute(new Runnable() {

            public void run() {
                HttpResponse httpResponse = null;
                try {
                    httpResponse = HttpURLConnectionClient.sendGetRequest(tenant1WebApp1URL, null);
                } catch (IOException e) {
                    log.error("Error  when sending a  get request  for :" + tenant1WebApp1URL, e);
                }
                synchronized (this) {
                    String responseDetailedInfo;
                    String responseData;
                    if (httpResponse != null) {
                        responseDetailedInfo = "Request ID " + requestId + "Response Data :"
                                + httpResponse.getData() + "\tResponse Code:" + httpResponse.getResponseCode();
                        responseData = httpResponse.getData();
                    } else {
                        responseDetailedInfo = "Request ID " + requestId
                                + "Response Data : NULL Object return from " + "HttpURLConnectionClient";
                        responseData = "NULL Object return";
                    }
                    responseDataList.add(responseData);
                    log.info(responseDetailedInfo);
                    responseDetailedInfoList.add(responseDetailedInfo);
                }
            }

        });
    }
    executorService.shutdown();
    executorService.awaitTermination(5, TimeUnit.MINUTES);
    log.info("Concurrent invocation End");
    int correctResponseCount = 0;
    for (String responseData : responseDataList) {
        if (WEB_APP1_RESPONSE.equals(responseData)) {
            correctResponseCount += 1;
        }
    }
    StringBuilder allDetailResponseStringBuffer = new StringBuilder();
    allDetailResponseStringBuffer.append("\n");
    for (String responseInfo : responseDetailedInfoList) {
        allDetailResponseStringBuffer.append(responseInfo);
        allDetailResponseStringBuffer.append("\n");
    }
    String allDetailResponse = allDetailResponseStringBuffer.toString();
    webAppStatusTenant1WebApp1 = getWebAppStatus(tenantDomain1, WEB_APP_FILE_NAME1);
    assertTrue(webAppStatusTenant1WebApp1.getTenantStatus().isTenantContextLoaded(),
            " Tenant Context " + "is not loaded. Tenant:" + tenantDomain1);
    assertTrue(webAppStatusTenant1WebApp1.isWebAppStarted(),
            "Web-App: " + WEB_APP_FILE_NAME1 + " is not started in Tenant:" + tenantDomain1);
    assertFalse(webAppStatusTenant1WebApp1.isWebAppGhost(),
            "Web-App: " + WEB_APP_FILE_NAME1 + " is in ghost mode after invoking in Tenant:" + tenantDomain1);
    assertEquals(correctResponseCount, CONCURRENT_THREAD_COUNT,
            allDetailResponse + "Not all the concurrent requests got the correct response.");

}

From source file:org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure.java

/**
 * Create Split directory
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private Pair<Integer, Integer> splitStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs)
        throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Configuration conf = env.getMasterConfiguration();
    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion and finally checks for any exception
    //
    // Note: splitStoreFiles creates daughter region dirs under the parent splits dir
    // Nothing to unroll here on failure; re-running createSplitsDir will
    // clean this up.
    int nbFiles = 0;
    final Map<String, Collection<StoreFileInfo>> files = new HashMap<String, Collection<StoreFileInfo>>(
            regionFs.getFamilies().size());
    for (String family : regionFs.getFamilies()) {
        Collection<StoreFileInfo> sfis = regionFs.getStoreFiles(family);
        if (sfis == null)
            continue;
        Collection<StoreFileInfo> filteredSfis = null;
        for (StoreFileInfo sfi : sfis) {
            // Filter. There is a lag cleaning up compacted reference files. They get cleared
            // after a delay in case outstanding Scanners still have references. Because of this,
            // the listing of the Store content may have straggler reference files. Skip these.
            // It should be safe to skip references at this point because we checked above with
            // the region if it thinks it is splittable and if we are here, it thinks it is
            // splittable.
            if (sfi.isReference()) {
                LOG.info("Skipping split of " + sfi + "; presuming ready for archiving.");
                continue;
            }
            if (filteredSfis == null) {
                filteredSfis = new ArrayList<StoreFileInfo>(sfis.size());
                files.put(family, filteredSfis);
            }
            filteredSfis.add(sfi);
            nbFiles++;
        }
    }
    if (nbFiles == 0) {
        // no file needs to be split.
        return new Pair<Integer, Integer>(0, 0);
    }
    // Max #threads is the smaller of the number of storefiles or the default max determined above.
    int maxThreads = Math.min(
            conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX,
                    conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT)),
            nbFiles);
    LOG.info("pid=" + getProcId() + " splitting " + nbFiles + " storefiles, region="
            + getParentRegion().getShortNameToLog() + ", threads=" + maxThreads);
    final ExecutorService threadPool = Executors.newFixedThreadPool(maxThreads,
            Threads.getNamedThreadFactory("StoreFileSplitter-%1$d"));
    final List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);

    // Split each store file.
    final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
    for (Map.Entry<String, Collection<StoreFileInfo>> e : files.entrySet()) {
        byte[] familyName = Bytes.toBytes(e.getKey());
        final ColumnFamilyDescriptor hcd = htd.getColumnFamily(familyName);
        final Collection<StoreFileInfo> storeFiles = e.getValue();
        if (storeFiles != null && storeFiles.size() > 0) {
            final CacheConfig cacheConf = new CacheConfig(conf, hcd);
            for (StoreFileInfo storeFileInfo : storeFiles) {
                StoreFileSplitter sfs = new StoreFileSplitter(regionFs, familyName, new HStoreFile(
                        mfs.getFileSystem(), storeFileInfo, conf, cacheConf, hcd.getBloomFilterType(), true));
                futures.add(threadPool.submit(sfs));
            }
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    long fileSplitTimeout = conf.getLong("hbase.master.fileSplitTimeout", 30000);
    try {
        boolean stillRunning = !threadPool.awaitTermination(fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    int daughterA = 0;
    int daughterB = 0;
    // Look for any exception
    for (Future<Pair<Path, Path>> future : futures) {
        try {
            Pair<Path, Path> p = future.get();
            daughterA += p.getFirst() != null ? 1 : 0;
            daughterB += p.getSecond() != null ? 1 : 0;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("pid=" + getProcId() + " split storefiles for region " + getParentRegion().getShortNameToLog()
                + " Daughter A: " + daughterA + " storefiles, Daughter B: " + daughterB + " storefiles.");
    }
    return new Pair<Integer, Integer>(daughterA, daughterB);
}
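
Of the examples on this page, this is the most defensive shutdown sequence: it checks the boolean result of awaitTermination, escalates to shutdownNow(), polls isTerminated() until cancellation completes, and rethrows InterruptedException as an InterruptedIOException to fit the method's IOException contract.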