Example usage for java.util.concurrent ExecutorService awaitTermination

Introduction

This page collects usage examples of java.util.concurrent ExecutorService.awaitTermination from open-source projects.

Prototype

boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

Document

Blocks until all tasks have completed execution after a shutdown request, or the timeout occurs, or the current thread is interrupted, whichever happens first. Returns true if this executor terminated, and false if the timeout elapsed before termination.
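Before the examples, here is a minimal, self-contained sketch of the shutdown idiom most of the snippets below follow (the class name, pool size, tasks, and timeout are arbitrary placeholders, not taken from any of the examples): call shutdown() to stop accepting new work, bound the wait with awaitTermination(), and fall back to shutdownNow() if the timeout elapses or the waiting thread is interrupted.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationExample {
    public static void main(String[] args) {
        ExecutorService service = Executors.newFixedThreadPool(2); // pool size is arbitrary
        service.submit(() -> System.out.println("task 1"));
        service.submit(() -> System.out.println("task 2"));

        service.shutdown(); // no new tasks accepted; already-submitted tasks still run
        try {
            // Wait up to 30 seconds (an arbitrary bound) for submitted tasks to finish.
            if (!service.awaitTermination(30, TimeUnit.SECONDS)) {
                service.shutdownNow(); // timed out: interrupt running tasks
            }
        } catch (InterruptedException e) {
            service.shutdownNow();              // re-cancel if we were interrupted while waiting
            Thread.currentThread().interrupt(); // preserve the interrupt status
        }
    }
}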

Usage

From source file: com.gs.collections.impl.parallel.SerialParallelLazyPerformanceTest.java

private void groupBy(UnifiedSet<String> words) {
    MutableList<Runnable> runnables = FastList.newList();
    runnables.add(() -> this.basicSerialGroupByPerformance(words, SERIAL_RUN_COUNT));
    int cores = Runtime.getRuntime().availableProcessors();
    ExecutorService service = Executors.newFixedThreadPool(cores);
    runnables.add(() -> this.basicParallelLazyGroupByPerformance(words, "Lambda", ALPHAGRAM_LAMBDA,
            PARALLEL_RUN_COUNT, cores, service));
    runnables.add(() -> this.basicParallelLazyGroupByPerformance(words, "Function", ALPHAGRAM_FUNCTION,
            PARALLEL_RUN_COUNT, cores, service));
    runnables.add(() -> this.basicParallelLazyGroupByPerformance(words, "MethodRef", ALPHAGRAM_METHOD_REF,
            PARALLEL_RUN_COUNT, cores, service));
    runnables.add(() -> this.basicParallelLazyJava8GroupByPerformance(words, "Lambda", JAVA_ALPHAGRAM_LAMBDA,
            PARALLEL_RUN_COUNT));
    Set<String> hashSet = new HashSet<>(words);
    runnables.add(() -> this.basicParallelLazyJava8GroupByPerformance(hashSet, "Lambda", JAVA_ALPHAGRAM_LAMBDA,
            PARALLEL_RUN_COUNT));
    runnables.add(() -> this.basicParallelLazyJava8GroupByPerformance(hashSet, "Function",
            JAVA_ALPHAGRAM_FUNCTION, PARALLEL_RUN_COUNT));
    runnables.add(() -> this.basicParallelLazyJava8GroupByPerformance(hashSet, "MethodRef",
            JAVA_ALPHAGRAM_METHOD_REF, PARALLEL_RUN_COUNT));
    this.shuffleAndRun(runnables);
    service.shutdown();
    try {
        service.awaitTermination(1, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}

From source file: com.gs.collections.impl.parallel.SerialParallelLazyPerformanceTest.java

private void collect(FastList<Integer> collection) {
    MutableList<Runnable> runnables = FastList.newList();
    runnables.add(() -> this.basicSerialCollectPerformance(collection, SERIAL_RUN_COUNT));
    int cores = Runtime.getRuntime().availableProcessors();
    ExecutorService service = Executors.newFixedThreadPool(cores);
    runnables.add(() -> this.basicParallelLazyCollectPerformance(collection, "Lambda", FUNCTIONS_LAMBDA,
            PARALLEL_RUN_COUNT, cores, service));
    runnables.add(() -> this.basicParallelLazyCollectPerformance(collection, "Function", FUNCTIONS,
            PARALLEL_RUN_COUNT, cores, service));
    runnables.add(() -> this.basicParallelLazyCollectPerformance(collection, "MethodRef", FUNCTIONS_METHOD_REF,
            PARALLEL_RUN_COUNT, cores, service));
    runnables.add(() -> this.basicJava8ParallelLazyCollectPerformance(collection, "Lambda",
            JAVA_FUNCTIONS_LAMBDA, PARALLEL_RUN_COUNT));
    List<Integer> arrayList = new ArrayList<>(collection);
    runnables.add(() -> this.basicJava8ParallelLazyCollectPerformance(arrayList, "Lambda",
            JAVA_FUNCTIONS_LAMBDA, PARALLEL_RUN_COUNT));
    runnables.add(() -> this.basicJava8ParallelLazyCollectPerformance(arrayList, "Function", JAVA_FUNCTIONS,
            PARALLEL_RUN_COUNT));
    runnables.add(() -> this.basicJava8ParallelLazyCollectPerformance(arrayList, "MethodRef",
            JAVA_FUNCTIONS_METHOD_REF, PARALLEL_RUN_COUNT));
    this.shuffleAndRun(runnables);
    service.shutdown();
    try {
        service.awaitTermination(1, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}

From source file: com.gs.collections.impl.parallel.SerialParallelLazyPerformanceTest.java

private void select(FastList<Integer> collection) {
    MutableList<Runnable> runnables = FastList.newList();
    runnables.add(() -> this.basicSerialSelectPerformance(collection, PREDICATES_LAMBDA, SERIAL_RUN_COUNT));
    int cores = Runtime.getRuntime().availableProcessors();
    ExecutorService service = Executors.newFixedThreadPool(cores);
    runnables.add(() -> this.basicParallelLazySelectPerformance(collection, "Lambda", PREDICATES_LAMBDA,
            PARALLEL_RUN_COUNT, cores, service));
    runnables.add(() -> this.basicParallelLazySelectPerformance(collection, "Predicate", PREDICATES,
            PARALLEL_RUN_COUNT, cores, service));
    runnables.add(() -> this.basicParallelLazySelectPerformance(collection, "MethodRef", PREDICATES_METHOD_REF,
            PARALLEL_RUN_COUNT, cores, service));
    runnables.add(() -> this.basicJava8ParallelLazySelectPerformance(collection, "Lambda",
            JAVA_PREDICATES_LAMBDA, PARALLEL_RUN_COUNT));
    List<Integer> arrayList = new ArrayList<>(collection);
    runnables.add(() -> this.basicJava8ParallelLazySelectPerformance(arrayList, "Lambda",
            JAVA_PREDICATES_LAMBDA, PARALLEL_RUN_COUNT));
    runnables.add(() -> this.basicJava8ParallelLazySelectPerformance(arrayList, "Predicate", JAVA_PREDICATES,
            PARALLEL_RUN_COUNT));
    runnables.add(() -> this.basicJava8ParallelLazySelectPerformance(arrayList, "MethodRef",
            JAVA_PREDICATES_METHOD_REF, PARALLEL_RUN_COUNT));
    this.shuffleAndRun(runnables);
    service.shutdown();
    try {
        service.awaitTermination(1, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}

From source file: com.gs.collections.impl.parallel.SerialParallelLazyPerformanceTest.java

private void reject(FastList<Integer> collection) {
    MutableList<Runnable> runnables = FastList.newList();
    runnables.add(() -> this.basicSerialRejectPerformance(collection, PREDICATES_LAMBDA, SERIAL_RUN_COUNT));
    int cores = Runtime.getRuntime().availableProcessors();
    ExecutorService service = Executors.newFixedThreadPool(cores);
    runnables.add(() -> this.basicParallelLazyRejectPerformance(collection, "Lambda", PREDICATES_LAMBDA,
            PARALLEL_RUN_COUNT, cores, service));
    runnables.add(() -> this.basicParallelLazyRejectPerformance(collection, "Predicate", PREDICATES,
            PARALLEL_RUN_COUNT, cores, service));
    runnables.add(() -> this.basicParallelLazyRejectPerformance(collection, "MethodRef", PREDICATES_METHOD_REF,
            PARALLEL_RUN_COUNT, cores, service));
    runnables.add(() -> this.basicJava8ParallelLazyRejectPerformance(collection, "Lambda",
            JAVA_PREDICATES_LAMBDA, PARALLEL_RUN_COUNT));
    List<Integer> arrayList = new ArrayList<>(collection);
    runnables.add(() -> this.basicJava8ParallelLazyRejectPerformance(arrayList, "Lambda",
            JAVA_PREDICATES_LAMBDA, PARALLEL_RUN_COUNT));
    runnables.add(() -> this.basicJava8ParallelLazyRejectPerformance(arrayList, "Predicate", JAVA_PREDICATES,
            PARALLEL_RUN_COUNT));
    runnables.add(() -> this.basicJava8ParallelLazyRejectPerformance(arrayList, "MethodRef",
            JAVA_PREDICATES_METHOD_REF, PARALLEL_RUN_COUNT));
    this.shuffleAndRun(runnables);
    service.shutdown();
    try {
        service.awaitTermination(1, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}

From source file: org.apache.hadoop.hbase.regionserver.TestHStore.java

@Test
public void testCreateScannerAndSnapshotConcurrently() throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    conf.set(HStore.MEMSTORE_CLASS_NAME, MyCompactingMemStore.class.getName());
    init(name.getMethodName(), conf, ColumnFamilyDescriptorBuilder.newBuilder(family)
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC).build());
    byte[] value = Bytes.toBytes("value");
    MemStoreSize memStoreSize = new MemStoreSize();
    long ts = EnvironmentEdgeManager.currentTime();
    long seqId = 100;
    // older data which shouldn't be "seen" by the client
    store.add(createCell(qf1, ts, seqId, value), memStoreSize);
    store.add(createCell(qf2, ts, seqId, value), memStoreSize);
    store.add(createCell(qf3, ts, seqId, value), memStoreSize);
    TreeSet<byte[]> quals = new TreeSet<>(Bytes.BYTES_COMPARATOR);
    quals.add(qf1);
    quals.add(qf2);
    quals.add(qf3);
    StoreFlushContext storeFlushCtx = store.createFlushContext(id++);
    MyCompactingMemStore.START_TEST.set(true);
    Runnable flush = () -> {
        // this is blocked until we create first scanner from pipeline and snapshot -- phase (1/5)
        // recreate the active memstore -- phase (4/5)
        storeFlushCtx.prepare();
    };
    ExecutorService service = Executors.newSingleThreadExecutor();
    service.submit(flush);
    // we get scanner from pipeline and snapshot but they are empty. -- phase (2/5)
    // this is blocked until we recreate the active memstore -- phase (3/5)
    // we get scanner from active memstore but it is empty -- phase (5/5)
    InternalScanner scanner = (InternalScanner) store.getScanner(new Scan(new Get(row)), quals, seqId + 1);
    service.shutdown();
    service.awaitTermination(20, TimeUnit.SECONDS);
    try {
        try {
            List<Cell> results = new ArrayList<>();
            scanner.next(results);
            assertEquals(3, results.size());
            for (Cell c : results) {
                byte[] actualValue = CellUtil.cloneValue(c);
                assertTrue("expected:" + Bytes.toStringBinary(value) + ", actual:"
                        + Bytes.toStringBinary(actualValue), Bytes.equals(actualValue, value));
            }
        } finally {
            scanner.close();
        }
    } finally {
        MyCompactingMemStore.START_TEST.set(false);
        storeFlushCtx.flushCache(Mockito.mock(MonitoredTask.class));
        storeFlushCtx.commit(Mockito.mock(MonitoredTask.class));
    }
}

From source file: org.springframework.integration.jdbc.store.channel.AbstractTxTimeoutMessageStoreTests.java

public void testInt2993IdCacheConcurrency() throws InterruptedException, ExecutionException {
    final String groupId = "testInt2993Group";
    for (int i = 0; i < 100; i++) {
        this.jdbcChannelMessageStore.addMessageToGroup(groupId,
                new GenericMessage<String>("testInt2993Message"));
    }

    ExecutorService executorService = Executors.newCachedThreadPool();
    CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(executorService);

    final int concurrency = 5;

    final TransactionTemplate transactionTemplate = new TransactionTemplate(transactionManager);

    for (int i = 0; i < concurrency; i++) {
        completionService.submit(new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
                for (int i = 0; i < 100; i++) {
                    boolean result = transactionTemplate.execute(new TransactionCallback<Boolean>() {
                        @Override
                        public Boolean doInTransaction(TransactionStatus status) {
                            Message<?> message = null;
                            try {
                                message = jdbcChannelMessageStore.pollMessageFromGroup(groupId);
                            } catch (Exception e) {
                                log.error("IdCache race condition.", e);
                                return false;
                            }
                            try {
                                Thread.sleep(10);
                            } catch (InterruptedException e) {
                                log.error(e);
                            }
                            if (message != null) {
                                jdbcChannelMessageStore
                                        .removeFromIdCache(message.getHeaders().getId().toString());
                            }
                            return true;
                        }
                    });
                    if (!result)
                        return false;
                }

                return true;
            }
        });
    }

    for (int j = 0; j < concurrency; j++) {
        assertTrue(completionService.take().get());
    }

    executorService.shutdown();
    assertTrue(executorService.awaitTermination(5, TimeUnit.SECONDS));
}

From source file: MSUmpire.DIA.DIAPack.java

public void AssignQuant(boolean export) throws IOException, SQLException {
    Logger.getRootLogger().info("Assign peak cluster to identified peptides");
    GenerateClusterScanNomapping();

    ExecutorService executorPool = null;
    for (PeakCluster cluster : MS1FeatureMap.PeakClusters) {
        cluster.Identified = false;
    }

    for (PepIonID pepIonID : IDsummary.GetPepIonList().values()) {
        pepIonID.MS1PeakClusters = new ArrayList<>();
        pepIonID.MS2UnfragPeakClusters = new ArrayList<>();
    }

    // Assign precursor features and grouped fragments for all identified peptide ions for an isolation window
    for (LCMSPeakDIAMS2 DIAWindow : DIAWindows) {
        DIA_window_Quant dia_w = new DIA_window_Quant(GetQ1Name(), GetQ2Name(), GetQ3Name(), ScanClusterMap_Q1,
                ScanClusterMap_Q2, ScanClusterMap_Q3, MS1FeatureMap, DIAWindow, IDsummary, NoCPUs);
        dia_w.run();
    }

    executorPool = Executors.newFixedThreadPool(NoCPUs);

    //Match fragments and calculate quantification for each peptide ion
    for (PepIonID pepIonID : IDsummary.GetPepIonList().values()) {
        DIAAssignQuantUnit quantunit = new DIAAssignQuantUnit(pepIonID, MS1FeatureMap, parameter);
        executorPool.execute(quantunit);
    }
    executorPool.shutdown();

    try {
        executorPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        Logger.getRootLogger().info("interrupted..");
    }

    if (export) {
        ExportID();
    }
}

From source file: com.github.cukedoctor.jenkins.CukedoctorPublisher.java

@Override
public void perform(Run<?, ?> build, FilePath workspace, Launcher launcher, TaskListener listener)
        throws IOException, InterruptedException {

    FilePath workspaceJsonSourceDir;//most of the time on slave
    FilePath workspaceJsonTargetDir;//always on master
    if (!hasText(featuresDir)) {
        workspaceJsonSourceDir = workspace;
        workspaceJsonTargetDir = getMasterWorkspaceDir(build);
    } else {
        workspaceJsonSourceDir = new FilePath(workspace, featuresDir);
        workspaceJsonTargetDir = new FilePath(getMasterWorkspaceDir(build), featuresDir);
    }

    logger = listener.getLogger();
    workspaceJsonSourceDir.copyRecursiveTo("**/*.json,**/cukedoctor-intro.adoc,**/cukedoctor.properties",
            workspaceJsonTargetDir);

    System.setProperty("INTRO_CHAPTER_DIR", workspaceJsonTargetDir.getRemote());

    logger.println("");
    logger.println("Generating living documentation for " + build.getFullDisplayName()
            + " with the following arguments: ");
    logger.println("Features dir: " + workspaceJsonSourceDir.getRemote());
    logger.println("Format: " + format.getFormat());
    logger.println("Toc: " + toc.getToc());
    logger.println("Title: " + title);
    logger.println("Numbered: " + Boolean.toString(numbered));
    logger.println("Section anchors: " + Boolean.toString(sectAnchors));
    logger.println("Hide features section: " + Boolean.toString(hideFeaturesSection));
    logger.println("Hide summary: " + Boolean.toString(hideSummary));
    logger.println("Hide scenario keyword: " + Boolean.toString(hideScenarioKeyword));
    logger.println("Hide step time: " + Boolean.toString(hideStepTime));
    logger.println("Hide tags: " + Boolean.toString(hideTags));
    logger.println("");

    Result result = Result.SUCCESS;
    List<Feature> features = FeatureParser.findAndParse(workspaceJsonTargetDir.getRemote());
    if (!features.isEmpty()) {
        if (!hasText(title)) {
            title = "Living Documentation";
        }

        logger.println("Found " + features.size() + " feature(s)...");

        File targetBuildDirectory = new File(build.getRootDir(), CukedoctorBaseAction.BASE_URL);
        if (!targetBuildDirectory.exists()) {
            boolean created = targetBuildDirectory.mkdirs();
            if (!created) {
                listener.error("Could not create file at location: " + targetBuildDirectory.getAbsolutePath());
                result = Result.UNSTABLE;
            }
        }

        GlobalConfig globalConfig = GlobalConfig.getInstance();
        DocumentAttributes documentAttributes = globalConfig.getDocumentAttributes().backend(format.getFormat())
                .toc(toc.getToc()).numbered(numbered).sectAnchors(sectAnchors).docTitle(title);

        globalConfig.getLayoutConfig().setHideFeaturesSection(hideFeaturesSection);

        globalConfig.getLayoutConfig().setHideSummarySection(hideSummary);

        globalConfig.getLayoutConfig().setHideScenarioKeyword(hideScenarioKeyword);

        globalConfig.getLayoutConfig().setHideStepTime(hideStepTime);

        globalConfig.getLayoutConfig().setHideTags(hideTags);

        String outputPath = targetBuildDirectory.getAbsolutePath();
        CukedoctorBuildAction action = new CukedoctorBuildAction(build);
        final ExecutorService pool = Executors.newFixedThreadPool(4);
        if ("all".equals(format.getFormat())) {
            File allHtml = new File(
                    outputPath + System.getProperty("file.separator") + CukedoctorBaseAction.ALL_DOCUMENTATION);
            if (!allHtml.exists()) {
                boolean created = allHtml.createNewFile();
                if (!created) {
                    listener.error("Could not create file at location: " + allHtml.getAbsolutePath());
                    result = Result.UNSTABLE;
                }
            }
            InputStream is = null;
            OutputStream os = null;
            try {
                is = getClass().getResourceAsStream("/" + CukedoctorBaseAction.ALL_DOCUMENTATION);
                os = new FileOutputStream(allHtml);

                int copyResult = IOUtils.copy(is, os);
                if (copyResult == -1) {
                    listener.error("File is too big.");//will never reach here but findbugs forced it...
                    result = Result.UNSTABLE;
                }
            } finally {
                if (is != null) {
                    is.close();
                }
                if (os != null) {
                    os.close();
                }
            }

            action.setDocumentationPage(CukedoctorBaseAction.ALL_DOCUMENTATION);
            pool.execute(runAll(features, documentAttributes, outputPath));
        } else {
            action.setDocumentationPage("documentation." + format.getFormat());
            pool.execute(run(features, documentAttributes, outputPath));
        }

        build.addAction(action);
        pool.shutdown();
        try {
            if (format.equals(FormatType.HTML)) {
                pool.awaitTermination(5, TimeUnit.MINUTES);
            } else {
                pool.awaitTermination(15, TimeUnit.MINUTES);
            }
        } catch (final InterruptedException e) {
            Thread.interrupted();
            listener.error(
                    "Your documentation is taking too long to be generated. Halting the generation now to not throttle Jenkins.");
            result = Result.FAILURE;
        }

        if (result.equals(Result.SUCCESS)) {
            listener.hyperlink("../" + CukedoctorBaseAction.BASE_URL, "Documentation generated successfully!");
            logger.println("");
        }

    } else {
        logger.println(String.format("No features Found in %s. %sLiving documentation will not be generated.",
                workspaceJsonTargetDir.getRemote(), "\n"));

    }

    build.setResult(result);
}

From source file: com.thruzero.common.web.model.container.builder.xml.XmlPanelSetBuilder.java

protected PanelSet buildConcurrently() throws Exception {
    PanelSet result = new PanelSet(panelSetId);

    if (!panelNodes.isEmpty()) {
        // Build the panels in parallel (e.g., RSS Feed panels should be created in parallel).
        ExecutorService executorService = Executors.newFixedThreadPool(panelNodes.size());
        logHelper.logExecutorServiceCreated(panelSetId);

        final Map<String, AbstractPanel> panels = new HashMap<String, AbstractPanel>();
        for (final InfoNodeElement panelNode : panelNodes) {
            final AbstractXmlPanelBuilder panelBuilder = panelBuilderTypeRegistry
                    .createBuilder(panelNode.getName(), panelNode);
            final String panelKey = Integer.toHexString(panelNode.hashCode());

            if (panelBuilder == null) {
                panels.put(panelKey, new ErrorHtmlPanel("error", "Panel ERROR",
                        "PanelBuilder not found for panel type " + panelNode.getName()));
            } else {
                //logger.debug("  - prepare to build: " + panelNode.getName());
                executorService.execute(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            AbstractPanel panel = panelBuilder.build();
                            panels.put(panelKey, panel);
                        } catch (Exception e) {
                            panels.put(panelKey, panelBuilder.buildErrorPanel(panelBuilder.getPanelId(),
                                    "Panel ERROR",
                                    "PanelBuilder encountered an Exception: " + e.getClass().getSimpleName()));
                        }
                    }

                    @Override
                    public String toString() {
                        return panelBuilder.getPanelInfoForError();
                    }
                });
            }
        }

        // Wait for all panels to be built
        executorService.shutdown();
        logHelper.logExecutorServiceShutdown(panelSetId);
        try {
            executorService.awaitTermination(timeoutInSeconds, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // ignore (handled below)
            logHelper.logExecutorServiceInterrupted(panelSetId);
        }

        if (executorService.isTerminated()) {
            logHelper.logExecutorServiceIsTerminated(panelSetId);
        } else {
            logHelper.logExecutorServiceIsNotTerminated(executorService, executorService.shutdownNow(),
                    panelSetId);
        }

        // add panels in the same order as defined
        for (InfoNodeElement panelNode : panelNodes) {
            String panelKey = Integer.toHexString(panelNode.hashCode());
            AbstractPanel panel = panels.get(panelKey);
            if (panel == null) {
                // if it wasn't added to the panels map, then there must have been a timeout error
                AbstractXmlPanelBuilder panelBuilder = panelBuilderTypeRegistry
                        .createBuilder(panelNode.getName(), panelNode);

                result.addPanel(panelBuilder.buildErrorPanel(panelKey, "Panel ERROR",
                        "PanelBuilder encountered a timeout error: " + panelNode.getName()));
            } else {
                result.addPanel(panel);
            }
        }
    }
    logHelper.logPanelSetCompleted(panelSetId);

    return result;
}

From source file: org.apache.hadoop.hbase.util.RegionMover.java

private void loadRegions(Admin admin, String hostname, int port, List<HRegionInfo> regionsToMove, boolean ack)
        throws Exception {
    String server = null;
    List<HRegionInfo> movedRegions = Collections.synchronizedList(new ArrayList<HRegionInfo>());
    int maxWaitInSeconds = admin.getConfiguration().getInt(SERVERSTART_WAIT_MAX_KEY,
            DEFAULT_SERVERSTART_WAIT_MAX);
    long maxWait = EnvironmentEdgeManager.currentTime() + maxWaitInSeconds * 1000;
    while ((EnvironmentEdgeManager.currentTime() < maxWait) && (server == null)) {
        try {
            ArrayList<String> regionServers = getServers(admin);
            // Remove the host Region server from target Region Servers list
            server = stripServer(regionServers, hostname, port);
            if (server != null) {
                break;
            }
        } catch (IOException e) {
            LOG.warn("Could not get list of region servers", e);
        } catch (Exception e) {
            LOG.info("hostname=" + hostname + " is not up yet, waiting");
        }
        try {
            Thread.sleep(500);
        } catch (InterruptedException e) {
            LOG.error("Interrupted while waiting for " + hostname + " to be up.Quitting now", e);
            throw e;
        }
    }
    if (server == null) {
        LOG.error("Host:" + hostname + " is not up.Giving up.");
        throw new Exception("Host to load regions not online");
    }
    LOG.info("Moving " + regionsToMove.size() + " regions to " + server + " using " + this.maxthreads
            + " threads.Ack mode:" + this.ack);
    ExecutorService moveRegionsPool = Executors.newFixedThreadPool(this.maxthreads);
    List<Future<Boolean>> taskList = new ArrayList<Future<Boolean>>();
    int counter = 0;
    while (counter < regionsToMove.size()) {
        HRegionInfo region = regionsToMove.get(counter);
        String currentServer = getServerNameForRegion(admin, region);
        if (currentServer == null) {
            LOG.warn("Could not get server for Region:" + region.getEncodedName() + " moving on");
            counter++;
            continue;
        } else if (server.equals(currentServer)) {
            LOG.info("Region " + region.getRegionNameAsString() + "already on target server=" + server);
            counter++;
            continue;
        }
        if (ack) {
            Future<Boolean> task = moveRegionsPool
                    .submit(new MoveWithAck(admin, region, currentServer, server, movedRegions));
            taskList.add(task);
        } else {
            Future<Boolean> task = moveRegionsPool
                    .submit(new MoveWithoutAck(admin, region, currentServer, server, movedRegions));
            taskList.add(task);
        }
        counter++;
    }
    moveRegionsPool.shutdown();
    long timeoutInSeconds = regionsToMove.size()
            * admin.getConfiguration().getInt(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX);
    try {
        if (!moveRegionsPool.awaitTermination(timeoutInSeconds, TimeUnit.SECONDS)) {
            moveRegionsPool.shutdownNow();
        }
    } catch (InterruptedException e) {
        moveRegionsPool.shutdownNow();
        Thread.currentThread().interrupt();
    }
    for (Future<Boolean> future : taskList) {
        try {
            // if even after shutdownNow threads are stuck we wait for 5 secs max
            if (!future.get(5, TimeUnit.SECONDS)) {
                LOG.error("Was Not able to move region....Exiting Now");
                throw new Exception("Could not move region Exception");
            }
        } catch (InterruptedException e) {
            LOG.error("Interrupted while waiting for Thread to Complete " + e.getMessage(), e);
            throw e;
        } catch (ExecutionException e) {
            LOG.error("Got Exception From Thread While moving region " + e.getMessage(), e);
            throw e;
        } catch (CancellationException e) {
            LOG.error(
                    "Thread for moving region cancelled. Timeout for cancellation:" + timeoutInSeconds + "secs",
                    e);
            throw e;
        }
    }
}