Example usage for java.util.concurrent ThreadPoolExecutor shutdownNow

List of usage examples for java.util.concurrent ThreadPoolExecutor shutdownNow

Introduction

On this page you can find usage examples for java.util.concurrent ThreadPoolExecutor shutdownNow.

Prototype

public List<Runnable> shutdownNow() 

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
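
Before the project examples below, here is a minimal, self-contained sketch of the typical shutdownNow pattern: queued tasks are returned without ever being run, already-running tasks are interrupted, and awaitTermination is still needed to wait for the pool to actually stop. The class name and the simulated tasks are illustrative only; they are not taken from any of the projects below.

import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ShutdownNowExample {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 2, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>());

        // Queue more tasks than there are threads so some are still waiting.
        for (int i = 0; i < 10; i++) {
            pool.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        Thread.sleep(1000); // simulate work
                    } catch (InterruptedException e) {
                        // shutdownNow() interrupts running tasks; restore the flag and exit.
                        Thread.currentThread().interrupt();
                    }
                }
            });
        }

        Thread.sleep(100); // let a couple of tasks start

        // Attempt to stop everything: running tasks are interrupted,
        // queued tasks are drained and returned without being executed.
        List<Runnable> neverStarted = pool.shutdownNow();
        System.out.println("Tasks that never started: " + neverStarted.size());

        // shutdownNow() does not wait for termination; awaitTermination does.
        boolean terminated = pool.awaitTermination(5, TimeUnit.SECONDS);
        System.out.println("Terminated: " + terminated);
    }
}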

Usage

From source file:org.apache.hadoop.hbase.regionserver.HStore.java

/**
 * Creates an unsorted list of StoreFile loaded in parallel
 * from the given directory.
 * @throws IOException
 */
private List<StoreFile> loadStoreFiles() throws IOException {
    Collection<StoreFileInfo> files = fs.getStoreFiles(getColumnFamilyName());
    if (files == null || files.size() == 0) {
        return new ArrayList<StoreFile>();
    }

    // initialize the thread pool for opening store files in parallel.
    ThreadPoolExecutor storeFileOpenerThreadPool = this.region
            .getStoreFileOpenAndCloseThreadPool("StoreFileOpenerThread-" + this.getColumnFamilyName());
    CompletionService<StoreFile> completionService = new ExecutorCompletionService<StoreFile>(
            storeFileOpenerThreadPool);

    int totalValidStoreFile = 0;
    for (final StoreFileInfo storeFileInfo : files) {
        // open each store file in parallel
        completionService.submit(new Callable<StoreFile>() {
            @Override
            public StoreFile call() throws IOException {
                StoreFile storeFile = createStoreFileAndReader(storeFileInfo);
                return storeFile;
            }
        });
        totalValidStoreFile++;
    }

    ArrayList<StoreFile> results = new ArrayList<StoreFile>(files.size());
    IOException ioe = null;
    try {
        for (int i = 0; i < totalValidStoreFile; i++) {
            try {
                Future<StoreFile> future = completionService.take();
                StoreFile storeFile = future.get();
                long length = storeFile.getReader().length();
                this.storeSize += length;
                this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes();
                if (LOG.isDebugEnabled()) {
                    LOG.debug("loaded " + storeFile.toStringDetailed());
                }
                results.add(storeFile);
            } catch (InterruptedException e) {
                if (ioe == null)
                    ioe = new InterruptedIOException(e.getMessage());
            } catch (ExecutionException e) {
                if (ioe == null)
                    ioe = new IOException(e.getCause());
            }
        }
    } finally {
        storeFileOpenerThreadPool.shutdownNow();
    }
    if (ioe != null) {
        // close StoreFile readers
        for (StoreFile file : results) {
            try {
                if (file != null)
                    file.closeReader(true);
            } catch (IOException e) {
                LOG.warn(e.getMessage());
            }
        }
        throw ioe;
    }

    return results;
}

From source file:org.apache.hadoop.hbase.regionserver.HStore.java

@Override
public ImmutableCollection<StoreFile> close() throws IOException {
    this.lock.writeLock().lock();
    try {
        // Clear so metrics don't find them.
        ImmutableCollection<StoreFile> result = storeEngine.getStoreFileManager().clearFiles();

        if (!result.isEmpty()) {
            // initialize the thread pool for closing store files in parallel.
            ThreadPoolExecutor storeFileCloserThreadPool = this.region
                    .getStoreFileOpenAndCloseThreadPool("StoreFileCloserThread-" + this.getColumnFamilyName());

            // close each store file in parallel
            CompletionService<Void> completionService = new ExecutorCompletionService<Void>(
                    storeFileCloserThreadPool);
            for (final StoreFile f : result) {
                completionService.submit(new Callable<Void>() {
                    @Override
                    public Void call() throws IOException {
                        f.closeReader(true);
                        return null;
                    }
                });
            }

            IOException ioe = null;
            try {
                for (int i = 0; i < result.size(); i++) {
                    try {
                        Future<Void> future = completionService.take();
                        future.get();
                    } catch (InterruptedException e) {
                        if (ioe == null) {
                            ioe = new InterruptedIOException();
                            ioe.initCause(e);
                        }
                    } catch (ExecutionException e) {
                        if (ioe == null)
                            ioe = new IOException(e.getCause());
                    }
                }
            } finally {
                storeFileCloserThreadPool.shutdownNow();
            }
            if (ioe != null)
                throw ioe;
        }
        LOG.info("Closed " + this);
        return result;
    } finally {
        this.lock.writeLock().unlock();
    }
}

From source file:org.apache.hadoop.hbase.regionserver.IndexSplitTransaction.java

private void splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit) throws IOException {
    if (hstoreFilesToSplit == null) {
        // Could be null because close didn't succeed -- for now consider it fatal
        throw new IOException("Close returned empty list of StoreFiles");
    }
    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion and finally checks for any exceptions.
    int nbFiles = hstoreFilesToSplit.size();
    if (nbFiles == 0) {
        // no file needs to be split.
        return;
    }
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("StoreFileSplitter-%1$d");
    ThreadFactory factory = builder.build();
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory);
    List<Future<Void>> futures = new ArrayList<Future<Void>>(nbFiles);

    // Split each store file.
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        for (StoreFile sf : entry.getValue()) {
            StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
            futures.add(threadPool.submit(sfs));
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    try {
        boolean stillRunning = !threadPool.awaitTermination(this.fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread pool to shut down completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    // Look for any exception
    for (Future<Void> future : futures) {
        try {
            future.get();
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }
}

From source file:org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.java

/**
 * Creates reference files for the top and bottom halves of the split.
 * @param hstoreFilesToSplit map of store files to create half file references for.
 * @return the number of reference files that were created.
 * @throws IOException
 */
private Pair<Integer, Integer> splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit)
        throws IOException {
    if (hstoreFilesToSplit == null) {
        // Could be null because close didn't succeed -- for now consider it fatal
        throw new IOException("Close returned empty list of StoreFiles");
    }
    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion and finally checks for any exceptions.
    int nbFiles = 0;
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        nbFiles += entry.getValue().size();
    }
    if (nbFiles == 0) {
        // no file needs to be split.
        return new Pair<Integer, Integer>(0, 0);
    }
    // Default max #threads to use is the smaller of table's configured number of blocking store
    // files or the available number of logical cores.
    int defMaxThreads = Math.min(
            parent.conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT),
            Runtime.getRuntime().availableProcessors());
    // Max #threads is the smaller of the number of storefiles or the default max determined above.
    int maxThreads = Math.min(parent.conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, defMaxThreads), nbFiles);
    LOG.info("Preparing to split " + nbFiles + " storefiles for region " + this.parent + " using " + maxThreads
            + " threads");
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("StoreFileSplitter-%1$d");
    ThreadFactory factory = builder.build();
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxThreads, factory);
    List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);

    // Split each store file.
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        for (StoreFile sf : entry.getValue()) {
            StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
            futures.add(threadPool.submit(sfs));
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    try {
        boolean stillRunning = !threadPool.awaitTermination(this.fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread pool to shut down completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    int created_a = 0;
    int created_b = 0;
    // Look for any exception
    for (Future<Pair<Path, Path>> future : futures) {
        try {
            Pair<Path, Path> p = future.get();
            created_a += p.getFirst() != null ? 1 : 0;
            created_b += p.getSecond() != null ? 1 : 0;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Split storefiles for region " + this.parent + " Daughter A: " + created_a
                + " storefiles, Daughter B: " + created_b + " storefiles.");
    }
    return new Pair<Integer, Integer>(created_a, created_b);
}

From source file:org.apache.hadoop.hbase.regionserver.Store.java

/**
 * Creates an unsorted list of StoreFile loaded in parallel
 * from the given directory.
 * @throws IOException
 */
private List<StoreFile> loadStoreFiles() throws IOException {
    ArrayList<StoreFile> results = new ArrayList<StoreFile>();
    FileStatus files[] = getStoreFiles();

    if (files == null || files.length == 0) {
        return results;
    }
    // initialize the thread pool for opening store files in parallel.
    ThreadPoolExecutor storeFileOpenerThreadPool = this.region
            .getStoreFileOpenAndCloseThreadPool("StoreFileOpenerThread-" + this.family.getNameAsString());
    CompletionService<StoreFile> completionService = new ExecutorCompletionService<StoreFile>(
            storeFileOpenerThreadPool);

    int totalValidStoreFile = 0;
    for (int i = 0; i < files.length; i++) {
        // Skip directories.
        if (files[i].isDir()) {
            continue;
        }
        final Path p = files[i].getPath();
        // Check for empty hfile. Should never be the case but can happen
        // after data loss in hdfs for whatever reason (upgrade, etc.): HBASE-646
        // NOTE: that the HFileLink is just a name, so it's an empty file.
        if (!HFileLink.isHFileLink(p) && this.fs.getFileStatus(p).getLen() <= 0) {
            LOG.warn("Skipping " + p + " because its empty. HBASE-646 DATA LOSS?");
            continue;
        }

        // open each store file in parallel
        completionService.submit(new Callable<StoreFile>() {
            public StoreFile call() throws IOException {
                StoreFile storeFile = new StoreFile(fs, p, conf, cacheConf, family.getBloomFilterType(),
                        dataBlockEncoder, isAssistant());
                passSchemaMetricsTo(storeFile);
                storeFile.createReader();
                return storeFile;
            }
        });
        totalValidStoreFile++;
    }

    try {
        for (int i = 0; i < totalValidStoreFile; i++) {
            Future<StoreFile> future = completionService.take();
            StoreFile storeFile = future.get();
            long length = storeFile.getReader().length();
            this.storeSize += length;
            this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes();
            if (LOG.isDebugEnabled()) {
                LOG.debug("loaded " + storeFile.toStringDetailed());
            }
            results.add(storeFile);
        }
    } catch (InterruptedException e) {
        throw new IOException(e);
    } catch (ExecutionException e) {
        throw new IOException(e.getCause());
    } finally {
        storeFileOpenerThreadPool.shutdownNow();
    }

    return results;
}

From source file:org.apache.hadoop.hbase.regionserver.Store.java

/**
 * Close all the readers.
 *
 * We don't need to worry about subsequent requests because the HRegion holds
 * a write lock that will prevent any more reads or writes.
 *
 * @throws IOException
 */
ImmutableList<StoreFile> close() throws IOException {
    this.lock.writeLock().lock();
    try {
        ImmutableList<StoreFile> result = storefiles;

        // Clear so metrics don't find them.
        storefiles = ImmutableList.of();

        if (!result.isEmpty()) {
            // initialize the thread pool for closing store files in parallel.
            ThreadPoolExecutor storeFileCloserThreadPool = this.region.getStoreFileOpenAndCloseThreadPool(
                    "StoreFileCloserThread-" + this.family.getNameAsString());

            // close each store file in parallel
            CompletionService<Void> completionService = new ExecutorCompletionService<Void>(
                    storeFileCloserThreadPool);
            for (final StoreFile f : result) {
                completionService.submit(new Callable<Void>() {
                    public Void call() throws IOException {
                        f.closeReader(true);
                        return null;
                    }
                });
            }

            try {
                for (int i = 0; i < result.size(); i++) {
                    Future<Void> future = completionService.take();
                    future.get();
                }
            } catch (InterruptedException e) {
                throw new IOException(e);
            } catch (ExecutionException e) {
                throw new IOException(e.getCause());
            } finally {
                storeFileCloserThreadPool.shutdownNow();
            }
        }
        LOG.info("Closed " + this);
        return result;
    } finally {
        this.lock.writeLock().unlock();
    }
}

From source file:org.apache.hadoop.hbase.util.ModifyRegionUtils.java

/**
 * Create new set of regions on the specified file-system.
 * NOTE: you should add the regions to hbase:meta after this operation.
 *
 * @param conf {@link Configuration}
 * @param rootDir Root directory for HBase instance
 * @param tableDir table directory
 * @param hTableDescriptor description of the table
 * @param newRegions {@link HRegionInfo} that describes the regions to create
 * @param task {@link RegionFillTask} custom code to populate region after creation
 * @throws IOException
 */
public static List<HRegionInfo> createRegions(final Configuration conf, final Path rootDir, final Path tableDir,
        final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions, final RegionFillTask task)
        throws IOException {
    if (newRegions == null)
        return null;
    int regionNumber = newRegions.length;
    ThreadPoolExecutor exec = getRegionOpenAndInitThreadPool(conf,
            "RegionOpenAndInitThread-" + hTableDescriptor.getTableName(), regionNumber);
    try {
        return createRegions(exec, conf, rootDir, tableDir, hTableDescriptor, newRegions, task);
    } finally {
        exec.shutdownNow();
    }
}

From source file:org.apache.hadoop.hbase.wal.LogRecoveredEditsOutputSink.java

/**
 * Close all of the output streams.
 * @return the list of paths written.
 */
List<Path> close() throws IOException {
    Preconditions.checkState(!closeAndCleanCompleted);

    final List<Path> paths = new ArrayList<>();
    final List<IOException> thrown = Lists.newArrayList();
    ThreadPoolExecutor closeThreadPool = Threads.getBoundedCachedThreadPool(numThreads, 30L, TimeUnit.SECONDS,
            new ThreadFactory() {
                private int count = 1;

                @Override
                public Thread newThread(Runnable r) {
                    Thread t = new Thread(r, "split-log-closeStream-" + count++);
                    return t;
                }
            });
    CompletionService<Void> completionService = new ExecutorCompletionService<>(closeThreadPool);
    boolean progress_failed;
    try {
        progress_failed = executeCloseTask(completionService, thrown, paths);
    } catch (InterruptedException e) {
        IOException iie = new InterruptedIOException();
        iie.initCause(e);
        throw iie;
    } catch (ExecutionException e) {
        throw new IOException(e.getCause());
    } finally {
        closeThreadPool.shutdownNow();
    }
    if (!thrown.isEmpty()) {
        throw MultipleIOException.createIOException(thrown);
    }
    writersClosed = true;
    closeAndCleanCompleted = true;
    if (progress_failed) {
        return null;
    }
    return paths;
}

From source file:org.apache.jmeter.protocol.http.sampler.HTTPSamplerBaseClassifier.java

/**
 * Download the resources of an HTML page.
 *
 * @param res
 *            result of the initial request - must contain an HTML response
 * @param container
 *            for storing the results, if any
 * @param frameDepth
 *            Depth of this target in the frame structure. Used only to
 *            prevent infinite recursion.
 * @return res if no resources exist, otherwise the "Container" result with
 *         one subsample per request issued
 */
protected HTTPSampleResult downloadPageResources(HTTPSampleResult res, HTTPSampleResult container,
        int frameDepth) {
    Iterator<URL> urls = null;
    try {
        final byte[] responseData = res.getResponseData();
        if (responseData.length > 0) { // Bug 39205
            String parserName = getParserClass(res);
            if (parserName != null) {
                final HTMLParser parser = parserName.length() > 0
                        ? HTMLParser.getParser(parserName) // we have a parser name
                        : HTMLParser.getParser(); // we don't; use the default parser
                urls = parser.getEmbeddedResourceURLs(responseData, res.getURL(),
                        res.getDataEncodingWithDefault());
            }
        }
    } catch (HTMLParseException e) {
        // Don't break the world just because this failed:
        res.addSubResult(errorResult(e, new HTTPSampleResult(res)));
        setParentSampleSuccess(res, false);
    }

    // Iterate through the URLs and download each image:
    if (urls != null && urls.hasNext()) {
        if (container == null) {
            // TODO needed here because currently done on sample completion
            // in JMeterThread,
            // but that only catches top-level samples.
            res.setThreadName(Thread.currentThread().getName());
            container = new HTTPSampleResult(res);
            container.addRawSubResult(res);
        }
        res = container;

        // Get the URL matcher
        String re = getEmbeddedUrlRE();
        Perl5Matcher localMatcher = null;
        Pattern pattern = null;
        if (re.length() > 0) {
            try {
                pattern = JMeterUtils.getPattern(re);
                localMatcher = JMeterUtils.getMatcher(); // don't fetch unless pattern compiles
            } catch (MalformedCachePatternException e) {
                log.warn("Ignoring embedded URL match string: " + e.getMessage());
            }
        }

        // For concurrent get resources
        final List<Callable<AsynSamplerResultHolder>> liste = new ArrayList<Callable<AsynSamplerResultHolder>>();

        while (urls.hasNext()) {
            Object binURL = urls.next(); // See catch clause below
            try {
                URL url = (URL) binURL;
                if (url == null) {
                    log.warn("Null URL detected (should not happen)");
                } else {
                    String urlstr = url.toString();
                    String urlStrEnc = encodeSpaces(urlstr);
                    if (!urlstr.equals(urlStrEnc)) { // there were some spaces in the URL
                        try {
                            url = new URL(urlStrEnc);
                        } catch (MalformedURLException e) {
                            res.addSubResult(errorResult(new Exception(urlStrEnc + " is not a correct URI"),
                                    new HTTPSampleResult(res)));
                            setParentSampleSuccess(res, false);
                            continue;
                        }
                    }
                    // I don't think localMatcher can be null here, but
                    // check just in case
                    if (pattern != null && localMatcher != null && !localMatcher.matches(urlStrEnc, pattern)) {
                        continue; // we have a pattern and the URL does not
                                  // match, so skip it
                    }

                    if (isConcurrentDwn()) {
                        // if concurrent download emb. resources, add to a
                        // list for async gets later
                        liste.add(new ASyncSample(url, HTTPConstants.GET, false, frameDepth + 1,
                                getCookieManager(), this));
                    } else {
                        // default: serial download embedded resources
                        HTTPSampleResult binRes = sample(url, HTTPConstants.GET, false, frameDepth + 1);
                        res.addSubResult(binRes);
                        setParentSampleSuccess(res, res.isSuccessful() && binRes.isSuccessful());
                    }

                }
            } catch (ClassCastException e) { // TODO can this happen?
                res.addSubResult(errorResult(new Exception(binURL + " is not a correct URI"),
                        new HTTPSampleResult(res)));
                setParentSampleSuccess(res, false);
                continue;
            }
        }
        // IF for download concurrent embedded resources
        if (isConcurrentDwn()) {
            int poolSize = CONCURRENT_POOL_SIZE; // init with default value
            try {
                poolSize = Integer.parseInt(getConcurrentPool());
            } catch (NumberFormatException nfe) {
                log.warn("Concurrent download resources selected, "// $NON-NLS-1$
                        + "but pool size value is bad. Use default value");// $NON-NLS-1$
            }
            // Thread pool Executor to get resources
            // use a LinkedBlockingQueue; note: the max pool size has no effect with an unbounded queue
            final ThreadPoolExecutor exec = new ThreadPoolExecutor(poolSize, poolSize, KEEPALIVETIME,
                    TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new ThreadFactory() {

                        public Thread newThread(final Runnable r) {
                            Thread t = new CleanerThread(new Runnable() {

                                public void run() {
                                    try {
                                        r.run();
                                    } finally {
                                        ((CleanerThread) Thread.currentThread()).notifyThreadEnd();
                                    }
                                }
                            });
                            return t;
                        }
                    });

            boolean tasksCompleted = false;
            try {
                // sample all resources with threadpool
                final List<Future<AsynSamplerResultHolder>> retExec = exec.invokeAll(liste);
                // call normal shutdown (wait ending all tasks)
                exec.shutdown();
                // put a timeout if tasks couldn't terminate
                exec.awaitTermination(AWAIT_TERMINATION_TIMEOUT, TimeUnit.SECONDS);
                CookieManager cookieManager = getCookieManager();
                // add result to main sampleResult
                for (Future<AsynSamplerResultHolder> future : retExec) {
                    AsynSamplerResultHolder binRes;
                    try {
                        binRes = future.get(1, TimeUnit.MILLISECONDS);
                        if (cookieManager != null) {
                            CollectionProperty cookies = binRes.getCookies();
                            PropertyIterator iter = cookies.iterator();
                            while (iter.hasNext()) {
                                Cookie cookie = (Cookie) iter.next().getObjectValue();
                                cookieManager.add(cookie);
                            }
                        }
                        res.addSubResult(binRes.getResult());
                        setParentSampleSuccess(res, res.isSuccessful() && binRes.getResult().isSuccessful());
                    } catch (TimeoutException e) {
                        errorResult(e, res);
                    }
                }
                tasksCompleted = exec.awaitTermination(1, TimeUnit.MILLISECONDS); // did all the tasks finish?
            } catch (InterruptedException ie) {
                log.warn("Interruped fetching embedded resources", ie); // $NON-NLS-1$
            } catch (ExecutionException ee) {
                log.warn("Execution issue when fetching embedded resources", ee); // $NON-NLS-1$
            } finally {
                if (!tasksCompleted) {
                    exec.shutdownNow(); // kill any remaining tasks
                }
            }
        }
    }
    return res;
}

From source file:org.batoo.jpa.benchmark.BenchmarkTest.java

private void waitUntilFinish(ThreadPoolExecutor executor) {
    final BlockingQueue<Runnable> workQueue = executor.getQueue();
    try {
        final long started = System.currentTimeMillis();

        int lastToGo = workQueue.size();

        final int total = workQueue.size();
        int performed = 0;

        int maxStatusMessageLength = 0;
        while (!workQueue.isEmpty()) {
            final float doneNow = lastToGo - workQueue.size();
            performed += doneNow;

            final float elapsed = (System.currentTimeMillis() - started) / 1000;

            lastToGo = workQueue.size();

            if (performed > 0) {
                final float throughput = performed / elapsed;
                final float eta = ((elapsed * total) / performed) - elapsed;

                final float percentDone = (100 * (float) lastToGo) / total;
                final int gaugeDone = (int) ((100 - percentDone) / 5);
                final String gauge = "[" + StringUtils.repeat("", gaugeDone)
                        + StringUtils.repeat("-", 20 - gaugeDone) + "]";

                final String sampling = this.profilingQueue.size() > 0
                        ? MessageFormat.format(" | Samples {0}", this.profilingQueue.size())
                        : "";

                if ((maxStatusMessageLength != 0) || (eta > 5)) {
                    String statusMessage = MessageFormat.format(
                            "\r{4} %{5,number,00.00} | ETA {2} | LAST TPS {0} ops / sec | AVG TPS {1,number,#.0} | LEFT {3}{6}", //
                            doneNow, throughput, this.etaToString((int) eta), workQueue.size(), gauge,
                            percentDone, sampling);

                    maxStatusMessageLength = Math.max(statusMessage.length(), maxStatusMessageLength);
                    statusMessage = StringUtils.leftPad(statusMessage,
                            maxStatusMessageLength - statusMessage.length());
                    System.out.print(statusMessage);
                }
            }

            if (elapsed > BenchmarkTest.MAX_TEST_TIME) {
                throw new IllegalStateException("Max allowed test time exceeded");
            }

            Thread.sleep(1000);
        }

        if (maxStatusMessageLength > 0) {
            System.out.print("\r" + StringUtils.repeat(" ", maxStatusMessageLength) + "\r");
        }

        executor.shutdown();

        if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
            BenchmarkTest.LOG.warn("Forcefully shutting down the thread pool");

            executor.shutdownNow();
        }

        BenchmarkTest.LOG.warn("Iterations completed");
    } catch (final InterruptedException e) {
        throw new RuntimeException(e);
    }
}