Example usage for java.util.concurrent ThreadPoolExecutor awaitTermination

List of usage examples for java.util.concurrent ThreadPoolExecutor awaitTermination

Introduction

On this page you can find example usage for java.util.concurrent ThreadPoolExecutor awaitTermination, drawn from open-source projects.

Prototype

public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException 
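
awaitTermination blocks until all tasks have completed execution after a shutdown request, until the timeout elapses, or until the current thread is interrupted, whichever happens first. It returns true if the executor terminated and false if the timeout elapsed before termination. Below is a minimal, self-contained sketch of the usual shutdown-then-await pattern; the class name and tasks are illustrative, not taken from the examples that follow.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationExample {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 2, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>());
        for (int i = 0; i < 4; i++) {
            pool.execute(() -> System.out.println(Thread.currentThread().getName() + " ran a task"));
        }
        // Stop accepting new tasks; already-submitted tasks keep running.
        pool.shutdown();
        // Block until the pool terminates or the timeout elapses.
        if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
            // Timed out: interrupt running tasks and discard queued ones.
            pool.shutdownNow();
        }
    }
}

Most of the examples below follow some variant of this pattern: shutdown(), then awaitTermination(), then shutdownNow() if the pool is still running when the timeout expires.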

Usage

From source file:org.apache.accumulo.core.file.rfile.MultiThreadedRFileTest.java

@SuppressFBWarnings(value = "INFORMATION_EXPOSURE_THROUGH_AN_ERROR_MESSAGE", justification = "information put into error message is safe and used for testing")
@Test
public void testMultipleReaders() throws IOException {
    final List<Throwable> threadExceptions = Collections.synchronizedList(new ArrayList<Throwable>());
    Map<String, MutableInt> messages = new HashMap<>();
    Map<String, String> stackTrace = new HashMap<>();

    final TestRFile trfBase = new TestRFile(conf);

    writeData(trfBase);

    trfBase.openReader();

    try {

        validate(trfBase);

        final TestRFile trfBaseCopy = trfBase.deepCopy();

        validate(trfBaseCopy);

        // now start up multiple RFile deepcopies
        int maxThreads = 10;
        String name = "MultiThreadedRFileTestThread";
        ThreadPoolExecutor pool = new ThreadPoolExecutor(maxThreads + 1, maxThreads + 1, 5 * 60,
                TimeUnit.SECONDS, new LinkedBlockingQueue<>(), new NamingThreadFactory(name));
        pool.allowCoreThreadTimeOut(true);
        try {
            Runnable runnable = () -> {
                try {
                    TestRFile trf = trfBase;
                    synchronized (trfBaseCopy) {
                        trf = trfBaseCopy.deepCopy();
                    }
                    validate(trf);
                } catch (Throwable t) {
                    threadExceptions.add(t);
                }
            };
            for (int i = 0; i < maxThreads; i++) {
                pool.submit(runnable);
            }
        } finally {
            pool.shutdown();
            try {
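                // Wait (effectively forever) for all submitted validation tasks to finish.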
                pool.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }

        for (Throwable t : threadExceptions) {
            String msg = t.getClass() + " : " + t.getMessage();
            if (!messages.containsKey(msg)) {
                messages.put(msg, new MutableInt(1));
            } else {
                messages.get(msg).increment();
            }
            StringWriter string = new StringWriter();
            PrintWriter writer = new PrintWriter(string);
            t.printStackTrace(writer);
            writer.flush();
            stackTrace.put(msg, string.getBuffer().toString());
        }
    } finally {
        trfBase.closeReader();
        trfBase.close();
    }

    for (String message : messages.keySet()) {
        LOG.error(messages.get(message) + ": " + message);
        LOG.error(stackTrace.get(message));
    }

    assertTrue(threadExceptions.isEmpty());
}

From source file:org.apache.activemq.JmsConnectionStartStopTest.java

public void testConcurrentSessionCreateWithStart() throws Exception {
    ThreadPoolExecutor executor = new ThreadPoolExecutor(50, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>());
    final Vector<Throwable> exceptions = new Vector<Throwable>();
    final Random rand = new Random();
    Runnable createSessionTask = new Runnable() {
        @Override
        public void run() {
            try {
                TimeUnit.MILLISECONDS.sleep(rand.nextInt(10));
                stoppedConnection.createSession(false, Session.AUTO_ACKNOWLEDGE);
            } catch (Exception e) {
                exceptions.add(e);
            }
        }
    };

    Runnable startStopTask = new Runnable() {
        @Override
        public void run() {
            try {
                TimeUnit.MILLISECONDS.sleep(rand.nextInt(10));
                stoppedConnection.start();
                stoppedConnection.stop();
            } catch (Exception e) {
                exceptions.add(e);
            }
        }
    };

    for (int i = 0; i < 1000; i++) {
        executor.execute(createSessionTask);
        executor.execute(startStopTask);
    }

    executor.shutdown();
    assertTrue("executor terminated", executor.awaitTermination(30, TimeUnit.SECONDS));
    assertTrue("no exceptions: " + exceptions, exceptions.isEmpty());
}

From source file:org.apache.cassandra.service.StorageService.java

public synchronized void initServer() throws IOException, org.apache.cassandra.config.ConfigurationException {
    logger_.info("Cassandra version: " + FBUtilities.getReleaseVersionString());
    logger_.info("Thrift API version: " + Constants.VERSION);

    if (initialized) {
        if (isClientMode)
            throw new UnsupportedOperationException("StorageService does not support switching modes.");
        return;
    }
    initialized = true;
    isClientMode = false;

    if (Boolean.parseBoolean(System.getProperty("cassandra.load_ring_state", "true"))) {
        logger_.info("Loading persisted ring state");
        for (Map.Entry<Token, InetAddress> entry : SystemTable.loadTokens().entrySet()) {
            tokenMetadata_.updateNormalToken(entry.getKey(), entry.getValue());
            Gossiper.instance.addSavedEndpoint(entry.getValue());
        }
    }

    // daemon threads, like our executors, continue to run while shutdown hooks are invoked
    Thread drainOnShutdown = new Thread(new WrappedRunnable() {
        public void runMayThrow() throws ExecutionException, InterruptedException, IOException {
            ThreadPoolExecutor mutationStage = StageManager.getStage(Stage.MUTATION);
            if (!mutationStage.isShutdown()) {
                mutationStage.shutdown();
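                // Give in-flight mutations up to one second to drain before stopping the commit log.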
                mutationStage.awaitTermination(1, TimeUnit.SECONDS);
                CommitLog.instance.shutdownBlocking();
            }
        }
    });
    Runtime.getRuntime().addShutdownHook(drainOnShutdown);

    if (Boolean.parseBoolean(System.getProperty("cassandra.join_ring", "true"))) {
        joinTokenRing();
    } else {
        logger_.info(
                "Not joining ring as requested. Use JMX (StorageService->joinRing()) to initiate ring joining");
    }
}

From source file:org.apache.hadoop.hbase.master.procedure.SplitTableRegionProcedure.java

/**
 * Create Split directory
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private Pair<Integer, Integer> splitStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs)
        throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Configuration conf = env.getMasterConfiguration();

    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion and finally checks for any exception
    //
    // Note: splitStoreFiles creates daughter region dirs under the parent splits dir
    // Nothing to unroll here if failure -- re-run createSplitsDir will
    // clean this up.
    int nbFiles = 0;
    for (String family : regionFs.getFamilies()) {
        Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
        if (storeFiles != null) {
            nbFiles += storeFiles.size();
        }
    }
    if (nbFiles == 0) {
        // no file needs to be split.
        return new Pair<Integer, Integer>(0, 0);
    }
    // Default max #threads to use is the smaller of table's configured number of blocking store
    // files or the available number of logical cores.
    int defMaxThreads = Math.min(
            conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT),
            Runtime.getRuntime().availableProcessors());
    // Max #threads is the smaller of the number of storefiles or the default max determined above.
    int maxThreads = Math.min(conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, defMaxThreads), nbFiles);
    LOG.info("Preparing to split " + nbFiles + " storefiles for region " + parentHRI + " using " + maxThreads
            + " threads");
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxThreads,
            Threads.getNamedThreadFactory("StoreFileSplitter-%1$d"));
    List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);

    // Split each store file.
    final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
    for (String family : regionFs.getFamilies()) {
        final HColumnDescriptor hcd = htd.getFamily(family.getBytes());
        final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
        if (storeFiles != null && storeFiles.size() > 0) {
            final CacheConfig cacheConf = new CacheConfig(conf, hcd);
            for (StoreFileInfo storeFileInfo : storeFiles) {
                StoreFileSplitter sfs = new StoreFileSplitter(regionFs, family.getBytes(), new StoreFile(
                        mfs.getFileSystem(), storeFileInfo, conf, cacheConf, hcd.getBloomFilterType()));
                futures.add(threadPool.submit(sfs));
            }
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    long fileSplitTimeout = conf.getLong("hbase.master.fileSplitTimeout", 30000);
    try {
        boolean stillRunning = !threadPool.awaitTermination(fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    int daughterA = 0;
    int daughterB = 0;
    // Look for any exception
    for (Future<Pair<Path, Path>> future : futures) {
        try {
            Pair<Path, Path> p = future.get();
            daughterA += p.getFirst() != null ? 1 : 0;
            daughterB += p.getSecond() != null ? 1 : 0;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Split storefiles for region " + parentHRI + " Daughter A: " + daughterA
                + " storefiles, Daughter B: " + daughterB + " storefiles.");
    }
    return new Pair<Integer, Integer>(daughterA, daughterB);
}

From source file:org.apache.hadoop.hbase.PerformanceEvaluation2.java

private void doMultipleClients(final Test cmd, final List<TableSplit> splits, final int nthread)
        throws IOException {

    BlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(nthread);
    final ThreadPoolExecutor services = new ThreadPoolExecutor(nthread, nthread, 10, TimeUnit.SECONDS, queue,
            new ThreadPoolExecutor.CallerRunsPolicy());
    for (final TableSplit ts : splits) {
        services.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    long startTime = System.currentTimeMillis();
                    runOneClient(cmd, ts);
                    long elapsedTime = System.currentTimeMillis() - startTime;

                    LOG.info("Finished " + Thread.currentThread().getName() + " in " + elapsedTime + "ms for "
                            + cmd.rows.get() + " rows and " + cmd.kvs.get() + " cols");

                    totalRowCount.add(cmd.rows.get());
                    totalKVCount.add(cmd.kvs.get());
                } catch (Exception e) {
                    e.printStackTrace();
                    throw new RuntimeException(e);
                }
            }
        });
    }

    services.shutdown();
    try {
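        // Wait effectively indefinitely for every client thread to finish.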
        services.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}

From source file:org.apache.hadoop.hbase.regionserver.CompactSplit.java

private void waitFor(ThreadPoolExecutor t, String name) {
    boolean done = false;
    while (!done) {
        try {
            done = t.awaitTermination(60, TimeUnit.SECONDS);
            LOG.info("Waiting for " + name + " to finish...");
            if (!done) {
                t.shutdownNow();
            }
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted waiting for " + name + " to finish...");
        }
    }
}

From source file:org.apache.hadoop.hbase.regionserver.IndexSplitTransaction.java

private void splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit) throws IOException {
    if (hstoreFilesToSplit == null) {
        // Could be null because close didn't succeed -- for now consider it fatal
        throw new IOException("Close returned empty list of StoreFiles");
    }
    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion and finally checks for any exception
    int nbFiles = hstoreFilesToSplit.size();
    if (nbFiles == 0) {
        // no file needs to be split.
        return;
    }
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("StoreFileSplitter-%1$d");
    ThreadFactory factory = builder.build();
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory);
    List<Future<Void>> futures = new ArrayList<Future<Void>>(nbFiles);

    // Split each store file.
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        for (StoreFile sf : entry.getValue()) {
            StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
            futures.add(threadPool.submit(sfs));
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    try {
        boolean stillRunning = !threadPool.awaitTermination(this.fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    // Look for any exception
    for (Future<Void> future : futures) {
        try {
            future.get();
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }
}

From source file:org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.java

/**
 * Creates reference files for the top and bottom halves of the split.
 * @param hstoreFilesToSplit map of store files to create half file references for.
 * @return the number of reference files that were created.
 * @throws IOException
 */
private Pair<Integer, Integer> splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit)
        throws IOException {
    if (hstoreFilesToSplit == null) {
        // Could be null because close didn't succeed -- for now consider it fatal
        throw new IOException("Close returned empty list of StoreFiles");
    }
    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion and finally checks for any exception
    int nbFiles = 0;
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        nbFiles += entry.getValue().size();
    }
    if (nbFiles == 0) {
        // no file needs to be split.
        return new Pair<Integer, Integer>(0, 0);
    }
    // Default max #threads to use is the smaller of table's configured number of blocking store
    // files or the available number of logical cores.
    int defMaxThreads = Math.min(
            parent.conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT),
            Runtime.getRuntime().availableProcessors());
    // Max #threads is the smaller of the number of storefiles or the default max determined above.
    int maxThreads = Math.min(parent.conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, defMaxThreads), nbFiles);
    LOG.info("Preparing to split " + nbFiles + " storefiles for region " + this.parent + " using " + maxThreads
            + " threads");
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("StoreFileSplitter-%1$d");
    ThreadFactory factory = builder.build();
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxThreads, factory);
    List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);

    // Split each store file.
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        for (StoreFile sf : entry.getValue()) {
            StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
            futures.add(threadPool.submit(sfs));
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    try {
        boolean stillRunning = !threadPool.awaitTermination(this.fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    int created_a = 0;
    int created_b = 0;
    // Look for any exception
    for (Future<Pair<Path, Path>> future : futures) {
        try {
            Pair<Path, Path> p = future.get();
            created_a += p.getFirst() != null ? 1 : 0;
            created_b += p.getSecond() != null ? 1 : 0;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Split storefiles for region " + this.parent + " Daughter A: " + created_a
                + " storefiles, Daughter B: " + created_b + " storefiles.");
    }
    return new Pair<Integer, Integer>(created_a, created_b);
}

From source file:org.apache.hadoop.hbase.util.FSUtils.java

/**
 * This function is to scan the root path of the file system to get either the
 * mapping between the region name and its best locality region server or the
 * degree of locality of each region on each of the servers having at least
 * one block of that region. The output map parameters are both optional.
 *
 * @param conf
 *          the configuration to use
 * @param desiredTable
 *          the table you wish to scan locality for
 * @param threadPoolSize
 *          the thread pool size to use
 * @param regionToBestLocalityRSMapping
 *          the map into which to put the best locality mapping or null
 * @param regionDegreeLocalityMapping
 *          the map into which to put the locality degree mapping or null,
 *          must be a thread-safe implementation
 * @throws IOException
 *           in case of file system errors or interrupts
 */
private static void getRegionLocalityMappingFromFS(final Configuration conf, final String desiredTable,
        int threadPoolSize, Map<String, String> regionToBestLocalityRSMapping,
        Map<String, Map<String, Float>> regionDegreeLocalityMapping) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path rootPath = FSUtils.getRootDir(conf);
    long startTime = EnvironmentEdgeManager.currentTimeMillis();
    Path queryPath;
    // The table files are in ${hbase.rootdir}/data/<namespace>/<table>/*
    if (null == desiredTable) {
        queryPath = new Path(new Path(rootPath, HConstants.BASE_NAMESPACE_DIR).toString() + "/*/*/*/");
    } else {
        queryPath = new Path(FSUtils.getTableDir(rootPath, TableName.valueOf(desiredTable)).toString() + "/*/");
    }

    // reject all paths that are not appropriate
    PathFilter pathFilter = new PathFilter() {
        @Override
        public boolean accept(Path path) {
            // this is the region name; it may get some noise data
            if (null == path) {
                return false;
            }

            // no parent?
            Path parent = path.getParent();
            if (null == parent) {
                return false;
            }

            String regionName = path.getName();
            if (null == regionName) {
                return false;
            }

            if (!regionName.toLowerCase().matches("[0-9a-f]+")) {
                return false;
            }
            return true;
        }
    };

    FileStatus[] statusList = fs.globStatus(queryPath, pathFilter);

    if (null == statusList) {
        return;
    } else {
        LOG.debug("Query Path: " + queryPath + " ; # list of files: " + statusList.length);
    }

    // lower the number of threads in case we have very few expected regions
    threadPoolSize = Math.min(threadPoolSize, statusList.length);

    // run in multiple threads
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadPoolSize, threadPoolSize, 60, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(statusList.length));
    try {
        // ignore all file status items that are not of interest
        for (FileStatus regionStatus : statusList) {
            if (null == regionStatus) {
                continue;
            }

            if (!regionStatus.isDirectory()) {
                continue;
            }

            Path regionPath = regionStatus.getPath();
            if (null == regionPath) {
                continue;
            }

            tpe.execute(new FSRegionScanner(fs, regionPath, regionToBestLocalityRSMapping,
                    regionDegreeLocalityMapping));
        }
    } finally {
        tpe.shutdown();
        int threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 60 * 1000);
        try {
            // here we wait until TPE terminates, which is either naturally or by
            // exceptions in the execution of the threads
            while (!tpe.awaitTermination(threadWakeFrequency, TimeUnit.MILLISECONDS)) {
                // printing out rough estimate, so as to not introduce
                // AtomicInteger
                LOG.info("Locality checking is underway: { Scanned Regions : " + tpe.getCompletedTaskCount()
                        + "/" + tpe.getTaskCount() + " }");
            }
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
    }

    long overhead = EnvironmentEdgeManager.currentTimeMillis() - startTime;
    String overheadMsg = "Scan DFS for locality info takes " + overhead + " ms";

    LOG.info(overheadMsg);
}

From source file:org.apache.hadoop.util.AsyncDiskService.java

/**
 * Wait for the termination of the thread pools.
 *
 * @param milliseconds  The number of milliseconds to wait
 * @return true if all thread pools terminated within the given time limit
 * @throws InterruptedException 
 */
public synchronized boolean awaitTermination(long milliseconds) throws InterruptedException {

    long end = Time.now() + milliseconds;
    for (Map.Entry<String, ThreadPoolExecutor> e : executors.entrySet()) {
        ThreadPoolExecutor executor = e.getValue();
        if (!executor.awaitTermination(Math.max(end - Time.now(), 0), TimeUnit.MILLISECONDS)) {
            LOG.warn("AsyncDiskService awaitTermination timeout.");
            return false;
        }
    }
    LOG.info("All AsyncDiskService threads are terminated.");
    return true;
}