Example usage for java.lang Thread setName

List of usage examples for java.lang Thread setName

Introduction

In this page you can find the example usage for java.lang Thread setName.

Prototype

public final synchronized void setName(String name) 

Source Link

Document

Changes the name of this thread to be equal to the argument name.

Usage

From source file:org.dasein.cloud.skeleton.RESTMethod.java

/**
 * Issues an asynchronous GET for the given operation and resource, returning an
 * {@link APIResponse} that a background worker thread will populate.
 *
 * The worker runs the real request inside an APITrace span; any failure (including
 * unchecked throwables) is delivered to the caller via {@code response.receive(...)}.
 * The provider is held before the worker starts and released when it finishes.
 *
 * @param operation  API operation name; also used as the worker thread's name
 * @param resource   resource path to fetch
 * @param id         optional resource identifier
 * @param parameters optional query parameters
 * @return a response object that is filled in asynchronously
 */
public @Nonnull APIResponse get(final @Nonnull String operation, final @Nonnull String resource,
        final @Nullable String id, final @Nullable NameValuePair... parameters) {
    final APIResponse response = new APIResponse();

    Runnable task = new Runnable() {
        @Override
        public void run() {
            try {
                APITrace.begin(provider, operation);
                try {
                    try {
                        get(response, null, 1, resource, id, parameters);
                    } catch (Throwable failure) {
                        // Surface any failure to the caller through the response object.
                        response.receive(new CloudException(failure));
                    }
                } finally {
                    APITrace.end();
                }
            } finally {
                // Balance the provider.hold() performed before start().
                provider.release();
            }
        }
    };

    Thread worker = new Thread(task);
    worker.setName(operation);
    worker.setDaemon(true);

    provider.hold();
    worker.start();
    return response;
}

From source file:org.apache.stratos.autoscaler.internal.AutoscalerServiceComponent.java

/**
 * OSGi activation callback: wires up the autoscaler's executor services and kicks off
 * the rest of the activation on a background thread so the OSGi thread is not blocked.
 *
 * @param componentContext OSGi component context (not used beyond activation)
 * @throws Exception declared by the DS contract; errors are actually caught and logged
 */
protected void activate(ComponentContext componentContext) throws Exception {
    if (log.isDebugEnabled()) {
        log.debug("Activating AutoscalerServiceComponent...");
    }
    try {
        // Thread pool sizes come from the components configuration file, with defaults.
        XMLConfiguration conf = ConfUtil.getInstance(AutoscalerConstants.COMPONENTS_CONFIG).getConfiguration();
        int threadPoolSize = conf.getInt(AutoscalerConstants.THREAD_POOL_SIZE_KEY,
                AutoscalerConstants.AUTOSCALER_THREAD_POOL_SIZE);
        executorService = StratosThreadPool.getExecutorService(AutoscalerConstants.AUTOSCALER_THREAD_POOL_ID,
                threadPoolSize);

        int schedulerThreadPoolSize = conf.getInt(AutoscalerConstants.SCHEDULER_THREAD_POOL_SIZE_KEY,
                AutoscalerConstants.AUTOSCALER_SCHEDULER_THREAD_POOL_SIZE);
        scheduler = StratosThreadPool.getScheduledExecutorService(AutoscalerConstants.AUTOSCALER_SCHEDULER_ID,
                schedulerThreadPoolSize);

        Runnable autoscalerActivator = new Runnable() {
            @Override
            public void run() {
                try {
                    ComponentStartUpSynchronizer componentStartUpSynchronizer = ServiceReferenceHolder
                            .getInstance().getComponentStartUpSynchronizer();
                    // Wait for cloud controller component to be activated
                    componentStartUpSynchronizer.waitForComponentActivation(Component.Autoscaler,
                            Component.CloudController);

                    ServiceReferenceHolder.getInstance().setExecutorService(executorService);
                    CartridgeConfigFileReader.readProperties();
                    if (AutoscalerContext.getInstance().isClustered()) {
                        Thread coordinatorElectorThread = new Thread() {
                            @Override
                            public void run() {
                                // Blocks until this member acquires the cluster-wide coordinator lock.
                                // NOTE(review): the Hazelcast lock is never unlocked here — presumably
                                // it is held for the member's lifetime; confirm this is intentional.
                                ServiceReferenceHolder.getInstance().getHazelcastInstance()
                                        .getLock(AUTOSCALER_COORDINATOR_LOCK).lock();

                                log.info("Elected this member ["
                                        + ServiceReferenceHolder.getInstance().getHazelcastInstance()
                                                .getCluster().getLocalMember().getUuid()
                                        + "] " + "as the autoscaler coordinator for the cluster");

                                AutoscalerContext.getInstance().setCoordinator(true);
                                try {
                                    executeCoordinatorTasks();
                                } catch (Exception e) {
                                    log.error("Error in activating the autoscaler component ", e);
                                }
                            }
                        };
                        coordinatorElectorThread.setName("Autoscaler coordinator elector thread");
                        // NOTE(review): the Thread object is submitted to the executor as a plain
                        // Runnable, so start() is never called and the name set above is not visible
                        // on the executing pool thread — consider start() or a ThreadFactory name.
                        executorService.submit(coordinatorElectorThread);
                    } else {
                        executeCoordinatorTasks();
                    }
                    // Report readiness only after the AutoscalerService axis service is up.
                    componentStartUpSynchronizer.waitForAxisServiceActivation(Component.Autoscaler,
                            "AutoscalerService");
                    componentStartUpSynchronizer.setComponentStatus(Component.Autoscaler, true);
                    if (log.isInfoEnabled()) {
                        log.info("Autoscaler service component activated");
                    }
                } catch (Exception e) {
                    log.error("Error in activating autoscaler service component ", e);
                }
            }
        };
        Thread autoscalerActivatorThread = new Thread(autoscalerActivator);
        autoscalerActivatorThread.start();
    } catch (Exception e) {
        log.error("Error in activating autoscaler service component ", e);
    }
}

From source file:org.apache.nifi.remote.util.SiteToSiteRestApiClient.java

/**
 * Creates a site-to-site REST API client.
 *
 * Also sets up a single-threaded scheduler for TTL-extension tasks whose worker
 * threads are daemons named with a " TTLExtend" suffix for easier identification
 * in thread dumps.
 *
 * @param sslContext    SSL context for secure connections (may be null)
 * @param proxy         HTTP proxy settings (may be null)
 * @param eventReporter reporter used to surface events
 */
public SiteToSiteRestApiClient(final SSLContext sslContext, final HttpProxy proxy,
        final EventReporter eventReporter) {
    this.sslContext = sslContext;
    this.proxy = proxy;
    this.eventReporter = eventReporter;

    final ThreadFactory defaultFactory = Executors.defaultThreadFactory();
    ttlExtendTaskExecutor = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        @Override
        public Thread newThread(final Runnable r) {
            // Decorate the default factory's thread: rename and mark as daemon so it
            // never blocks JVM shutdown.
            final Thread worker = defaultFactory.newThread(r);
            worker.setName(Thread.currentThread().getName() + " TTLExtend");
            worker.setDaemon(true);
            return worker;
        }
    });
}

From source file:com.saysth.commons.quartz.SchedulerFactoryBean.java

/**
 * Start the Quartz Scheduler, respecting the "startupDelay" setting.
 * /*from   www . j  ava 2s .  c o  m*/
 * @param scheduler
 *            the Scheduler to start
 * @param startupDelay
 *            the number of seconds to wait before starting the Scheduler
 *            asynchronously
 */
protected void startScheduler(final Scheduler scheduler, final int startupDelay) throws SchedulerException {
    if (startupDelay <= 0) {
        logger.info("Starting Quartz Scheduler now");
        scheduler.start();
    } else {
        if (logger.isInfoEnabled()) {
            logger.info("Will start Quartz Scheduler [" + scheduler.getSchedulerName() + "] in " + startupDelay
                    + " seconds");
        }
        Thread schedulerThread = new Thread() {
            @Override
            public void run() {
                try {
                    Thread.sleep(startupDelay * 1000);
                } catch (InterruptedException ex) {
                    // simply proceed
                }
                if (logger.isInfoEnabled()) {
                    logger.info("Starting Quartz Scheduler now, after delay of " + startupDelay + " seconds");
                }
                try {
                    scheduler.start();
                } catch (SchedulerException ex) {
                    throw new SchedulingException("Could not start Quartz Scheduler after delay", ex);
                }
            }
        };
        schedulerThread.setName("Quartz Scheduler [" + scheduler.getSchedulerName() + "]");
        schedulerThread.setDaemon(true);
        schedulerThread.start();
    }
}

From source file:io.webfolder.cdp.ChromiumDownloader.java

/**
 * Downloads the Chromium archive for the given version (unless already cached in the
 * system temp directory), extracts it under the destination root and returns the path
 * of the browser executable.
 *
 * @param version Chromium revision to download
 * @return path of the Chromium executable
 * @throws CdpException on an unsupported OS, a non-200 HTTP response, a missing
 *         executable after extraction, or any underlying I/O failure
 */
public Path download(ChromiumVersion version) {
    final Path destinationRoot = getChromiumPath(version);
    final Path executable = getExecutable(version);

    String url;
    if (WINDOWS) {
        url = format("%s/Win_x64/%d/chrome-win.zip", DOWNLOAD_HOST, version.getRevision());
    } else if (LINUX) {
        url = format("%s/Linux_x64/%d/chrome-linux.zip", DOWNLOAD_HOST, version.getRevision());
    } else if (MAC) {
        url = format("%s/Mac/%d/chrome-mac.zip", DOWNLOAD_HOST, version.getRevision());
    } else {
        throw new CdpException("Unsupported OS found - " + OS);
    }

    try {
        // HEAD request: validate the URL and learn the expected archive size.
        URL u = new URL(url);
        HttpURLConnection conn = (HttpURLConnection) u.openConnection();
        conn.setRequestMethod("HEAD");
        conn.setConnectTimeout(TIMEOUT);
        conn.setReadTimeout(TIMEOUT);
        if (conn.getResponseCode() != 200) {
            throw new CdpException(conn.getResponseCode() + " - " + conn.getResponseMessage());
        }
        final long contentLength = conn.getHeaderFieldLong("x-goog-stored-content-length", 0);
        String fileName = url.substring(url.lastIndexOf("/") + 1, url.lastIndexOf(".")) + "-r"
                + version.getRevision() + ".zip";
        Path archive = get(getProperty("java.io.tmpdir")).resolve(fileName);
        // Discard a stale or partial download whose size differs from the server's.
        if (exists(archive) && contentLength != size(archive)) {
            delete(archive);
        }
        if (!exists(archive)) {
            logger.info("Downloading Chromium [revision=" + version.getRevision() + "] 0%");
            u = new URL(url);
            conn = (HttpURLConnection) u.openConnection();
            conn.setConnectTimeout(TIMEOUT);
            conn.setReadTimeout(TIMEOUT);
            // Bug fix: validate the GET connection's response code. The original code
            // re-checked the already-consumed HEAD connection *before* opening the GET,
            // so a failing GET was never detected here.
            if (conn.getResponseCode() != 200) {
                throw new CdpException(conn.getResponseCode() + " - " + conn.getResponseMessage());
            }
            Thread thread = null;
            AtomicBoolean halt = new AtomicBoolean(false);
            Runnable progress = () -> {
                try {
                    long fileSize = size(archive);
                    // Bug fix: guard against divide-by-zero when the
                    // x-goog-stored-content-length header is absent (default 0).
                    if (contentLength > 0) {
                        logger.info("Downloading Chromium [revision={}] {}%", version.getRevision(),
                                round((fileSize * 100L) / contentLength));
                    }
                } catch (IOException e) {
                    // ignore: progress reporting is best effort
                }
            };
            try (InputStream is = conn.getInputStream()) {
                logger.info("Download location: " + archive.toString());
                // Reporter thread: logs progress once a second until halted.
                thread = new Thread(() -> {
                    while (true) {
                        try {
                            if (halt.get()) {
                                break;
                            }
                            progress.run();
                            sleep(1000);
                        } catch (Throwable e) {
                            // ignore: the reporter must never abort the download
                        }
                    }
                });
                thread.setName("cdp4j");
                thread.setDaemon(true);
                thread.start();
                // Bug fix: copy from the stream managed by try-with-resources instead of
                // calling conn.getInputStream() a second time.
                copy(is, archive);
            } finally {
                if (thread != null) {
                    halt.set(true); // stop the reporter before the final progress line
                    progress.run();
                }
            }
        }
        logger.info("Extracting to: " + destinationRoot.toString());
        if (exists(archive)) {
            createDirectories(destinationRoot);
            unpack(archive.toFile(), destinationRoot.toFile());
        }

        if (!exists(executable) || !isExecutable(executable)) {
            throw new CdpException("Chromium executable not found: " + executable.toString());
        }

        // On POSIX systems make sure owner and group can execute the browser binary.
        if (!WINDOWS) {
            Set<PosixFilePermission> permissions = getPosixFilePermissions(executable);
            if (!permissions.contains(OWNER_EXECUTE)) {
                permissions.add(OWNER_EXECUTE);
                setPosixFilePermissions(executable, permissions);
            }
            if (!permissions.contains(GROUP_EXECUTE)) {
                permissions.add(GROUP_EXECUTE);
                setPosixFilePermissions(executable, permissions);
            }
        }
    } catch (IOException e) {
        throw new CdpException(e);
    }
    return executable;
}

From source file:org.apache.hadoop.raid.RaidShell.java

/**
 * checks the raided file system, prints a list of corrupt files to
 * this.out and returns the number of corrupt files.
 * Also prints out the total number of files with at least one missing block.
 * When called with '-retNumStrpsMissingBlks', also prints out number of stripes
 * with certain number of blocks missing for files using the 'RS' codec.
 *
 * @param cmd        command name, used only for usage reporting
 * @param args       full argument array; fsck arguments start at startIndex
 * @param startIndex index within args of the first fsck argument (the path)
 * @throws IOException if the filesystem is not a DistributedFileSystem, is closed,
 *         or a worker is interrupted while being waited on
 */
public void fsck(String cmd, String[] args, int startIndex) throws IOException {
    final int numFsckArgs = args.length - startIndex;
    int numThreads = 16;
    String path = "/";
    boolean argsOk = false;
    boolean countOnly = false;
    boolean cntMissingBlksPerStrp = false;
    boolean listRecoverableFile = false;
    // First positional argument is the path; the remaining args are option flags.
    if (numFsckArgs >= 1) {
        argsOk = true;
        path = args[startIndex];
    }
    for (int i = startIndex + 1; i < args.length; i++) {
        if (args[i].equals("-threads")) {
            numThreads = Integer.parseInt(args[++i]);
        } else if (args[i].equals("-count")) {
            countOnly = true;
        } else if (args[i].equals("-retNumStrpsMissingBlks")) {
            cntMissingBlksPerStrp = true;
        } else if (args[i].equals("-listrecoverablefiles")) {
            listRecoverableFile = true;
        }
    }
    if (!argsOk) {
        printUsage(cmd);
        return;
    }
    final String dateString = dateFormat.format(new Date());
    ; // stray empty statement (no-op), preserved as-is
    System.err
            .println("Running RAID FSCK with " + numThreads + " threads on " + path + " at time " + dateString);

    FileSystem fs = (new Path(path)).getFileSystem(conf);

    // if we got a raid fs, get the underlying fs 
    if (fs instanceof DistributedRaidFileSystem) {
        fs = ((DistributedRaidFileSystem) fs).getFileSystem();
    }

    // check that we have a distributed fs
    if (!(fs instanceof DistributedFileSystem)) {
        throw new IOException("expected DistributedFileSystem but got " + fs.getClass().getName());
    }
    final DistributedFileSystem dfs = (DistributedFileSystem) fs;

    // get a list of corrupted files (not considering parity blocks just yet)
    // from the name node
    // these are the only files we need to consider:
    // if a file has no corrupted data blocks, it is OK even if some
    // of its parity blocks are corrupted, so no further checking is
    // necessary
    System.err.println("Querying NameNode for list of corrupt files under " + path);
    final String[] files = DFSUtil.getCorruptFiles(dfs, path);
    final List<String> corruptFileCandidates = new LinkedList<String>();
    for (final String f : files) {
        // if this file is a parity file
        // or if it does not start with the specified path,
        // ignore it
        boolean matched = false;
        for (Codec c : Codec.getCodecs()) {
            if (f.startsWith(c.getParityPrefix())) {
                matched = true;
            }
        }
        if (!matched) {
            corruptFileCandidates.add(f);
        }
    }
    // filter files marked for deletion
    RaidUtils.filterTrash(conf, corruptFileCandidates);

    //clear numStrpMissingBlks if missing blocks per stripe is to be counted
    if (cntMissingBlksPerStrp) {
        for (AtomicLongArray numStrpMissingBlks : numStrpMissingBlksMap.values()) {
            for (int i = 0; i < numStrpMissingBlks.length(); i++) {
                numStrpMissingBlks.set(i, 0);
            }
        }
    }
    System.err.println("Processing " + corruptFileCandidates.size() + " possibly corrupt files using "
            + numThreads + " threads");
    ExecutorService executor = null;
    // Worker threads get stable timestamped names to ease thread-dump analysis.
    ThreadFactory factory = new ThreadFactory() {
        final AtomicInteger tnum = new AtomicInteger();

        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName("Raidfsck-" + dateString + "-" + tnum.incrementAndGet());
            return t;
        }
    };
    if (numThreads > 1) {
        executor = Executors.newFixedThreadPool(numThreads, factory);
    } else {
        numThreads = 1;
    }
    final List<String> unRecoverableFiles = Collections.synchronizedList(new LinkedList<String>());
    final List<String> recoverableFiles = Collections.synchronizedList(new LinkedList<String>());
    final boolean finalCountOnly = countOnly;
    final boolean finalMissingBlksPerStrpCnt = cntMissingBlksPerStrp;
    final boolean finalListRecoverableFile = listRecoverableFile;
    final int step = numThreads;
    final AtomicInteger finishNum = new AtomicInteger(0);
    // Strided partitioning: worker i scans candidates i, i+step, i+2*step, ...
    for (int i = 0; i < numThreads; i++) {
        if (!dfs.getClient().isOpen()) {
            throw new IOException("Filesystem closed.");
        }
        final int startIdx = i;
        Runnable work = new Runnable() {
            public void run() {
                try {
                    for (int idx = startIdx; idx < corruptFileCandidates.size(); idx += step) {
                        String corruptFileCandidate = corruptFileCandidates.get(idx);
                        boolean corrupt = false;
                        try {
                            FileStatus corruptStat;
                            try {
                                corruptStat = dfs.getFileStatus(new Path(corruptFileCandidate));
                            } catch (FileNotFoundException fnfe) {
                                // file disappeared since the NameNode listed it; skip it
                                continue;
                            }
                            if (!dfs.getClient().isOpen()) {
                                LOG.warn("Filesystem closed.");
                                return;
                            }
                            corrupt = isFileCorrupt(dfs, corruptStat, finalMissingBlksPerStrpCnt);
                            if (corrupt) {
                                incrCorruptCount();
                                if (!finalCountOnly && !finalListRecoverableFile) {
                                    unRecoverableFiles.add(corruptFileCandidate);
                                }
                            } else {
                                if (!finalCountOnly && finalListRecoverableFile) {
                                    recoverableFiles.add(corruptFileCandidate);
                                }
                            }
                        } catch (Throwable e) {
                            LOG.error("Error in processing " + corruptFileCandidate, e);
                        }
                    }
                } finally {
                    // always count this worker as finished so the poll loop below terminates
                    finishNum.incrementAndGet();
                }
            }
        };
        if (executor != null) {
            executor.execute(work);
        } else {
            work.run();
        }
    }
    // Poll until every worker has reported completion.
    if (executor != null) {
        try {
            while (finishNum.get() < numThreads) {
                try {
                    Thread.sleep(2000);
                } catch (InterruptedException ie) {
                    LOG.warn("Raidfsck get exception ", ie);
                    throw new IOException(ie);
                }
            }
        } finally {
            executor.shutdown(); // Waits for submitted tasks to finish.
        }
    }

    // If client is closed, fail the fsck check.
    if (!dfs.getClient().isOpen()) {
        throw new IOException("Filesystem closed.");
    }

    if (countOnly) {
        //Number of corrupt files (which cannot be fixed by Raid)
        out.println(getCorruptCount());
        LOG.info("Nubmer of corrupt files:" + getCorruptCount());
        //Number of files with at least one missing block
        out.println(corruptFileCandidates.size());
        LOG.info("Number of files with at least one block missing/corrupt: " + corruptFileCandidates.size());
    } else {
        if (listRecoverableFile) {
            for (String file : recoverableFiles) {
                out.println(file);
            }
        } else {
            for (String file : unRecoverableFiles) {
                out.println(file);
            }
        }
    }

    /*Number of stripes with missing blocks array, separated by each code id:
     * Number of missing blocks found from non-raided files.
     * codeId1
     * index 0: Number of stripes found with one block missing in this fsck
     * index 1: Number of stripes found with two block missing in this fsck
     * and so on
     * codeId2
     * index 0: Number of stripes found with one block missing in this fsck
     * index 1: Number of stripes found with two block missing in this fsck
     * and so on
     */
    if (cntMissingBlksPerStrp) {
        out.println(this.numNonRaidedMissingBlks);
        for (String codecId : numStrpMissingBlksMap.keySet()) {
            out.println(codecId);
            AtomicLongArray numStrpMissingBlks = numStrpMissingBlksMap.get(codecId);
            for (int j = 0; j < numStrpMissingBlks.length(); j++) {
                long temp = numStrpMissingBlks.get(j);
                out.println(temp);
                LOG.info("Number of stripes with missing blocks at index " + j + " is " + temp);
            }
        }
    }
}

From source file:org.apache.mele.embedded.HadoopQueueEmbedded.java

/**
 * Builds (but does not start) the acknowledgement watcher thread.
 *
 * The thread polls {@code ackCheck()} until it succeeds or shutdown is requested;
 * on success it closes the ack and flags shutdown. All I/O errors are logged and
 * the loop continues — the watcher is deliberately best-effort.
 *
 * @return a named, not-yet-started thread running the ack loop
 */
private Thread getAckThread() {
    Runnable ackLoop = new Runnable() {
        @Override
        public void run() {
            LOG.info("Starting ack thread.");
            while (!_shutdown.get()) {
                boolean acked = false;
                try {
                    acked = ackCheck();
                } catch (IOException e) {
                    LOG.error("Unknown error during ack check.", e);
                }
                if (acked) {
                    try {
                        closeAck();
                    } catch (IOException e) {
                        LOG.error("Unknown error during closing of ack.", e);
                    }
                    _shutdown.set(true);
                    return;
                }
                try {
                    sleep(_ackSleepTime);
                } catch (IOException e) {
                    LOG.error("Unknown error during sleep.", e);
                }
            }
        }
    };
    Thread ackThread = new Thread(ackLoop);
    ackThread.setName("ACK THREAD [" + _currentAckFile + "]");
    return ackThread;
}

From source file:org.apache.stratos.common.internal.CommonServiceComponent.java

/**
 * OSGi activation callback: loads the stratos configuration and the EULA, then
 * registers the distributed object provider and component start-up synchronizer.
 * When a clustering agent is present, registration is deferred to a background
 * thread that first waits for the Hazelcast instance to come up.
 *
 * @param context OSGi component context providing the bundle context
 */
protected void activate(ComponentContext context) {
    if (log.isDebugEnabled()) {
        log.debug("Activating CommonServiceComponent...");
    }
    try {
        final BundleContext bundleContext = context.getBundleContext();
        if (CommonUtil.getStratosConfig() == null) {
            StratosConfiguration stratosConfig = CommonUtil.loadStratosConfiguration();
            CommonUtil.setStratosConfig(stratosConfig);
        }

        // Loading the EULA
        if (CommonUtil.getEula() == null) {
            String eula = CommonUtil.loadTermsOfUsage();
            CommonUtil.setEula(eula);
        }

        AxisConfiguration axisConfig = ServiceReferenceHolder.getInstance().getAxisConfiguration();
        if ((axisConfig != null) && (axisConfig.getClusteringAgent() != null)) {
            // Clustered deployment: Hazelcast may not be up yet, so register from a
            // background thread that polls once a second with a hard timeout.
            Thread thread = new Thread() {
                @Override
                public void run() {
                    try {
                        // Wait for the hazelcast instance to be available
                        long startTime = System.currentTimeMillis();
                        log.info("Waiting for the hazelcast instance to be initialized...");
                        while (ServiceReferenceHolder.getInstance().getHazelcastInstance() == null) {
                            log.info("Waiting for Hazelcast instance to be initialized...");
                            Thread.sleep(1000);
                            if ((System.currentTimeMillis()
                                    - startTime) >= StratosConstants.HAZELCAST_INSTANCE_INIT_TIMEOUT) {
                                throw new RuntimeException("Hazelcast instance was not initialized within "
                                        + StratosConstants.HAZELCAST_INSTANCE_INIT_TIMEOUT / 1000 + " seconds");
                            }
                        }
                        registerDistributedObjectProviderService(bundleContext);
                        registerComponentStartUpSynchronizer(bundleContext);

                    } catch (Exception e) {
                        // NOTE(review): the timeout RuntimeException above is only logged here,
                        // so a failed registration is silent apart from this log entry.
                        log.error(e);
                    }
                }
            };
            thread.setName("Distributed object provider registration thread");
            thread.start();
        } else {
            registerDistributedObjectProviderService(bundleContext);
            registerComponentStartUpSynchronizer(bundleContext);
        }

        // Register manager configuration service
        try {
            StratosConfiguration stratosConfiguration = CommonUtil.loadStratosConfiguration();
            bundleContext.registerService(StratosConfiguration.class.getName(), stratosConfiguration, null);
        } catch (Exception ex) {
            String msg = "An error occurred while registering stratos configuration service";
            log.error(msg, ex);
        }

        if (log.isInfoEnabled()) {
            log.info("Stratos common service component is activated");
        }
    } catch (Exception e) {
        log.error("Error in activating stratos common service component", e);
    }
}

From source file:com.android.exchange.ExchangeService.java

/**
 * Reloads the EAS folder list for the given account by resetting push/ping mailboxes
 * to push/hold and stopping the account mailbox's sync thread so it restarts naturally.
 *
 * @param context   caller's context, used for content-provider access
 * @param accountId id of the account whose folder list should be reloaded
 * @param force     reload even when the account has no sync key yet
 */
static public void reloadFolderList(Context context, long accountId, boolean force) {
    ExchangeService exchangeService = INSTANCE;
    if (exchangeService == null)
        return;
    Cursor c = context.getContentResolver().query(Mailbox.CONTENT_URI, Mailbox.CONTENT_PROJECTION,
            MailboxColumns.ACCOUNT_KEY + "=? AND " + MailboxColumns.TYPE + "=?",
            new String[] { Long.toString(accountId), Long.toString(Mailbox.TYPE_EAS_ACCOUNT_MAILBOX) }, null);
    // Bug fix: ContentResolver#query may return null (e.g. provider unavailable); the
    // original code would NPE on moveToFirst()/close() in that case.
    if (c == null) {
        return;
    }
    try {
        if (c.moveToFirst()) {
            synchronized (sSyncLock) {
                Mailbox mailbox = new Mailbox();
                mailbox.restore(c);
                Account acct = Account.restoreAccountWithId(context, accountId);
                if (acct == null) {
                    reloadFolderListFailed(accountId);
                    return;
                }
                String syncKey = acct.mSyncKey;
                // No need to reload the list if we don't have one
                if (!force && (syncKey == null || syncKey.equals("0"))) {
                    reloadFolderListFailed(accountId);
                    return;
                }

                // Change all ping/push boxes to push/hold
                ContentValues cv = new ContentValues();
                cv.put(Mailbox.SYNC_INTERVAL, Mailbox.CHECK_INTERVAL_PUSH_HOLD);
                context.getContentResolver().update(Mailbox.CONTENT_URI, cv,
                        WHERE_PUSH_OR_PING_NOT_ACCOUNT_MAILBOX, new String[] { Long.toString(accountId) });
                log("Set push/ping boxes to push/hold");

                long id = mailbox.mId;
                AbstractSyncService svc = exchangeService.mServiceMap.get(id);
                // Tell the service we're done
                if (svc != null) {
                    synchronized (svc.getSynchronizer()) {
                        svc.stop();
                        // Interrupt the thread so that it can stop
                        Thread thread = svc.mThread;
                        if (thread != null) {
                            // Tag the stopped thread so lingering references are identifiable.
                            thread.setName(thread.getName() + " (Stopped)");
                            thread.interrupt();
                        }
                    }
                    // Abandon the service
                    exchangeService.releaseMailbox(id);
                    // And have it start naturally
                    kick("reload folder list");
                }
            }
        }
    } finally {
        c.close();
    }
}

From source file:com.inmobi.grill.driver.hive.TestRemoteHiveDriver.java

/**
 * Stress test: launches several async queries and, for each, many poller threads that
 * repeatedly update status until the query finishes, then verifies that all hive
 * handles were closed and no errors occurred.
 *
 * @throws Exception on driver setup or query execution failure
 */
@Test
public void testMultiThreadClient() throws Exception {
    LOG.info("@@ Starting multi thread test");
    // Launch two threads
    createTestTable("test_multithreads");
    HiveConf thConf = new HiveConf(conf, TestRemoteHiveDriver.class);
    thConf.setLong(HiveDriver.GRILL_CONNECTION_EXPIRY_DELAY, 10000);
    final HiveDriver thrDriver = new HiveDriver();
    thrDriver.configure(thConf);
    QueryContext ctx = new QueryContext("USE " + TestRemoteHiveDriver.class.getSimpleName(), null, conf);
    thrDriver.execute(ctx);

    // Launch a select query
    final int QUERIES = 5;
    int launchedQueries = 0;
    final int THREADS = 5;
    final long POLL_DELAY = 500;
    List<Thread> thrs = new ArrayList<Thread>();
    final AtomicInteger errCount = new AtomicInteger();
    for (int q = 0; q < QUERIES; q++) {
        final QueryContext qctx;
        try {
            qctx = new QueryContext("SELECT * FROM test_multithreads", null, conf);
            thrDriver.executeAsync(qctx);
        } catch (GrillException e) {
            // Launch failures are tolerated here; errCount is asserted to be zero below.
            errCount.incrementAndGet();
            LOG.info(q + " executeAsync error: " + e.getCause());
            continue;
        }
        LOG.info("@@ Launched query: " + q + " " + qctx.getQueryHandle());
        launchedQueries++;
        // Launch many threads to poll for status
        final QueryHandle handle = qctx.getQueryHandle();

        for (int i = 0; i < THREADS; i++) {
            int thid = q * THREADS + i;
            Thread th = new Thread(new Runnable() {
                @Override
                public void run() {
                    // Poll up to 1000 times; stop on completion, driver error, or interrupt.
                    for (int i = 0; i < 1000; i++) {
                        try {
                            thrDriver.updateStatus(qctx);
                            if (qctx.getDriverStatus().isFinished()) {
                                LOG.info("@@ " + handle.getHandleId() + " >> "
                                        + qctx.getDriverStatus().getState());
                                thrDriver.closeQuery(handle);
                                break;
                            }
                            Thread.sleep(POLL_DELAY);
                        } catch (GrillException e) {
                            LOG.error("Got Exception", e.getCause());
                            e.printStackTrace();
                            errCount.incrementAndGet();
                            break;
                        } catch (InterruptedException e) {
                            e.printStackTrace();
                            break;
                        }
                    }
                }
            });
            thrs.add(th);
            th.setName("Poller#" + (thid));
            th.start();
        }
    }

    // Bounded wait (10s each) for every poller to finish.
    for (Thread th : thrs) {
        try {
            th.join(10000);
        } catch (InterruptedException e) {
            LOG.warn("Not ended yet: " + th.getName());
        }
    }
    // All hive handles must have been closed by the pollers.
    Assert.assertEquals(0, thrDriver.getHiveHandleSize());
    LOG.info("@@ Completed all pollers. Total thrift errors: " + errCount.get());
    assertEquals(launchedQueries, QUERIES);
    assertEquals(thrs.size(), QUERIES * THREADS);
    assertEquals(errCount.get(), 0);
}