Example usage for java.lang Thread isAlive

List of usage examples for java.lang Thread isAlive

Introduction

On this page you can find example usages of java.lang Thread isAlive.

Prototype

public final native boolean isAlive();

Document

Tests if this thread is alive. A thread is alive if it has been started and has not yet died.
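
Before the project examples below, a minimal self-contained sketch (not taken from any of those projects) showing the three phases isAlive() distinguishes: false before start(), true while the thread is running, and false again once it has terminated.

import java.util.concurrent.TimeUnit;

public class IsAliveDemo {
    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            try {
                TimeUnit.MILLISECONDS.sleep(200);   // simulate some work
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the interrupt flag
            }
        });

        System.out.println("before start: " + worker.isAlive()); // false, not started yet
        worker.start();
        System.out.println("after start:  " + worker.isAlive()); // normally true, worker is still sleeping
        worker.join();                                            // wait for the worker to terminate
        System.out.println("after join:   " + worker.isAlive()); // false, thread has died
    }
}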

Usage

From source file:org.opennms.mock.snmp.MockSnmpAgent.java

public static MockSnmpAgent createAgentAndRun(Resource moFile, String bindAddress) throws InterruptedException {
    try {
        if (moFile.getInputStream() == null) {
            throw new IllegalArgumentException(
                    "could not get InputStream mock object resource; does it exist?  Resource: " + moFile);
        }
    } catch (IOException e) {
        throw new RuntimeException("Got IOException while checking for existence of mock object file: " + e, e);
    }

    MockSnmpAgent agent = new MockSnmpAgent(new File("/dev/null"), new File("/dev/null"), moFile, bindAddress);
    Thread thread = new Thread(agent);
    thread.start();

    try {
        while (!agent.isRunning() && thread.isAlive()) {
            Thread.sleep(10);
        }
    } catch (InterruptedException e) {
        agent.shutDownAndWait();
        throw e;
    }

    if (!thread.isAlive()) {
        agent.m_running = false;
        agent.m_stopped = true;
        throw new IllegalStateException("agent failed to start--check logs");
    }

    return agent;
}
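
The loop above polls both a readiness flag and isAlive(), so a crashed agent thread is detected instead of waiting forever. A stripped-down sketch of the same startup pattern, with a hypothetical Worker class standing in for MockSnmpAgent:

public class StartupWait {

    static class Worker implements Runnable {
        volatile boolean running = false;   // readiness flag, set by the worker itself

        @Override
        public void run() {
            running = true;                 // signal that startup succeeded
            try {
                Thread.sleep(1_000);        // simulate doing work
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } finally {
                running = false;
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        Worker worker = new Worker();
        Thread thread = new Thread(worker);
        thread.start();

        // Wait until the worker reports ready, but stop waiting if its thread dies first.
        while (!worker.running && thread.isAlive()) {
            Thread.sleep(10);
        }

        if (!thread.isAlive()) {
            throw new IllegalStateException("worker failed to start");
        }
        System.out.println("worker is up");
        thread.join();
    }
}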

From source file:org.tranche.logs.LogUtil.java

/**
 * Builds a dump of all live threads, including each thread's name, priority,
 * daemon flag, alive flag, state, and stack trace.
 *
 * @return the formatted thread dump
 */
public static final String getThreadDump() {
    Map<Thread, StackTraceElement[]> threadInfo = Thread.getAllStackTraces();
    StringBuffer buf = new StringBuffer();
    buf.append("Thread dump: " + threadInfo.size() + " threads");
    buf.append("\n" + "\n");
    for (Thread t : threadInfo.keySet()) {
        StackTraceElement[] ste = threadInfo.get(t);
        String daemonMsg = t.isDaemon() ? "daemon" : "non-daemon";
        String aliveMsg = t.isAlive() ? "alive" : "non-alive";
        buf.append("    * " + t.getName() + " (priority: " + t.getPriority() + ", " + daemonMsg + ", "
                + aliveMsg + ", state: " + t.getState() + ") ");
        buf.append("\n");

        for (int i = 0; i < ste.length; i++) {
            buf.append("        " + ste[i].toString());
            buf.append("\n");
        }

        buf.append("\n");
    }
    buf.append("\n" + "\n");
    return buf.toString();
}

From source file:com.likya.myra.jef.utils.JobQueueOperations.java

public static boolean hasActiveThreads(HashMap<String, JobImpl> jobQueue) {

    Iterator<JobImpl> jobsIterator = jobQueue.values().iterator();
    while (jobsIterator.hasNext()) {
        JobImpl scheduledJob = jobsIterator.next();
        Thread myExecuter = scheduledJob.getMyExecuter();
        if ((myExecuter != null) && myExecuter.isAlive()) {
            return true;
        }
    }

    return false;
}

From source file:org.openqa.selenium.server.browserlaunchers.AsyncExecute.java

/** Waits the specified timeout for the process to die */
public static int waitForProcessDeath(Process p, long timeout) {
    ProcessWaiter pw = new ProcessWaiter(p);
    Thread waiter = new Thread(pw);
    waiter.start();
    try {
        waiter.join(timeout);
    } catch (InterruptedException e) {
        throw new RuntimeException("Bug? Main interrupted while waiting for process", e);
    }
    if (waiter.isAlive()) {
        waiter.interrupt();
    }
    try {
        waiter.join();
    } catch (InterruptedException e) {
        throw new RuntimeException("Bug? Main interrupted while waiting for dead process waiter", e);
    }
    InterruptedException ie = pw.getException();
    if (ie != null) {
        throw new ProcessStillAliveException("Timeout waiting for process to die", ie);
    }
    return p.exitValue();

}
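
waitForProcessDeath follows a pattern that several of the examples on this page share: join with a timeout, use isAlive() to find out whether the timeout expired, and interrupt the helper thread if it is still running. A stand-alone sketch of just that pattern (the sleeping Runnable is a placeholder for any blocking call):

public class BoundedWait {
    public static void main(String[] args) throws InterruptedException {
        Thread blocker = new Thread(() -> {
            try {
                Thread.sleep(60_000);                 // stands in for a long blocking call
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();   // give up when asked to stop
            }
        });
        blocker.start();

        blocker.join(500);                  // wait at most 500 ms
        if (blocker.isAlive()) {            // join returned but the thread is still running
            blocker.interrupt();            // ask it to stop
            blocker.join();                 // now wait for it to actually finish
        }
        System.out.println("blocker alive: " + blocker.isAlive());   // false
    }
}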

From source file:org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster.java

public static void stopMasterAndAssignMeta(HBaseTestingUtility HTU) throws IOException, InterruptedException {
    // Stop master
    HMaster master = HTU.getHBaseCluster().getMaster();
    Thread masterThread = HTU.getHBaseCluster().getMasterThread();
    ServerName masterAddr = master.getServerName();
    master.stopMaster();

    LOG.info("Waiting until master thread exits");
    while (masterThread != null && masterThread.isAlive()) {
        Threads.sleep(100);
    }

    HRegionServer.TEST_SKIP_REPORTING_TRANSITION = true;
    // Master is down, so is the meta. We need to assign it somewhere
    // so that regions can be assigned during the mocking phase.
    HRegionServer hrs = HTU.getHBaseCluster().getLiveRegionServerThreads().get(0).getRegionServer();
    ZooKeeperWatcher zkw = hrs.getZooKeeper();
    MetaTableLocator mtl = new MetaTableLocator();
    ServerName sn = mtl.getMetaRegionLocation(zkw);
    if (sn != null && !masterAddr.equals(sn)) {
        return;
    }

    ProtobufUtil.openRegion(null, hrs.getRSRpcServices(), hrs.getServerName(),
            HRegionInfo.FIRST_META_REGIONINFO);
    while (true) {
        sn = mtl.getMetaRegionLocation(zkw);
        if (sn != null && sn.equals(hrs.getServerName())
                && hrs.onlineRegions.containsKey(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
            break;
        }
        Thread.sleep(100);
    }
}

From source file:es.darkhogg.hazelnutt.Hazelnutt.java

/**
 * Terminates the application in at most <i>time</i> milliseconds for
 * every alive thread.
 * 
 * @param time Number of milliseconds to wait for each thread to terminate
 */
public static void terminate(long time) {
    Logger logger = getLogger();
    logger.info("Terminating application...");

    try {
        getFrame().dispose();

        // Get the root thread group
        ThreadGroup rootThreadGroup = Thread.currentThread().getThreadGroup();
        while (rootThreadGroup.getParent() != null) {
            rootThreadGroup = rootThreadGroup.getParent();
        }

        // Declare some collections
        Queue<ThreadGroup> threadGroups = new LinkedList<ThreadGroup>();
        Queue<Thread> threads = new LinkedList<Thread>();

        // Get ALL groups
        threadGroups.add(rootThreadGroup);
        while (!threadGroups.isEmpty()) {
            ThreadGroup group = threadGroups.remove();

            Thread[] subThreads = new Thread[group.activeCount() * 2];
            group.enumerate(subThreads); // copy this group's live threads into the array
            for (Thread subThread : subThreads) {
                if (subThread != null) {
                    threads.add(subThread);
                }
            }

            ThreadGroup[] subThreadGroups = new ThreadGroup[group.activeGroupCount() * 2];
            group.enumerate(subThreadGroups, false); // direct subgroups only; the queue handles recursion
            for (ThreadGroup subThreadGroup : subThreadGroups) {
                if (subThreadGroup != null) {
                    threadGroups.add(subThreadGroup);
                }
            }
        }

        // Join a maximum of time milliseconds for all non-daemon threads
        while (!threads.isEmpty()) {
            Thread thread = threads.remove();
            LOGGER.trace(thread);

            if (!thread.isDaemon() && thread != Thread.currentThread()) {
                logger.trace("Waiting for thread '" + thread.getName() + "'");
                thread.join(time);
                if (thread.isAlive()) {
                    logger.trace("Interrupting thread '" + thread.getName() + "'");
                    thread.interrupt();
                }
            }
        }

    } catch (Throwable e) {
        LOGGER.warn("Interrupted while terminating application", e);

    } finally {
        // Exit the program
        System.exit(0);
    }
}

From source file:mServer.upload.MserverFtp.java

public static boolean uploadFtp(String srcPathFile_, String destFileName_, MserverDatenUpload datenUpload_) {
    try {
        srcPathFile = srcPathFile_;
        destFileName = destFileName_;
        datenUpload = datenUpload_;
        server = datenUpload.arr[MserverDatenUpload.UPLOAD_SERVER_NR];
        strPort = datenUpload.arr[MserverDatenUpload.UPLOAD_PORT_NR];
        username = datenUpload.arr[MserverDatenUpload.UPLOAD_USER_NR];
        password = datenUpload.arr[MserverDatenUpload.UPLOAD_PWD_NR];
        MserverLog.systemMeldung("");
        MserverLog.systemMeldung("----------------------");
        MserverLog.systemMeldung("Upload start");
        MserverLog.systemMeldung("Server: " + server);
        Thread t = new Thread(new Ftp());
        t.start();

        int warten = MserverKonstanten.MAX_WARTEN_FTP_UPLOAD /* minutes */;
        MserverLog.systemMeldung("Max Laufzeit FTP[Min]: " + warten);
        MserverLog.systemMeldung("-----------------------------------");
        warten = 1000 * 60 * warten;
        t.join(warten);

        if (t != null) {
            if (t.isAlive()) {
                MserverLog.fehlerMeldung(396958702, MserverFtp.class.getName(),
                        "Der letzte FtpUpload luft noch");
                MserverLog.systemMeldung("und wird gekillt");
                t.stop();
                retFtp = false;
            }
        }
    } catch (Exception ex) {
        MserverLog.fehlerMeldung(739861047, MserverFtp.class.getName(), "uploadFtp", ex);
    }
    return retFtp;
}

From source file:org.unitime.timetable.solver.remote.SolverRegisterService.java

public static Object execute(Callback callback, long timeout) throws Exception {
    Exec ex = new Exec(callback);
    if (timeout <= 0) {
        ex.run();
    } else {
        Thread et = new Thread(ex);
        et.start();
        et.join(timeout);
        if (et.isAlive())
            et.interrupt();
    }
    if (ex.getAnswer() != null && ex.getAnswer() instanceof Exception)
        throw (Exception) ex.getAnswer();
    return ex.getAnswer();
}

From source file:com.strategicgains.docussandra.controller.perf.remote.mongo.MongoLoader.java

public static void loadMongoData(MongoClientURI uri, final int NUM_WORKERS, Database database,
        final int numDocs, final PerfTestParent clazz) {
    logger.info("------------Loading Data into: " + database.name() + " with MONGO!------------");
    try {
        try {
            MongoClient mongoClient = new MongoClient(uri);
            mongoClient.setWriteConcern(WriteConcern.MAJORITY);
            DB db = mongoClient.getDB(database.name());
            final DBCollection coll = db.getCollection(database.name());
            ArrayList<Thread> workers = new ArrayList<>(NUM_WORKERS + 1);
            int docsPerWorker = numDocs / NUM_WORKERS;
            try {
                List<Document> docs = clazz.getDocumentsFromFS();
                ArrayList<List<Document>> documentQueues = new ArrayList<>(NUM_WORKERS + 1);
                int numDocsAssigned = 0;
                while ((numDocsAssigned + 1) < numDocs) {
                    int start = numDocsAssigned;
                    int end = numDocsAssigned + docsPerWorker;
                    if (end > numDocs) {
                        end = numDocs - 1;
                    }
                    documentQueues.add(new ArrayList(docs.subList(start, end)));
                    numDocsAssigned = end;
                }
                for (final List<Document> queue : documentQueues) {
                    workers.add(new Thread() {
                        @Override
                        public void run() {
                            for (Document d : queue) {
                                DBObject o = (DBObject) JSON.parse(d.object());
                                coll.save(o);
                            }
                            logger.info("Thread " + Thread.currentThread().getName() + " is done. It processed "
                                    + queue.size() + " documents.");
                        }
                    });
                }
            } catch (UnsupportedOperationException e)//we can't read everything in at once
            {
                //all we need to do in this block is find a way to set "workers"
                for (int i = 0; i < NUM_WORKERS; i++) {
                    workers.add(new Thread() {
                        private final int chunk = (int) (Math.random() * 100) + 150;//pick a random chunk so we are not going back to the FS all at the same time and potentially causing a bottle neck

                        @Override
                        public void run() {
                            ThreadLocal<Integer> counter = new ThreadLocal<>();
                            counter.set(new Integer(0));
                            try {
                                List<Document> docs = clazz.getDocumentsFromFS(chunk);//grab a handful of documents
                                while (docs.size() > 0) {
                                    for (Document d : docs)//process the documents we grabbed
                                    {
                                        DBObject o = (DBObject) JSON.parse(d.object());
                                        coll.save(o);
                                        counter.set(counter.get() + 1);
                                    }
                                    docs = clazz.getDocumentsFromFS(chunk);//grab another handful of documents
                                }
                                logger.info("Thread " + Thread.currentThread().getName()
                                        + " is done. It processed " + counter.get() + " documents.");
                            } catch (IOException | ParseException e) {
                                logger.error("Couldn't read from document", e);
                            }
                        }
                    });
                }
            }

            long start = new Date().getTime();
            //start your threads!
            for (Thread t : workers) {
                t.start();
            }
            logger.info("All threads started, waiting for completion.");
            boolean allDone = false;
            boolean first = true;
            while (!allDone || first) {
                first = false;
                boolean done = true;
                for (Thread t : workers) {
                    if (t.isAlive()) {
                        done = false;
                        logger.info("Thread " + t.getName() + " is still running.");
                        break;
                    }
                }
                if (done) {
                    allDone = true;
                } else {
                    logger.info("We still have workers running...");
                    try {
                        Thread.sleep(10000);
                    } catch (InterruptedException e) {
                    }
                }
            }
            long end = new Date().getTime();
            long miliseconds = end - start;
            double seconds = (double) miliseconds / 1000d;
            output.info("Done loading data using: " + NUM_WORKERS + ". Took: " + seconds + " seconds");
            double tpms = (double) numDocs / (double) miliseconds;
            double tps = tpms * 1000;
            double transactionTime = (double) miliseconds / (double) numDocs;
            output.info(database.name() + " Mongo Average Transactions Per Second: " + tps);
            output.info(
                    database.name() + " Mongo Average Transactions Time (in miliseconds): " + transactionTime);

        } catch (UnknownHostException e) {
            logger.error("Couldn't connect to Mongo Server", e);
        }
    } catch (IOException | ParseException e) {
        logger.error("Couldn't read data.", e);
    }
}

From source file:com.nubits.nubot.utils.Utils.java

public static void logActiveThreads() {
    int active = Thread.activeCount();
    LOG.trace("currently active threads: " + active);
    Thread allThreads[] = new Thread[active];
    Thread.enumerate(allThreads);

    for (int i = 0; i < active; i++) {
        Thread t = allThreads[i];
        LOG.trace(i + ": " + t + " id: " + t.getId() + " name: " + t.getName() + " " + t.getContextClassLoader()
                + " group: " + t.getThreadGroup() + " alive: " + t.isAlive());
        LOG.trace("super: " + t.getClass().getSuperclass());
    }

    if (active > maxThreadsError) {
        LOG.error("too many threads started");
    }
}