Example usage for java.util.Stack.empty()

List of usage examples for java.util.Stack.empty()

Introduction

This page collects usage examples for java.util.Stack.empty(), drawn from open-source projects.

Prototype

public boolean empty() 

Document

Tests if this stack is empty; returns true if and only if the stack contains no items.
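
Before the full-scale examples below, a minimal, self-contained sketch of the call itself; the class and variable names are illustrative only. The while (!stack.empty()) drain loop is the idiom that every example on this page builds on.

import java.util.Stack;

public class StackEmptyDemo {
    public static void main(String[] args) {
        Stack<String> stack = new Stack<>();
        System.out.println(stack.empty()); // true: nothing pushed yet

        stack.push("first");
        stack.push("second");
        System.out.println(stack.empty()); // false: two elements on the stack

        // The canonical drain loop guarded by empty()
        while (!stack.empty()) {
            System.out.println(stack.pop()); // "second", then "first" (LIFO order)
        }
        System.out.println(stack.empty()); // true again
    }
}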

Usage

From source file:org.apache.hadoop.tools.DistCpV1.java

/**
 * Initialize DFSCopyFileMapper specific job-configuration.
 * @param conf : The dfs/mapred configuration.
 * @param jobConf : The handle to the jobConf object to be initialized.
 * @param args Arguments
 * @return true if it is necessary to launch a job.
 */
static boolean setup(Configuration conf, JobConf jobConf, final Arguments args) throws IOException {
    jobConf.set(DST_DIR_LABEL, args.dst.toUri().toString());

    //set boolean values
    final boolean update = args.flags.contains(Options.UPDATE);
    final boolean skipCRCCheck = args.flags.contains(Options.SKIPCRC);
    final boolean overwrite = !update && args.flags.contains(Options.OVERWRITE) && !args.dryrun;
    jobConf.setBoolean(Options.UPDATE.propertyname, update);
    jobConf.setBoolean(Options.SKIPCRC.propertyname, skipCRCCheck);
    jobConf.setBoolean(Options.OVERWRITE.propertyname, overwrite);
    jobConf.setBoolean(Options.IGNORE_READ_FAILURES.propertyname,
            args.flags.contains(Options.IGNORE_READ_FAILURES));
    jobConf.setBoolean(Options.PRESERVE_STATUS.propertyname, args.flags.contains(Options.PRESERVE_STATUS));

    final String randomId = getRandomId();
    JobClient jClient = new JobClient(jobConf);
    Path stagingArea;
    try {
        stagingArea = JobSubmissionFiles.getStagingDir(jClient.getClusterHandle(), conf);
    } catch (InterruptedException ie) {
        throw new IOException(ie);
    }

    Path jobDirectory = new Path(stagingArea + NAME + "_" + randomId);
    FsPermission mapredSysPerms = new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
    FileSystem.mkdirs(jClient.getFs(), jobDirectory, mapredSysPerms);
    jobConf.set(JOB_DIR_LABEL, jobDirectory.toString());

    long maxBytesPerMap = conf.getLong(BYTES_PER_MAP_LABEL, BYTES_PER_MAP);

    FileSystem dstfs = args.dst.getFileSystem(conf);

    // get tokens for all the required FileSystems
    TokenCache.obtainTokensForNamenodes(jobConf.getCredentials(), new Path[] { args.dst }, conf);

    boolean dstExists = dstfs.exists(args.dst);
    boolean dstIsDir = false;
    if (dstExists) {
        dstIsDir = dstfs.getFileStatus(args.dst).isDirectory();
    }

    // default logPath
    Path logPath = args.log;
    if (logPath == null) {
        String filename = "_distcp_logs_" + randomId;
        if (!dstExists || !dstIsDir) {
            Path parent = args.dst.getParent();
            if (null == parent) {
                // If dst is '/' on S3, it might not exist yet, but dst.getParent()
                // will return null. In this case, use '/' as its own parent to prevent
                // NPE errors below.
                parent = args.dst;
            }
            if (!dstfs.exists(parent)) {
                dstfs.mkdirs(parent);
            }
            logPath = new Path(parent, filename);
        } else {
            logPath = new Path(args.dst, filename);
        }
    }
    FileOutputFormat.setOutputPath(jobConf, logPath);

    // create src list, dst list
    FileSystem jobfs = jobDirectory.getFileSystem(jobConf);

    Path srcfilelist = new Path(jobDirectory, "_distcp_src_files");
    Path dstfilelist = new Path(jobDirectory, "_distcp_dst_files");
    Path dstdirlist = new Path(jobDirectory, "_distcp_dst_dirs");
    jobConf.set(SRC_LIST_LABEL, srcfilelist.toString());
    jobConf.set(DST_DIR_LIST_LABEL, dstdirlist.toString());
    int srcCount = 0, cnsyncf = 0, dirsyn = 0;
    long fileCount = 0L, dirCount = 0L, byteCount = 0L, cbsyncs = 0L, skipFileCount = 0L, skipByteCount = 0L;
    try (SequenceFile.Writer src_writer = SequenceFile.createWriter(jobConf, Writer.file(srcfilelist),
            Writer.keyClass(LongWritable.class), Writer.valueClass(FilePair.class),
            Writer.compression(SequenceFile.CompressionType.NONE));
            SequenceFile.Writer dst_writer = SequenceFile.createWriter(jobConf, Writer.file(dstfilelist),
                    Writer.keyClass(Text.class), Writer.valueClass(Text.class),
                    Writer.compression(SequenceFile.CompressionType.NONE));
            SequenceFile.Writer dir_writer = SequenceFile.createWriter(jobConf, Writer.file(dstdirlist),
                    Writer.keyClass(Text.class), Writer.valueClass(FilePair.class),
                    Writer.compression(SequenceFile.CompressionType.NONE));) {
        // handle the case where the destination directory doesn't exist
        // and we've only a single src directory OR we're updating/overwriting
        // the contents of the destination directory.
        final boolean special = (args.srcs.size() == 1 && !dstExists) || update || overwrite;

        Path basedir = null;
        HashSet<Path> parentDirsToCopy = new HashSet<Path>();
        if (args.basedir != null) {
            FileSystem basefs = args.basedir.getFileSystem(conf);
            basedir = args.basedir.makeQualified(basefs.getUri(), basefs.getWorkingDirectory());
            if (!basefs.isDirectory(basedir)) {
                throw new IOException("Basedir " + basedir + " is not a directory.");
            }
        }

        for (Iterator<Path> srcItr = args.srcs.iterator(); srcItr.hasNext();) {
            final Path src = srcItr.next();
            FileSystem srcfs = src.getFileSystem(conf);
            FileStatus srcfilestat = srcfs.getFileStatus(src);
            Path root = special && srcfilestat.isDirectory() ? src : src.getParent();
            if (dstExists && !dstIsDir && (args.srcs.size() > 1 || srcfilestat.isDirectory())) {
                // destination should not be a file
                throw new IOException("Destination " + args.dst + " should be a dir"
                        + " if multiple source paths are there OR if" + " the source path is a dir");
            }

            if (basedir != null) {
                root = basedir;
                Path parent = src.getParent().makeQualified(srcfs.getUri(), srcfs.getWorkingDirectory());
                while (parent != null && !parent.equals(basedir)) {
                    if (!parentDirsToCopy.contains(parent)) {
                        parentDirsToCopy.add(parent);
                        String dst = makeRelative(root, parent);
                        FileStatus pst = srcfs.getFileStatus(parent);
                        src_writer.append(new LongWritable(0), new FilePair(pst, dst));
                        dst_writer.append(new Text(dst), new Text(parent.toString()));
                        dir_writer.append(new Text(dst), new FilePair(pst, dst));
                        if (++dirsyn > SYNC_FILE_MAX) {
                            dirsyn = 0;
                            dir_writer.sync();
                        }
                    }
                    parent = parent.getParent();
                }

                if (parent == null) {
                    throw new IOException("Basedir " + basedir + " is not a prefix of source path " + src);
                }
            }

            if (srcfilestat.isDirectory()) {
                ++srcCount;
                final String dst = makeRelative(root, src);
                if (!update || !dirExists(conf, new Path(args.dst, dst))) {
                    ++dirCount;
                    src_writer.append(new LongWritable(0), new FilePair(srcfilestat, dst));
                }
                dst_writer.append(new Text(dst), new Text(src.toString()));
            }

            Stack<FileStatus> pathstack = new Stack<FileStatus>();
            for (pathstack.push(srcfilestat); !pathstack.empty();) {
                FileStatus cur = pathstack.pop();
                FileStatus[] children = srcfs.listStatus(cur.getPath());
                for (int i = 0; i < children.length; i++) {
                    boolean skipPath = false;
                    final FileStatus child = children[i];
                    final String dst = makeRelative(root, child.getPath());
                    ++srcCount;

                    if (child.isDirectory()) {
                        pathstack.push(child);
                        if (!update || !dirExists(conf, new Path(args.dst, dst))) {
                            ++dirCount;
                        } else {
                            skipPath = true; // skip creating dir at destination
                        }
                    } else {
                        Path destPath = new Path(args.dst, dst);
                        if (cur.isFile() && (args.srcs.size() == 1)) {
                            // Copying a single file; use dst path provided by user as
                            // destination file rather than destination directory
                            Path dstparent = destPath.getParent();
                            FileSystem destFileSys = destPath.getFileSystem(jobConf);
                            if (!(destFileSys.exists(dstparent)
                                    && destFileSys.getFileStatus(dstparent).isDirectory())) {
                                destPath = dstparent;
                            }
                        }
                        //skip path if the src and the dst files are the same.
                        skipPath = update && sameFile(srcfs, child, dstfs, destPath, skipCRCCheck);
                        //skip path if it exceeds the file limit or size limit
                        skipPath |= fileCount == args.filelimit || byteCount + child.getLen() > args.sizelimit;

                        if (!skipPath) {
                            ++fileCount;
                            byteCount += child.getLen();

                            if (LOG.isTraceEnabled()) {
                                LOG.trace("adding file " + child.getPath());
                            }

                            ++cnsyncf;
                            cbsyncs += child.getLen();
                            if (cnsyncf > SYNC_FILE_MAX || cbsyncs > maxBytesPerMap) {
                                src_writer.sync();
                                dst_writer.sync();
                                cnsyncf = 0;
                                cbsyncs = 0L;
                            }
                        } else {
                            ++skipFileCount;
                            skipByteCount += child.getLen();
                            if (LOG.isTraceEnabled()) {
                                LOG.trace("skipping file " + child.getPath());
                            }
                        }
                    }

                    if (!skipPath) {
                        src_writer.append(new LongWritable(child.isDirectory() ? 0 : child.getLen()),
                                new FilePair(child, dst));
                    }

                    dst_writer.append(new Text(dst), new Text(child.getPath().toString()));
                }

                if (cur.isDirectory()) {
                    String dst = makeRelative(root, cur.getPath());
                    dir_writer.append(new Text(dst), new FilePair(cur, dst));
                    if (++dirsyn > SYNC_FILE_MAX) {
                        dirsyn = 0;
                        dir_writer.sync();
                    }
                }
            }
        }
    }
    LOG.info("sourcePathsCount(files+directories)=" + srcCount);
    LOG.info("filesToCopyCount=" + fileCount);
    LOG.info("bytesToCopyCount=" + TraditionalBinaryPrefix.long2String(byteCount, "", 1));
    if (update) {
        LOG.info("filesToSkipCopyCount=" + skipFileCount);
        LOG.info("bytesToSkipCopyCount=" + TraditionalBinaryPrefix.long2String(skipByteCount, "", 1));
    }
    if (args.dryrun) {
        return false;
    }
    int mapCount = setMapCount(byteCount, jobConf);
    // Increase the replication of _distcp_src_files, if needed
    setReplication(conf, jobConf, srcfilelist, mapCount);

    FileStatus dststatus = null;
    try {
        dststatus = dstfs.getFileStatus(args.dst);
    } catch (FileNotFoundException fnfe) {
        LOG.info(args.dst + " does not exist.");
    }

    // create dest path dir if copying > 1 file
    if (dststatus == null) {
        if (srcCount > 1 && !dstfs.mkdirs(args.dst)) {
            throw new IOException("Failed to create" + args.dst);
        }
    }

    final Path sorted = new Path(jobDirectory, "_distcp_sorted");
    checkDuplication(jobfs, dstfilelist, sorted, conf);

    if (dststatus != null && args.flags.contains(Options.DELETE)) {
        long deletedPathsCount = deleteNonexisting(dstfs, dststatus, sorted, jobfs, jobDirectory, jobConf,
                conf);
        LOG.info("deletedPathsFromDestCount(files+directories)=" + deletedPathsCount);
    }

    Path tmpDir = new Path(
            (dstExists && !dstIsDir) || (!dstExists && srcCount == 1) ? args.dst.getParent() : args.dst,
            "_distcp_tmp_" + randomId);
    jobConf.set(TMP_DIR_LABEL, tmpDir.toUri().toString());

    // Explicitly create the tmpDir to ensure that it can be cleaned
    // up by fullyDelete() later.
    tmpDir.getFileSystem(conf).mkdirs(tmpDir);

    LOG.info("sourcePathsCount=" + srcCount);
    LOG.info("filesToCopyCount=" + fileCount);
    LOG.info("bytesToCopyCount=" + TraditionalBinaryPrefix.long2String(byteCount, "", 1));
    jobConf.setInt(SRC_COUNT_LABEL, srcCount);
    jobConf.setLong(TOTAL_SIZE_LABEL, byteCount);

    return (fileCount + dirCount) > 0;
}
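
The Stack.empty() idiom at the heart of setup() is the iterative directory walk around pathstack. Below is a distilled sketch of that pattern, swapping Hadoop's FileSystem for java.io.File so it runs standalone; it illustrates the technique only, and all names are hypothetical, not DistCp code.

import java.io.File;
import java.util.Stack;

public class IterativeTreeWalk {

    // Walk a directory tree without recursion: seed the stack with the
    // root and loop until empty(), mirroring the pathstack loop above.
    public static void walk(File root) {
        Stack<File> pathStack = new Stack<>();
        for (pathStack.push(root); !pathStack.empty();) {
            File cur = pathStack.pop();
            File[] children = cur.listFiles();
            if (children == null) {
                continue; // cur is a plain file or an unreadable directory
            }
            for (File child : children) {
                if (child.isDirectory()) {
                    pathStack.push(child); // visit this subtree later
                } else {
                    System.out.println("file: " + child.getPath());
                }
            }
        }
    }

    public static void main(String[] args) {
        walk(new File(args.length > 0 ? args[0] : "."));
    }
}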

From source file:alluxio.underfs.hdfs.HdfsUnderFileSystem.java

@Override
public boolean mkdirs(String path, MkdirsOptions options) throws IOException {
    IOException te = null;
    RetryPolicy retryPolicy = new CountingRetry(MAX_TRY);
    while (retryPolicy.attemptRetry()) {
        try {
            Path hdfsPath = new Path(path);
            if (mFileSystem.exists(hdfsPath)) {
                LOG.debug("Trying to create existing directory at {}", path);
                return false;
            }
            // Create directories one by one with explicit permissions, to ensure no
            // umask is applied; a single mkdirs call would apply the permission only
            // to the last directory
            Stack<Path> dirsToMake = new Stack<>();
            dirsToMake.push(hdfsPath);
            Path parent = hdfsPath.getParent();
            while (!mFileSystem.exists(parent)) {
                dirsToMake.push(parent);
                parent = parent.getParent();
            }
            while (!dirsToMake.empty()) {
                Path dirToMake = dirsToMake.pop();
                if (!FileSystem.mkdirs(mFileSystem, dirToMake, new FsPermission(options.getMode().toShort()))) {
                    return false;
                }
                // Set the owner to the Alluxio client user to achieve permission delegation.
                // The Alluxio server-side user must be an HDFS superuser. If setting the
                // owner fails, proceed with mkdirs and print a warning message.
                try {
                    setOwner(dirToMake.toString(), options.getOwner(), options.getGroup());
                } catch (IOException e) {
                    LOG.warn("Failed to update the ufs dir ownership, default values will be used. " + e);
                }
            }
            return true;
        } catch (IOException e) {
            LOG.warn("{} try to make directory for {} : {}", retryPolicy.getRetryCount(), path, e.getMessage());
            te = e;
        }
    }
    throw te;
}
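
The pattern here is to collect the missing ancestors bottom-up onto a stack, then pop until empty() to create the directories top-down, applying per-directory settings as you go. A standalone sketch with java.io.File; the names are hypothetical, and the Alluxio permission/ownership calls are reduced to a comment.

import java.io.File;
import java.util.Stack;

public class MkdirsOneByOne {

    // Stack the missing ancestors bottom-up, then pop to create them
    // top-down, one mkdir at a time, as in the Alluxio code above.
    public static boolean mkdirs(File dir) {
        Stack<File> dirsToMake = new Stack<>();
        File cur = dir;
        while (cur != null && !cur.exists()) {
            dirsToMake.push(cur); // deepest first, so it is popped last
            cur = cur.getParentFile();
        }
        while (!dirsToMake.empty()) {
            File dirToMake = dirsToMake.pop(); // shallowest missing dir first
            if (!dirToMake.mkdir()) {
                return false;
            }
            // a setOwner(...)-style call would go here, per directory
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(mkdirs(new File("a/b/c")));
    }
}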

From source file:org.apache.hadoop.hbase.filter.ParseFilter.java

/**
 * This function is called while parsing the filterString and an operator is parsed
 * <p>
 * @param operatorStack the stack containing the operators and parenthesis
 * @param filterStack the stack containing the filters
 * @param operator the operator found while parsing the filterString
 */
public void reduce(Stack<ByteBuffer> operatorStack, Stack<Filter> filterStack, ByteBuffer operator) {
    while (!operatorStack.empty() && !(ParseConstants.LPAREN_BUFFER.equals(operatorStack.peek()))
            && hasHigherPriority(operatorStack.peek(), operator)) {
        filterStack.push(popArguments(operatorStack, filterStack));
    }
}
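
reduce() is the classic shunting-yard step: keep popping operators of higher (or, for left-associativity, equal) precedence before pushing a new one, stopping at a left parenthesis or an empty stack. A self-contained arithmetic sketch of the same shape; hasHigherPriority and the HBase filter types are replaced by hypothetical stand-ins.

import java.util.Stack;

public class PrecedenceReduce {

    static int precedence(char op) {
        return (op == '*' || op == '/') ? 2 : 1; // '+' and '-' bind weaker
    }

    // Pop and apply operators of higher-or-equal precedence before
    // pushing a newly parsed one -- the same shape as reduce() above.
    static void reduce(Stack<Character> operatorStack, Stack<Integer> operandStack, char op) {
        while (!operatorStack.empty() && operatorStack.peek() != '('
                && precedence(operatorStack.peek()) >= precedence(op)) {
            apply(operatorStack, operandStack);
        }
    }

    static void apply(Stack<Character> operatorStack, Stack<Integer> operandStack) {
        char op = operatorStack.pop();
        int b = operandStack.pop(), a = operandStack.pop();
        operandStack.push(op == '+' ? a + b : op == '-' ? a - b : op == '*' ? a * b : a / b);
    }

    public static void main(String[] args) {
        // Evaluate 2 + 3 * 4 left to right
        Stack<Character> ops = new Stack<>();
        Stack<Integer> vals = new Stack<>();
        vals.push(2); ops.push('+'); vals.push(3);
        reduce(ops, vals, '*');   // '*' binds tighter: nothing popped
        ops.push('*'); vals.push(4);
        while (!ops.empty()) {
            apply(ops, vals);     // drain: 3*4 first, then 2+12
        }
        System.out.println(vals.pop()); // 14
    }
}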

From source file:com.anite.zebra.core.Engine.java

public void transitionTask(ITaskInstance taskInstance) throws TransitionException {
    /*
     * we need to LOCK the ProcessInstance from changes by other Engine
     * instances 
     */

    IProcessInstance currentProcess = taskInstance.getProcessInstance();
    try {
        stateFactory.acquireLock(currentProcess, this);
    } catch (LockException e) {
        String emsg = "Failed to aquire an exclusive lock on the Process Instance (" + currentProcess
                + "). Transitioning aborted.";
        log.error(emsg, e);
        throw new TransitionException(emsg, e);
    }

    Stack taskStack = new Stack();
    taskStack.push(taskInstance);
    while (!taskStack.empty()) {
        // get the task from the Stack
        ITaskInstance currentTask = (ITaskInstance) taskStack.pop();
        Map createdTasks;
        try {
            createdTasks = transitionTaskFromStack(currentTask, currentProcess);
        } catch (Exception e) {
            String emsg = "Problem encountered transitioning task from Stack";
            log.error(emsg, e);
            throw new TransitionException(e);
        }
        for (Iterator it = createdTasks.values().iterator(); it.hasNext();) {
            ITaskInstance newTask = (ITaskInstance) it.next();
            ITaskDefinition td;
            try {
                td = newTask.getTaskDefinition();
            } catch (DefinitionNotFoundException e) {
                String emsg = "FATAL: Failed to access the Task Definition";
                log.error(emsg, e);
                // throwing an exception here will leave the process "locked", but that is a valid situation
                throw new TransitionException(emsg, e);
            }
            if (td.isAuto() || td.isSynchronised()) {
                /*
                 * This is an Auto (or synchronised) task, so add it to the
                 * stack for processing. Check whether the task is already
                 * present in the stack before adding it.
                 */
                if (!taskStack.contains(newTask)) {
                    if (log.isInfoEnabled()) {
                        log.info("Added task to TaskStack - " + newTask);
                    }
                    taskStack.push(newTask);
                } else {
                    if (log.isInfoEnabled()) {
                        log.info("transitionTask - task already exists in stack " + newTask);
                    }
                }
            }
        }
    }
    try {
        if (currentProcess.getTaskInstances().size() == 0) {
            // mark process complete
            doProcessDestruct(currentProcess);
        }
        /*
         * release lock on process instance
         */
        stateFactory.releaseLock(currentProcess, this);
    } catch (Exception e) {
        String emsg = "FATAL: Couldnt release lock on Process Instance (" + currentProcess
                + ") after transitioning. Process will be left in an usuable state";
        log.fatal(emsg, e);
        throw new TransitionException(emsg, e);
    }
}
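
transitionTask() is a worklist loop: pop a task, process it, and push any follow-up tasks it creates, using contains() to avoid queuing the same task twice. A minimal standalone sketch of that shape with hypothetical task names (Java 9+ for Map.of and List.of):

import java.util.List;
import java.util.Map;
import java.util.Stack;

public class Worklist {
    public static void main(String[] args) {
        // Hypothetical task graph: transitioning a task creates these follow-ups.
        Map<String, List<String>> createdBy = Map.of(
                "start", List.of("auto-2", "auto-1"),
                "auto-1", List.of("auto-2"), // already queued: contains() skips it
                "auto-2", List.of());

        Stack<String> taskStack = new Stack<>();
        taskStack.push("start");
        while (!taskStack.empty()) {
            String current = taskStack.pop();
            System.out.println("transitioning " + current);
            for (String newTask : createdBy.getOrDefault(current, List.of())) {
                if (!taskStack.contains(newTask)) {
                    taskStack.push(newTask);
                } else {
                    System.out.println(newTask + " already on the stack");
                }
            }
        }
    }
}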

From source file:org.alfresco.web.app.AlfrescoNavigationHandler.java

/**
 * Adds the current view to the stack (if required).
 * If the current view is already the top of the stack it is not added again
 * to stop the stack from growing and growing.
 *
 * @param context FacesContext
 */
@SuppressWarnings("unchecked")
protected void addCurrentViewToStack(FacesContext context) {
    // if the current viewId is either the dialog or wizard container page
    // we need to save the state of the current dialog or wizard to the stack

    // If the current view is a normal page and it is not the same as the 
    // view currently at the top of the stack (you can't launch a dialog from
    // the same page 2 times in a row so it must mean the user navigated away
    // from the first dialog) just add the viewId to the stack

    // work out what to add to the stack
    String viewId = context.getViewRoot().getViewId();
    String dialogContainer = getDialogContainer(context);
    String wizardContainer = getWizardContainer(context);
    Object objectForStack = null;
    if (viewId.equals(dialogContainer)) {
        DialogManager dlgMgr = Application.getDialogManager();
        objectForStack = dlgMgr.getState();
    } else if (viewId.equals(wizardContainer)) {
        WizardManager wizMgr = Application.getWizardManager();
        objectForStack = wizMgr.getState();
    } else {
        objectForStack = viewId;
    }

    // if the stack is currently empty add the item
    Stack stack = getViewStack(context);
    if (stack.empty()) {
        stack.push(objectForStack);

        if (logger.isDebugEnabled())
            logger.debug("Pushed item to view stack: " + objectForStack);
    } else {
        // if the item going onto the stack and the current top of the
        // stack are both Strings and equal to each other, don't push
        // anything, to stop the stack growing unnecessarily
        Object topOfStack = stack.peek();
        if (objectForStack instanceof String && topOfStack instanceof String
                && topOfStack.equals(objectForStack)) {
            if (logger.isDebugEnabled())
                logger.debug("current view is already top of the view stack!");
        } else {
            stack.push(objectForStack);

            if (logger.isDebugEnabled())
                logger.debug("Pushed item to view stack: " + objectForStack);
        }
    }
}
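
The guard here is peeking at the top before pushing, so repeated visits to the same view don't grow the stack. Note that empty() must be checked first, because peek() on an empty Stack throws EmptyStackException. A compact sketch with hypothetical view IDs:

import java.util.Stack;

public class ViewStack {
    final Stack<Object> stack = new Stack<>();

    // Push unless the item already sits on top; empty() is checked
    // first, since peek() on an empty Stack throws EmptyStackException.
    void pushIfNotTop(Object item) {
        if (stack.empty() || !stack.peek().equals(item)) {
            stack.push(item);
        }
    }

    public static void main(String[] args) {
        ViewStack views = new ViewStack();
        views.pushIfNotTop("/browse.jsp");
        views.pushIfNotTop("/browse.jsp"); // ignored: already top of stack
        views.pushIfNotTop("/details.jsp");
        System.out.println(views.stack);   // [/browse.jsp, /details.jsp]
    }
}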

From source file:FibonacciHeap.java

/**
 * Creates a String representation of this Fibonacci heap.
 *
 * @return String representation of this heap.
 */
public String toString() {
    if (minNode == null) {
        return "FibonacciHeap=[]";
    }

    // create a new stack and put root on it
    Stack<FibonacciHeapNode<T>> stack = new Stack<FibonacciHeapNode<T>>();
    stack.push(minNode);

    StringBuffer buf = new StringBuffer(512);
    buf.append("FibonacciHeap=[");

    // do a simple stack-based traversal of the tree
    while (!stack.empty()) {
        FibonacciHeapNode<T> curr = stack.pop();
        buf.append(curr);
        buf.append(", ");

        if (curr.child != null) {
            stack.push(curr.child);
        }

        FibonacciHeapNode<T> start = curr;
        curr = curr.right;

        while (curr != start) {
            buf.append(curr);
            buf.append(", ");

            if (curr.child != null) {
                stack.push(curr.child);
            }

            curr = curr.right;
        }
    }

    buf.append(']');

    return buf.toString();
}
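
toString() walks the heap's node structure with an explicit stack instead of recursion: pop a node, emit it, push its first child for later, and stop when empty() reports the stack drained. A stripped-down sketch over a hypothetical first-child node type; the sibling ring of the real heap is omitted for brevity.

import java.util.Stack;

public class LinkedTraversal {

    // Hypothetical first-child node, standing in for FibonacciHeapNode.
    static class Node {
        final String label;
        Node child; // first child, or null
        Node(String label) { this.label = label; }
    }

    // Pop, emit, and push the child; empty() terminates the walk.
    static String dump(Node root) {
        StringBuilder buf = new StringBuilder("[");
        Stack<Node> stack = new Stack<>();
        if (root != null) {
            stack.push(root);
        }
        while (!stack.empty()) {
            Node curr = stack.pop();
            buf.append(curr.label).append(", ");
            if (curr.child != null) {
                stack.push(curr.child);
            }
        }
        return buf.append(']').toString();
    }

    public static void main(String[] args) {
        Node a = new Node("a");
        a.child = new Node("b");
        a.child.child = new Node("c");
        System.out.println(dump(a)); // [a, b, c, ]
    }
}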

From source file:org.sipfoundry.sipxconfig.cfgmgt.ConfigManagerImpl.java

private void runProviders(ConfigRequest request, String jobLabel) {
    synchronized (m_lock) {
        try {
            while (m_flag) {
                m_lock.wait();
            }
        } catch (InterruptedException e) {
            LOG.warn("Thread interrupted. Config might be in stale state; rerun send profiles.");
        }
    }
    LOG.info("Configuration work to do. Notifying providers.");
    Serializable job = m_jobContext.schedule(jobLabel);
    m_jobContext.start(job);
    Stack<Exception> errors = new Stack<Exception>();
    for (ConfigProvider provider : getProviders()) {
        try {
            provider.replicate(this, request);
        } catch (Exception e) {
            LOG.error(jobLabel, e);
            errors.push(e);
        }
    }

    // even though there are errors, proceed to deploy phase. May want to
    // reevaluate this decision --Douglas
    if (errors.size() == 0) {
        m_jobContext.success(job);
    } else {
        fail(m_jobContext, jobLabel, job, errors.pop());
        // Tricky alert - show additional errors as new jobs
        while (!errors.empty()) {
            Serializable jobError = m_jobContext.schedule(jobLabel);
            m_jobContext.start(jobError);
            fail(m_jobContext, jobLabel, jobError, errors.pop());
        }
    }
}
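
runProviders() accumulates failures on a stack rather than aborting on the first one, then reports the first error and drains the rest with empty(). A standalone sketch of that collect-then-report pattern with hypothetical step names:

import java.util.List;
import java.util.Stack;

public class CollectErrors {
    public static void main(String[] args) {
        // Run every step, stashing failures instead of stopping early.
        Stack<Exception> errors = new Stack<>();
        for (String step : List.of("dns", "ntp", "sip")) {
            try {
                if (step.equals("ntp")) {
                    throw new IllegalStateException("ntp failed");
                }
                System.out.println(step + " replicated");
            } catch (Exception e) {
                errors.push(e);
            }
        }
        // Only afterwards decide success or failure.
        if (errors.size() == 0) {
            System.out.println("job succeeded");
        } else {
            System.out.println("job failed: " + errors.pop().getMessage());
            while (!errors.empty()) {
                System.out.println("additional failure: " + errors.pop().getMessage());
            }
        }
    }
}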

From source file:song.androidstudy.tracedump.java

public boolean dump(int stackLimit) {
    if (mTraceRecordList == null) {
        return false;
    }
    HashMap<Integer, Integer> knownFrame = new HashMap<Integer, Integer>(mTda.length);
    int maxStackLevel = 0;
    HashMap<Integer, Stack<StackFrame>> threadStackMap = null;
    Stack<StackFrame> threadStack = null;
    Long endtime, startTime;
    int i;
    boolean contextSwitch;
    threadStackMap = new HashMap<Integer, Stack<StackFrame>>(mTda.length);
    for (i = 0; i < mTda.length; i++) {
        threadStackMap.put(mTda[i].getId(), new Stack<StackFrame>());
        knownFrame.put(mTda[i].getId(), -1);
    }
    for (TimeLineView.Record record : mTraceRecordList) {
        TimeLineView.Block block = record.block;
        TimeLineView.Row row = record.row;
        MethodData data = block.getMethodData();
        threadStack = threadStackMap.get(row.getId());
        startTime = block.getStartTime();
        endtime = block.getEndTime();
        while (!threadStack.empty()) {
            if (threadStack.peek().endtime <= startTime) {
                // a function call returned
                if (threadStack.size() == knownFrame.get(row.getId())) {
                    knownFrame.put(row.getId(), -1);
                }
                threadStack.pop();
            } else {
                if (threadStack.peek().endtime < endtime) {
                    System.out.println("stack prediction error!");
                }
                break;
            }
        }
        if (!data.getClassName().equals("(context switch)")) {
            StackFrame frame = new StackFrame();
            frame.className = data.getClassName();
            frame.methodName = data.getMethodName();
            frame.startTime = startTime;
            frame.endtime = endtime;
            contextSwitch = false;
            threadStack.push(frame);
            if (knownFrame.get(row.getId()) == -1) {
                if (knownMethod(data)) {
                    knownFrame.put(row.getId(), threadStack.size());
                }
            }
        } else {
            contextSwitch = true;
        }
        if (threadStack.size() > maxStackLevel) {
            maxStackLevel = threadStack.size();
        }

        if (threadStack.size() <= stackLimit) {
            String name = data.getClassName() + '.' + data.getMethodName();
            if (knownFrame.get(row.getId()) == -1 || knownFrame.get(row.getId()) == threadStack.size()) {
                if (!ignoreMethod(data)) {
                    for (i = 0; i < threadStack.size(); i++) {
                        System.out.print("  ");
                    }
                    System.out.println(name + "          ::" + +block.getStartTime() + "::" + block.getEndTime()
                            + "::" + threadStack.size() + "::" + row.getName());
                }
            }
        }
        if (contextSwitch) {
            System.out.println("--------------------------");
        }
    }
    System.out.println("max stack level is " + maxStackLevel);
    return true;
}
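
dump() reconstructs per-thread call stacks from (start, end) trace records: before pushing a new frame, it pops every frame whose end time has already passed. A simplified single-thread sketch with made-up timing data (Java 16+ for the record syntax):

import java.util.Stack;

public class CallStackReplay {

    record Frame(String name, long start, long end) {}

    // Replay method-entry records sorted by start time; frames whose
    // end time has passed are popped before the next frame is pushed.
    public static void main(String[] args) {
        Frame[] records = {
                new Frame("main", 0, 100),
                new Frame("parse", 10, 40),
                new Frame("tokenize", 15, 30),
                new Frame("emit", 50, 90),
        };
        Stack<Frame> stack = new Stack<>();
        for (Frame f : records) {
            while (!stack.empty() && stack.peek().end() <= f.start()) {
                stack.pop(); // that call has already returned
            }
            stack.push(f);
            System.out.println("  ".repeat(stack.size() - 1) + f.name()
                    + " depth=" + stack.size());
        }
    }
}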

From source file:com.jsmartframework.web.manager.TagHandler.java

@SuppressWarnings("unchecked")
protected void popDelegateTagParent() {
    Stack<RefAction> actionStack = (Stack<RefAction>) getMappedValue(DELEGATE_TAG_PARENT);

    RefAction refAction = actionStack.pop();
    if (actionStack.empty()) {
        removeMappedValue(DELEGATE_TAG_PARENT);
    }

    Map<String, EventAction> refs = refAction.getRefs();
    if (refs != null) {

        for (String refId : refs.keySet()) {
            EventAction eventAction = refs.get(refId);
            if (eventAction == null) {
                continue;
            }

            Map<String, Set<Ajax>> refAjaxs = eventAction.getAjaxs();
            if (refAjaxs != null) {
                for (String event : refAjaxs.keySet()) {
                    for (Ajax jsonAjax : refAjaxs.get(event)) {
                        StringBuilder builder = new StringBuilder();
                        builder.append(JSMART_AJAX.format(getJsonValue(jsonAjax)));
                        builder = getDelegateFunction(id, "*[role-delegate=\"" + refId + "\"]",
                                event.toLowerCase(), builder);
                        appendDocScript(builder);
                    }
                }
            }

            Map<String, Set<Bind>> refBinds = eventAction.getBinds();
            if (refBinds != null) {
                for (String event : refBinds.keySet()) {
                    for (Bind jsonBind : refBinds.get(event)) {
                        StringBuilder builder = new StringBuilder();
                        builder.append(JSMART_BIND.format(getJsonValue(jsonBind)));
                        builder = getDelegateFunction(id, "*[role-delegate=\"" + refId + "\"]",
                                event.toLowerCase(), builder);
                        appendDocScript(builder);
                    }
                }
            }
        }
    }
}
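
popDelegateTagParent() pairs pop() with a cleanup step: once the stack drains to empty(), the mapped value is removed so no empty stack lingers. A generic sketch of that pop-and-evict pattern over a hypothetical key-to-stack map:

import java.util.HashMap;
import java.util.Map;
import java.util.Stack;

public class ScopedStack {
    private final Map<String, Stack<String>> scopes = new HashMap<>();

    void push(String key, String value) {
        scopes.computeIfAbsent(key, k -> new Stack<>()).push(value);
    }

    // Pop the current value; once the stack drains to empty(),
    // drop the key entirely so no empty stack lingers in the map.
    String pop(String key) {
        Stack<String> stack = scopes.get(key);
        String value = stack.pop();
        if (stack.empty()) {
            scopes.remove(key);
        }
        return value;
    }

    public static void main(String[] args) {
        ScopedStack s = new ScopedStack();
        s.push("delegate", "outer");
        s.push("delegate", "inner");
        System.out.println(s.pop("delegate")); // inner
        System.out.println(s.pop("delegate")); // outer; key removed now
        System.out.println(s.scopes.containsKey("delegate")); // false
    }
}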

From source file:web.diva.server.model.SomClustering.SomClustImgGenerator.java

public int countgenes(Node trunk) {
    java.util.Stack c = new java.util.Stack();
    int ret = 0;

    if (trunk == null) {
        System.out.print("\n!No trunk\n");
        return ret; // popping a null trunk below would throw NullPointerException
    }

    c.push(trunk);
    Node tr = trunk;

    while (!c.empty()) {
        tr = (Node) c.pop();

        if (tr.merged) {
            c.push(tr.left);
            c.push(tr.right);
        } else {
            ret++;
        }
    }

    return ret;
}
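
countgenes() counts the leaves of a binary merge tree without recursion: internal ("merged") nodes have their children pushed, leaves are counted, and empty() ends the walk. The same logic in a self-contained sketch using generics (Stack<Node> instead of the raw Stack above, which removes the cast on pop); the Node fields are assumed from how countgenes() uses them.

import java.util.Stack;

public class CountLeaves {

    // Hypothetical merge-tree node, assumed from countgenes() above.
    static class Node {
        Node left, right;
        boolean merged; // true = internal node with two children
    }

    // Push internal nodes' children, count leaves, stop at empty().
    static int countLeaves(Node trunk) {
        Stack<Node> c = new Stack<>();
        int ret = 0;
        c.push(trunk);
        while (!c.empty()) {
            Node tr = c.pop();
            if (tr.merged) {
                c.push(tr.left);
                c.push(tr.right);
            } else {
                ret++;
            }
        }
        return ret;
    }

    public static void main(String[] args) {
        Node root = new Node(), l = new Node(), r = new Node();
        root.merged = true;
        root.left = l;
        root.right = r;
        System.out.println(countLeaves(root)); // 2
    }
}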