Example usage for java.util Stack push

List of usage examples for java.util Stack push

Introduction

In this page you can find the example usage for java.util Stack push.

Prototype

public E push(E item) 

Source Link

Document

Pushes an item onto the top of this stack.

Usage

From source file:com.almarsoft.GroundhogReader.MessageListActivity.java

/**
 * Iteratively (non-recursively) walks the article thread tree rooted at
 * {@code root} and builds the header item list: items whose subject is
 * starred come first, followed by all non-starred items. An explicit stack
 * of deferred siblings replaces recursion so deep threads cannot overflow
 * the call stack.
 *
 * @param root    root article of the (sub)tree; may be {@code null}, in
 *                which case nothing is added
 * @param depth   indentation depth assigned to {@code root}
 * @param replyto From-header of the parent article, propagated to children
 */
private void fillListNonRecursive(Article root, int depth, String replyto) {

    Stack<MiniHeader> stack = new Stack<MiniHeader>();

    boolean markReplies = mPrefs.getBoolean("markReplies", true);

    String clean_subject;
    MiniHeader tmpMiniItem;
    HeaderItemClass ih = null;
    String[] refsArray;
    String msgId;

    ArrayList<HeaderItemClass> nonStarredItems = new ArrayList<HeaderItemClass>();
    HashSet<String> bannedTrollsSet = DBUtils.getBannedTrolls(getApplicationContext());
    HashSet<String> starredSet = DBUtils.getStarredSubjectsSet(getApplicationContext());

    // Proxies for speed (avoid repeated field lookups inside the loop)
    HashSet<String> myPostsSetProxy = mMyPostsSet;
    ArrayList<HeaderItemClass> headerItemsListProxy = new ArrayList<HeaderItemClass>();

    // BUGFIX: the original set a "finished" flag when root was null but then
    // fell through and dereferenced root anyway, throwing a
    // NullPointerException. Loop directly on the nullness of the current node.
    while (root != null) {

        root.setReplyTo(replyto);

        if (!root.isDummy()) {
            ih = new HeaderItemClass(root, depth);

            // Don't feed the troll
            if (!bannedTrollsSet.contains(root.getFrom())) {

                // Put the replies in red (if configured)
                if (markReplies) {
                    // BUGFIX: check the references array for null BEFORE
                    // reading its length (the original read .length first).
                    refsArray = root.getReferences();
                    msgId = null;
                    if (refsArray != null && refsArray.length > 0) {
                        // The last reference is the immediate parent message
                        msgId = refsArray[refsArray.length - 1];
                    }
                    ih.myreply = (msgId != null && myPostsSetProxy != null && myPostsSetProxy.contains(msgId));
                }

                clean_subject = root.simplifiedSubject();
                if (starredSet.contains(clean_subject)) {
                    ih.starred = true;
                    headerItemsListProxy.add(ih); // Starred items first
                } else {
                    // Non-starred items are appended after the traversal
                    nonStarredItems.add(ih);
                }
            }
        }

        // Defer the next sibling; it is visited after this node's subtree.
        if (root.next != null) {
            tmpMiniItem = new MiniHeader(root.next, depth, replyto);
            stack.push(tmpMiniItem);
        }

        if (root.kid != null) {
            // Descend into the first child
            replyto = root.getFrom();
            if (!root.isDummy())
                ++depth;
            root = root.kid;
        } else if (!stack.empty()) {
            // Backtrack to the most recently deferred sibling
            tmpMiniItem = stack.pop();
            root = tmpMiniItem.article;
            depth = tmpMiniItem.depth;
            replyto = tmpMiniItem.replyto;
        } else {
            root = null; // traversal complete
        }
    }

    // Now add the non starred items after the starred ones
    headerItemsListProxy.addAll(nonStarredItems);

    mHeaderItemsList = headerItemsListProxy;
}

From source file:alluxio.master.file.DefaultFileSystemMaster.java

/**
 * Implements renaming.
 *
 * @param srcInodePath the path of the rename source
 * @param dstInodePath the path to the rename destination
 * @param replayed whether the operation is a result of replaying the journal
 * @param options method options
 * @throws FileDoesNotExistException if a non-existent file is encountered
 * @throws InvalidPathException if an invalid path is encountered
 * @throws IOException if an I/O error is encountered
 */
private void renameInternal(LockedInodePath srcInodePath, LockedInodePath dstInodePath, boolean replayed,
        RenameOptions options) throws FileDoesNotExistException, InvalidPathException, IOException {

    // Rename logic:
    // 1. Change the source inode name to the destination name.
    // 2. Insert the source inode into the destination parent.
    // 3. Do UFS operations if necessary.
    // 4. Remove the source inode (reverting the name) from the source parent.
    // 5. Set the last modification times for both source and destination parent inodes.

    Inode<?> srcInode = srcInodePath.getInode();
    AlluxioURI srcPath = srcInodePath.getUri();
    AlluxioURI dstPath = dstInodePath.getUri();
    InodeDirectory srcParentInode = srcInodePath.getParentInodeDirectory();
    InodeDirectory dstParentInode = dstInodePath.getParentInodeDirectory();
    String srcName = srcPath.getName();
    String dstName = dstPath.getName();

    LOG.debug("Renaming {} to {}", srcPath, dstPath);

    // 1. Change the source inode name to the destination name.
    srcInode.setName(dstName);
    srcInode.setParentId(dstParentInode.getId());

    // 2. Insert the source inode into the destination parent.
    // addChild returning false means a child of that name already exists.
    if (!dstParentInode.addChild(srcInode)) {
        // On failure, revert changes and throw exception.
        srcInode.setName(srcName);
        srcInode.setParentId(srcParentInode.getId());
        throw new InvalidPathException("Destination path: " + dstPath + " already exists.");
    }

    // 3. Do UFS operations if necessary.
    // If the source file is persisted, rename it in the UFS.
    try {
        if (!replayed && srcInode.isPersisted()) {
            MountTable.Resolution resolution = mMountTable.resolve(srcPath);

            String ufsSrcPath = resolution.getUri().toString();
            UnderFileSystem ufs = resolution.getUfs();
            String ufsDstUri = mMountTable.resolve(dstPath).getUri().toString();
            // Create ancestor directories from top to the bottom. We cannot use recursive create
            // parents here because the permission for the ancestors can be different.
            List<Inode<?>> dstInodeList = dstInodePath.getInodeList();
            // The stack reverses the bottom-up discovery below so that popping
            // yields the missing directories in top-down creation order.
            Stack<Pair<String, MkdirsOptions>> ufsDirsToMakeWithOptions = new Stack<>();
            AlluxioURI curUfsDirPath = new AlluxioURI(ufsDstUri).getParent();
            // The dst inode does not exist yet, so the last inode in the list is the existing parent.
            for (int i = dstInodeList.size() - 1; i >= 0; i--) {
                if (ufs.isDirectory(curUfsDirPath.toString())) {
                    break;
                }
                Inode<?> curInode = dstInodeList.get(i);
                // Each missing ancestor is created with the owner/group/mode of
                // the corresponding Alluxio inode, not with inherited defaults.
                MkdirsOptions mkdirsOptions = MkdirsOptions.defaults().setCreateParent(false)
                        .setOwner(curInode.getOwner()).setGroup(curInode.getGroup())
                        .setMode(new Mode(curInode.getMode()));
                ufsDirsToMakeWithOptions.push(new Pair<>(curUfsDirPath.toString(), mkdirsOptions));
                curUfsDirPath = curUfsDirPath.getParent();
            }
            while (!ufsDirsToMakeWithOptions.empty()) {
                Pair<String, MkdirsOptions> ufsDirAndPerm = ufsDirsToMakeWithOptions.pop();
                if (!ufs.mkdirs(ufsDirAndPerm.getFirst(), ufsDirAndPerm.getSecond())) {
                    throw new IOException(
                            ExceptionMessage.FAILED_UFS_CREATE.getMessage(ufsDirAndPerm.getFirst()));
                }
            }
            boolean success;
            if (srcInode.isFile()) {
                success = ufs.renameFile(ufsSrcPath, ufsDstUri);
            } else {
                success = ufs.renameDirectory(ufsSrcPath, ufsDstUri);
            }
            if (!success) {
                throw new IOException(ExceptionMessage.FAILED_UFS_RENAME.getMessage(ufsSrcPath, ufsDstUri));
            }
        }
    } catch (Exception e) {
        // On failure, revert changes and throw exception.
        // Undo step 2 (insertion into destination parent) and step 1 (rename).
        if (!dstParentInode.removeChild(dstName)) {
            LOG.error("Failed to revert rename changes. Alluxio metadata may be inconsistent.");
        }
        srcInode.setName(srcName);
        srcInode.setParentId(srcParentInode.getId());
        throw e;
    }

    // TODO(jiri): A crash between now and the time the rename operation is journaled will result in
    // an inconsistency between Alluxio and UFS.

    // 4. Remove the source inode (reverting the name) from the source parent. The name must be
    // reverted or removeChild will not be able to find the appropriate child entry since it is
    // keyed on the original name.
    srcInode.setName(srcName);
    if (!srcParentInode.removeChild(srcInode)) {
        // This should never happen.
        LOG.error("Failed to rename {} to {} in Alluxio. Alluxio and under storage may be " + "inconsistent.",
                srcPath, dstPath);
        // Best-effort rollback: undo the step-2 insertion and restore the
        // original name/parent id before rethrowing.
        srcInode.setName(dstName);
        if (!dstParentInode.removeChild(dstName)) {
            LOG.error("Failed to revert changes when renaming {} to {}. Alluxio metadata may be "
                    + "inconsistent.", srcPath, dstPath);
        }
        srcInode.setName(srcName);
        srcInode.setParentId(srcParentInode.getId());
        throw new IOException("Failed to remove source path " + srcPath + " from parent");
    }
    srcInode.setName(dstName);

    // 5. Set the last modification times for both source and destination parent inodes.
    // Note this step relies on setLastModificationTimeMs being thread safe to guarantee the
    // correct behavior when multiple files are being renamed within a directory.
    dstParentInode.setLastModificationTimeMs(options.getOperationTimeMs());
    srcParentInode.setLastModificationTimeMs(options.getOperationTimeMs());
    Metrics.PATHS_RENAMED.inc();
}

From source file:com.zeroio.webdav.WebdavServlet.java

/**
 * PROPFIND Method.
 * <p>
 * Handles the WebDAV PROPFIND request: determines the requested property
 * set and depth, resolves the resource, and streams a multistatus XML
 * response for the resource (and, depth permitting, its children and any
 * lock-null resources).
 *
 * @param context Description of the Parameter
 * @throws ServletException Description of the Exception
 * @throws IOException      Description of the Exception
 */
protected void doPropfind(ActionContext context) throws ServletException, IOException {

    String path = getRelativePath(context.getRequest());

    //fix for windows clients
    if (path.equals("/files")) {
        path = "";
    }

    // Normalize away a trailing slash
    if (path.endsWith("/")) {
        path = path.substring(0, path.length() - 1);
    }

    // Never expose the servlet's internal directories
    if ((path.toUpperCase().startsWith("/WEB-INF")) || (path.toUpperCase().startsWith("/META-INF"))) {
        context.getResponse().sendError(WebdavStatus.SC_FORBIDDEN);
        return;
    }

    if (path.indexOf("/.") > -1 || path.indexOf(".DS_Store") > -1) {
        //Fix for MACOSX finder. Do not allow requests for files starting with a period
        return;
    }
    // Properties which are to be displayed.
    Vector properties = null;
    // Propfind depth by default 1 for performance reasons
    int depth = 1;
    // Propfind type
    int type = FIND_ALL_PROP;

    // RFC 2518: a missing Depth header means "infinity"
    String depthStr = context.getRequest().getHeader("Depth");
    if (depthStr == null) {
        depth = INFINITY;
    } else {
        if (depthStr.equals("0")) {
            depth = 0;
        } else if (depthStr.equals("1")) {
            depth = 1;
        } else if (depthStr.equals("infinity")) {
            depth = INFINITY;
        }
    }

    /*
     *  Read the request xml and determine all the properties
     */
    Node propNode = null;
    DocumentBuilder documentBuilder = getDocumentBuilder();
    try {
        Document document = documentBuilder.parse(new InputSource(context.getRequest().getInputStream()));
        // Get the root element of the document
        Element rootElement = document.getDocumentElement();
        NodeList childList = rootElement.getChildNodes();
        // endsWith() is used so both prefixed (D:prop) and unprefixed
        // element names are recognized
        for (int i = 0; i < childList.getLength(); i++) {
            Node currentNode = childList.item(i);
            switch (currentNode.getNodeType()) {
            case Node.TEXT_NODE:
                break;
            case Node.ELEMENT_NODE:
                if (currentNode.getNodeName().endsWith("prop")) {
                    type = FIND_BY_PROPERTY;
                    propNode = currentNode;
                }
                if (currentNode.getNodeName().endsWith("propname")) {
                    type = FIND_PROPERTY_NAMES;
                }
                if (currentNode.getNodeName().endsWith("allprop")) {
                    type = FIND_ALL_PROP;
                }
                break;
            }
        }
    } catch (Exception e) {
        // Most likely there was no content : we use the defaults.
        // TODO : Enhance that !
    }

    // Collect the explicitly requested property names (without namespace
    // prefixes) from the <prop> element
    if (type == FIND_BY_PROPERTY) {
        properties = new Vector();
        if (!properties.contains("creationdate")) {
            //If the request did not contain creationdate property then add this to requested properties
            //to make the information available for clients
            properties.addElement("creationdate");
        }
        NodeList childList = propNode.getChildNodes();
        for (int i = 0; i < childList.getLength(); i++) {
            Node currentNode = childList.item(i);
            switch (currentNode.getNodeType()) {
            case Node.TEXT_NODE:
                break;
            case Node.ELEMENT_NODE:
                String nodeName = currentNode.getNodeName();
                String propertyName = null;
                if (nodeName.indexOf(':') != -1) {
                    propertyName = nodeName.substring(nodeName.indexOf(':') + 1);
                } else {
                    propertyName = nodeName;
                }
                // href is a live property which is handled differently
                properties.addElement(propertyName);
                break;
            }
        }
    }

    // Properties have been determined
    // Retrieve the resources

    Connection db = null;
    boolean exists = true;
    boolean status = true;
    Object current = null;
    // NOTE(review): 'child' is assigned in the traversal below but its value
    // is never read — the lookup call is kept for its NamingException side
    // effect only.
    Object child = null;
    ModuleContext resources = null;
    SystemStatus thisSystem = null;
    StringBuffer xmlsb = new StringBuffer();
    try {
        db = this.getConnection(context);
        resources = getCFSResources(db, context);
        if (resources == null) {
            context.getResponse().sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
            return;
        }
        thisSystem = this.getSystemStatus(context);
        current = resources.lookup(thisSystem, db, path);
        if (current instanceof ModuleContext) {
            //System.out.println( ((ModuleContext) current).toString());
        }
    } catch (NamingException e) {
        // The resource does not exist; it may still be a lock-null resource
        // (a lock placed on a not-yet-created resource), which gets its own
        // multistatus answer below.
        exists = false;
        int slash = path.lastIndexOf('/');
        if (slash != -1) {
            String parentPath = path.substring(0, slash);
            Vector currentLockNullResources = (Vector) lockNullResources.get(parentPath);
            if (currentLockNullResources != null) {
                Enumeration lockNullResourcesList = currentLockNullResources.elements();
                while (lockNullResourcesList.hasMoreElements()) {
                    String lockNullPath = (String) lockNullResourcesList.nextElement();
                    if (lockNullPath.equals(path)) {
                        context.getResponse().setStatus(WebdavStatus.SC_MULTI_STATUS);
                        context.getResponse().setContentType("text/xml; charset=UTF-8");
                        // Create multistatus object
                        XMLWriter generatedXML = new XMLWriter(context.getResponse().getWriter());
                        generatedXML.writeXMLHeader();
                        generatedXML.writeElement(null, "multistatus" + generateNamespaceDeclarations(),
                                XMLWriter.OPENING);
                        parseLockNullProperties(context.getRequest(), generatedXML, lockNullPath, type,
                                properties);
                        generatedXML.writeElement(null, "multistatus", XMLWriter.CLOSING);
                        generatedXML.sendData();
                        return;
                    }
                }
            }
        }
    } catch (SQLException e) {
        e.printStackTrace(System.out);
        context.getResponse().sendError(CFS_SQLERROR, e.getMessage());
        status = false;
    } finally {
        this.freeConnection(db, context);
    }

    if (!status) {
        return;
    }

    if (!exists) {
        context.getResponse().sendError(HttpServletResponse.SC_NOT_FOUND, path);
        return;
    }

    context.getResponse().setStatus(WebdavStatus.SC_MULTI_STATUS);
    context.getResponse().setContentType("text/xml; charset=UTF-8");
    // Create multistatus object

    XMLWriter generatedXML = new XMLWriter(context.getResponse().getWriter());
    generatedXML.writeXMLHeader();
    generatedXML.writeElement(null, "multistatus" + generateNamespaceDeclarations(), XMLWriter.OPENING);

    if (depth == 0) {
        parseProperties(context, resources, generatedXML, path, type, properties);
    } else {
        // Breadth-first traversal: 'stack' holds the current level,
        // 'stackBelow' accumulates the next level; each time the current
        // level empties, depth is decremented and the levels swap.
        // The stack always contains the object of the current level
        Stack stack = new Stack();
        stack.push(path);
        // Stack of the objects one level below
        Stack stackBelow = new Stack();

        while ((!stack.isEmpty()) && (depth >= 0)) {
            String currentPath = (String) stack.pop();
            try {
                if (!currentPath.equals(path)) {
                    //object at url currentPath not yet looked up. so perform lookup at url currentPath
                    child = resources.lookup(currentPath);
                    parseProperties(context, resources, generatedXML, currentPath, type, properties);
                }
            } catch (NamingException e) {
                e.printStackTrace(System.out);
                continue;
            }

            if (!status) {
                return;
            }

            if ((current instanceof ModuleContext) && depth > 0) {
                // Get a list of all the resources at the current path and store them
                // in the stack
                try {
                    NamingEnumeration enum1 = ((ModuleContext) current).list("");
                    int count = 0;
                    while (enum1.hasMoreElements()) {
                        NameClassPair ncPair = (NameClassPair) enum1.nextElement();
                        String newPath = currentPath;
                        if (!(newPath.endsWith("/"))) {
                            newPath += "/";
                        }
                        newPath += ncPair.getName();
                        stackBelow.push(newPath);
                        count++;
                    }
                    if (currentPath.equals(path) && count == 0) {
                        // This directory does not have any files or folders.
                        parseProperties(context, resources, generatedXML, properties);
                    }
                } catch (NamingException e) {
                    context.getResponse().sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, path);
                    return;
                }

                // Displaying the lock-null resources present in that collection
                String lockPath = currentPath;
                if (lockPath.endsWith("/")) {
                    lockPath = lockPath.substring(0, lockPath.length() - 1);
                }
                Vector currentLockNullResources = (Vector) lockNullResources.get(lockPath);
                if (currentLockNullResources != null) {
                    Enumeration lockNullResourcesList = currentLockNullResources.elements();
                    while (lockNullResourcesList.hasMoreElements()) {
                        String lockNullPath = (String) lockNullResourcesList.nextElement();
                        System.out.println("Lock null path: " + lockNullPath);
                        parseLockNullProperties(context.getRequest(), generatedXML, lockNullPath, type,
                                properties);
                    }
                }
            }
            // Current level exhausted: move one level down the tree
            if (stack.isEmpty()) {
                depth--;
                stack = stackBelow;
                stackBelow = new Stack();
            }
            // Flush the XML generated so far for this resource
            xmlsb.append(generatedXML.toString());
            generatedXML.sendData();
        }
    }

    // NOTE(review): this loop only extracts each key and discards it — dead
    // code left over from debugging; candidate for removal.
    Iterator locks = lockNullResources.keySet().iterator();
    while (locks.hasNext()) {
        String lockpath = (String) locks.next();
    }

    generatedXML.writeElement(null, "multistatus", XMLWriter.CLOSING);
    xmlsb.append(generatedXML.toString());
    generatedXML.sendData();
}

From source file:com.espertech.esper.event.util.RendererMeta.java

/**
 * Ctor.
 * <p>
 * Inspects the event type's property descriptors and pre-computes the
 * getter/renderer pairs for simple, indexed, mapped and nested (fragment)
 * properties. For fragments it recurses, using {@code stack} to detect and
 * skip self-referential loops when the options request loop prevention.
 *
 * @param eventType to render
 * @param stack the stack of properties to avoid looping
 * @param options rendering options
 */
public RendererMeta(EventType eventType, Stack<EventTypePropertyPair> stack, RendererMetaOptions options) {
    ArrayList<GetterPair> gettersSimple = new ArrayList<GetterPair>();
    ArrayList<GetterPair> gettersIndexed = new ArrayList<GetterPair>();
    ArrayList<GetterPair> gettersMapped = new ArrayList<GetterPair>();
    ArrayList<NestedGetterPair> gettersNested = new ArrayList<NestedGetterPair>();

    EventPropertyDescriptor[] descriptors = eventType.getPropertyDescriptors();
    for (EventPropertyDescriptor desc : descriptors) {
        String propertyName = desc.getPropertyName();

        // Plain scalar property
        if ((!desc.isIndexed()) && (!desc.isMapped()) && (!desc.isFragment())) {
            EventPropertyGetter getter = getterOrWarn(eventType, propertyName);
            if (getter == null) {
                continue;
            }
            gettersSimple.add(new GetterPair(getter, propertyName,
                    OutputValueRendererFactory.getOutputValueRenderer(desc.getPropertyType(), options)));
        }

        // Indexed property readable without an explicit index
        if (desc.isIndexed() && !desc.isRequiresIndex() && (!desc.isFragment())) {
            EventPropertyGetter getter = getterOrWarn(eventType, propertyName);
            if (getter == null) {
                continue;
            }
            gettersIndexed.add(new GetterPair(getter, propertyName,
                    OutputValueRendererFactory.getOutputValueRenderer(desc.getPropertyType(), options)));
        }

        // Mapped property readable without an explicit map key
        if (desc.isMapped() && !desc.isRequiresMapkey() && (!desc.isFragment())) {
            EventPropertyGetter getter = getterOrWarn(eventType, propertyName);
            if (getter == null) {
                continue;
            }
            gettersMapped.add(new GetterPair(getter, propertyName,
                    OutputValueRendererFactory.getOutputValueRenderer(desc.getPropertyType(), options)));
        }

        // Fragment (nested event type) property: recurse into its metadata
        if (desc.isFragment()) {
            EventPropertyGetter getter = getterOrWarn(eventType, propertyName);
            FragmentEventType fragmentType = eventType.getFragmentType(propertyName);
            if (getter == null) {
                continue;
            }
            if (fragmentType == null) {
                log.warn("No fragment type returned for event type '" + eventType.getName() + "' and property '"
                        + propertyName + "'");
                continue;
            }

            EventTypePropertyPair pair = new EventTypePropertyPair(fragmentType.getFragmentType(),
                    propertyName);
            if ((options.isPreventLooping() && stack.contains(pair))) {
                continue; // prevent looping behavior on self-references
            }

            // Push before recursing so the nested walk can see this pair
            // and break cycles; pop restores the stack for siblings.
            stack.push(pair);
            RendererMeta fragmentMetaData = new RendererMeta(fragmentType.getFragmentType(), stack, options);
            stack.pop();

            gettersNested.add(
                    new NestedGetterPair(getter, propertyName, fragmentMetaData, fragmentType.isIndexed()));
        }
    }

    simpleProperties = gettersSimple.toArray(new GetterPair[gettersSimple.size()]);
    indexProperties = gettersIndexed.toArray(new GetterPair[gettersIndexed.size()]);
    mappedProperties = gettersMapped.toArray(new GetterPair[gettersMapped.size()]);
    nestedProperties = gettersNested.toArray(new NestedGetterPair[gettersNested.size()]);
}

/**
 * Looks up the getter for a property, logging a warning when none is
 * available. Extracted to remove the four copy-pasted warn-and-skip blocks.
 *
 * @param eventType    event type to query
 * @param propertyName property whose getter is requested
 * @return the getter, or null if the event type provides none
 */
private EventPropertyGetter getterOrWarn(EventType eventType, String propertyName) {
    EventPropertyGetter getter = eventType.getGetter(propertyName);
    if (getter == null) {
        log.warn("No getter returned for event type '" + eventType.getName() + "' and property '"
                + propertyName + "'");
    }
    return getter;
}

From source file:org.alfresco.repo.transfer.AlienProcessorImpl.java

/**
 * Top down un-invasion/*w ww .  ja  va2s  .  c om*/
 * <p>
 * Steps down the tree retreating from all the invaded nodes.
 * <p>
 * The retreat will stop is there is a "sub-invasion".
 * <p>   
 * @param nodeRef the top of the tree
 * @param fromRepositoryId the repository that is retreating.
 */
private void retreatDownwards(NodeRef nodeRef, String fromRepositoryId) {
    Stack<NodeRef> nodesToRetreat = new Stack<NodeRef>();
    nodesToRetreat.add(nodeRef);

    /**
     * Now go and do the retreat.        
     */
    while (!nodesToRetreat.isEmpty()) {
        if (log.isDebugEnabled()) {
            log.debug("retreat :" + nodeRef + ", repoId:" + fromRepositoryId);
        }

        /**
         *  for the current node and all alien children
         *  
         *  if they are "from" the retreating repository then 
         */
        NodeRef currentNodeRef = nodesToRetreat.pop();

        log.debug("retreatNode:" + currentNodeRef);

        if (getNodeService().hasAspect(currentNodeRef, TransferModel.ASPECT_ALIEN)) {
            // Yes this is an alien node
            List<String> invadedBy = (List<String>) getNodeService().getProperty(currentNodeRef,
                    TransferModel.PROP_INVADED_BY);

            String parentRepoId;
            if (nodeService.hasAspect(currentNodeRef, TransferModel.ASPECT_TRANSFERRED)) {
                log.debug("node is transferred");
                parentRepoId = (String) nodeService.getProperty(currentNodeRef,
                        TransferModel.PROP_FROM_REPOSITORY_ID);
            } else {
                log.debug("node is local");
                parentRepoId = descriptorService.getCurrentRepositoryDescriptor().getId();
            }

            if (fromRepositoryId.equalsIgnoreCase(parentRepoId)) {
                // This node is "owned" by the retreating repo
                // Yes we are invaded by fromRepositoryId
                if (invadedBy.size() == 1) {
                    // we are invaded by a single repository which must be fromRepositoryId
                    log.debug("no longe alien:" + currentNodeRef);
                    getNodeService().removeAspect(currentNodeRef, TransferModel.ASPECT_ALIEN);
                } else {
                    invadedBy.remove(parentRepoId);
                    getNodeService().setProperty(currentNodeRef, TransferModel.PROP_INVADED_BY,
                            (Serializable) invadedBy);
                }

                //List<ChildAssociationRef> refs = getNodeService().getChildAssocs(currentNodeRef);
                List<ChildAssociationRef> refs = nodeService.getChildAssocsByPropertyValue(currentNodeRef,
                        TransferModel.PROP_INVADED_BY, fromRepositoryId);
                for (ChildAssociationRef ref : refs) {
                    if (log.isDebugEnabled()) {
                        log.debug("will need to check child:" + ref);
                    }
                    nodesToRetreat.push(ref.getChildRef());
                }
            }
        }
    }
}

From source file:org.apache.myfaces.custom.fisheye.HtmlFishEyeNavigationMenuRenderer.java

/**
 * Renders a single fisheye menu item as a Dojo widget and pushes the
 * generated widget id onto the childs-menu stack (the caller needs the
 * reversed order, hence the stack). Builds an onClick handler that submits
 * the enclosing form with this item's client id as the command parameter.
 *
 * @param context         current faces context
 * @param writer          response writer for the widget initialization code
 * @param menu            the enclosing menu component (unused here but part
 *                        of the renderer contract)
 * @param item            the menu item; must be a UINavigationMenuItem or a
 *                        FishEyeCommandLink
 * @param childsMenuStack stack collecting generated widget ids
 * @throws IOException              on response write failure
 * @throws IllegalArgumentException if the item is not inside a form or is
 *                                  of an unsupported type
 */
protected void renderMenuItem(FacesContext context, ResponseWriter writer, UIComponent menu, UIComponent item,
        Stack childsMenuStack) throws IOException {
    // find the enclosing form
    FormInfo formInfo = findNestingForm(item, context);
    String clientId = item.getClientId(context);
    if (formInfo == null) {
        throw new IllegalArgumentException("Component " + clientId + " must be embedded in an form");
    }
    UIComponent nestingForm = formInfo.getForm();
    String formName = formInfo.getFormName();

    StringBuffer onClick = new StringBuffer();

    String jsForm = "document.forms['" + formName + "']";
    if (RendererUtils.isAdfOrTrinidadForm(formInfo.getForm())) {
        // ADF/Trinidad forms use their own submitForm() JS helper
        onClick.append("submitForm('");
        onClick.append(formInfo.getForm().getClientId(context));
        onClick.append("',1,{source:'");
        onClick.append(clientId);
        onClick.append("'});return false;");
    } else {
        // call the clear_<formName> method
        onClick.append(HtmlRendererUtils.getClearHiddenCommandFormParamsFunctionName(formName)).append("();");

        // add id parameter for decode
        String hiddenFieldName = HtmlRendererUtils.getHiddenCommandLinkFieldName(formInfo);
        onClick.append(jsForm);
        onClick.append(".elements['").append(hiddenFieldName).append("']");
        onClick.append(".value='").append(clientId).append("';");
        addHiddenCommandParameter(context, nestingForm, hiddenFieldName);
    }
    // Extract target/caption/icon from whichever item type we were given
    String target;
    String caption;
    String iconSrc;
    if (item instanceof UINavigationMenuItem) {
        target = ((UINavigationMenuItem) item).getTarget();
        caption = ((UINavigationMenuItem) item).getItemLabel();
        iconSrc = ((UINavigationMenuItem) item).getIcon();
    } else if (item instanceof FishEyeCommandLink) {
        target = ((FishEyeCommandLink) item).getTarget();
        caption = ((FishEyeCommandLink) item).getCaption();
        iconSrc = ((FishEyeCommandLink) item).getIconSrc();
    } else {
        throw new IllegalArgumentException("expected UINavigationMenuItem or FisheyCommandLink");
    }

    // add the target window
    if (target != null && target.trim().length() > 0) {
        onClick.append(jsForm);
        onClick.append(".target='");
        onClick.append(target);
        onClick.append("';");
    }

    // onSubmit: run the form's own onsubmit handler first and only submit
    // when it does not veto
    onClick.append("if(").append(jsForm).append(".onsubmit){var result=").append(jsForm).append(
            ".onsubmit();  if( (typeof result == 'undefined') || result ) {" + jsForm + ".submit();}}else{");

    // submit
    onClick.append(jsForm);
    onClick.append(".submit();}return false;"); // return false, so that
                                                // browser does not handle
                                                // the click

    Map paramMap = new HashMap();
    paramMap.put(CAPTION_ATTR, caption);
    paramMap.put(ICON_SRC_ATTR, iconSrc);
    paramMap.put(ON_CLICK_ATTR, new StringBuffer("function () {").append(onClick).append("}"));
    // push the onclick as lambda and use a stringbuffer so that we do not
    // get enclosing quotes
    String menuItemId = DojoUtils.renderWidgetInitializationCode(writer, item, DOJO_ITEM_TYPE, paramMap,
            item.getClientId(context), false);
    childsMenuStack.push(menuItemId);
    // we have to revert the elements,
    // hence a stack
}

From source file:org.apache.catalina.core.StandardContext.java

/**
 * Get naming context full name./*  w ww . j a v a 2 s.c  om*/
 */
private String getNamingContextName() {
    if (namingContextName == null) {
        Container parent = getParent();
        if (parent == null) {
            namingContextName = getName();
        } else {
            Stack stk = new Stack();
            StringBuffer buff = new StringBuffer();
            while (parent != null) {
                stk.push(parent.getName());
                parent = parent.getParent();
            }
            while (!stk.empty()) {
                buff.append("/" + stk.pop());
            }
            buff.append(getName());
            namingContextName = buff.toString();
        }
    }
    return namingContextName;
}

From source file:de.uni_koblenz.jgralab.utilities.rsa.Rsa2Tg.java

/**
 * Checks that the MayBeNestedIn hierarchy is acyclic, using an iterative
 * depth-first traversal started from each given top-level nesting element.
 * A vertex is marked in {@code number} when it is taken off the stack and
 * in {@code rnumber} after its outgoing incidences have been examined; a
 * child that is numbered but not yet rnumbered is treated as the target of
 * a backward arc, i.e. a cycle.
 *
 * @param topLevelNestingElements roots of the nesting hierarchy to check
 * @throws ProcessingException if a backward arc (cycle) is detected
 */
private void checkAcyclicityOfMayBeNestedIn(Queue<GraphElementClass> topLevelNestingElements) {
    LocalIntegerVertexMarker number = new LocalIntegerVertexMarker(sg);
    LocalIntegerVertexMarker rnumber = new LocalIntegerVertexMarker(sg);
    int num = 0; // discovery counter
    int rnum = 0; // completion counter

    // depth first search
    Stack<GraphElementClass> stack = new Stack<GraphElementClass>();
    for (GraphElementClass root : topLevelNestingElements) {
        stack.push(root);
        while (!stack.isEmpty()) {
            GraphElementClass current = stack.pop();
            number.mark(current, ++num);
            for (MayBeNestedIn_nestingElement i : current.getIncidences(MayBeNestedIn_nestingElement.class)) {
                GraphElementClass child = (GraphElementClass) i.getThat();
                if (!number.isMarked(child)) {
                    stack.push(child);
                } else {
                    if (!rnumber.isMarked(child)) {
                        // there exists a backward arc
                        // NOTE(review): rnumber is set at the end of the same
                        // loop iteration in which number is set, so a vertex
                        // that is numbered but not rnumbered appears to be
                        // only the current vertex itself — this may detect
                        // self-loops but miss longer cycles. TODO confirm
                        // against the markers' semantics.
                        throw new ProcessingException(getParser(), getFileName(),
                                "The nesting hierarchy is not acyclic.");
                    }
                }
            }
            rnumber.mark(current, ++rnum);
        }
    }
}

From source file:com.amazonaws.services.kinesis.scaling.StreamScaler.java

/**
 * Iteratively rescales a stream by splitting/merging the shards held on the
 * working stack until each shard covers approximately {@code 1/targetShards}
 * of the hash keyspace, or until an optional min/max shard-count cap is hit.
 * The stack is expected to hold open shards with the lowest hash range on
 * top (hence the stack-based processing from the bottom of the keyspace up).
 *
 * @param streamName stream being scaled
 * @param originalShardCount shard count before scaling started; used only to
 *            derive the reported scale direction
 * @param targetShards desired number of shards after scaling
 * @param operationsMade split/merge operations already performed
 * @param shardsCompleted shards finalized so far, for progress reporting
 * @param startTime wall-clock start of the overall operation, for progress
 * @param shardStack working stack of open shards, lowest hash range on top
 * @param minCount optional lower cap on shard count (null = no cap)
 * @param maxCount optional upper cap on shard count (null = no cap)
 * @return a report describing how and why scaling ended
 * @throws Exception on AWS API failures or an unexpectedly null shard
 */
private ScalingOperationReport scaleStream(String streamName, int originalShardCount, int targetShards,
        int operationsMade, int shardsCompleted, long startTime, Stack<ShardHashInfo> shardStack,
        Integer minCount, Integer maxCount) throws Exception {
    // each shard should end up owning this fraction of the hash space
    final double targetPct = 1d / targetShards;
    boolean checkMinMax = minCount != null || maxCount != null;
    String lastShardLower = null;
    String lastShardHigher = null;
    ScaleDirection scaleDirection = originalShardCount >= targetShards ? ScaleDirection.DOWN
            : ScaleDirection.UP;

    // seed the current shard count from the working stack
    int currentCount = shardStack.size();

    // we'll run iteratively until the shard stack is emptied or we reach
    // one of the caps
    ScalingCompletionStatus endStatus = ScalingCompletionStatus.Ok;
    do {
        if (checkMinMax) {
            // stop scaling if we've reached the min or max count
            boolean stopOnCap = false;
            String message = null;
            if (minCount != null && currentCount == minCount && targetShards <= minCount) {
                stopOnCap = true;
                if (operationsMade == 0) {
                    endStatus = ScalingCompletionStatus.AlreadyAtMinimum;
                } else {
                    endStatus = ScalingCompletionStatus.Ok;
                }
                message = String.format("%s: Minimum Shard Count of %s Reached", streamName, minCount);
            }
            if (maxCount != null && currentCount == maxCount && targetShards >= maxCount) {
                if (operationsMade == 0) {
                    endStatus = ScalingCompletionStatus.AlreadyAtMaximum;
                } else {
                    endStatus = ScalingCompletionStatus.Ok;
                }
                message = String.format("%s: Maximum Shard Count of %s Reached", streamName, maxCount);
                stopOnCap = true;
            }
            if (stopOnCap) {
                LOG.info(message);
                return reportFor(endStatus, streamName, operationsMade, scaleDirection);
            }
        }

        // report progress every shard completed
        if (shardsCompleted > 0) {
            reportProgress(streamName, shardsCompleted, currentCount, shardStack.size(), startTime);
        }

        // once the stack is emptied, return a report of the hash space
        // allocation
        if (shardStack.empty()) {
            return reportFor(endStatus, streamName, operationsMade, scaleDirection);
        }

        ShardHashInfo lowerShard = shardStack.pop();
        if (lowerShard != null) {
            lastShardLower = lowerShard.getShardId();
        } else {
            throw new Exception(String.format("%s: Null ShardHashInfo retrieved after processing %s",
                    streamName, lastShardLower));
        }

        // first check is if the bottom shard is smaller or larger than our
        // target width
        if (StreamScalingUtils.softCompare(lowerShard.getPctWidth(), targetPct) < 0) {
            if (shardStack.empty()) {
                // our current shard is smaller than the target size, but
                // there's nothing else to do
                return reportFor(endStatus, streamName, operationsMade, scaleDirection);
            } else {
                // get the next higher shard
                ShardHashInfo higherShard = shardStack.pop();

                // NOTE(review): unlike lowerShard above, a null higherShard
                // is not rejected here and would NPE below — confirm pops
                // can never yield null before hardening.
                if (higherShard != null) {
                    lastShardHigher = higherShard.getShardId();
                }

                if (StreamScalingUtils.softCompare(lowerShard.getPctWidth() + higherShard.getPctWidth(),
                        targetPct) > 0) {
                    // The two lowest shards together are larger than the
                    // target size, so split the upper at the target offset
                    // and merge the lower of the two new shards to the
                    // lowest shard
                    AdjacentShards splitUpper = higherShard.doSplit(kinesisClient,
                            targetPct - lowerShard.getPctWidth(),
                            shardStack.isEmpty() ? higherShard.getShardId()
                                    : shardStack.lastElement().getShardId());
                    operationsMade++;

                    // place the upper of the two new shards onto the stack
                    shardStack.push(splitUpper.getHigherShard());

                    // merge lower of the new shards with the lowest shard
                    LOG.info(String.format("Merging Shard %s with %s", lowerShard.getShardId(),
                            splitUpper.getLowerShard().getShardId()));
                    ShardHashInfo lowerMerged = new AdjacentShards(streamName, lowerShard,
                            splitUpper.getLowerShard()).doMerge(kinesisClient,
                                    shardStack.isEmpty() ? splitUpper.getHigherShard().getShardId()
                                            : shardStack.lastElement().getShardId());
                    LOG.info(String.format("Created Shard %s (%s)", lowerMerged.getShardId(),
                            pctFormat.format(lowerMerged.getPctWidth())));
                    shardsCompleted++;

                    // count of shards is unchanged in this case as we've
                    // just rebalanced, so current count is not updated
                } else {
                    // The lower and upper shards together are smaller than
                    // the target size, so merge the two shards together
                    ShardHashInfo lowerMerged = new AdjacentShards(streamName, lowerShard, higherShard)
                            .doMerge(kinesisClient, shardStack.isEmpty() ? higherShard.getShardId()
                                    : shardStack.lastElement().getShardId());
                    shardsCompleted++;
                    currentCount--;

                    // put the new shard back on the stack - it may still be
                    // too small relative to the target
                    shardStack.push(lowerMerged);
                }
            }
        } else if (StreamScalingUtils.softCompare(lowerShard.getPctWidth(), targetPct) == 0) {
            // at the correct size - move on
        } else {
            // lowest shard is larger than the target size so split at the
            // target offset
            AdjacentShards splitLower = lowerShard.doSplit(kinesisClient, targetPct,
                    shardStack.isEmpty() ? lowerShard.getShardId() : shardStack.lastElement().getShardId());
            operationsMade++;

            LOG.info(
                    String.format("Split Shard %s at %s Creating Final Shard %s and Intermediate Shard %s (%s)",
                            lowerShard.getShardId(), pctFormat.format(targetPct),
                            splitLower.getLowerShard().getShardId(), splitLower.getHigherShard(),
                            pctFormat.format(splitLower.getHigherShard().getPctWidth())));

            // push the higher of the two splits back onto the stack
            shardStack.push(splitLower.getHigherShard());
            shardsCompleted++;
            currentCount++;
        }
        // fixed: guard previously read "shardStack.size() > 0 || !shardStack.empty()",
        // which ORs two equivalent conditions — a single emptiness check suffices
    } while (!shardStack.empty());

    return reportFor(endStatus, streamName, operationsMade, scaleDirection);
}