Example usage for java.util Stack pop

List of usage examples for java.util.Stack.pop()

Introduction

This page lists example usages of java.util.Stack.pop().

Prototype

public synchronized E pop() 

Source Link

Document

Removes the object at the top of this stack and returns that object as the value of this function.

Usage

From source file:org.apache.tajo.engine.planner.rewrite.ProjectionPushDownRule.java

@Override
public LogicalNode visitInsert(Context context, LogicalPlan plan, LogicalPlan.QueryBlock block, InsertNode node,
        Stack<LogicalNode> stack) throws PlanningException {
    // Keep the insert node on the visitation stack while its child subtree
    // is processed, then restore the stack to its prior state.
    stack.push(node);
    final LogicalNode child = node.getChild();
    visit(context, plan, block, child, stack);
    stack.pop();
    return node;
}

From source file:org.apache.fop.render.rtf.rtflib.rtfdoc.RtfTextrun.java

/**
 * Inserts a paragraph break before all trailing close group marks.
 *
 * <p>The close group marks at the end of the child list are temporarily
 * removed (their break types remembered on a stack), the paragraph break is
 * appended, and the marks are then restored in their original order.</p>
 *
 * @throws IOException  for I/O problems
 * @return The paragraph break element, or null when the child list is empty
 *         after removing the trailing close group marks
 */
public RtfParagraphBreak addParagraphBreak() throws IOException {
    // get copy of children list
    List children = getChildren();
    Stack tmp = new Stack();
    RtfParagraphBreak par = null;

    // delete all previous CloseGroupMark
    int deletedCloseGroupCount = 0;

    // Walk backwards from the end of the list while the elements are
    // RtfCloseGroupMark instances.
    ListIterator lit = children.listIterator(children.size());
    while (lit.hasPrevious() && (lit.previous() instanceof RtfCloseGroupMark)) {
        // previous() (in the loop condition) stepped the cursor back; next()
        // re-reads that same element and steps forward again, so remove()
        // deletes exactly the mark just examined.
        tmp.push(Integer.valueOf(((RtfCloseGroupMark) lit.next()).getBreakType()));
        lit.remove();
        deletedCloseGroupCount++;
    }

    if (children.size() != 0) {
        // add paragraph break and restore all deleted close group marks
        setChildren(children);
        par = new RtfParagraphBreak(this, writer);
        for (int i = 0; i < deletedCloseGroupCount; i++) {
            // Marks were pushed while walking backwards, so popping restores
            // them in their original document order.
            addCloseGroupMark(((Integer) tmp.pop()).intValue());
        }
    }
    return par;
}

From source file:com.aipo.container.gadgets.parse.AipoNekoSimplifiedHtmlParser.java

private void fixNekoWeirdness(Document document) {
    // Neko as of versions > 1.9.13 stuffs all leading <script> nodes into
    // <head>.
    // This breaks all sorts of assumptions in gadgets, notably the existence of
    // document.body.
    // We can't tell Neko to avoid putting <script> into <head> however, since
    // gadgets like <Content><script>...</script><style>...</style> will break
    // due to both <script> and <style> ending up in <body> -- at which point
    // Neko unceremoniously drops the <style> (and <link>) elements.
    // Therefore we just search for <script> elements in <head> and stuff them
    // all into the top of <body>.
    // This method assumes a normalized document as input.
    Node html = DomUtil.getFirstNamedChildNode(document, "html");
    if (html.getNextSibling() != null && html.getNextSibling().getNodeName().equalsIgnoreCase("html")) {
        // if a doctype is specified, then the desired root <html> node is wrapped
        // by an <HTML> node
        // Pull out the <html> root.
        html = html.getNextSibling();
    }
    // Ensure a <head> exists so the scan below is well-defined.
    Node head = DomUtil.getFirstNamedChildNode(html, "head");
    if (head == null) {
        head = document.createElement("head");
        html.insertBefore(head, html.getFirstChild());
    }
    // Collect the <head> scripts first; mutating while iterating a live
    // NodeList would skip nodes.
    NodeList headNodes = head.getChildNodes();
    Stack<Node> headScripts = new Stack<Node>();
    for (int i = 0; i < headNodes.getLength(); ++i) {
        Node headChild = headNodes.item(i);
        if (headChild.getNodeName().equalsIgnoreCase("script")) {
            headScripts.add(headChild);
        }
    }

    // Remove from head, add to top of <body> in <head> order.
    Node body = DomUtil.getFirstNamedChildNode(html, "body");
    if (body == null) {
        body = document.createElement("body");
        html.insertBefore(body, head.getNextSibling());
    }
    // Popping yields scripts in reverse, but each is inserted before the
    // previous insertion point, so the final order matches <head> order.
    Node bodyFirst = body.getFirstChild();
    while (!headScripts.isEmpty()) {
        Node headScript = headScripts.pop();
        head.removeChild(headScript);
        body.insertBefore(headScript, bodyFirst);
        bodyFirst = headScript;
    }
}

From source file:org.apache.jasper.JspC.java

/**
 * Locate all jsp files in the webapp. Used if no explicit
 * jsps are specified./* w  w w .j a v a 2  s.c o  m*/
 */
public void scanFiles(File base) throws JasperException {
    Stack dirs = new Stack();
    dirs.push(base);
    if (extensions == null) {
        extensions = new Vector();
        extensions.addElement("jsp");
        extensions.addElement("jspx");
    }
    while (!dirs.isEmpty()) {
        String s = dirs.pop().toString();
        //System.out.println("--" + s);
        File f = new File(s);
        if (f.exists() && f.isDirectory()) {
            String[] files = f.list();
            String ext;
            for (int i = 0; i < files.length; i++) {
                File f2 = new File(s, files[i]);
                //System.out.println(":" + f2.getPath());
                if (f2.isDirectory()) {
                    dirs.push(f2.getPath());
                    //System.out.println("++" + f2.getPath());
                } else {
                    String path = f2.getPath();
                    String uri = path.substring(uriRoot.length());
                    ext = files[i].substring(files[i].lastIndexOf('.') + 1);
                    if (extensions.contains(ext) || jspConfig.isJspPage(uri)) {
                        //System.out.println(s + "?" + files[i]);
                        pages.addElement(path);
                    } else {
                        //System.out.println("not done:" + ext);
                    }
                }
            }
        }
    }
}

From source file:org.apache.maven.wagon.providers.http.AbstractHttpClientWagon.java

/**
 * Recursively create a path, working down from the leaf to the root.
 * <p>/*from w  w w  .  j a  v  a  2  s  .  co  m*/
 * Borrowed from Apache Sling
 * 
 * @param path a directory path to create
 * @throws HttpException
 * @throws TransferFailedException
 * @throws AuthorizationException
 */
protected void mkdirs(String path) throws HttpException, TransferFailedException, AuthorizationException {
    // Call mkdir on all parent paths, starting at the topmost one
    final Stack<String> parents = new Stack<String>();
    while (path.length() > 0 && !resourceExists(path)) {
        parents.push(path);
        path = getParentPath(path);
    }

    while (!parents.isEmpty()) {
        mkdir(parents.pop());
    }
}

From source file:org.hdiv.urlProcessor.AbstractUrlProcessor.java

/**
 * Removes from <code>url</code> references to relative paths ("." and "..")
 * by resolving them against the directory of <code>originalRequestUri</code>.
 *
 * @param url
 *            url (only processed when it starts with "..")
 * @param originalRequestUri
 *            originalRequestUri
 * @return returns <code>url</code> without relative paths.
 */
protected String removeRelativePaths(String url, String originalRequestUri) {

    // Only urls that start with ".." need resolving; everything else is
    // returned untouched.
    if (!url.startsWith("..")) {
        return url;
    }

    // Seed the stack with the path segments of the current request's
    // directory (from the first "/" up to, excluding, the last "/").
    Stack<String> stack = new Stack<String>();
    String localUri = originalRequestUri.substring(originalRequestUri.indexOf("/"),
            originalRequestUri.lastIndexOf("/"));
    StringTokenizer localUriParts = new StringTokenizer(localUri.replace('\\', '/'), "/");
    while (localUriParts.hasMoreTokens()) {
        stack.push(localUriParts.nextToken());
    }

    // Apply the url's segments: "." is a no-op, ".." pops a directory,
    // anything else descends.
    StringTokenizer pathParts = new StringTokenizer(url.replace('\\', '/'), "/");
    while (pathParts.hasMoreTokens()) {
        String part = pathParts.nextToken();
        if (part.equals(".")) {
            continue;
        }
        if (part.equals("..")) {
            // Ignore ".." that climbs above the root instead of throwing
            // EmptyStackException (matches how URL resolution clamps at "/").
            if (!stack.isEmpty()) {
                stack.pop();
            }
        } else {
            stack.push(part);
        }
    }

    // Rebuild the flattened absolute path.
    StringBuilder flatPath = new StringBuilder();
    for (int i = 0; i < stack.size(); i++) {
        flatPath.append("/").append(stack.elementAt(i));
    }
    return flatPath.toString();
}

From source file:cn.ctyun.amazonaws.services.s3.transfer.TransferManager.java

/**
 * Downloads all objects in the virtual directory designated by the
 * keyPrefix given to the destination directory given. All virtual
 * subdirectories will be downloaded recursively.
 *
 * @param bucketName
 *            The bucket containing the virtual directory
 * @param keyPrefix
 *            The key prefix for the virtual directory, or null for the
 *            entire bucket. All subdirectories will be downloaded
 *            recursively.
 * @param destinationDirectory
 *            The directory to place downloaded files. Subdirectories will
 *            be created as necessary.
 * @return a handle for tracking the progress of the combined download
 */
public MultipleFileDownload downloadDirectory(String bucketName, String keyPrefix, File destinationDirectory) {

    if (keyPrefix == null)
        keyPrefix = "";

    List<S3ObjectSummary> objectSummaries = new LinkedList<S3ObjectSummary>();
    Stack<String> commonPrefixes = new Stack<String>();
    commonPrefixes.add(keyPrefix);
    long totalSize = 0;

    // Recurse all virtual subdirectories to get a list of object summaries.
    // This is a depth-first search.
    do {
        String prefix = commonPrefixes.pop();
        ObjectListing listObjectsResponse = null;

        // Inner loop pages through the (possibly truncated) listing for the
        // current prefix.
        do {
            if (listObjectsResponse == null) {
                ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName)
                        .withDelimiter(DEFAULT_DELIMITER).withPrefix(prefix);
                listObjectsResponse = s3.listObjects(listObjectsRequest);
            } else {
                listObjectsResponse = s3.listNextBatchOfObjects(listObjectsResponse);
            }

            for (S3ObjectSummary s : listObjectsResponse.getObjectSummaries()) {
                // Skip any files that are also virtual directories, since
                // we can't save both a directory and a file of the same
                // name.
                if (!s.getKey().equals(prefix)
                        && !listObjectsResponse.getCommonPrefixes().contains(s.getKey() + DEFAULT_DELIMITER)) {
                    objectSummaries.add(s);
                    totalSize += s.getSize();
                } else {
                    log.debug("Skipping download for object " + s.getKey()
                            + " since it is also a virtual directory");
                }
            }

            // Queue discovered subdirectories for the outer depth-first loop.
            commonPrefixes.addAll(listObjectsResponse.getCommonPrefixes());
        } while (listObjectsResponse.isTruncated());
    } while (!commonPrefixes.isEmpty());

    // Aggregate progress across all individual downloads.
    TransferProgressImpl transferProgress = new TransferProgressImpl();
    transferProgress.setTotalBytesToTransfer(totalSize);
    ProgressListener listener = new TransferProgressUpdatingListener(transferProgress);

    List<DownloadImpl> downloads = new ArrayList<DownloadImpl>();

    String description = "Downloading from " + bucketName + "/" + keyPrefix;
    final MultipleFileDownloadImpl multipleFileDownload = new MultipleFileDownloadImpl(description,
            transferProgress, new ProgressListenerChain(listener), keyPrefix, bucketName, downloads);
    multipleFileDownload.setMonitor(new MultipleFileTransferMonitor(multipleFileDownload, downloads));

    final AllDownloadsQueuedLock allTransfersQueuedLock = new AllDownloadsQueuedLock();
    MultipleFileTransferStateChangeListener stateChangeListener = new MultipleFileTransferStateChangeListener(
            allTransfersQueuedLock, multipleFileDownload);

    for (S3ObjectSummary summary : objectSummaries) {
        // TODO: non-standard delimiters
        File f = new File(destinationDirectory, summary.getKey());
        File parentFile = f.getParentFile();
        if (!parentFile.exists() && !parentFile.mkdirs()) {
            throw new RuntimeException("Couldn't create parent directories for " + f.getAbsolutePath());
        }

        downloads.add((DownloadImpl) download(
                new GetObjectRequest(summary.getBucketName(), summary.getKey()).withProgressListener(listener),
                f, stateChangeListener));
    }

    if (downloads.isEmpty()) {
        multipleFileDownload.setState(TransferState.Completed);
        return multipleFileDownload;
    }

    // Notify all state changes waiting for the downloads to all be queued
    // to wake up and continue.
    synchronized (allTransfersQueuedLock) {
        allTransfersQueuedLock.allQueued = true;
        allTransfersQueuedLock.notifyAll();
    }

    return multipleFileDownload;
}

From source file:com.hubspot.jinjava.interpret.JinjavaInterpreter.java

private void resolveBlockStubs(OutputList output, Stack<String> blockNames) {
    for (BlockPlaceholderOutputNode placeholder : output.getBlocks()) {
        String name = placeholder.getBlockName();

        // Only expand blocks we are not already inside of; this avoids
        // infinite recursion on self-referencing block definitions.
        if (!blockNames.contains(name)) {
            Collection<List<? extends Node>> chain = blocks.get(name);
            List<? extends Node> definition = Iterables.getFirst(chain, null);

            if (definition != null) {
                // The second entry of the chain (if any) becomes the super
                // block available while rendering this definition.
                context.setSuperBlock(Iterables.get(chain, 1, null));

                OutputList rendered = new OutputList();
                for (Node child : definition) {
                    rendered.addNode(child.render(this));
                }

                // Recurse with this block marked as in-progress.
                blockNames.push(name);
                resolveBlockStubs(rendered, blockNames);
                blockNames.pop();

                context.removeSuperBlock();

                placeholder.resolve(rendered.getValue());
            }
        }

        // Anything still unresolved collapses to the empty string.
        if (!placeholder.isResolved()) {
            placeholder.resolve("");
        }
    }
}

From source file:org.esco.grouper.services.GrouperAPIExposerImpl.java

/**
 * Gives the root groups from a given stem.
 * @param key The name of the stem./*w  w w.j  ava 2s  .  c o  m*/
 * @return The list of the groups in the specified stem its child stems.
 */
public GrouperDTO[] getAllRootGroupsFromStem(final String key) {
    final GrouperSession session = sessionUtil.createSession();
    final Stem s = fetchStem(session, key);
    Set<GrouperDTO> groups = null;

    if (s != null) {
        groups = new HashSet<GrouperDTO>();
        Stack<Stem> stems = new Stack<Stem>();
        stems.add(s);
        while (!stems.isEmpty()) {
            final Stem currentStem = stems.pop();
            @SuppressWarnings("rawtypes")
            final Set currentChildGroups = currentStem.getChildGroups();
            @SuppressWarnings("rawtypes")
            final Set currentChildStems = currentStem.getChildStems();

            for (Object o : currentChildGroups) {
                final Group g = (Group) o;
                if (g.toMember().getImmediateMemberships().isEmpty()) {
                    groups.add(fetchGrouperData((Group) o));
                }
            }

            for (Object o : currentChildStems) {
                stems.add((Stem) o);
            }
        }
    }

    if (LOGGER.isDebugEnabled()) {
        final StringBuffer sb = new StringBuffer("Child groups for stem ");
        sb.append(key);
        sb.append(": ");
        sb.append(groups);
        LOGGER.debug(sb);
    }

    sessionUtil.stopSession(session);

    if (groups == null) {
        return null;
    }

    return groups.toArray(new GrouperDTO[groups.size()]);
}

From source file:alluxio.job.persist.PersistDefinition.java

/**
 * Persists the Alluxio file named in {@code config} to its UFS path:
 * optionally deletes an existing UFS copy (when overwrite is set), creates
 * missing UFS ancestor directories one by one with permissions mirrored from
 * the corresponding Alluxio directories, then copies the file contents.
 */
@Override
public SerializableVoid runTask(PersistConfig config, SerializableVoid args, JobWorkerContext context)
        throws Exception {
    AlluxioURI uri = new AlluxioURI(config.getFilePath());
    String ufsPath = config.getUfsPath();

    // check if the file is persisted in UFS and delete it, if we are overwriting it
    UfsManager.UfsClient ufsClient = context.getUfsManager().get(config.getMountId());
    try (CloseableResource<UnderFileSystem> ufsResource = ufsClient.acquireUfsResource()) {
        UnderFileSystem ufs = ufsResource.get();
        if (ufs == null) {
            throw new IOException("Failed to create UFS instance for " + ufsPath);
        }
        if (ufs.exists(ufsPath)) {
            if (config.isOverwrite()) {
                LOG.info("File {} is already persisted in UFS. Removing it.", config.getFilePath());
                ufs.deleteFile(ufsPath);
            } else {
                throw new IOException("File " + config.getFilePath()
                        + " is already persisted in UFS, to overwrite the file, please set the overwrite flag"
                        + " in the config.");
            }
        }

        FileSystem fs = FileSystem.Factory.get();
        long bytesWritten;
        try (Closer closer = Closer.create()) {
            OpenFileOptions options = OpenFileOptions.defaults().setReadType(ReadType.NO_CACHE);
            FileInStream in = closer.register(fs.openFile(uri, options));
            AlluxioURI dstPath = new AlluxioURI(ufsPath);
            // Create ancestor directories from top to the bottom. We cannot use recursive create
            // parents here because the permission for the ancestors can be different.
            // Missing ancestors are collected deepest-first on this stack and
            // created top-down by popping below.
            Stack<Pair<String, MkdirsOptions>> ufsDirsToMakeWithOptions = new Stack<>();
            AlluxioURI curAlluxioPath = uri.getParent();
            AlluxioURI curUfsPath = dstPath.getParent();
            // Stop at the Alluxio root because the mapped directory of Alluxio root in UFS may not
            // exist.
            while (!ufs.isDirectory(curUfsPath.toString()) && curAlluxioPath != null) {
                URIStatus curDirStatus = fs.getStatus(curAlluxioPath);
                // Mirror the Alluxio directory's owner/group/mode onto the UFS directory.
                ufsDirsToMakeWithOptions.push(new Pair<>(curUfsPath.toString(),
                        MkdirsOptions.defaults().setCreateParent(false).setOwner(curDirStatus.getOwner())
                                .setGroup(curDirStatus.getGroup())
                                .setMode(new Mode((short) curDirStatus.getMode()))));
                curAlluxioPath = curAlluxioPath.getParent();
                curUfsPath = curUfsPath.getParent();
            }
            while (!ufsDirsToMakeWithOptions.empty()) {
                Pair<String, MkdirsOptions> ufsDirAndPerm = ufsDirsToMakeWithOptions.pop();
                // UFS mkdirs might fail if the directory is already created. If so, skip the mkdirs
                // and assume the directory is already prepared, regardless of permission matching.
                if (!ufs.mkdirs(ufsDirAndPerm.getFirst(), ufsDirAndPerm.getSecond())
                        && !ufs.isDirectory(ufsDirAndPerm.getFirst())) {
                    throw new IOException("Failed to create " + ufsDirAndPerm.getFirst() + " with permission "
                            + ufsDirAndPerm.getSecond().toString());
                }
            }
            URIStatus uriStatus = fs.getStatus(uri);
            // Create the UFS file with owner/group/mode copied from the Alluxio file.
            OutputStream out = closer.register(
                    ufs.create(dstPath.toString(), CreateOptions.defaults().setOwner(uriStatus.getOwner())
                            .setGroup(uriStatus.getGroup()).setMode(new Mode((short) uriStatus.getMode()))));
            bytesWritten = IOUtils.copyLarge(in, out);
            incrementPersistedMetric(ufsClient.getUfsMountPointUri(), bytesWritten);
        }
        LOG.info("Persisted file {} with size {}", ufsPath, bytesWritten);
    }
    return null;
}