List of usage examples for java.util.Stack.pop()
public synchronized E pop()
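pop() removes the object at the top of the stack and returns it, throwing java.util.EmptyStackException if the stack is empty. Before the harvested examples below, a minimal self-contained sketch of that basic LIFO behavior:

import java.util.EmptyStackException;
import java.util.Stack;

class PopBasics {
    public static void main(String[] args) {
        Stack<String> stack = new Stack<>();
        stack.push("first");
        stack.push("second");

        System.out.println(stack.pop()); // "second": last in, first out
        System.out.println(stack.pop()); // "first"

        try {
            stack.pop(); // the stack is now empty
        } catch (EmptyStackException e) {
            System.out.println("pop() on an empty stack throws EmptyStackException");
        }
    }
}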
From source file: org.apache.hadoop.dfs.PermissionChecker.java

private void checkSubAccess(INode inode, FsAction access) throws AccessControlException {
    if (inode == null || !inode.isDirectory()) {
        return;
    }
    // Iterative depth-first walk: pop a directory, check it, push its subdirectories.
    Stack<INodeDirectory> directories = new Stack<INodeDirectory>();
    for (directories.push((INodeDirectory) inode); !directories.isEmpty();) {
        INodeDirectory d = directories.pop();
        check(d, access);
        for (INode child : d.getChildren()) {
            if (child.isDirectory()) {
                directories.push((INodeDirectory) child);
            }
        }
    }
}
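The example above is an iterative depth-first traversal: each pop yields the most recently discovered directory, and its children are deferred onto the stack. A stripped-down sketch of the same pattern, using a hypothetical Node type rather than Hadoop's INode classes:

import java.util.Collections;
import java.util.List;
import java.util.Stack;

class TreeWalk {
    static class Node {
        final String name;
        final List<Node> children;
        Node(String name, List<Node> children) { this.name = name; this.children = children; }
        Node(String name) { this(name, Collections.emptyList()); }
    }

    static void visitAll(Node root) {
        Stack<Node> pending = new Stack<>();
        pending.push(root);
        while (!pending.isEmpty()) {
            Node current = pending.pop(); // most recently discovered node first
            System.out.println(current.name);
            for (Node child : current.children) {
                pending.push(child); // children are popped before older siblings
            }
        }
    }
}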
From source file: RoomInfo.java

void route(String to) {
    Stack rev = new Stack();
    RoomInfo r;
    int num = btStack.size();

    // Reverse the backtrack stack so the path prints from start to finish.
    for (int i = 0; i < num; i++)
        rev.push(btStack.pop());

    for (int i = 0; i < num; i++) {
        r = (RoomInfo) rev.pop();
        System.out.print(r.from + " to ");
    }
    System.out.println(to);
}
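Popping one stack into another reverses it, because the element pushed last comes off first. A generic sketch of the idea (the raw Stack above predates generics); after reverse() returns, popping the result yields the elements in their original push order:

import java.util.Stack;

class StackReversal {
    static <T> Stack<T> reverse(Stack<T> source) { // empties source as a side effect
        Stack<T> reversed = new Stack<>();
        while (!source.isEmpty()) {
            reversed.push(source.pop()); // top of source becomes bottom of reversed
        }
        return reversed;
    }
}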
From source file: edu.emory.cci.aiw.cvrg.eureka.etl.ksb.PropositionDefinitionFinder.java

private void getNodesToLoad(Stack<String> processedStack, LinkedHashSet<String> nodesToLoad) {
    while (!processedStack.empty()) {
        String node = processedStack.pop();
        if (!nodesToLoad.contains(node)) {
            if (defaultProps.contains(node)) {
                nodesToLoad.add(node);
            } else {
                List<PropositionDefinition> parents;
                synchronized (parentsCache) {
                    parents = parentsCache.get(node);
                }
                if (parents != null) {
                    // Load the node if any of its parents is already scheduled to load.
                    for (PropositionDefinition parent : parents) {
                        if (nodesToLoad.contains(parent.getId())) {
                            nodesToLoad.add(node);
                            break;
                        }
                    }
                }
            }
        }
    }
}
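The example drains a worklist while guarding against reprocessing nodes it has already decided on. A minimal sketch of that drain-with-dedup pattern, using a Set as the visited check and plain String nodes as a stand-in:

import java.util.HashSet;
import java.util.Set;
import java.util.Stack;

class DedupDrain {
    static void drain(Stack<String> pending) {
        Set<String> seen = new HashSet<>();
        while (!pending.empty()) {
            String node = pending.pop();
            if (seen.add(node)) { // add() returns false if node was already processed
                System.out.println("processing " + node);
            }
        }
    }
}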
From source file: com.pinterest.rocksplicator.controller.tasks.ChainedTask.java

@Override
public void process(Context ctx) throws Exception {
    long id = ctx.getId();
    final String cluster = ctx.getCluster();
    final String worker = ctx.getWorker();
    final TaskQueue taskQueue = ctx.getTaskQueue();

    Stack<TaskBase> tasks = new Stack<>();
    tasks.push(getParameter().getT2());
    tasks.push(getParameter().getT1());

    while (!tasks.isEmpty()) {
        TaskBase taskBase = tasks.pop();
        AbstractTask task = TaskFactory.getWorkerTask(taskBase);
        if (task == null) {
            taskQueue.failTask(id, "Failed to instantiate task " + taskBase.name);
            return;
        } else if (task instanceof ChainedTask) {
            // Expand a nested chain in place: push T2 first so T1 is popped first.
            ChainedTask chainedTask = (ChainedTask) task;
            tasks.push(chainedTask.getParameter().getT2());
            tasks.push(chainedTask.getParameter().getT1());
        } else {
            LocalAckTaskQueue lq = new LocalAckTaskQueue(taskQueue);
            ctx = new Context(id, cluster, lq, worker);
            try {
                task.process(ctx);
            } catch (Exception ex) {
                LOG.error("Unexpected exception from task: {} in task chain.", task.getName(), ex);
                lq.failTask(id, ex.getMessage());
            }

            LocalAckTaskQueue.State state = lq.getState();
            if (state.state == LocalAckTaskQueue.State.StateName.UNFINISHED) {
                LOG.error("Task {} finished processing without ack", id);
                return;
            } else if (state.state == LocalAckTaskQueue.State.StateName.FAILED) {
                LOG.error("Task {} failed with reason: {}. Abort the task chain.", id, state.output);
                taskQueue.failTask(id, state.output);
                return;
            } else if (tasks.isEmpty()) {
                LOG.info("Finished processing chained task");
                taskQueue.finishTask(id, state.output);
                return;
            }

            long nextId = taskQueue.finishTaskAndEnqueueRunningTask(id, state.output, tasks.peek(), worker);
            if (nextId < 0) {
                LOG.error("Failed to finish task {} and enqueue new task {}", id, tasks.peek());
                return;
            } else {
                id = nextId;
            }
        }
    }
}
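Here each pop either yields a leaf task to execute or a chained task whose two parts are pushed back, flattening arbitrarily nested chains without recursion. A reduced sketch with hypothetical Leaf and Chain types (records and pattern matching require Java 16+); note the second part is pushed before the first so the first is popped first:

import java.util.Stack;

interface Task { }
record Leaf(String name) implements Task { }
record Chain(Task first, Task second) implements Task { }

class ChainRunner {
    public static void main(String[] args) {
        run(new Chain(new Leaf("setup"), new Chain(new Leaf("migrate"), new Leaf("verify"))));
    }

    static void run(Task root) {
        Stack<Task> tasks = new Stack<>();
        tasks.push(root);
        while (!tasks.isEmpty()) {
            Task t = tasks.pop();
            if (t instanceof Chain c) {
                tasks.push(c.second()); // pushed first, so it runs after first()
                tasks.push(c.first());
            } else if (t instanceof Leaf leaf) {
                System.out.println("running " + leaf.name()); // prints setup, migrate, verify
            }
        }
    }
}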
From source file: org.apache.tajo.engine.codegen.ExecutorPreCompiler.java

@Override
public LogicalNode visitTableSubQuery(CompilationContext context, LogicalPlan plan,
        LogicalPlan.QueryBlock block, TableSubQueryNode node, Stack<LogicalNode> stack)
        throws TajoException {
    // Keep this node on the stack while its subquery is being visited.
    stack.push(node);
    visit(context, plan, null, node.getSubQuery(), stack);
    stack.pop();

    if (node.hasTargets()) {
        for (Target target : node.getTargets()) {
            compileIfAbsent(context, node.getLogicalSchema(), target.getEvalTree());
        }
    }

    return node;
}
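The push-visit-pop bracket keeps the stack equal to the chain of ancestors of whatever node is currently being visited, so deeper visits can inspect their path. A minimal sketch of the same discipline over a plain tree, with a hypothetical TreeNode type:

import java.util.ArrayList;
import java.util.List;
import java.util.Stack;

class PathVisitor {
    static class TreeNode {
        final String name;
        final List<TreeNode> children = new ArrayList<>();
        TreeNode(String name) { this.name = name; }
    }

    static void visit(TreeNode node, Stack<TreeNode> ancestors) {
        System.out.println(node.name + " at depth " + ancestors.size());
        ancestors.push(node);          // node is an ancestor for everything below it
        for (TreeNode child : node.children) {
            visit(child, ancestors);
        }
        ancestors.pop();               // restore the stack on the way back up
    }
}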
From source file: org.abstracthorizon.proximity.storage.local.WritableFileSystemStorage.java

public void recreateMetadata(Map extraProps) throws StorageException {
    // issue #44: do not delete existing metadata; instead, force the properties
    // factory to recreate it, appending the new properties where supplied.
    int processed = 0;
    Stack stack = new Stack();
    List dir = listItems(ItemProperties.PATH_ROOT);
    stack.push(dir);
    while (!stack.isEmpty()) {
        dir = (List) stack.pop();
        for (Iterator i = dir.iterator(); i.hasNext();) {
            ItemProperties ip = (ItemProperties) i.next();
            if (ip.isDirectory()) {
                List subdir = listItems(ip.getPath());
                stack.push(subdir);
            } else {
                logger.debug("**** {}", ip.getPath());
                File target = new File(getStorageBaseDir(), ip.getPath());
                ItemProperties nip = getProxiedItemPropertiesFactory().expandItemProperties(ip.getPath(),
                        target, false);
                if (ip.getMetadata(DefaultExpiringProxyingRepositoryLogic.METADATA_EXPIRES) != null) {
                    logger.debug("We have an " + DefaultExpiringProxyingRepositoryLogic.METADATA_EXPIRES
                            + " property");
                    nip.setMetadata(DefaultExpiringProxyingRepositoryLogic.METADATA_EXPIRES,
                            ip.getMetadata(DefaultExpiringProxyingRepositoryLogic.METADATA_EXPIRES));
                }
                logger.debug("Recreating metadata : adding " + extraProps + " to " + nip.getAllMetadata());
                if (extraProps != null) {
                    nip.getAllMetadata().putAll(extraProps);
                }
                storeItemProperties(nip);
                processed++;
            }
        }
    }
    logger.info("Recreated metadata on {} items.", Integer.toString(processed));
}
From source file: org.apache.synapse.endpoints.AddressEndpoint.java

public void onFault(MessageContext synCtx) {
    // This endpoint has actually failed: deactivate it and inform the parent,
    // or fall back to the most recent handler on the fault stack.
    setActive(false, synCtx);

    if (parentEndpoint != null) {
        parentEndpoint.onChildEndpointFail(this, synCtx);
    } else {
        Stack faultStack = synCtx.getFaultStack();
        if (!faultStack.isEmpty()) {
            ((FaultHandler) faultStack.pop()).handleFault(synCtx);
        }
    }
}
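Popping the fault stack dispatches the error to the most recently registered handler, unwinding handlers in reverse registration order. A small sketch using java.util.function.Consumer as a stand-in handler type:

import java.util.Stack;
import java.util.function.Consumer;

class FaultStackDemo {
    public static void main(String[] args) {
        Stack<Consumer<Exception>> faultStack = new Stack<>();
        faultStack.push(e -> System.out.println("outer handler: " + e.getMessage()));
        faultStack.push(e -> System.out.println("inner handler: " + e.getMessage()));

        // Only the most recently pushed handler sees the failure.
        if (!faultStack.isEmpty()) {
            faultStack.pop().accept(new Exception("endpoint failed"));
        }
    }
}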
From source file: com.google.gwt.site.markdown.MarkupWriter.java

public void writeHTML(MDNode node, String html) throws TranslaterException {
    if (node.isFolder()) {
        throw new IllegalArgumentException();
    }

    // Collect the ancestor chain so it can be replayed from the root downward.
    Stack<MDParent> stack = new Stack<MDParent>();
    MDParent tmp = node.getParent();
    stack.add(tmp);
    while (tmp.getParent() != null) {
        tmp = tmp.getParent();
        stack.add(tmp);
    }

    // Discard the root node from the stack; it corresponds to rootFile.
    stack.pop();

    File currentDir = rootFile;
    ensureDirectory(currentDir);

    while (!stack.isEmpty()) {
        MDParent pop = stack.pop();
        currentDir = new File(currentDir, pop.getName());
        ensureDirectory(currentDir);
    }

    String fileName = node.getName().substring(0, node.getName().length() - ".md".length()) + ".html";
    File fileToWrite = new File(currentDir, fileName);
    try {
        Util.writeStringToFile(fileToWrite, html);
    } catch (IOException e) {
        throw new TranslaterException("can not write markup to file: '" + fileToWrite + "'", e);
    }
}
From source file: com.amalto.core.history.UniqueIdTransformer.java

public void addIds(org.w3c.dom.Document document) {
    Stack<Integer> levels = new Stack<Integer>();
    levels.push(0); // depth counter for the document root
    {
        Element documentElement = document.getDocumentElement();
        if (documentElement != null) {
            _addIds(document, documentElement, levels);
        }
    }
    levels.pop(); // balance the initial push
}
From source file: org.apache.hadoop.hive.ql.plan.BaseWork.java

public Set<Operator<?>> getAllOperators() {
    Set<Operator<?>> returnSet = new LinkedHashSet<Operator<?>>();
    Set<Operator<?>> opSet = getAllRootOperators();
    Stack<Operator<?>> opStack = new Stack<Operator<?>>();

    // Seed the stack with the root operators, then walk down through all children.
    opStack.addAll(opSet);

    while (!opStack.empty()) {
        Operator<?> op = opStack.pop();
        returnSet.add(op);
        if (op.getChildOperators() != null) {
            opStack.addAll(op.getChildOperators());
        }
    }
    return returnSet;
}
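One detail worth noting in this example: Stack extends Vector, so bulk Collection methods such as addAll are available; addAll appends at the top end, meaning the last element of the added collection is the first one popped. A small demonstration:

import java.util.List;
import java.util.Stack;

class AddAllOrder {
    public static void main(String[] args) {
        Stack<String> stack = new Stack<>();
        stack.addAll(List.of("a", "b", "c")); // appended in order, so "c" is on top

        while (!stack.empty()) {
            System.out.print(stack.pop() + " "); // prints: c b a
        }
    }
}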