List of usage examples for java.util.Stack.empty()
public boolean empty()
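Returns true if and only if the stack contains no items. Before the source-file examples below, here is a minimal standalone sketch of the common guard pattern — checking empty() before peek() or pop() so no EmptyStackException is thrown (the class and variable names are illustrative, not taken from the examples):

import java.util.Stack;

public class StackEmptyDemo {
    public static void main(String[] args) {
        Stack<String> stack = new Stack<String>();
        stack.push("first");
        stack.push("second");

        // Drain the stack safely: empty() guards every pop() call
        while (!stack.empty()) {
            System.out.println(stack.pop()); // prints "second", then "first"
        }

        System.out.println(stack.empty()); // prints "true"
    }
}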
From source file:fr.paris.lutece.plugins.upload.web.UploadJspBean.java
/**
 * Deletes a directory recursively.
 *
 * @param directory The directory to delete
 */
private static void deleteDirectory(File directory) {
    // We use a Stack (LIFO) to keep track of the directories to delete
    Stack<File> dirsToDelete = new Stack<File>();

    // The stack is initialized with the main directory
    dirsToDelete.push(directory);

    // Loop until all directories have been deleted
    while (!dirsToDelete.empty()) {
        // Look at the directory on top of the stack (don't remove it!)
        File currentDir = dirsToDelete.peek();

        // Are there any subdirectories?
        File[] subDirs = currentDir.listFiles(dirFilter);

        if (subDirs.length > 0) {
            // If so, add them to the stack
            for (int i = 0; i < subDirs.length; i++) {
                dirsToDelete.push(subDirs[i]);
            }
        } else {
            // If not, delete all files in the directory
            File[] files = currentDir.listFiles(fileFilter);

            for (int i = 0; i < files.length; i++) {
                files[i].delete();
            }

            // Then delete the directory
            currentDir.delete();

            // Then remove the directory from the stack
            dirsToDelete.pop();
        }
    }
}
From source file:fr.paris.lutece.plugins.upload.web.UploadJspBean.java
/**
 * Returns the total size of a directory.
 *
 * @param directory The directory
 * @return The total size
 */
private static long getDirectorySize(File directory) {
    long lResult = 0;

    // We use a Stack (LIFO) to keep track of the unprocessed directories
    Stack<File> dirsToProcess = new Stack<File>();

    // The stack is initialized with the main directory
    dirsToProcess.push(directory);

    // Loop until all directories have been processed
    while (!dirsToProcess.empty()) {
        // Get a new directory from the stack
        File currentDir = dirsToProcess.pop();

        // Don't forget the directory's own size!
        lResult += currentDir.length();

        // Add the local files' size to the global size
        File[] files = currentDir.listFiles(fileFilter);

        for (int i = 0; i < files.length; i++) {
            lResult += files[i].length();
        }

        // Add the sub-directories to the stack
        File[] subDirs = currentDir.listFiles(dirFilter);

        for (int i = 0; i < subDirs.length; i++) {
            dirsToProcess.push(subDirs[i]);
        }
    }

    return lResult;
}
From source file:org.apache.webdav.ant.Utils.java
/**
 * Creates the collection at the given URL, including any missing parent
 * collections.
 *
 * @param client
 * @param httpURL
 * @param lockToken the lock token to be used, or <code>null</code> if
 *        none is to be used
 * @return true if at least one collection was created
 * @throws IOException
 * @throws HttpException
 */
public static boolean assureExistingCollection(HttpClient client, HttpURL httpURL, String lockToken)
        throws IOException, HttpException {
    String path = httpURL.getPath();
    if (!path.endsWith("/")) {
        path = path + "/";
    }

    // Walk up the path, collecting the collections that do not exist yet
    Stack toBeCreated = new Stack();
    while (!path.equals("/")) {
        HttpURL parent = Utils.createHttpURL(httpURL, path);
        if (!collectionExists(client, parent)) {
            toBeCreated.push(path);
            path = path.substring(0, path.lastIndexOf("/", path.length() - 2) + 1);
        } else {
            break;
        }
    }

    boolean created = !toBeCreated.empty();

    // Create the missing collections from the top down
    while (!toBeCreated.empty()) {
        HttpURL newColl = Utils.createHttpURL(httpURL, (String) toBeCreated.pop());
        MkcolMethod mkcol = new MkcolMethod(newColl.getEscapedURI());
        mkcol.setFollowRedirects(true);
        generateIfHeader(mkcol, lockToken);
        int status = client.executeMethod(mkcol);
        if (status != WebdavStatus.SC_CREATED) {
            HttpException ex = new HttpException("Can't create collection " + newColl);
            ex.setReasonCode(status);
            ex.setReason(mkcol.getStatusText());
            throw ex;
        }
    }
    return created;
}
From source file:com.netflix.spinnaker.clouddriver.appengine.artifacts.StorageUtils.java
public static void untarStreamToPath(InputStream inputStream, String basePath) throws IOException {
    // Directories come in hierarchical order within the stream, but
    // we need to set their timestamps after their children have been written.
    class DirectoryTimestamp {
        public DirectoryTimestamp(File d, long m) {
            directory = d;
            millis = m;
        }

        public File directory;
        public long millis;
    }

    Stack<DirectoryTimestamp> directoryStack = new Stack<DirectoryTimestamp>();
    File baseDirectory = new File(basePath);
    baseDirectory.mkdir();

    TarArchiveInputStream tarStream = new TarArchiveInputStream(inputStream);
    for (TarArchiveEntry entry = tarStream.getNextTarEntry(); entry != null;
            entry = tarStream.getNextTarEntry()) {
        File target = new File(baseDirectory, entry.getName());
        if (entry.isDirectory()) {
            directoryStack.push(new DirectoryTimestamp(target, entry.getModTime().getTime()));
            continue;
        }
        writeStreamToFile(tarStream, target);
        target.setLastModified(entry.getModTime().getTime());
    }

    while (!directoryStack.empty()) {
        DirectoryTimestamp info = directoryStack.pop();
        info.directory.setLastModified(info.millis);
    }
    tarStream.close();
}
From source file:com.kadwa.hadoop.DistExec.java
/**
 * Initialize ExecFilesMapper specific job-configuration.
 *
 * @param conf    : The dfs/mapred configuration.
 * @param jobConf : The handle to the jobConf object to be initialized.
 * @param args    Arguments
 * @return true if it is necessary to launch a job.
 */
private static boolean setup(Configuration conf, JobConf jobConf, final Arguments args) throws IOException {
    jobConf.set(DST_DIR_LABEL, args.dst.toUri().toString());
    jobConf.set(EXEC_CMD_LABEL, args.execCmd);

    // set boolean values
    jobConf.setBoolean(Options.REDIRECT_ERROR_TO_OUT.propertyname,
            args.flags.contains(Options.REDIRECT_ERROR_TO_OUT));

    final String randomId = getRandomId();
    JobClient jClient = new JobClient(jobConf);
    Path stagingArea;
    try {
        stagingArea = JobSubmissionFiles.getStagingDir(jClient, conf);
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
    Path jobDirectory = new Path(stagingArea + NAME + "_" + randomId);
    FsPermission mapredSysPerms = new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
    FileSystem.mkdirs(FileSystem.get(jobDirectory.toUri(), conf), jobDirectory, mapredSysPerms);
    jobConf.set(JOB_DIR_LABEL, jobDirectory.toString());

    FileSystem dstfs = args.dst.getFileSystem(conf);

    // get tokens for all the required FileSystems..
    TokenCache.obtainTokensForNamenodes(jobConf.getCredentials(), new Path[] { args.dst }, conf);

    boolean dstExists = dstfs.exists(args.dst);
    boolean dstIsDir = false;
    if (dstExists) {
        dstIsDir = dstfs.getFileStatus(args.dst).isDir();
    }

    // default logPath
    Path logPath = args.log;
    if (logPath == null) {
        String filename = "_" + NAME + "_logs_" + randomId;
        if (!dstExists || !dstIsDir) {
            Path parent = args.dst.getParent();
            if (!dstfs.exists(parent)) {
                dstfs.mkdirs(parent);
            }
            logPath = new Path(parent, filename);
        } else {
            logPath = new Path(args.dst, filename);
        }
    }
    FileOutputFormat.setOutputPath(jobConf, logPath);

    // create src list, dst list
    FileSystem jobfs = jobDirectory.getFileSystem(jobConf);

    Path srcfilelist = new Path(jobDirectory, "_" + NAME + "_src_files");
    jobConf.set(SRC_LIST_LABEL, srcfilelist.toString());
    SequenceFile.Writer src_writer = SequenceFile.createWriter(jobfs, jobConf, srcfilelist,
            LongWritable.class, FilePair.class, SequenceFile.CompressionType.NONE);

    Path dstfilelist = new Path(jobDirectory, "_" + NAME + "_dst_files");
    SequenceFile.Writer dst_writer = SequenceFile.createWriter(jobfs, jobConf, dstfilelist, Text.class,
            Text.class, SequenceFile.CompressionType.NONE);

    Path dstdirlist = new Path(jobDirectory, "_" + NAME + "_dst_dirs");
    jobConf.set(DST_DIR_LIST_LABEL, dstdirlist.toString());
    SequenceFile.Writer dir_writer = SequenceFile.createWriter(jobfs, jobConf, dstdirlist, Text.class,
            FilePair.class, SequenceFile.CompressionType.NONE);

    // handle the case where the destination directory doesn't exist
    // and we've only a single src directory.
    final boolean special = (args.srcs.size() == 1 && !dstExists);

    int srcCount = 0, cnsyncf = 0, dirsyn = 0;
    long fileCount = 0L, byteCount = 0L, cbsyncs = 0L;
    try {
        for (Iterator<Path> srcItr = args.srcs.iterator(); srcItr.hasNext();) {
            final Path src = srcItr.next();
            FileSystem srcfs = src.getFileSystem(conf);
            FileStatus srcfilestat = srcfs.getFileStatus(src);
            Path root = special && srcfilestat.isDir() ? src : src.getParent();
            if (srcfilestat.isDir()) {
                ++srcCount;
            }

            Stack<FileStatus> pathstack = new Stack<FileStatus>();
            for (pathstack.push(srcfilestat); !pathstack.empty();) {
                FileStatus cur = pathstack.pop();
                FileStatus[] children = srcfs.listStatus(cur.getPath());
                for (int i = 0; i < children.length; i++) {
                    boolean skipfile = false;
                    final FileStatus child = children[i];
                    final String dst = makeRelative(root, child.getPath());

                    ++srcCount;

                    if (child.isDir()) {
                        pathstack.push(child);
                    } else {
                        if (!skipfile) {
                            ++fileCount;
                            byteCount += child.getLen();

                            if (LOG.isTraceEnabled()) {
                                LOG.trace("adding file " + child.getPath());
                            }

                            ++cnsyncf;
                            cbsyncs += child.getLen();
                            if (cnsyncf > SYNC_FILE_MAX || cbsyncs > BYTES_PER_MAP) {
                                src_writer.sync();
                                dst_writer.sync();
                                cnsyncf = 0;
                                cbsyncs = 0L;
                            }
                        }
                    }

                    if (!skipfile) {
                        src_writer.append(new LongWritable(child.isDir() ? 0 : child.getLen()),
                                new FilePair(child, dst));
                    }

                    dst_writer.append(new Text(dst), new Text(child.getPath().toString()));
                }

                if (cur.isDir()) {
                    String dst = makeRelative(root, cur.getPath());
                    dir_writer.append(new Text(dst), new FilePair(cur, dst));
                    if (++dirsyn > SYNC_FILE_MAX) {
                        dirsyn = 0;
                        dir_writer.sync();
                    }
                }
            }
        }
    } finally {
        checkAndClose(src_writer);
        checkAndClose(dst_writer);
        checkAndClose(dir_writer);
    }

    FileStatus dststatus = null;
    try {
        dststatus = dstfs.getFileStatus(args.dst);
    } catch (FileNotFoundException fnfe) {
        LOG.info(args.dst + " does not exist.");
    }

    // create dest path dir if copying > 1 file
    if (dststatus == null) {
        if (srcCount > 1 && !dstfs.mkdirs(args.dst)) {
            throw new IOException("Failed to create " + args.dst);
        }
    }

    final Path sorted = new Path(jobDirectory, "_" + NAME + "_sorted");
    checkDuplication(jobfs, dstfilelist, sorted, conf);

    Path tmpDir = new Path(
            (dstExists && !dstIsDir) || (!dstExists && srcCount == 1) ? args.dst.getParent() : args.dst,
            "_" + NAME + "_tmp_" + randomId);
    jobConf.set(TMP_DIR_LABEL, tmpDir.toUri().toString());

    LOG.info("sourcePathsCount=" + srcCount);
    LOG.info("filesToExecCount=" + fileCount);
    LOG.info("bytesToExecCount=" + StringUtils.humanReadableInt(byteCount));
    jobConf.setInt(SRC_COUNT_LABEL, srcCount);
    jobConf.setLong(TOTAL_SIZE_LABEL, byteCount);
    setMapCount(fileCount, jobConf);
    return fileCount > 0;
}
From source file:tajo.engine.planner.LogicalPlanner.java
private static LogicalNode createCrossJoinFromJoinCondition(FromTable[] tables, EvalNode[] cnf) {
    Map<String, FromTable> fromTableMap = Maps.newHashMap();
    for (FromTable f : tables) {
        // TODO - to consider alias and self-join
        fromTableMap.put(f.getTableName(), f);
    }

    JoinTree joinTree = new JoinTree(); // to infer join order
    for (EvalNode expr : cnf) {
        if (PlannerUtil.isJoinQual(expr)) {
            joinTree.addJoin(expr);
        }
    }

    List<String> remain = Lists.newArrayList(fromTableMap.keySet());
    remain.removeAll(joinTree.getTables()); // retain only tables not matched to any join condition

    List<Edge> joinOrder = null;
    LogicalNode subroot = null;
    JoinNode join;
    Schema joinSchema;

    // If at least one table is matched to one of the join conditions,
    // we traverse the join tree in a depth-first manner and determine
    // the initial join order. Here, we do not consider the join cost.
    // The optimized join order will be considered in the optimizer.
    if (joinTree.getJoinNum() > 0) {
        Stack<String> stack = new Stack<String>();
        Set<String> visited = Sets.newHashSet();

        // initially, one table is pushed onto the stack
        String seed = joinTree.getTables().iterator().next();
        stack.add(seed);

        joinOrder = Lists.newArrayList();

        while (!stack.empty()) {
            String table = stack.pop();
            if (visited.contains(table)) {
                continue;
            }
            visited.add(table);

            // 'joinOrder' will contain all tables corresponding to the given join conditions.
            for (Edge edge : joinTree.getEdges(table)) {
                if (!visited.contains(edge.getTarget()) && !edge.getTarget().equals(table)) {
                    stack.add(edge.getTarget());
                    joinOrder.add(edge);
                }
            }
        }

        subroot = new ScanNode(fromTableMap.get(joinOrder.get(0).getSrc()));
        LogicalNode inner;
        for (Edge edge : joinOrder) {
            inner = new ScanNode(fromTableMap.get(edge.getTarget()));
            join = new JoinNode(JoinType.CROSS_JOIN, subroot, inner);
            subroot = join;

            joinSchema = SchemaUtil.merge(join.getOuterNode().getOutSchema(),
                    join.getInnerNode().getOutSchema());
            join.setInSchema(joinSchema);
            join.setOutSchema(joinSchema);
        }
    }

    // Here, there are two cases:
    // 1) a join plan already exists.
    // 2) there is no join plan.
    if (joinOrder != null) {
        // case 1) if there are join tables corresponding to any join condition,
        // the join plan is placed as the outer plan of the product.
        remain.remove(joinOrder.get(0).getSrc());
        remain.remove(joinOrder.get(0).getTarget());
    } else {
        // case 2) if there are no inferred joins, one of the remaining join
        // tables is placed as the left table
        subroot = new ScanNode(fromTableMap.get(remain.get(0)));
        remain.remove(remain.get(0));
    }

    // Here, the variable 'remain' contains join tables which are not matched
    // to any join conditions. Thus, they will be joined by cartesian product.
    for (String table : remain) {
        join = new JoinNode(JoinType.CROSS_JOIN, subroot, new ScanNode(fromTableMap.get(table)));
        joinSchema = SchemaUtil.merge(join.getOuterNode().getOutSchema(), join.getInnerNode().getOutSchema());
        join.setInSchema(joinSchema);
        join.setOutSchema(joinSchema);
        subroot = join;
    }

    return subroot;
}
From source file:com.linkedin.urls.PathNormalizer.java
/**
 * 1. Replaces "/./" with "/" recursively.
 * 2. "/blah/asdf/.." -> "/blah"
 * 3. "/blah/blah2/blah3/../../blah4" -> "/blah/blah4"
 * 4. "//" -> "/"
 * 5. Adds a slash at the end if there isn't one
 */
private static String sanitizeDotsAndSlashes(String path) {
    StringBuilder stringBuilder = new StringBuilder(path);
    Stack<Integer> slashIndexStack = new Stack<Integer>();
    int index = 0;
    while (index < stringBuilder.length() - 1) {
        if (stringBuilder.charAt(index) == '/') {
            slashIndexStack.add(index);
            if (stringBuilder.charAt(index + 1) == '.') {
                if (index < stringBuilder.length() - 2 && stringBuilder.charAt(index + 2) == '.') {
                    // If it looks like "/../" or ends with "/.."
                    if (index < stringBuilder.length() - 3 && stringBuilder.charAt(index + 3) == '/'
                            || index == stringBuilder.length() - 3) {
                        boolean endOfPath = index == stringBuilder.length() - 3;
                        slashIndexStack.pop();
                        int endIndex = index + 3;
                        // backtrack so we can detect if this / is part of another replacement
                        index = slashIndexStack.empty() ? -1 : slashIndexStack.pop() - 1;
                        int startIndex = endOfPath ? index + 1 : index;
                        stringBuilder.delete(startIndex + 1, endIndex);
                    }
                } else if (index < stringBuilder.length() - 2 && stringBuilder.charAt(index + 2) == '/'
                        || index == stringBuilder.length() - 2) {
                    boolean endOfPath = index == stringBuilder.length() - 2;
                    slashIndexStack.pop();
                    int startIndex = endOfPath ? index + 1 : index;
                    stringBuilder.delete(startIndex, index + 2); // "/./" -> "/"
                    index--; // backtrack so we can detect if this / is part of another replacement
                }
            } else if (stringBuilder.charAt(index + 1) == '/') {
                slashIndexStack.pop();
                stringBuilder.deleteCharAt(index);
                index--;
            }
        }
        index++;
    }

    if (stringBuilder.length() == 0) {
        stringBuilder.append("/"); // Every path has at least a slash
    }
    return stringBuilder.toString();
}
From source file:org.dhatim.yaml.handler.YamlEventStreamHandler.java
private boolean lastTypeIsArray(Stack<Type> typeStack) {
    return !typeStack.empty() && typeStack.peek() == Type.SEQUENCE;
}
From source file:org.jolokia.handler.list.DataUpdater.java
/**
 * Update the given JSON object with the data extracted from the given
 * MBeanInfo
 *
 * @param pJSONObject JSON object to update
 * @param pMBeanInfo  info to extract from
 * @param pPathStack  stack for further constraining the result
 */
void update(JSONObject pJSONObject, MBeanInfo pMBeanInfo, Stack<String> pPathStack) {
    boolean isPathEmpty = pPathStack == null || pPathStack.empty();
    String filter = pPathStack != null && !pPathStack.empty() ? pPathStack.pop() : null;
    verifyThatPathIsEmpty(pPathStack);

    JSONObject attrMap = extractData(pMBeanInfo, filter);
    if (attrMap.size() > 0) {
        pJSONObject.put(getKey(), attrMap);
    } else if (!isPathEmpty) {
        throw new IllegalArgumentException("Path given but extracted value is empty");
    }
}
From source file:com.groupon.jenkins.buildtype.util.shell.ShellCommands.java
public ShellCommands addAll(final Stack<String> commands) {
    while (!commands.empty()) {
        this.commands.add(commands.pop());
    }
    return this;
}
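Note the ordering implication of draining a Stack this way: pop() returns the most recently pushed element first, so commands are appended in the reverse of push order. A minimal standalone sketch of the same drain pattern (the demo class below is hypothetical, not part of ShellCommands):

import java.util.ArrayList;
import java.util.List;
import java.util.Stack;

public class DrainOrderDemo {
    public static void main(String[] args) {
        Stack<String> commands = new Stack<String>();
        commands.push("echo one");
        commands.push("echo two");

        // Drain in LIFO order, as addAll above does
        List<String> drained = new ArrayList<String>();
        while (!commands.empty()) {
            drained.add(commands.pop());
        }

        System.out.println(drained); // prints [echo two, echo one]
    }
}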