List of usage examples for org.apache.hadoop.fs.FileSystem#rename
public abstract boolean rename(Path src, Path dst) throws IOException;
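The boolean return value matters: rename reports most failures (missing source, existing destination, and so on) by returning false rather than throwing. A minimal usage sketch, with illustrative paths:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path src = new Path("/tmp/data.tmp"); // illustrative paths
        Path dst = new Path("/tmp/data.done");
        // rename signals most failures through its return value, not an exception
        if (!fs.rename(src, dst)) {
            throw new IOException("Failed to rename " + src + " to " + dst);
        }
    }
}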
From source file:cc.solr.lucene.store.hdfs.ChangeFileExt.java
License:Apache License
public static void main(String[] args) throws IOException {
    Path p = new Path(args[0]);
    FileSystem fileSystem = FileSystem.get(p.toUri(), new Configuration());
    FileStatus[] listStatus = fileSystem.listStatus(p);
    // Rename every file directly under the given path to carry a ".lf" extension
    for (FileStatus fileStatus : listStatus) {
        Path path = fileStatus.getPath();
        fileSystem.rename(path, new Path(path.toString() + ".lf"));
    }
}
From source file:cn.edu.hfut.dmic.webcollector.crawldb.Merge.java
public static void install(Path crawlPath, Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path crawldbPath = new Path(crawlPath, "crawldb");
    Path newdb = new Path(crawldbPath, "new");
    Path currentdb = new Path(crawldbPath, "current");
    Path olddb = new Path(crawldbPath, "old");
    if (fs.exists(currentdb)) {
        if (fs.exists(olddb)) {
            fs.delete(olddb, true);
        }
        // Rotate: current -> old
        fs.rename(currentdb, olddb);
    }
    // Promote: new -> current
    fs.rename(newdb, currentdb);
}
From source file:cn.edu.hfut.dmic.webcollectorcluster.generator.Merge.java
public static void install(Path crawldb) throws IOException {
    FileSystem fs = crawldb.getFileSystem(CrawlerConfiguration.create());
    Path newdb = new Path(crawldb, "new");
    Path currentdb = new Path(crawldb, "current");
    Path olddb = new Path(crawldb, "old");
    if (fs.exists(currentdb)) {
        if (fs.exists(olddb)) {
            fs.delete(olddb, true);
        }
        // Rotate: current -> old
        fs.rename(currentdb, olddb);
    }
    fs.mkdirs(crawldb);
    // Promote: new -> current
    fs.rename(newdb, currentdb);
}
From source file:co.cask.cdap.internal.app.runtime.batch.dataset.partitioned.DynamicPartitioningOutputCommitter.java
License:Apache License
/**
 * Merge two paths together. Anything in from will be moved into to; if there
 * are any name conflicts while merging, the files or directories in from win.
 * @param fs the FileSystem to use
 * @param from the path data is coming from.
 * @param to the path data is going to.
 * @throws IOException on any error
 */
private void mergePaths(FileSystem fs, final FileStatus from, final Path to) throws IOException {
    if (from.isFile()) {
        if (fs.exists(to)) {
            if (!fs.delete(to, true)) {
                throw new IOException("Failed to delete " + to);
            }
        }
        if (!fs.rename(from.getPath(), to)) {
            throw new IOException("Failed to rename " + from + " to " + to);
        }
    } else if (from.isDirectory()) {
        if (fs.exists(to)) {
            FileStatus toStat = fs.getFileStatus(to);
            if (!toStat.isDirectory()) {
                if (!fs.delete(to, true)) {
                    throw new IOException("Failed to delete " + to);
                }
                if (!fs.rename(from.getPath(), to)) {
                    throw new IOException("Failed to rename " + from + " to " + to);
                }
            } else {
                // It is a directory, so merge everything in the directories
                for (FileStatus subFrom : fs.listStatus(from.getPath())) {
                    Path subTo = new Path(to, subFrom.getPath().getName());
                    mergePaths(fs, subFrom, subTo);
                }
            }
        } else {
            // It does not exist, so just rename
            if (!fs.rename(from.getPath(), to)) {
                throw new IOException("Failed to rename " + from + " to " + to);
            }
        }
    }
}
From source file:co.cask.hydrator.plugin.batch.action.FileAction.java
License:Apache License
@SuppressWarnings("ConstantConditions")
@Override
public void run(BatchActionContext context) throws Exception {
    if (!config.shouldRun(context)) {
        return;
    }
    config.substituteMacros(context);

    Job job = JobUtils.createInstance();
    Configuration conf = job.getConfiguration();
    FileSystem fileSystem = FileSystem.get(conf);
    Path[] paths;
    Path sourcePath = new Path(config.path);
    if (fileSystem.isDirectory(sourcePath)) {
        FileStatus[] status = fileSystem.listStatus(sourcePath);
        paths = FileUtil.stat2Paths(status);
    } else {
        paths = new Path[] { sourcePath };
    }

    // get regex pattern for file name filtering.
    boolean patternSpecified = !Strings.isNullOrEmpty(config.pattern);
    if (patternSpecified) {
        regex = Pattern.compile(config.pattern);
    }

    switch (config.action.toLowerCase()) {
    case "delete":
        for (Path path : paths) {
            if (!patternSpecified || isFileNameMatch(path.getName())) {
                fileSystem.delete(path, true);
            }
        }
        break;
    case "move":
        for (Path path : paths) {
            if (!patternSpecified || isFileNameMatch(path.getName())) {
                Path targetFileMovePath = new Path(config.targetFolder, path.getName());
                fileSystem.rename(path, targetFileMovePath);
            }
        }
        break;
    case "archive":
        for (Path path : paths) {
            if (!patternSpecified || isFileNameMatch(path.getName())) {
                try (FSDataOutputStream archivedStream = fileSystem
                        .create(new Path(config.targetFolder, path.getName() + ".zip"));
                        ZipOutputStream zipArchivedStream = new ZipOutputStream(archivedStream);
                        FSDataInputStream fdDataInputStream = fileSystem.open(path)) {
                    zipArchivedStream.putNextEntry(new ZipEntry(path.getName()));
                    int length;
                    byte[] buffer = new byte[1024];
                    while ((length = fdDataInputStream.read(buffer)) > 0) {
                        zipArchivedStream.write(buffer, 0, length);
                    }
                    zipArchivedStream.closeEntry();
                }
                fileSystem.delete(path, true);
            }
        }
        break;
    default:
        LOG.warn("No action required on the file.");
        break;
    }
}
From source file:com.alexholmes.hdfsslurper.WorkerThread.java
License:Apache License
private void process(FileStatus srcFileStatus) throws IOException, InterruptedException {
    Path stagingFile = null;
    FileSystem destFs = null;
    String filenameBatchidDelimiter = config.getFileNameBatchIdDelimiter();
    try {
        FileSystem srcFs = srcFileStatus.getPath().getFileSystem(config.getConfig());

        // run a script which can change the name of the file as well as
        // write out a new version of the file
        if (config.getWorkScript() != null) {
            Path newSrcFile = stageSource(srcFileStatus);
            srcFileStatus = srcFileStatus.getPath().getFileSystem(config.getConfig()).getFileStatus(newSrcFile);
        }

        Path srcFile = srcFileStatus.getPath();

        // get the target HDFS file
        Path destFile = getHdfsTargetPath(srcFileStatus);

        if (config.getCodec() != null) {
            String ext = config.getCodec().getDefaultExtension();
            if (!destFile.getName().endsWith(ext)) {
                destFile = new Path(destFile.toString() + ext);
            }
        }

        destFs = destFile.getFileSystem(config.getConfig());

        // get the staging HDFS file
        stagingFile = fileSystemManager.getStagingFile(srcFileStatus, destFile);
        String batchId = srcFile.toString().substring(
                srcFile.toString().lastIndexOf(filenameBatchidDelimiter) + 1, srcFile.toString().length());

        log.info("event#Copying source file '" + srcFile + "' to staging destination '" + stagingFile + "'"
                + "$batchId#" + batchId);

        // if the directory of the target file doesn't exist, attempt to create it
        Path destParentDir = destFile.getParent();
        if (!destFs.exists(destParentDir)) {
            log.info("event#Attempting creation of target directory: " + destParentDir.toUri());
            if (!destFs.mkdirs(destParentDir)) {
                throw new IOException("event#Failed to create target directory: " + destParentDir.toUri());
            }
        }

        // if the staging directory doesn't exist, attempt to create it
        Path destStagingParentDir = stagingFile.getParent();
        if (!destFs.exists(destStagingParentDir)) {
            log.info("event#Attempting creation of staging directory: " + destStagingParentDir.toUri());
            if (!destFs.mkdirs(destStagingParentDir)) {
                throw new IOException("event#Failed to create staging directory: " + destParentDir.toUri());
            }
        }

        // copy the file
        InputStream is = null;
        OutputStream os = null;
        CRC32 crc = new CRC32();
        try {
            is = new BufferedInputStream(srcFs.open(srcFile));
            if (config.isVerify()) {
                is = new CheckedInputStream(is, crc);
            }
            os = destFs.create(stagingFile);
            if (config.getCodec() != null) {
                os = config.getCodec().createOutputStream(os);
            }
            IOUtils.copyBytes(is, os, 4096, false);
        } finally {
            IOUtils.closeStream(is);
            IOUtils.closeStream(os);
        }

        long srcFileSize = srcFs.getFileStatus(srcFile).getLen();
        long destFileSize = destFs.getFileStatus(stagingFile).getLen();
        if (config.getCodec() == null && srcFileSize != destFileSize) {
            throw new IOException(
                    "event#File sizes don't match, source = " + srcFileSize + ", dest = " + destFileSize);
        }

        log.info("event#Local file size = " + srcFileSize + ", HDFS file size = " + destFileSize
                + "$batchId#" + batchId);

        if (config.isVerify()) {
            verify(stagingFile, crc.getValue());
        }

        if (destFs.exists(destFile)) {
            destFs.delete(destFile, false);
        }

        log.info("event#Moving staging file '" + stagingFile + "' to destination '" + destFile + "'"
                + "$batchId#" + batchId);
        if (!destFs.rename(stagingFile, destFile)) {
            throw new IOException("event#Failed to rename file");
        }

        if (config.isCreateLzopIndex() && destFile.getName().endsWith(lzopExt)) {
            Path lzoIndexPath = new Path(destFile.toString() + LzoIndex.LZO_INDEX_SUFFIX);
            if (destFs.exists(lzoIndexPath)) {
                log.info("event#Deleting index file as it already exists");
                destFs.delete(lzoIndexPath, false);
            }
            indexer.index(destFile);
        }

        fileSystemManager.fileCopyComplete(srcFileStatus);

    } catch (Throwable t) {
        log.error("event#Caught exception working on file " + srcFileStatus.getPath(), t);

        // delete the staging file if it still exists
        try {
            // guard against a failure before the staging file was assigned
            if (destFs != null && stagingFile != null && destFs.exists(stagingFile)) {
                destFs.delete(stagingFile, false);
            }
        } catch (Throwable t2) {
            log.error("event#Failed to delete staging file " + stagingFile, t2);
        }

        fileSystemManager.fileCopyError(srcFileStatus);
    }
}
From source file:com.alibaba.jstorm.hdfs.common.rotation.MoveFileAction.java
License:Apache License
@Override
public void execute(FileSystem fileSystem, Path filePath) throws IOException {
    Path destPath = new Path(destination, filePath.getName());
    LOG.info("Moving file {} to {}", filePath, destPath);
    // Note: rename's boolean result is not checked; a failed move is silently ignored here.
    fileSystem.rename(filePath, destPath);
}
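Because the result is unchecked, a move that fails (for example, when the destination already exists) goes unnoticed. A stricter variant might look like the following; this is a sketch, not the original class's behavior:

@Override
public void execute(FileSystem fileSystem, Path filePath) throws IOException {
    Path destPath = new Path(destination, filePath.getName());
    LOG.info("Moving file {} to {}", filePath, destPath);
    // surface a failed move instead of ignoring it
    if (!fileSystem.rename(filePath, destPath)) {
        throw new IOException("Failed to move " + filePath + " to " + destPath);
    }
}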
From source file:com.anhth12.lambda.ml.MLUpdate.java
@Override
public void runUpdate(JavaSparkContext sparkContext, long timestamp, JavaPairRDD<String, M> newKeyMessageData,
        JavaPairRDD<String, M> pastKeyMessageData, String modelDirString,
        TopicProducer<String, String> modelUpdateTopic) throws IOException, InterruptedException {

    Preconditions.checkNotNull(newKeyMessageData);

    JavaRDD<M> newData = newKeyMessageData.values();
    JavaRDD<M> pastData = pastKeyMessageData == null ? null : pastKeyMessageData.values();

    if (newData != null) {
        newData.cache();
        newData.foreachPartition(Functions.<Iterator<M>>noOp());
    }
    if (pastData != null) {
        pastData.cache();
        pastData.foreachPartition(Functions.<Iterator<M>>noOp());
    }

    List<HyperParamValues<?>> hyperParamValues = getHyperParamValues();
    int valuesPerHyperParam = HyperParams.chooseValuesPerHyperParam(hyperParamValues.size(), candidates);
    List<List<?>> hyperParameterCombos = HyperParams.chooseHyperParameterCombos(hyperParamValues, candidates,
            valuesPerHyperParam);

    FileSystem fs = FileSystem.get(sparkContext.hadoopConfiguration());
    Path modelDir = new Path(modelDirString);
    Path tempModelPath = new Path(modelDir, ".temporary");
    Path candidatesPath = new Path(tempModelPath, Long.toString(System.currentTimeMillis()));
    fs.mkdirs(candidatesPath);

    Path bestCandidatePath = findBestCandidatePath(sparkContext, newData, pastData, hyperParameterCombos,
            candidatesPath);

    Path finalPath = new Path(modelDir, Long.toString(System.currentTimeMillis()));
    if (bestCandidatePath == null) {
        log.info("Unable to build any model");
    } else {
        // Promote the best candidate model directory to its final location
        fs.rename(bestCandidatePath, finalPath);
    }
    fs.delete(candidatesPath, true);

    Path bestModelPath = new Path(finalPath, MODEL_FILE_NAME);
    if (fs.exists(bestModelPath)) {
        PMML bestModel;
        try (InputStream in = new GZIPInputStream(fs.open(bestModelPath), 1 << 16)) {
            bestModel = PMMLUtils.read(in);
        }
        modelUpdateTopic.send("MODEL", PMMLUtils.toString(bestModel));
        publishAdditionalModelData(sparkContext, bestModel, newData, pastData, candidatesPath,
                modelUpdateTopic);
    }

    if (newData != null) {
        newData.unpersist();
    }
    if (pastData != null) {
        pastData.unpersist();
    }
}
From source file:com.architecting.ch07.MapReduceIndexerTool.java
License:Apache License
private boolean rename(Path src, Path dst, FileSystem fs) throws IOException {
    boolean success = fs.rename(src, dst);
    if (!success) {
        LOG.error("Cannot rename " + src + " to " + dst);
    }
    return success;
}
From source file:com.asakusafw.runtime.directio.hadoop.HadoopDataSourceUtil.java
License:Apache License
private static void move(Counter counter, FileSystem fromFs, Path from, FileSystem toFs, Path to,
        boolean fromLocal) throws IOException {
    if (counter == null) {
        throw new IllegalArgumentException("counter must not be null"); //$NON-NLS-1$
    }
    if (fromFs == null) {
        throw new IllegalArgumentException("fromFs must not be null"); //$NON-NLS-1$
    }
    if (from == null) {
        throw new IllegalArgumentException("from must not be null"); //$NON-NLS-1$
    }
    if (toFs == null) {
        throw new IllegalArgumentException("toFs must not be null"); //$NON-NLS-1$
    }
    if (to == null) {
        throw new IllegalArgumentException("to must not be null"); //$NON-NLS-1$
    }
    if (fromLocal && isLocalPath(from) == false) {
        throw new IllegalArgumentException("from must be on local file system"); //$NON-NLS-1$
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(MessageFormat.format("Start moving files (from={0}, to={1})", //$NON-NLS-1$
                from, to));
    }
    Path source = fromFs.makeQualified(from);
    Path target = toFs.makeQualified(to);
    List<Path> list = createFileListRelative(counter, fromFs, source);
    if (list.isEmpty()) {
        return;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(MessageFormat.format("Process moving files (from={0}, to={1}, count={2})", //$NON-NLS-1$
                from, to, list.size()));
    }
    Set<Path> directoryCreated = new HashSet<>();
    for (Path path : list) {
        Path sourceFile = new Path(source, path);
        Path targetFile = new Path(target, path);
        if (LOG.isTraceEnabled()) {
            FileStatus stat = fromFs.getFileStatus(sourceFile);
            LOG.trace(MessageFormat.format("Moving file (from={0}, to={1}, size={2})", //$NON-NLS-1$
                    sourceFile, targetFile, stat.getLen()));
        }
        try {
            FileStatus stat = toFs.getFileStatus(targetFile);
            if (LOG.isDebugEnabled()) {
                LOG.debug(MessageFormat.format("Deleting file: {0}", //$NON-NLS-1$
                        targetFile));
            }
            if (FileSystemCompatibility.isDirectory(stat)) {
                toFs.delete(targetFile, true);
            } else {
                toFs.delete(targetFile, false);
            }
        } catch (FileNotFoundException e) {
            Path targetParent = targetFile.getParent();
            if (directoryCreated.contains(targetParent) == false) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug(MessageFormat.format("Creating directory: {0}", //$NON-NLS-1$
                            targetParent));
                }
                toFs.mkdirs(targetParent);
                directoryCreated.add(targetParent);
            }
        }
        counter.add(1);
        if (fromLocal) {
            toFs.moveFromLocalFile(sourceFile, targetFile);
        } else {
            boolean succeed = toFs.rename(sourceFile, targetFile);
            if (succeed == false) {
                throw new IOException(MessageFormat.format("Failed to move file (from={0}, to={1})",
                        sourceFile, targetFile));
            }
        }
        counter.add(1);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(MessageFormat.format("Finish moving files (from={0}, to={1}, count={2})", //$NON-NLS-1$
                from, to, list.size()));
    }
}
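Note how this method only calls rename when the move stays within one filesystem and falls back to moveFromLocalFile for local sources: rename cannot move data between two different FileSystem instances. For the general cross-filesystem case, Hadoop's FileUtil.copy with deleteSource = true is the usual substitute; a minimal sketch with illustrative variables (srcFs, srcPath, dstFs, dstPath, conf assumed in scope):

// Cross-filesystem "move": copy the data, then delete the source.
boolean moved = FileUtil.copy(srcFs, srcPath, dstFs, dstPath, true /* deleteSource */, conf);
if (!moved) {
    throw new IOException("Failed to move " + srcPath + " to " + dstPath);
}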