Example usage for org.apache.hadoop.fs FileSystem rename

List of usage examples for org.apache.hadoop.fs FileSystem rename

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem.rename.

Prototype

public abstract boolean rename(Path src, Path dst) throws IOException;

Document

Renames Path src to Path dst.
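
Before the real-world sources below, here is a minimal, self-contained sketch of a direct call. The class name and paths are hypothetical, and it assumes a reachable file system and a destination that does not already exist:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path src = new Path("/tmp/data.tmp"); // hypothetical source path
        Path dst = new Path("/tmp/data.log"); // hypothetical destination path

        // Obtain the FileSystem instance that owns the source path.
        FileSystem fs = src.getFileSystem(conf);

        // On HDFS, rename reports most failures (for example, an existing
        // destination) by returning false rather than throwing, so check the result.
        if (!fs.rename(src, dst)) {
            throw new IOException("Failed to rename " + src + " to " + dst);
        }
    }
}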

Usage

From source file:org.springframework.data.hadoop.store.support.OutputStoreObjectSupport.java

License:Apache License

/**
 * Rename file using prefix and suffix settings.
 *
 * @param path the path to rename
 */
protected void renameFile(Path path) {
    // bail out if there's no in-writing settings
    if (!StringUtils.hasText(prefix) && !StringUtils.hasText(suffix)) {
        return;
    }
    String name = path.getName();
    if (StringUtils.startsWithIgnoreCase(name, prefix)) {
        name = name.substring(prefix.length());
    }
    if (StringUtils.endsWithIgnoreCase(name, suffix)) {
        name = name.substring(0, name.length() - suffix.length());
    }
    Path toPath = new Path(path.getParent(), name);
    try {
        FileSystem fs = path.getFileSystem(getConfiguration());

        boolean succeed;
        try {
            fs.delete(toPath, false);
            succeed = fs.rename(path, toPath);
        } catch (Exception e) {
            throw new StoreException("Failed renaming from " + path + " to " + toPath, e);
        }
        if (!succeed) {
            throw new StoreException(
                    "Failed renaming from " + path + " to " + toPath + " because hdfs returned false");
        }
    } catch (IOException e) {
        log.error("Error renaming file", e);
        throw new StoreException("Error renaming file", e);
    }
}

From source file:org.starschema.hadoop.yarn.applications.distributedshell.ApplicationMaster.java

License:Apache License

private void renameScriptFile(final Path renamedScriptPath) throws IOException, InterruptedException {
    appSubmitterUgi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws IOException {
            FileSystem fs = new Path(scriptPath).getFileSystem(conf);
            fs.rename(new Path(scriptPath), renamedScriptPath);
            return null;
        }
    });
    LOG.info("User " + appSubmitterUgi.getUserName() + " added suffix to file as " + renamedScriptPath);
}

From source file:org.starschema.hadoop.yarn.applications.distributedshell.ApplicationMaster.java

License:Apache License

private void renameHazelFile(final Path renamedHazelPath) throws IOException, InterruptedException {
    appSubmitterUgi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws IOException {
            FileSystem fs = new Path(hazelPath).getFileSystem(conf);
            fs.rename(new Path(hazelPath), renamedHazelPath);
            return null;
        }
    });
    LOG.info("User " + appSubmitterUgi.getUserName() + " added suffix to file as " + renamedHazelPath);
}

From source file:org.terrier.utility.io.HadoopPlugin.java

License:Mozilla Public License

/** Initialises the Plugin, by connecting to the distributed file system */
public void initialise() throws Exception {
    config = getGlobalConfiguration();

    final org.apache.hadoop.fs.FileSystem DFS = hadoopFS = org.apache.hadoop.fs.FileSystem.get(config);

    FileSystem terrierDFS = new FileSystem() {
        public String name() {
            return "hdfs";
        }

        /** capabilities of the filesystem */
        public byte capabilities() {
            return FSCapability.READ | FSCapability.WRITE | FSCapability.RANDOM_READ | FSCapability.STAT
                    | FSCapability.DEL_ON_EXIT | FSCapability.LS_DIR;
        }

        public String[] schemes() {
            return new String[] { "dfs", "hdfs" };
        }

        /** returns true if the path exists */
        public boolean exists(String filename) throws IOException {
            if (logger.isDebugEnabled())
                logger.debug("Checking that " + filename + " exists answer=" + DFS.exists(new Path(filename)));
            return DFS.exists(new Path(filename));
        }

        /** open a file of given filename for reading */
        public InputStream openFileStream(String filename) throws IOException {
            if (logger.isDebugEnabled())
                logger.debug("Opening " + filename);
            return DFS.open(new Path(filename));
        }

        /** open a file of given filename for writing */
        public OutputStream writeFileStream(String filename) throws IOException {
            if (logger.isDebugEnabled())
                logger.debug("Creating " + filename);
            return DFS.create(new Path(filename));
        }

        public boolean mkdir(String filename) throws IOException {
            return DFS.mkdirs(new Path(filename));
        }

        public RandomDataOutput writeFileRandom(String filename) throws IOException {
            throw new IOException("HDFS does not support random writing");
        }

        public RandomDataInput openFileRandom(String filename) throws IOException {
            return new HadoopFSRandomAccessFile(DFS, filename);
        }

        public boolean delete(String filename) throws IOException {
            return DFS.delete(new Path(filename), true);
        }

        public boolean deleteOnExit(String filename) throws IOException {
            return DFS.deleteOnExit(new Path(filename));
        }

        public String[] list(String path) throws IOException {
            final FileStatus[] contents = DFS.listStatus(new Path(path));
            if (contents == null)
                throw new FileNotFoundException("Cannot list path " + path);
            final String[] names = new String[contents.length];
            for (int i = 0; i < contents.length; i++) {
                names[i] = contents[i].getPath().getName();
            }
            return names;
        }

        public String getParent(String path) throws IOException {
            return new Path(path).getParent().getName();
        }

        public boolean rename(String source, String destination) throws IOException {
            return DFS.rename(new Path(source), new Path(destination));
        }

        public boolean isDirectory(String path) throws IOException {
            return DFS.getFileStatus(new Path(path)).isDir();
        }

        public long length(String path) throws IOException {
            return DFS.getFileStatus(new Path(path)).getLen();
        }

        public boolean canWrite(String path) throws IOException {
            return DFS.getFileStatus(new Path(path)).getPermission().getUserAction().implies(FsAction.WRITE);
        }

        public boolean canRead(String path) throws IOException {
            return DFS.getFileStatus(new Path(path)).getPermission().getUserAction().implies(FsAction.READ);
        }
    };
    Files.addFileSystemCapability(terrierDFS);
}

From source file:org.trafodion.sql.HBaseAccess.SequenceFileWriter.java

License:Apache License

public boolean hdfsMergeFiles(String srcPathStr, String dstPathStr) throws Exception {
    if (logger.isDebugEnabled())
        logger.debug("SequenceFileWriter.hdfsMergeFiles() - start");
    if (logger.isDebugEnabled())
        logger.debug("SequenceFileWriter.hdfsMergeFiles() - source Path: " + srcPathStr + ", destination File:"
                + dstPathStr);
    try {
        Path srcPath = new Path(srcPathStr);
        srcPath = srcPath.makeQualified(srcPath.toUri(), null);
        FileSystem srcFs = FileSystem.get(srcPath.toUri(), conf);

        Path dstPath = new Path(dstPathStr);
        dstPath = dstPath.makeQualified(dstPath.toUri(), null);
        FileSystem dstFs = FileSystem.get(dstPath.toUri(), conf);

        if (dstFs.exists(dstPath)) {
            if (logger.isDebugEnabled())
                logger.debug("SequenceFileWriter.hdfsMergeFiles() - destination files exists");
            // for this prototype we just delete the file-- will change in next code drops
            dstFs.delete(dstPath, false);
            // The caller should already have checked existence of file-- throw exception 
            //throw new FileAlreadyExistsException(dstPath.toString());
        }

        Path tmpSrcPath = new Path(srcPath, "tmp");

        FileSystem.mkdirs(srcFs, tmpSrcPath, srcFs.getFileStatus(srcPath).getPermission());
        logger.debug("SequenceFileWriter.hdfsMergeFiles() - tmp folder created.");
        Path[] files = FileUtil.stat2Paths(srcFs.listStatus(srcPath));
        for (Path f : files) {
            srcFs.rename(f, tmpSrcPath);
        }
        // copyMerge and use false for the delete option since it removes the whole directory
        if (logger.isDebugEnabled())
            logger.debug("SequenceFileWriter.hdfsMergeFiles() - copyMerge");
        FileUtil.copyMerge(srcFs, tmpSrcPath, dstFs, dstPath, false, conf, null);

        if (logger.isDebugEnabled())
            logger.debug("SequenceFileWriter.hdfsMergeFiles() - delete intermediate files");
        srcFs.delete(tmpSrcPath, true);
    } catch (IOException e) {
        if (logger.isDebugEnabled())
            logger.debug("SequenceFileWriter.hdfsMergeFiles() --exception:" + e);
        throw e;
    }

    return true;
}

From source file:org.wso2.carbon.hdfs.mgt.HDFSAdmin.java

License:Open Source License

/**
 * Rename a file or folder using the source and destination paths of the given FS
 * object.
 * 
 * @param srcPath
 *            Current path and the file name of the file to be renamed
 * @param dstPath
 *            New path and the file name
 * @return success if rename is successful
 * @throws HDFSServerManagementException
 */

public boolean renameFile(String srcPath, String dstPath) throws HDFSServerManagementException {

    FsPermission fp = HDFSConstants.DEFAULT_FILE_PERMISSION;
    FileSystem hdfsFS = null;
    Path src = new Path(srcPath);
    Path dest = new Path(dstPath);
    boolean fileExists = false;
    try {
        hdfsFS = hdfsAdminHelperInstance.getFSforUser();
    } catch (IOException e) {
        String msg = "Error occurred while trying to mount file system.";
        handleException(msg, e);
    }
    try {
        if (hdfsFS != null && !hdfsFS.exists(dest)) {
            hdfsFS.rename(src, dest);
            hdfsFS.setPermission(dest, fp);
        } else {
            fileExists = true;
        }
    } catch (IOException e) {
        String msg = "Error occurred while trying to rename file.";
        handleException(msg, e);
    }
    handleItemExistState(fileExists, true, false);
    return false;
}

From source file:org.wso2.carbon.hdfs.mgt.HDFSAdmin.java

License:Open Source License

/**
 * Rename a file or folder using the source and destination paths of the given FS
 * object.
 * 
 * @param srcPath
 *            Current path and the file name of the file to be renamed
 * @param dstPath
 *            New path and the file name
 * @return success if rename is successful
 * @throws HDFSServerManagementException
 */

public boolean renameFolder(String srcPath, String dstPath) throws HDFSServerManagementException {

    FsPermission fp = HDFSConstants.DEFAULT_FILE_PERMISSION;
    FileSystem hdfsFS = null;
    boolean isFolderExists = false;
    try {
        hdfsFS = hdfsAdminHelperInstance.getFSforUser();
    } catch (IOException e) {
        String msg = "Error occurred while trying to mount file system.";
        handleException(msg, e);
    }
    try {
        if (hdfsFS != null && !hdfsFS.exists(new Path(dstPath))) {
            hdfsFS.rename(new Path(srcPath), new Path(dstPath));
            return true;
        } else {
            isFolderExists = true;
        }
    } catch (IOException e) {
        String msg = "Error occurred while trying to rename folder.";
        handleException(msg, e);
    }
    handleItemExistState(isFolderExists, true, true);
    return false;
}

From source file:org.wso2.carbon.hdfs.mgt.HDFSAdmin.java

License:Open Source License

public boolean moveFile(String srcPath, String dstPath) throws HDFSServerManagementException {

    FsPermission fp = HDFSConstants.DEFAULT_FILE_PERMISSION;
    FileSystem hdfsFS = null;
    try {
        hdfsFS = hdfsAdminHelperInstance.getFSforUser();
    } catch (IOException e) {
        String msg = "Error occurred while trying to mount file system.";
        handleException(msg, e);
    }

    try {
        if (hdfsFS != null) {
            hdfsFS.rename(new Path(srcPath), new Path(dstPath));
            hdfsFS.setPermission(new Path(dstPath), fp);
            return true;
        }

    } catch (IOException e) {
        String msg = "Error occurred while trying to move file.";
        handleException(msg, e);
    }

    return false;
}

From source file:org.wso2.carbon.logging.summarizer.scriptCreator.OutputFileHandler.java

License:Apache License

public void fileReStructure(String colFamilyName) throws IOException {
    log.info("CF " + colFamilyName);

    Configuration conf = new Configuration(false);
    /**
     * Create HDFS Client configuration to use name node hosted on host master and port 9000.
     * Client configured to connect to a remote distributed file system.
     */
    conf.set("fs.default.name", hdfsConfig);
    conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");

    /**
     * Get connection to the remote file system
     */
    FileSystem fs = FileSystem.get(conf);

    /**
     * Create the file sourcePath object
     */
    Path filePath = new Path(archivedLogLocation);

    String tmpStrArr[] = colFamilyName.split("_");
    String tenantId = tmpStrArr[1];
    String serverName = tmpStrArr[2];

    String createdDate = tmpStrArr[3] + "_" + tmpStrArr[4] + "_" + tmpStrArr[5];
    String directoryPathName = archivedLogLocation + tenantId + "/" + serverName + "/";
    String filePathName = directoryPathName + createdDate;
    log.info("filePathName " + filePathName);
    log.info("createdDate " + createdDate);
    //Rename the 000000_0 file as a .tmp file
    Path sourceFileName = new Path(filePathName + "/000000_0");
    Path destnFileName = new Path(filePathName + "/" + createdDate + ".tmp");

    boolean isRenamed = fs.rename(sourceFileName, destnFileName);
    log.info("rename " + isRenamed);

    /*if (!isRenamed) {
    String path = sourceFileName.toString();
    FileStatus[] status = fs.listStatus(new Path("/stratos/archivedLogs/212/")); // you need to
    log.info(status);
    if (status != null) {
        for (int i = 0; i < status.length; i++) {
            log.info("X:" + status[i].getPath());
        }
    } else {
        log.info("Null");
    }
    // in your hdfs path
    }*/

    //To remove the unicode character in the created .tmp file
    if (isRenamed) {
        Path sanitizedFileName = new Path(filePathName + "/" + createdDate + ".log");
        replaceChar(destnFileName, sanitizedFileName, fs);

        log.info("Logs of Tenant " + tenantId + " of " + serverName + " on " + createdDate
                + " are successfully archived");
    } else {
        log.info("Logs of Tenant " + tenantId + " of " + serverName + " on " + createdDate
                + " are not ******* successfully archived");

    }

}

From source file:pad.InitializationDriver.java

License:Apache License

/**
 * Execute the InitializationDriver Job.
 *
 * If the input file format is adjacency list, then we can easily determine the initial number of nodes
 * that is equal to the number of rows of the input file while the number of cliques is zero.
 * In order to obtain a list of arcs from the adjacency list, we use the \see InitializationMapperAdjacency
 * as Mapper and zero Reducer.
 * 
 * If the input file format is cliques list, then we can easily determine the number of cliques
 * that is equal to the number of rows of the input file.
 * In order to obtain an edge list from the cliques list, we use the \see InitializationMapperClique
 * as Mapper. We store this result into a special folder \see MOS_OUTPUT_NAME.
 * Into the regular folder, this Mapper emits all the encountered nodes.
 * We use \see InitializationReducerNumNodes as Reducer in order to count the initial number of nodes
 * counting all the distinct nodes found. The combiner (\see InitializationCombinerNumNodes) locally reduces
 * the number of duplicated nodes.
 * Obtained the value of the NUM_INITIAL_NODES counter ( \see UtilCounters ), we delete the empty files
 * produced by the Reducer and we move the real results into the main/regular folder.
 * 
 * @param args      array of external arguments, not used in this method
 * @return          <c>1</c> if the InitializationDriver Job failed its execution; <c>0</c> if everything is ok. 
 * @throws Exception 
 */
public int run(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // GenericOptionsParser invocation in order to suppress the hadoop warning.
    new GenericOptionsParser(conf, args);
    Job job = new Job(conf, "InitializationDriver");
    job.setJarByClass(InitializationDriver.class);

    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(IntWritable.class);

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);

    FileInputFormat.addInputPath(job, this.input);
    FileOutputFormat.setOutputPath(job, this.output);

    if (this.type == InputType.ADJACENCY_LIST) {
        // In order to obtain the arcs list from the adjacency list, we need only a Mapper task.
        job.setMapperClass(InitializationMapperAdjacency.class);
        job.setNumReduceTasks(0);
    } else {
        // Set up the special folder.
        MultipleOutputs.addNamedOutput(job, MOS_OUTPUT_NAME, SequenceFileOutputFormat.class, IntWritable.class,
                IntWritable.class);
        MultipleOutputs.setCountersEnabled(job, true);
        // In order to obtain the edges list from the cliques list, we need only a Mapper task
        // and we save the result into the special folder.
        // Then, we need a Reducer task in order to count the initial number of nodes
        job.setMapperClass(InitializationMapperClique.class);
        job.setCombinerClass(InitializationCombinerNumNodes.class);
        job.setReducerClass(InitializationReducerNumNodes.class);
    }

    if (!job.waitForCompletion(verbose))
        return 1;

    // Set up the private variables looking to the counters value
    this.numCliques = job.getCounters().findCounter(UtilCounters.NUM_CLIQUES).getValue();
    this.numInitialNodes = job.getCounters().findCounter(UtilCounters.NUM_INITIAL_NODES).getValue();

    if (this.type == InputType.CLIQUES_LIST) {
        FileSystem fs = FileSystem.get(conf);

        // Delete the empty outputs of the Job
        FileStatus[] filesStatus = fs.listStatus(this.output);
        for (FileStatus fileStatus : filesStatus)
            if (fileStatus.getPath().getName().contains("part"))
                fs.delete(fileStatus.getPath(), false);

        // Move the real outputs into the parent folder
        filesStatus = fs.listStatus(this.output.suffix("/" + MOS_OUTPUT_NAME));
        for (FileStatus fileStatus : filesStatus)
            fs.rename(fileStatus.getPath(), this.output.suffix("/" + fileStatus.getPath().getName()));

        // Delete empty special folder
        fs.delete(this.output.suffix("/" + MOS_OUTPUT_NAME), true);
    }

    return 0;
}