Example usage for org.apache.hadoop.fs FileSystem delete

List of usage examples for org.apache.hadoop.fs FileSystem delete

Introduction

On this page you can find example usages of org.apache.hadoop.fs FileSystem delete.

Prototype

public abstract boolean delete(Path f, boolean recursive) throws IOException;

Document

Deletes a file or directory. If the path is a directory and recursive is true, the directory and its contents are removed; if recursive is false, deleting a non-empty directory throws an exception. Returns true if the delete succeeded, and false otherwise (for example, when the path does not exist).
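
For reference, here is a minimal, self-contained sketch of a typical call. The configuration and the target path used here are hypothetical placeholders, not taken from the examples below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // hypothetical target path; point this at a real file or directory
        Path target = new Path("/tmp/example-output");
        FileSystem fs = target.getFileSystem(conf);
        // recursive = true also removes a non-empty directory and its contents;
        // the call returns false if the path does not exist
        boolean deleted = fs.delete(target, true);
        System.out.println("deleted=" + deleted + " path=" + fs.makeQualified(target));
    }
}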

Usage

From source file:com.asakusafw.testdriver.file.FileExporterRetriever.java

License:Apache License

@Override
public void truncate(FileExporterDescription description, TestContext context) throws IOException {
    LOG.info("cleaning output files: {}", description);
    VariableTable variables = createVariables(context);
    Configuration config = configurations.newInstance();
    String resolved = variables.parse(description.getPathPrefix(), false);
    Path path = new Path(resolved);
    FileSystem fs = path.getFileSystem(config);
    Path output = path.getParent();
    Path target;
    if (output == null) {
        LOG.warn("output path has no parent directory, deleting the path itself: {}", path);
        target = fs.makeQualified(path);
    } else {
        LOG.warn("??????: {}", output);
        target = fs.makeQualified(output);
    }
    LOG.debug("start removing file: {}", target);
    boolean succeed = fs.delete(target, true);
    LOG.debug("finish removing file (succeed={}): {}", succeed, target);
}

From source file:com.asakusafw.testdriver.file.FileImporterPreparator.java

License:Apache License

@Override
public void truncate(FileImporterDescription description, TestContext context) throws IOException {
    LOG.info("cleaning input files: {}", description);
    VariableTable variables = createVariables(context);
    Configuration config = configurations.newInstance();
    FileSystem fs = FileSystem.get(config);
    for (String path : description.getPaths()) {
        String resolved = variables.parse(path, false);
        Path target = fs.makeQualified(new Path(resolved));
        LOG.debug("start removing file: {}", target);
        boolean succeed = fs.delete(target, true);
        LOG.debug("finish removing file (succeed={}): {}", succeed, target);
    }
}

From source file:com.asakusafw.testdriver.JobflowExecutor.java

License:Apache License

/**
 * Cleans up the working directory on the DFS.
 * @throws IOException if failed to clean up
 */
public void cleanWorkingDirectory() throws IOException {
    Configuration conf = configurations.newInstance();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path(CompilerConstants.getRuntimeWorkingDirectory());
    Path fullPath = fs.makeQualified(path);
    LOG.debug("start initializing working directory on the testing runtime: {}", fullPath); //$NON-NLS-1$
    boolean deleted = fs.delete(fullPath, true);
    if (deleted) {
        LOG.debug("finish initializing working directory on the testing runtime: {}", fullPath); //$NON-NLS-1$
    } else {
        LOG.debug("failed to initialize working directory on the testing runtime: {}", fullPath); //$NON-NLS-1$
    }
}

From source file:com.asakusafw.testdriver.LegacyJobflowExecutor.java

License:Apache License

/**
 * Cleans up the working directory on the DFS.
 * @throws IOException if failed to clean up
 */
public void cleanWorkingDirectory() throws IOException {
    Configuration conf = configurations.newInstance();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path(context.getClusterWorkDir());
    Path fullPath = fs.makeQualified(path);
    LOG.debug("start initializing working directory on the testing runtime: {}", fullPath); //$NON-NLS-1$
    boolean deleted = fs.delete(fullPath, true);
    if (deleted) {
        LOG.debug("finish initializing working directory on the testing runtime: {}", fullPath); //$NON-NLS-1$
    } else {
        LOG.debug("failed to initialize working directory on the testing runtime: {}", fullPath); //$NON-NLS-1$
    }
}

From source file:com.asakusafw.testdriver.mapreduce.io.TemporaryInputPreparator.java

License:Apache License

static void delete(FileSystem fs, Path target) throws IOException {
    FileStatus[] stats = fs.globStatus(target);
    if (stats == null || stats.length == 0) {
        return;
    }
    for (FileStatus s : stats) {
        Path path = s.getPath();
        LOG.debug("deleting file: {}", path); //$NON-NLS-1$
        boolean succeed = fs.delete(path, true);
        LOG.debug("deleted file (succeed={}): {}", succeed, path); //$NON-NLS-1$
    }
}

From source file:com.asakusafw.testdriver.testing.moderator.MockExporterRetriever.java

License:Apache License

@Override
public void truncate(MockExporterDescription description, TestContext context) throws IOException {
    LOG.debug("deleting output directory: {}", description); //$NON-NLS-1$
    Configuration config = configurations.newInstance();
    FileSystem fs = FileSystem.get(config);
    Path path = new Path(description.getGlob());
    try {
        FileStatus[] stats = fs.globStatus(path);
        if (stats != null) { // globStatus returns null when the (non-glob) path does not exist
            for (FileStatus s : stats) {
                fs.delete(s.getPath(), false);
            }
        }
    } catch (IOException e) {
        LOG.debug("exception in truncate", e);
    }
}

From source file:com.asakusafw.testdriver.testing.moderator.MockImporterPreparator.java

License:Apache License

@Override
public void truncate(MockImporterDescription description, TestContext context) throws IOException {
    Configuration config = configurations.newInstance();
    FileSystem fs = FileSystem.get(config);
    Path target = fs.makeQualified(new Path(description.getDirectory()));
    if (fs.exists(target)) {
        fs.delete(target, true);
    }
}

From source file:com.asakusafw.windgate.hadoopfs.ssh.WindGateHadoopDelete.java

License:Apache License

private void doDelete(FileSystem fs, FileStatus status, FileList.Writer drain) throws IOException {
    assert fs != null;
    assert status != null;
    assert drain != null;
    WGLOG.info("I22004", fs.getUri(), status.getPath());
    try (OutputStream output = drain.openNext(status.getPath())) {
        String failReason = null;
        try {
            boolean deleted;
            if (RuntimeContext.get().isSimulation()) {
                deleted = true;
            } else {
                deleted = fs.delete(status.getPath(), true);
            }
            if (deleted == false) {
                if (fs.exists(status.getPath())) {
                    WGLOG.warn("W22001", fs.getUri(), status.getPath());
                    failReason = "Unknown";
                }
            }
        } catch (IOException e) {
            WGLOG.warn(e, "W22001", fs.getUri(), status.getPath());
            failReason = e.toString();
        }
        if (failReason != null) {
            output.write(failReason.getBytes(UTF8));
        }
    }
}

From source file:com.asakusafw.workflow.hadoop.HadoopDelete.java

License:Apache License

private static void delete(Configuration conf, Path path) throws IOException {
    FileSystem fs = path.getFileSystem(conf);
    if (LOG.isDebugEnabled()) {
        LOG.debug("deleting file: {}", fs.makeQualified(path));
    }
    boolean deleted = fs.delete(path, true);
    if (LOG.isDebugEnabled()) {
        if (deleted) {
            LOG.debug("delete success: {}", fs.makeQualified(path));
        } else if (fs.exists(path)) {
            LOG.debug("delete failed: {}", fs.makeQualified(path));
        } else {
            LOG.debug("target file is not found: {}", fs.makeQualified(path));
        }
    }
}

From source file:com.awcoleman.StandaloneJava.AvroCombinerByBlock.java

License:Apache License

public AvroCombinerByBlock(String inDirStr, String outDirStr, String handleExisting) throws IOException {

    //handle both an output directory and an output filename (ending with .avro)
    String outputFilename = DEFAULTOUTPUTFILENAME;
    if (outDirStr.endsWith(".avro")) {
        isOutputNameSpecifiedAndAFile = true;
        //String[] outputParts = outDirStr.split(":?\\\\");
        String[] outputParts = outDirStr.split("/");

        outputFilename = outputParts[outputParts.length - 1];

        //remove outputFilename from outDirStr to get new outDirStr which is just directory (and trailing /)
        outDirStr = outDirStr.replaceAll(Pattern.quote(outputFilename), "");
        outDirStr = outDirStr.substring(0, outDirStr.length() - (outDirStr.endsWith("/") ? 1 : 0));
    }

    //Get block size - not needed
    //long hdfsBlockSize = getBlockSize();
    //System.out.println("HDFS FS block size: "+hdfsBlockSize);

    //Get list of input files
    ArrayList<FileStatus> inputFileList = new ArrayList<FileStatus>();

    Configuration conf = new Configuration();
    conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));
    conf.set("dfs.replication", "1"); //see http://stackoverflow.com/questions/24548699/how-to-append-to-an-hdfs-file-on-an-extremely-small-cluster-3-nodes-or-less

    FileSystem hdfs = null;
    try {
        hdfs = FileSystem.get(conf);
    } catch (java.io.IOException ioe) {
        System.out.println("Error opening HDFS filesystem. Exiting. Error message: " + ioe.getMessage());
        System.exit(1);
    }
    if (hdfs.getStatus() == null) {
        System.out.println("Unable to contact HDFS filesystem. Exiting.");
        System.exit(1);
    }

    //Check if input and output dirs exist
    Path inDir = new Path(inDirStr);
    Path outDir = new Path(outDirStr);
    if (!(hdfs.exists(inDir) && hdfs.isDirectory(inDir))) {
        System.out.println("Input directory ( " + inDirStr + " ) not found or is not directory. Exiting.");
        System.exit(1);
    }

    if (!(hdfs.exists(outDir) && hdfs.isDirectory(outDir))) {
        if (hdfs.exists(outDir)) { //outDir exists and is a symlink or file, must die
            System.out.println("Requested output directory name ( " + outDirStr
                    + " ) exists but is not a directory. Exiting.");
            System.exit(1);
        } else {
            hdfs.mkdirs(outDir);
        }
    }

    RemoteIterator<LocatedFileStatus> fileStatusListIterator = hdfs.listFiles(inDir, true);
    while (fileStatusListIterator.hasNext()) {
        LocatedFileStatus fileStatus = fileStatusListIterator.next();

        if (fileStatus.isFile() && !fileStatus.getPath().getName().equals("_SUCCESS")) {
            inputFileList.add((FileStatus) fileStatus);
        }
    }

    if (inputFileList.size() <= 1 && !isOutputNameSpecifiedAndAFile) { //If an output file is specified assume we just want a rename.
        System.out.println("Only one or zero files found in input directory ( " + inDirStr + " ). Exiting.");
        System.exit(1);
    }

    //Get Schema and Compression Codec from seed file since we need it for the writer
    Path firstFile = inputFileList.get(0).getPath();
    FsInput fsin = new FsInput(firstFile, conf);
    DataFileReader<Object> dfrFirstFile = new DataFileReader<Object>(fsin, new GenericDatumReader<Object>());
    Schema fileSchema = dfrFirstFile.getSchema();
    String compCodecName = dfrFirstFile.getMetaString("avro.codec");
    //compCodecName should be null, deflate, snappy, or bzip2
    if (compCodecName == null) {
        compCodecName = "deflate"; //set to deflate even though original is no compression
    }
    dfrFirstFile.close();

    //Create Empty HDFS file in output dir
    String seedFileStr = outDirStr + "/" + outputFilename;
    Path seedFile = new Path(seedFileStr);
    FSDataOutputStream hdfsdos = null;
    try {
        hdfsdos = hdfs.create(seedFile, false);
    } catch (org.apache.hadoop.fs.FileAlreadyExistsException faee) {
        if (handleExisting.equals("overwrite")) {
            hdfs.delete(seedFile, false);
            hdfsdos = hdfs.create(seedFile, false);
        } else if (handleExisting.equals("append")) {
            hdfsdos = hdfs.append(seedFile);
        } else {
            System.out
                    .println("File " + seedFileStr + " exists and will not overwrite. handleExisting is set to "
                            + handleExisting + ". Exiting.");
            System.exit(1);
        }
    }
    if (hdfsdos == null) {
        System.out.println("Unable to create or write to output file ( " + seedFileStr
                + " ). handleExisting is set to " + handleExisting + ". Exiting.");
        System.exit(1);
    }

    //Append other files
    GenericDatumWriter gdw = new GenericDatumWriter(fileSchema);
    DataFileWriter dfwBase = new DataFileWriter(gdw);
    //Set compression to that found in the first file
    dfwBase.setCodec(CodecFactory.fromString(compCodecName));

    DataFileWriter dfw = dfwBase.create(fileSchema, hdfsdos);
    for (FileStatus thisFileStatus : inputFileList) {

        //_SUCCESS files are 0 bytes
        if (thisFileStatus.getLen() == 0) {
            continue;
        }

        FsInput fsin1 = new FsInput(thisFileStatus.getPath(), conf);
        DataFileReader dfr = new DataFileReader<Object>(fsin1, new GenericDatumReader<Object>());

        dfw.appendAllFrom(dfr, false);

        dfr.close();
    }

    dfw.close();
    dfwBase.close();

}