Example usage for org.apache.hadoop.fs FileSystem deleteOnExit

Introduction

This page collects usage examples for org.apache.hadoop.fs.FileSystem#deleteOnExit.

Prototype

public boolean deleteOnExit(Path f) throws IOException

Document

Marks a path to be deleted when the FileSystem is closed or when the JVM exits.
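
The method marks the given path for removal when close() is called on the FileSystem instance, or on normal JVM shutdown, when Hadoop's shutdown hook closes cached FileSystem instances and processes the deferred deletes. A minimal sketch of the typical pattern (the class name and scratch path are illustrative, not taken from the examples below):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteOnExitSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // illustrative scratch location; any writable path works
        Path scratch = new Path("/tmp/scratch-" + System.nanoTime());
        fs.mkdirs(scratch);

        // register the path once it exists; deleteOnExit returns false
        // (and registers nothing) if the path is absent at call time
        boolean registered = fs.deleteOnExit(scratch);
        System.out.println("registered for deletion: " + registered);

        // closing the FileSystem (or normal JVM shutdown) removes the path
        fs.close();
    }
}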

Usage

From source file: org.apache.pirk.utils.HDFS.java

License: Apache License

public static void writeFileIntegers(List<Integer> elements, FileSystem fs, Path path, boolean deleteOnExit) {
    try {
        // create writer
        BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(fs.create(path, true)));

        // write each element on a new line
        for (Integer element : elements) {
            bw.write(element.toString());
            bw.newLine();
        }
        bw.close();

        // delete file once the filesystem is closed
        if (deleteOnExit) {
            fs.deleteOnExit(path);
        }
    } catch (IOException e) {
        e.printStackTrace();
    }

}

From source file: org.apache.tajo.engine.query.TestCreateIndex.java

License: Apache License

private static void assertIndexNotExist(String databaseName, String indexName) throws IOException {
    Path indexPath = new Path(conf.getVar(ConfVars.WAREHOUSE_DIR), databaseName + "/" + indexName);
    FileSystem fs = indexPath.getFileSystem(conf);
    if (fs.exists(indexPath)) {
        // the index should already have been removed; mark it for cleanup and fail
        fs.deleteOnExit(indexPath);
        assertFalse("Index is not deleted from the file system.", true);
    }
}

From source file: org.culturegraph.mf.cluster.job.merge.Union.java

License: Apache License

private String makeTmp() throws IOException {
    final String temporary = "tmp/unionfind/" + UUID.randomUUID() + "/";
    final Path tmp = new Path(temporary);
    final FileSystem fileSys = tmp.getFileSystem(getConf());
    fileSys.mkdirs(tmp);
    fileSys.deleteOnExit(tmp);
    return temporary;
}

From source file: org.culturegraph.mf.cluster.util.MapReduceUtil.java

License: Apache License

public static String makeTmp(final Configuration configuration) throws IOException {
    final String temporary = "tmp/" + UUID.randomUUID() + "/";
    final Path tmp = new Path(temporary);
    final FileSystem fileSystem = tmp.getFileSystem(configuration);
    fileSystem.mkdirs(tmp);
    fileSystem.deleteOnExit(tmp);
    return temporary;
}

From source file: org.deeplearning4j.hadoop.util.HdfsUtils.java

License: Apache License

public static Path makeTemporaryFile(Configuration jobConf, String filename) throws IOException {
    final int randomKey = jobConf.getInt("terrier.tempfile.id", random.nextInt());
    jobConf.setInt("terrier.tempfile.id", randomKey);
    FileSystem defFS = FileSystem.get(jobConf);
    final Path tempFile = new Path("/tmp/" + (randomKey) + "-" + filename);
    defFS.deleteOnExit(tempFile);
    return tempFile;
}
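
A caveat on ordering: Hadoop's stock FileSystem#deleteOnExit first checks that the path exists and returns false, registering nothing, when it does not. Marking a temporary path before anything has been written to it, as above, can therefore be a silent no-op. A sketch of the safer ordering, as a hypothetical variant that reuses the names from the example:

// safer variant of makeTemporaryFile: create the file before registering it
public static Path makeTemporaryFileSafely(Configuration jobConf, String filename) throws IOException {
    FileSystem defFS = FileSystem.get(jobConf);
    Path tempFile = new Path("/tmp/" + random.nextInt() + "-" + filename);
    defFS.create(tempFile).close(); // create an empty file so the path exists
    defFS.deleteOnExit(tempFile);   // the path now exists, so registration takes effect
    return tempFile;
}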

From source file: org.dutir.lucene.io.HadoopPlugin.java

License: Mozilla Public License

public void initialise() throws Exception {
    config = getGlobalConfiguration();

    final org.apache.hadoop.fs.FileSystem DFS = hadoopFS = org.apache.hadoop.fs.FileSystem.get(config);

    FileSystem terrierDFS = new FileSystem() {
        public String name() {
            return "hdfs";
        }

        /** capabilities of the filesystem */
        public byte capabilities() {
            return FSCapability.READ | FSCapability.WRITE | FSCapability.RANDOM_READ | FSCapability.STAT
                    | FSCapability.DEL_ON_EXIT | FSCapability.LS_DIR;
        }

        public String[] schemes() {
            return new String[] { "dfs", "hdfs" };
        }

        /** returns true if the path exists */
        public boolean exists(String filename) throws IOException {
            if (logger.isDebugEnabled())
                logger.debug("Checking that " + filename + " exists answer=" + DFS.exists(new Path(filename)));
            return DFS.exists(new Path(filename));
        }

        /** open a file of given filename for reading */
        public InputStream openFileStream(String filename) throws IOException {
            if (logger.isDebugEnabled())
                logger.debug("Opening " + filename);
            return DFS.open(new Path(filename));
        }

        /** open a file of given filename for writing */
        public OutputStream writeFileStream(String filename) throws IOException {
            if (logger.isDebugEnabled())
                logger.debug("Creating " + filename);
            return DFS.create(new Path(filename));
        }

        public boolean mkdir(String filename) throws IOException {
            return DFS.mkdirs(new Path(filename));
        }

        public RandomDataOutput writeFileRandom(String filename) throws IOException {
            throw new IOException("HDFS does not support random writing");
        }

        public RandomDataInput openFileRandom(String filename) throws IOException {
            return new HadoopFSRandomAccessFile(DFS, filename);
        }

        public boolean delete(String filename) throws IOException {
            return DFS.delete(new Path(filename), true);
        }

        public boolean deleteOnExit(String filename) throws IOException {
            return DFS.deleteOnExit(new Path(filename));
        }

        public String[] list(String path) throws IOException {
            final FileStatus[] contents = DFS.listStatus(new Path(path));
            final String[] names = new String[contents.length];
            for (int i = 0; i < contents.length; i++) {
                names[i] = contents[i].getPath().getName();
            }
            return names;
        }

        public String getParent(String path) throws IOException {
            return new Path(path).getParent().getName();
        }

        public boolean rename(String source, String destination) throws IOException {
            return DFS.rename(new Path(source), new Path(destination));
        }

        public boolean isDirectory(String path) throws IOException {
            return DFS.getFileStatus(new Path(path)).isDir();
        }

        public long length(String path) throws IOException {
            return DFS.getFileStatus(new Path(path)).getLen();
        }

        public boolean canWrite(String path) throws IOException {
            return DFS.getFileStatus(new Path(path)).getPermission().getUserAction().implies(FsAction.WRITE);
        }

        public boolean canRead(String path) throws IOException {
            return DFS.getFileStatus(new Path(path)).getPermission().getUserAction().implies(FsAction.READ);
        }
    };
    Files.addFileSystemCapability(terrierDFS);
}
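
This example is an adapter rather than a direct call site: the anonymous inner class bridges Hadoop's org.apache.hadoop.fs.FileSystem to the application's own FileSystem abstraction, forwarding deleteOnExit(String) to DFS.deleteOnExit(new Path(filename)) and advertising the support through the FSCapability.DEL_ON_EXIT capability flag, so callers can test for it before relying on deferred deletion.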

From source file: org.dutir.lucene.io.HadoopUtility.java

License: Mozilla Public License

protected static Path makeTemporaryFile(JobConf jobConf, String filename) throws IOException {
    FileSystem defFS = FileSystem.get(jobConf);
    Path tempFile = new Path("/tmp/" + (random.nextInt()) + "-" + filename);
    defFS.deleteOnExit(tempFile);
    return tempFile;
}

From source file: org.dutir.lucene.io.HadoopUtility.java

License: Mozilla Public License

protected static void saveApplicationSetupToJob(JobConf jobConf, boolean getFreshProperties) throws Exception {
    // Do we load a fresh properties File?
    //TODO fix, if necessary
    //if (getFreshProperties)
    //   loadApplicationSetup(new Path(ApplicationSetup.TERRIER_HOME));

    FileSystem remoteFS = FileSystem.get(jobConf);
    URI remoteFSURI = remoteFS.getUri();
    //make a copy of the current application setup properties, these may be amended
    //as some files are more globally accessible
    final Properties propertiesDuringJob = new Properties();
    Properties appProperties = ApplicationSetup.getProperties();
    for (Object _key : appProperties.keySet()) {
        String key = (String) _key;
        propertiesDuringJob.put(key, appProperties.get(key));
    }

    //the share folder is needed during indexing, save this on DFS
    if (Files.getFileSystemName(ApplicationSetup.LUCENE_SHARE).equals("local")) {
        Path tempTRShare = makeTemporaryFile(jobConf, "terrier.share");
        propertiesDuringJob.setProperty("terrier.share", remoteFSURI.resolve(tempTRShare.toUri()).toString());
        logger.info("Copying terrier share/ directory to shared storage area ("
                + remoteFSURI.resolve(tempTRShare.toUri()).toString() + ")");
        FileUtil.copy(FileSystem.getLocal(jobConf), new Path(ApplicationSetup.LUCENE_SHARE), remoteFS,
                tempTRShare, false, false, jobConf);
    }

    //copy the terrier.properties content over
    Path tempTRProperties = makeTemporaryFile(jobConf, "terrier.properties");
    logger.debug("Writing terrier properties out to DFS " + tempTRProperties.toString());
    OutputStream out = remoteFS.create(tempTRProperties);
    remoteFS.deleteOnExit(tempTRProperties);
    propertiesDuringJob.store(out, "Automatically generated by HadoopPlugin.saveApplicationSetupToJob()");
    out.close();
    out = null;
    DistributedCache.addCacheFile(tempTRProperties.toUri().resolve(new URI("#terrier.properties")), jobConf);
    DistributedCache.createSymlink(jobConf);

    //copy the non-JVM system properties over as well
    Path tempSysProperties = makeTemporaryFile(jobConf, "system.properties");
    DataOutputStream dos = FileSystem.get(jobConf).create(tempSysProperties);
    logger.debug("Writing system properties out to DFS " + tempSysProperties.toString());
    for (Object _propertyKey : System.getProperties().keySet()) {
        String propertyKey = (String) _propertyKey;
        if (!startsWithAny(propertyKey, checkSystemProperties)) {
            dos.writeUTF(propertyKey);
            dos.writeUTF(System.getProperty(propertyKey));
        }
    }
    dos.writeUTF("FIN");
    dos.close();
    dos = null;
    DistributedCache.addCacheFile(tempSysProperties.toUri().resolve(new URI("#system.properties")), jobConf);
}

From source file: org.godhuli.rhipe.FileUtils.java

License: Apache License

public void makeFolderToDelete(String s) throws IOException {
    Path p = new Path(s);
    FileSystem fs = FileSystem.get(cfg);
    fs.mkdirs(p);
    fs.deleteOnExit(p);
}

From source file: org.kitesdk.tools.JobClasspathHelper.java

License: Apache License

/**
 * @param fs
 *            File system where to upload the jar.
 * @param localJarPath
 *            The local path where we find the jar.
 * @param md5sum
 *            The MD5 sum of the local jar.
 * @param remoteJarPath
 *            The remote path where to upload the jar.
 * @param remoteMd5Path
 *            The remote path where to create the MD5 file.
 * 
 * @throws IOException
 */
private void copyJarToHDFS(FileSystem fs, Path localJarPath, String md5sum, Path remoteJarPath,
        Path remoteMd5Path) throws IOException {

    LOG.info("Copying {} to {}", localJarPath.toUri().toASCIIString(), remoteJarPath.toUri().toASCIIString());
    fs.copyFromLocalFile(localJarPath, remoteJarPath);
    // create the MD5 file for this jar.
    createMd5SumFile(fs, md5sum, remoteMd5Path);

    // we need to clean the tmp files that are created by JarFinder after the JVM exits.
    if (remoteJarPath.getName().startsWith(JarFinder.TMP_HADOOP)) {
        fs.deleteOnExit(remoteJarPath);
    }
    // same for the MD5 file.
    if (remoteMd5Path.getName().startsWith(JarFinder.TMP_HADOOP)) {
        fs.deleteOnExit(remoteMd5Path);
    }
}