Example usage for org.apache.hadoop.fs FileSystem delete

Introduction

This page lists usage examples for org.apache.hadoop.fs.FileSystem#delete.

Prototype

public abstract boolean delete(Path f, boolean recursive) throws IOException;

Document

Delete a file. If f is a directory and recursive is true, the directory and all of its contents are deleted; deleting a non-empty directory with recursive set to false throws an IOException. For a plain file the recursive flag has no effect. Returns true if the delete succeeded, false otherwise.
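
Before the full examples, here is a minimal sketch of the two recursive modes. The URI and paths are placeholders (a local HDFS at hdfs://localhost:9000, which the examples below also use):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteExample {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new URI("hdfs://localhost:9000"), new Configuration());

        // recursive = false: deletes a file or an empty directory;
        // throws IOException if the directory is non-empty
        boolean deletedFile = fs.delete(new Path("/tmp/example.txt"), false);

        // recursive = true: deletes a directory and everything under it
        boolean deletedDir = fs.delete(new Path("/tmp/example-dir"), true);

        System.out.println("file deleted: " + deletedFile + ", directory deleted: " + deletedDir);
        fs.close();
    }
}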

Usage

From source file:com.ema.hadoop.test_hdfs.TestWrite.java

public static void main(String[] args) throws IOException, URISyntaxException {

    Configuration configuration = new Configuration();
    FileSystem hdfs = FileSystem.get(new URI("hdfs://localhost:9000"), configuration);
    Path file = new Path("hdfs://localhost:9000/user/student/text_file_write.txt");
    if (hdfs.exists(file)) {
        hdfs.delete(file, true);
    }
    OutputStream os = hdfs.create(file, new Progressable() {
        @Override
        public void progress() {
            System.out.println("...bytes written");
        }
    });
    BufferedWriter br = new BufferedWriter(new OutputStreamWriter(os, "UTF-8"));
    br.write("This is just a test to check if it is possible to write a file on HDFS using the Java API");
    br.close();
    hdfs.close();

}
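
Note that FileSystem#create overwrites an existing file by default, so the exists/delete check above is not strictly required; it does make the removal of any previous file explicit. Calling hdfs.create(file, true), where the boolean is the overwrite flag, would achieve the same effect without the Progressable callback.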

From source file:com.ema.hadoop.wordcount.WordCount_cache.java

public static void main(String[] args) throws Exception {

    if (args.length != 2) {
        System.err.println("Usage: WordCount_cache <input path> <output path>");
        System.exit(-1);
    }

    // First we write the stop-word list;
    // it could also be a file loaded into HDFS manually

    String[] stopwords = { "the", "a" };
    Configuration configuration = new Configuration();
    FileSystem hdfs = FileSystem.get(new URI("hdfs://localhost:9000"), configuration);
    Path file = new Path("hdfs://localhost:9000/user/student/stop_words.txt");
    if (hdfs.exists(file)) {
        hdfs.delete(file, true);
    }
    OutputStream os = hdfs.create(file, new Progressable() {
        @Override
        public void progress() {
            System.out.println("...bytes written");
        }
    });
    BufferedWriter br = new BufferedWriter(new OutputStreamWriter(os, "UTF-8"));
    for (String w : stopwords) {
        br.write(w + "\n");
    }

    br.close();
    hdfs.close();

    Job job = Job.getInstance();
    job.addCacheFile(new Path("hdfs://localhost:9000/user/student/stop_words.txt").toUri());

    job.setJarByClass(WordCount_cache.class);
    job.setJobName("Word count job");

    FileInputFormat.setInputPaths(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    job.setMapperClass(WCMapper_cache.class);
    job.setReducerClass(WCReducer.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    System.exit(job.waitForCompletion(true) ? 0 : 1);
}

From source file:com.example.sparkservice.SparkService.java

/**
 * Deletes the directory in which the previously trained model is saved.
 * The server removes this directory before running a new training job.
 * @return true if the deletion succeeds;
 *         false if an exception is raised.
 */
public boolean deleteModel() {
    // Hadoop configuration
    Configuration config = new Configuration();
    try {
        // HDFS file system
        FileSystem fs = FileSystem.get(URI.create(PATH_MODEL), config);
        fs.delete(new Path(PATH_MODEL), true);
        fs.close();
    } catch (IOException ex) {
        Logger.getLogger(SparkService.class.getName()).log(Level.SEVERE, null, ex);
        return false;
    }
    return true;
}
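
One caveat on the fs.close() call: by default FileSystem.get returns a cached, JVM-wide instance per URI and user, so closing it can break other code in the same process that shares the instance. FileSystem.newInstance returns a private instance if that is a concern.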

From source file:com.facebook.hiveio.common.FileSystems.java

License:Apache License

/**
 * Move a file or directory from source to destination, recursively copying
 * subdirectories.
 *
 * @param fs FileSystem
 * @param file path to copy (file or directory)
 * @param src path to source directory
 * @param dest path to destination directory
 * @throws IOException I/O problems
 */
public static void move(FileSystem fs, Path file, Path src, Path dest) throws IOException {
    Path destFilePath = pathInDestination(file, src, dest);
    if (fs.isFile(file)) {
        if (fs.exists(destFilePath)) {
            if (!fs.delete(destFilePath, true)) {
                throw new IllegalArgumentException("Could not remove existing file " + destFilePath);
            }
        }
        if (!fs.rename(file, destFilePath)) {
            throw new IllegalArgumentException("Could not move " + file + " to " + destFilePath);
        }
    } else if (fs.getFileStatus(file).isDir()) {
        FileStatus[] statuses = fs.listStatus(file);
        fs.mkdirs(destFilePath);
        if (statuses != null) {
            for (FileStatus status : statuses) {
                move(fs, status.getPath(), src, dest);
            }
        }
    }
}

From source file:com.facebook.hiveio.output.HiveApiOutputCommitter.java

License:Apache License

/**
 * Table has no partitions, just copy data
 *
 * @param conf Configuration
 * @param outputInfo OutputInfo
 * @throws IOException I/O errors
 */
private void noPartitionsCopyData(Configuration conf, OutputInfo outputInfo) throws IOException {
    Preconditions.checkArgument(!outputInfo.hasPartitionInfo());
    Path tablePath = new Path(outputInfo.getTableRoot());
    Path writePath = new Path(outputInfo.getPartitionPath());
    FileSystem tableFs = tablePath.getFileSystem(conf);
    FileSystem writePathFs = writePath.getFileSystem(conf);
    if (!tableFs.getUri().equals(writePathFs.getUri())) {
        LOG.error("Table's root path fs {} is not on same as its partition path fs {}", tableFs.getUri(),
                writePathFs.getUri());
        throw new IllegalStateException("Table's root path fs " + tableFs.getUri()
                + " is not on same as its partition path fs " + writePathFs.getUri());
    }
    LOG.info("No partitions, copying data from {} to {}", writePath, tablePath);
    FileSystems.move(tableFs, writePath, writePath, tablePath);
    tableFs.delete(writePath, true);
}

From source file:com.facebook.LinkBench.LinkBenchDriverMR.java

License:Apache License

/**
 * main routine of the LOAD phase
 */
private void load() throws IOException, InterruptedException {
    boolean loaddata = (!props.containsKey(Config.LOAD_DATA)) || ConfigUtil.getBool(props, Config.LOAD_DATA);
    if (!loaddata) {
        logger.info("Skipping load data per the config");
        return;
    }

    int nloaders = ConfigUtil.getInt(props, Config.NUM_LOADERS);
    final JobConf jobconf = createJobConf(LOAD, nloaders);
    FileSystem fs = setupInputFiles(jobconf, nloaders);

    try {
        logger.info("Starting loaders " + nloaders);
        final long starttime = System.currentTimeMillis();
        JobClient.runJob(jobconf);
        long loadtime = (System.currentTimeMillis() - starttime);

        // compute total #links loaded
        long maxid1 = ConfigUtil.getLong(props, Config.MAX_ID);
        long startid1 = ConfigUtil.getLong(props, Config.MIN_ID);
        int nlinks_default = ConfigUtil.getInt(props, Config.NLINKS_DEFAULT);
        long expectedlinks = (1 + nlinks_default) * (maxid1 - startid1);
        long actuallinks = readOutput(fs, jobconf);

        logger.info("LOAD PHASE COMPLETED. Expected to load " + expectedlinks + " links. " + actuallinks
                + " loaded in " + (loadtime / 1000) + " seconds." + "Links/second = "
                + ((1000 * actuallinks) / loadtime));
    } finally {
        fs.delete(TMP_DIR, true);
    }
}

From source file:com.facebook.LinkBench.LinkBenchDriverMR.java

License:Apache License

/**
 * main routine of the REQUEST phase
 */
private void sendrequests() throws IOException, InterruptedException {
    // config info for requests
    int nrequesters = ConfigUtil.getInt(props, Config.NUM_REQUESTERS);
    final JobConf jobconf = createJobConf(REQUEST, nrequesters);
    FileSystem fs = setupInputFiles(jobconf, nrequesters);

    try {
        logger.info("Starting requesters " + nrequesters);
        final long starttime = System.currentTimeMillis();
        JobClient.runJob(jobconf);
        long endtime = System.currentTimeMillis();

        // request time in millis
        long requesttime = (endtime - starttime);
        long requestsdone = readOutput(fs, jobconf);

        logger.info("REQUEST PHASE COMPLETED. " + requestsdone + " requests done in " + (requesttime / 1000)
                + " seconds." + "Requests/second = " + (1000 * requestsdone) / requesttime);
    } finally {
        fs.delete(TMP_DIR, true);
    }
}

From source file:com.facebook.presto.hive.AbstractTestHiveFileSystem.java

License:Apache License

@Test
public void testRename() throws Exception {
    Path basePath = new Path(getBasePath(), UUID.randomUUID().toString());
    FileSystem fs = hdfsEnvironment.getFileSystem(TESTING_CONTEXT, basePath);
    assertFalse(fs.exists(basePath));

    // create file foo.txt
    Path path = new Path(basePath, "foo.txt");
    assertTrue(fs.createNewFile(path));
    assertTrue(fs.exists(path));

    // rename foo.txt to bar.txt when bar does not exist
    Path newPath = new Path(basePath, "bar.txt");
    assertFalse(fs.exists(newPath));
    assertTrue(fs.rename(path, newPath));
    assertFalse(fs.exists(path));
    assertTrue(fs.exists(newPath));

    // rename foo.txt to foo.txt when foo.txt does not exist
    assertFalse(fs.rename(path, path));

    // create file foo.txt and rename to existing bar.txt
    assertTrue(fs.createNewFile(path));
    assertFalse(fs.rename(path, newPath));

    // rename foo.txt to foo.txt when foo.txt exists
    assertFalse(fs.rename(path, path));

    // delete foo.txt
    assertTrue(fs.delete(path, false));
    assertFalse(fs.exists(path));

    // create directory source with file
    Path source = new Path(basePath, "source");
    assertTrue(fs.createNewFile(new Path(source, "test.txt")));

    // rename source to non-existing target
    Path target = new Path(basePath, "target");
    assertFalse(fs.exists(target));
    assertTrue(fs.rename(source, target));
    assertFalse(fs.exists(source));
    assertTrue(fs.exists(target));

    // create directory source with file
    assertTrue(fs.createNewFile(new Path(source, "test.txt")));

    // rename source to existing target
    assertTrue(fs.rename(source, target));
    assertFalse(fs.exists(source));
    target = new Path(target, "source");
    assertTrue(fs.exists(target));
    assertTrue(fs.exists(new Path(target, "test.txt")));

    // delete target
    target = new Path(basePath, "target");
    assertTrue(fs.exists(target));
    assertTrue(fs.delete(target, true));
    assertFalse(fs.exists(target));

    // cleanup
    fs.delete(basePath, true);
}

From source file:com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore.java

License:Apache License

/**
 * Attempts to remove the file or empty directory.
 *
 * @return true if the location no longer exists
 */
private static boolean deleteIfExists(FileSystem fileSystem, Path path, boolean recursive) {
    try {
        // attempt to delete the path
        if (fileSystem.delete(path, recursive)) {
            return true;
        }

        // delete failed
        // check if path still exists
        return !fileSystem.exists(path);
    } catch (FileNotFoundException ignored) {
        // path was already removed or never existed
        return true;
    } catch (IOException ignored) {
    }
    return false;
}
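
Note the design here: when delete returns false, the helper falls back to an exists check, so a path removed concurrently by another process still counts as success; only a path that verifiably remains reports failure.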

From source file:com.facebook.presto.hive.OrcFileWriterFactory.java

License:Apache License

@Override
public Optional<HiveFileWriter> createFileWriter(Path path, List<String> inputColumnNames,
        StorageFormat storageFormat, Properties schema, JobConf configuration, ConnectorSession session) {
    if (!HiveSessionProperties.isOrcOptimizedWriterEnabled(session)) {
        return Optional.empty();
    }

    boolean isDwrf;
    if (OrcOutputFormat.class.getName().equals(storageFormat.getOutputFormat())) {
        isDwrf = false;
    } else if (com.facebook.hive.orc.OrcOutputFormat.class.getName().equals(storageFormat.getOutputFormat())) {
        isDwrf = true;
    } else {
        return Optional.empty();
    }

    CompressionKind compression = getCompression(schema, configuration);

    // existing tables and partitions may have columns in a different order than the writer is providing, so build
    // an index to rearrange columns in the proper order
    List<String> fileColumnNames = Splitter.on(',').trimResults().omitEmptyStrings()
            .splitToList(schema.getProperty(META_TABLE_COLUMNS, ""));
    List<Type> fileColumnTypes = toHiveTypes(schema.getProperty(META_TABLE_COLUMN_TYPES, "")).stream()
            .map(hiveType -> hiveType.getType(typeManager)).collect(toList());

    int[] fileInputColumnIndexes = fileColumnNames.stream().mapToInt(inputColumnNames::indexOf).toArray();

    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(session.getUser(), path, configuration);
        OutputStream outputStream = fileSystem.create(path);

        Optional<Supplier<OrcDataSource>> validationInputFactory = Optional.empty();
        if (HiveSessionProperties.isOrcOptimizedWriterValidate(session)) {
            validationInputFactory = Optional.of(() -> {
                try {
                    return new HdfsOrcDataSource(new OrcDataSourceId(path.toString()),
                            fileSystem.getFileStatus(path).getLen(), getOrcMaxMergeDistance(session),
                            getOrcMaxBufferSize(session), getOrcStreamBufferSize(session), false,
                            fileSystem.open(path), stats);
                } catch (IOException e) {
                    throw new PrestoException(HIVE_WRITE_VALIDATION_FAILED, e);
                }
            });
        }

        Callable<Void> rollbackAction = () -> {
            fileSystem.delete(path, false);
            return null;
        };

        return Optional.of(new OrcFileWriter(outputStream, rollbackAction, isDwrf, fileColumnNames,
                fileColumnTypes, compression, fileInputColumnIndexes,
                ImmutableMap.<String, String>builder()
                        .put(HiveMetadata.PRESTO_VERSION_NAME, nodeVersion.toString())
                        .put(HiveMetadata.PRESTO_QUERY_ID_NAME, session.getQueryId()).build(),
                hiveStorageTimeZone, validationInputFactory));
    } catch (IOException e) {
        throw new PrestoException(HIVE_WRITER_OPEN_ERROR, "Error creating ORC file", e);
    }
}
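
Note that the rollback Callable deletes the partially written file with recursive set to false: the target is a single ORC file, so a non-recursive delete is sufficient and guards against accidentally removing a directory.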