Example usage for org.apache.hadoop.fs FileSystem delete

Introduction

This page collects example usages of org.apache.hadoop.fs.FileSystem#delete.

Prototype

public abstract boolean delete(Path f, boolean recursive) throws IOException;

Document

Delete a file. If f refers to a directory, it is deleted (together with its contents) only when recursive is true; deleting a non-empty directory with recursive set to false throws an IOException. For a regular file the recursive flag has no effect. The call returns true if the delete succeeded and false otherwise, for example when the path does not exist.
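
A minimal sketch of a typical call; the Configuration setup and the path "/tmp/example-dir" are illustrative, not taken from the examples below:

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dir = new Path("/tmp/example-dir");
    // Recursively delete the directory if it is present.
    if (fs.exists(dir) && !fs.delete(dir, true)) {
        throw new IOException("Failed to delete " + dir);
    }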

Usage

From source file:BwaInterpreter.java

License:Open Source License

/**
 * Runs BWA with the specified options.
 * @brief This function runs BWA with the input data and options selected by the user.
 */
public void RunBwa() {
    LOG.info("JMAbuin:: Starting BWA");
    Bwa bwa = new Bwa(this.options);

    List<String> returnedValues;
    if (bwa.isPairedReads()) {
        JavaRDD<Tuple2<String, String>> readsRDD = handlePairedReadsSorting();
        returnedValues = MapPairedBwa(bwa, readsRDD);
    } else {
        JavaRDD<String> readsRDD = handleSingleReadsSorting();
        returnedValues = MapSingleBwa(bwa, readsRDD);
    }

    LOG.info("BwaRDD :: Total of returned lines from RDDs :: " + returnedValues.size());

    // When a reducer is used, the final output has to be combined into a single file
    if (bwa.isUseReducer()) {
        combineOutputSamFiles(bwa.getOutputHdfsDir(), returnedValues);
    } else {
        for (String outputFile : returnedValues) {
            LOG.info("JMAbuin:: SparkBWA:: Returned file ::" + outputFile);
        }
    }

    // After execution, delete the temporary input file if it exists
    try {
        if ((this.inputTmpFileName != null) && (!this.inputTmpFileName.isEmpty())) {
            FileSystem fs = FileSystem.get(this.conf);

            fs.delete(new Path(this.inputTmpFileName), true);

            fs.close();
        }

    } catch (IOException e) {
        // Failure to delete the temp file is logged, not treated as fatal.
        LOG.error(e.toString());
    }
}
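
Note that FileSystem.get(conf) hands back a cached instance that is shared process-wide by default, so the fs.close() above can break other code still holding the same object. A sketch of the same cleanup without the close, checking the boolean result of delete instead (same fields as above assumed):

    FileSystem fs = FileSystem.get(this.conf);
    // delete returns false when nothing was removed; log that rather
    // than closing the shared FileSystem instance.
    if (!fs.delete(new Path(this.inputTmpFileName), true)) {
        LOG.warn("Temporary input " + this.inputTmpFileName + " was not deleted");
    }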

From source file:SingleFileWriter.java

License:Apache License

public int run(String[] args) throws Exception {
    if (args.length < 1) {
        System.err.println("SingleFileWriter [fileSize ie. 1g/10g/100g]");
        return 1;
    }

    double fileSize = Double.parseDouble((args[0].split("g|G"))[0]) * 1024 * 1024 * 1024;

    String hdfsFolder = "/hdfs_test/";
    String hdfsFile = hdfsFolder + args[0];
    short replication = 1;
    boolean overWrite = true;
    int bufferSize = 65536;
    int blockSize = 536870912;
    double numIters = fileSize / (double) bufferSize;

    /* Initialize byte buffer */
    ByteBuffer buf = ByteBuffer.allocate(bufferSize);
    buf.order(ByteOrder.nativeOrder());
    // Integer.BYTES (4), not Integer.SIZE (32): each putInt consumes 4 bytes.
    for (int k = 0; k < bufferSize / Integer.BYTES; k++) {
        buf.putInt(k);
    }
    buf.flip();

    /* Create file on HDFS */
    Configuration conf = getConf();
    FileSystem fs = FileSystem.get(conf);
    Path hdfsFilePath = new Path(hdfsFile);
    OutputStream os = fs.create(hdfsFilePath, overWrite, bufferSize, replication, blockSize);
    /* Write the content of the byte buffer to the HDFS file */
    Timer t = new Timer();
    t.start(0);
    for (long i = 0; i < numIters; i++) {
        os.write(buf.array());
        buf.flip();
    }
    t.end(0);
    os.close();
    fs.delete(hdfsFilePath, true);

    t.dump();

    return 0;
}
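
Because hdfsFilePath is a regular file here, the recursive flag has no effect; fs.delete(hdfsFilePath, false) would behave identically. For benchmark-style temporaries, registering the file for deferred cleanup is an alternative, sketched here:

    // Alternative: have the file removed when the FileSystem is closed,
    // instead of deleting it explicitly after the timing run.
    fs.deleteOnExit(hdfsFilePath);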

From source file:ColumnStorageBasicTest.java

License:Open Source License

public void testLoadNaviFromHead() {
    try {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path(prefix);

        fs.delete(path, true);
        createAllSingleProject(fs);

        ColumnProject cp = new ColumnProject(conf);
        cp.loadColmnInfoFromHeadInfo(fs, path);

        checkAllColumnInfo(cp.infos());
    } catch (Exception e) {
        e.printStackTrace();
        fail("get exception:" + e.getMessage());
    }
}
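
The unconditional fs.delete(path, true) works as test cleanup because delete does not throw when the path is missing; it simply returns false. A minimal illustration of that contract:

    // Safe even on the first run: a missing path yields false, not an exception.
    boolean removed = fs.delete(path, true);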

From source file:ColumnStorageBasicTest.java

License:Open Source License

public void testConstructorFieldNoExist() {
    try {
        Configuration conf = new Configuration();
        Path path = new Path(prefix);

        FileSystem fs = FileSystem.get(conf);
        fs.delete(path, true);

        createAllSingleProject(fs);
        createMultiProject(fs);

        ArrayList<Short> idxs = new ArrayList<Short>(10);
        idxs.add((short) 10);
        ColumnStorageClient client = new ColumnStorageClient(path, idxs, conf);

        fail("should get exception");
    } catch (SEException.InvalidParameterException e) {
        // Expected: the requested field does not exist.
    } catch (Exception e) {
        e.printStackTrace();
        fail("get exception:" + e.getMessage());
    }
}

From source file:AggregatedLogsPurger.java

License:Apache License

public boolean purge() throws IOException {
    LocalDateTime now = LocalDateTime.now();
    LocalDateTime deleteLogsOlderThanTime = now.minusDays(deleteOlderThanDays);

    //Identify which log dirs should be deleted
    FileSystem fs = rootLogDir.getFileSystem(conf);
    try {

        long totalBytes = 0;
        for (FileStatus userDir : fs.listStatus(rootLogDir)) {
            if (userDir.isDirectory()) {
                Path userDirPath = new Path(userDir.getPath(), suffix);
                System.out.println("Checking for userDir : " + userDirPath);
                for (FileStatus appDir : fs.listStatus(userDirPath)) {
                    LocalDateTime appDirDate = getAppDirDateTime(appDir.getModificationTime());
                    if (appDirDate.isBefore(deleteLogsOlderThanTime)) {
                        long size = getLengthRecursively(fs, appDir.getPath());
                        System.out.println(appDir.getPath() + ", " + appDir.getOwner() + ", "
                                + appDirDate.toString() + ", size=" + size);
                        totalBytes += size;
                        if (shouldDelete) {
                            System.out.println("Deleting " + appDir.getPath());
                            fs.delete(appDir.getPath(), true);
                        }
                    }
                }
            }
        }
        System.out.println("Savings : " + totalBytes);
    } catch (IOException e) {
        e.printStackTrace();
        return false;
    } finally {
        fs.close();
    }
    return true;
}
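
Two details worth noting. First, the finally block closes a FileSystem instance that Hadoop caches and shares by default, which can surprise other callers in the same process. Second, a purger like this is often safer moving data to the HDFS trash than deleting it outright; a sketch using Hadoop's Trash helper, with the same fs, conf, and appDir in scope:

    // Move the aggregated-log directory to the owning user's trash so an
    // accidental purge can be recovered (requires fs.trash.interval > 0).
    org.apache.hadoop.fs.Trash.moveToAppropriateTrash(fs, appDir.getPath(), conf);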

From source file:LobFileStressTest.java

License:Apache License

private void writeIntegerFile(boolean compress) throws Exception {
    boolean passed = false;
    try {
        System.out.print("Writing integers file. compress=" + compress + ". ");
        Path p = getPath(compress);
        FileSystem fs = FileSystem.getLocal(conf);
        if (fs.exists(p)) {
            fs.delete(p, false);
        }
        String codecName = compress ? "deflate" : null;
        LobFile.Writer w = LobFile.create(p, conf, false, codecName);

        int numRecords = getNumRecords(compress);
        for (int i = 0; i < numRecords; i++) {
            setLastRecordPos(w.tell(), compress);
            OutputStream os = w.writeBlobRecord(0);
            DataOutputStream dos = new DataOutputStream(os);
            dos.writeInt(i);
            dos.close();
            os.close();
        }

        w.close();
        System.out.println("PASS");
        passed = true;
    } finally {
        if (!passed) {
            allPassed = false;
            System.out.println("FAIL");
        }
    }
}

From source file:LobFileStressTest.java

License:Apache License

private void testBigFile(boolean compress) throws Exception {
    // Write a file containing five 5 GB records.

    final int NUM_RECORDS = 5;
    boolean passed = false;

    try {
        System.out.print("Testing large file operations. compress=" + compress + ". ");

        Path p = getBigFilePath(compress);
        long[] startOffsets = new long[NUM_RECORDS];

        // Write the file. Five records, 5 GB a piece.
        System.out.print("Testing write. ");
        FileSystem fs = FileSystem.getLocal(conf);
        if (fs.exists(p)) {
            fs.delete(p, false);
        }
        String codecName = compress ? "deflate" : null;
        System.out.println("record size: " + LARGE_RECORD_LEN);
        LobFile.Writer w = LobFile.create(p, conf, false, codecName);
        for (int i = 0; i < NUM_RECORDS; i++) {
            startOffsets[i] = w.tell();
            System.out.println("Starting record " + i + " at " + startOffsets[i]);
            OutputStream os = w.writeBlobRecord(0);
            for (long v = 0; v < LARGE_RECORD_LEN; v++) {
                long byteVal = (((long) i) + v) & 0xFF;
                os.write((int) byteVal);
            }
            os.close();
        }
        w.close();
        System.out.println("PASS");

        // Iterate past three records, read the fourth.
        System.out.print("Testing iterated skipping. ");
        LobFile.Reader r = LobFile.open(p, conf);
        for (int i = 0; i < 4; i++) {
            r.next();
        }

        checkBigRecord(r, 3);
        System.out.println("PASS");

        // Seek directly to record 2, read it through.
        System.out.print("Testing large backward seek. ");
        r.seek(startOffsets[2]);
        r.next();
        checkBigRecord(r, 2);
        System.out.println("PASS");

        passed = true;
    } finally {
        if (!passed) {
            allPassed = false;
            System.out.println("FAIL");
        }
    }
}

From source file:FormatStorage2ColumnStorageMR.java

License:Open Source License

@SuppressWarnings("deprecation")
public static void main(String[] args) throws Exception {

    if (args.length != 2) {
        System.out.println("FormatStorage2ColumnStorageMR <input> <output>");
        System.exit(-1);
    }

    JobConf conf = new JobConf(FormatStorageMR.class);

    conf.setJobName("FormatStorage2ColumnStorageMR");

    conf.setNumMapTasks(1);
    conf.setNumReduceTasks(4);

    conf.setOutputKeyClass(LongWritable.class);
    conf.setOutputValueClass(Unit.Record.class);

    conf.setMapperClass(FormatStorageMapper.class);
    conf.setReducerClass(ColumnStorageReducer.class);

    conf.setInputFormat(FormatStorageInputFormat.class);
    conf.set("mapred.output.compress", "flase");

    Head head = new Head();
    initHead(head);

    head.toJobConf(conf);

    FileInputFormat.setInputPaths(conf, args[0]);
    Path outputPath = new Path(args[1]);
    FileOutputFormat.setOutputPath(conf, outputPath);

    FileSystem fs = outputPath.getFileSystem(conf);
    fs.delete(outputPath, true);

    JobClient jc = new JobClient(conf);
    RunningJob rj = jc.submitJob(conf);

    String lastReport = "";
    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
    long reportTime = System.currentTimeMillis();
    long maxReportInterval = 3 * 1000;
    while (!rj.isComplete()) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            // Ignore; the loop re-checks job status on the next iteration.
        }

        int mapProgress = Math.round(rj.mapProgress() * 100);
        int reduceProgress = Math.round(rj.reduceProgress() * 100);

        String report = " map = " + mapProgress + "%,  reduce = " + reduceProgress + "%";

        if (!report.equals(lastReport) || System.currentTimeMillis() >= reportTime + maxReportInterval) {

            String output = dateFormat.format(Calendar.getInstance().getTime()) + report;
            System.out.println(output);
            lastReport = report;
            reportTime = System.currentTimeMillis();
        }
    }

    System.exit(0);

}
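
The fs.delete(outputPath, true) before submission is the usual guard for MapReduce jobs, because FileOutputFormat refuses to start a job whose output directory already exists. The same guard with an explicit existence check, as a sketch:

    // Remove a stale output directory up front; FileOutputFormat fails
    // fast if the output path is still present at submit time.
    if (fs.exists(outputPath) && !fs.delete(outputPath, true)) {
        throw new IOException("Could not delete stale output " + outputPath);
    }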

From source file:HoodieJavaStreamingApp.java

License:Apache License

/**
 * @throws Exception
 */
public void run() throws Exception {
    // Spark session setup.
    SparkSession spark = SparkSession.builder().appName("Hoodie Spark Streaming APP")
            .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer").master("local[1]")
            .getOrCreate();
    JavaSparkContext jssc = new JavaSparkContext(spark.sparkContext());

    // folder path clean up and creation, preparing the environment
    FileSystem fs = FileSystem.get(jssc.hadoopConfiguration());
    fs.delete(new Path(streamingSourcePath), true);
    fs.delete(new Path(streamingCheckpointingPath), true);
    fs.delete(new Path(tablePath), true);
    fs.mkdirs(new Path(streamingSourcePath));

    // Generator of some records to be loaded in.
    HoodieTestDataGenerator dataGen = new HoodieTestDataGenerator();

    List<String> records1 = DataSourceTestUtils.convertToStringList(dataGen.generateInserts("001", 100));
    Dataset<Row> inputDF1 = spark.read().json(jssc.parallelize(records1, 2));

    List<String> records2 = DataSourceTestUtils.convertToStringList(dataGen.generateUpdates("002", 100));

    Dataset<Row> inputDF2 = spark.read().json(jssc.parallelize(records2, 2));

    // setup the input for streaming
    Dataset<Row> streamingInput = spark.readStream().schema(inputDF1.schema()).json(streamingSourcePath);

    // start streaming and showing
    ExecutorService executor = Executors.newFixedThreadPool(2);

    // thread for Spark structured streaming
    Future<Void> streamFuture = executor.submit(new Callable<Void>() {
        public Void call() throws Exception {
            logger.info("===== Streaming Starting =====");
            stream(streamingInput);
            logger.info("===== Streaming Ends =====");
            return null;
        }
    });

    // thread for adding data to the streaming source and showing results over time
    Future<Void> showFuture = executor.submit(new Callable<Void>() {
        public Void call() throws Exception {
            logger.info("===== Showing Starting =====");
            show(spark, fs, inputDF1, inputDF2);
            logger.info("===== Showing Ends =====");
            return null;
        }
    });

    // let the threads run
    streamFuture.get();
    showFuture.get();

    executor.shutdown();
}
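
The three back-to-back deletes at the top of run() could be factored into a small helper; the deleteAll method below is hypothetical, not part of the original app:

    // Hypothetical helper: recursively delete each path before a run.
    private static void deleteAll(FileSystem fs, String... paths) throws IOException {
        for (String p : paths) {
            fs.delete(new Path(p), true);
        }
    }

It would be called as deleteAll(fs, streamingSourcePath, streamingCheckpointingPath, tablePath), followed by the mkdirs call as in the original.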

From source file:alluxio.hadoop.FileSystemAclIntegrationTest.java

License:Apache License

public static void cleanup(org.apache.hadoop.fs.FileSystem fs) throws IOException {
    FileStatus[] statuses = fs.listStatus(new Path("/"));
    for (FileStatus f : statuses) {
        fs.delete(f.getPath(), true);
    }
}