Example usage for org.apache.hadoop.fs FileSystem delete

Introduction

This page collects example usages of the org.apache.hadoop.fs.FileSystem method delete(Path, boolean).

Prototype

public abstract boolean delete(Path f, boolean recursive) throws IOException;

Document

Delete a file or directory. If f refers to a directory and recursive is true, the directory and all of its contents are deleted; if recursive is false and the directory is non-empty, an IOException is thrown. Returns true if the delete succeeded, and false otherwise (for example, when the path does not exist).
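
A minimal, self-contained sketch of the call before the sourced examples below; the local-filesystem configuration and the path are illustrative assumptions, not taken from any of the sources:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemDeleteExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "file:///"); // illustrative: operate on the local filesystem
        FileSystem fs = FileSystem.get(conf);

        Path target = new Path("/tmp/example-output"); // illustrative path
        // Recursive delete: removes the directory and everything beneath it.
        // A false return means nothing was deleted (e.g. the path did not exist).
        boolean deleted = fs.delete(target, true);
        System.out.println("Deleted " + target + ": " + deleted);
    }
}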

Usage

From source file:com.cloudera.sqoop.mapreduce.db.TestDataDrivenDBInputFormat.java

License:Apache License

public void testDateSplits() throws Exception {
    Statement s = connection.createStatement();
    final String DATE_TABLE = "datetable";
    final String COL = "foo";
    try {
        try {
            // delete the table if it already exists.
            s.executeUpdate("DROP TABLE " + DATE_TABLE);
        } catch (SQLException e) {
            // Ignored; proceed regardless of whether we deleted the table;
            // it may have simply not existed.
        }

        // Create the table.
        s.executeUpdate("CREATE TABLE " + DATE_TABLE + "(" + COL + " TIMESTAMP)");
        s.executeUpdate("INSERT INTO " + DATE_TABLE + " VALUES('2010-04-01')");
        s.executeUpdate("INSERT INTO " + DATE_TABLE + " VALUES('2010-04-02')");
        s.executeUpdate("INSERT INTO " + DATE_TABLE + " VALUES('2010-05-01')");
        s.executeUpdate("INSERT INTO " + DATE_TABLE + " VALUES('2011-04-01')");

        // commit this tx.
        connection.commit();

        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "file:///");
        FileSystem fs = FileSystem.getLocal(conf);
        fs.delete(new Path(OUT_DIR), true);

        // now do a dd import
        Job job = new Job(conf);
        job.setMapperClass(ValMapper.class);
        job.setReducerClass(Reducer.class);
        job.setMapOutputKeyClass(DateCol.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(DateCol.class);
        job.setOutputValueClass(NullWritable.class);
        job.setNumReduceTasks(1);
        job.getConfiguration().setInt("mapreduce.map.tasks", 2);
        FileOutputFormat.setOutputPath(job, new Path(OUT_DIR));
        DBConfiguration.configureDB(job.getConfiguration(), DRIVER_CLASS, DB_URL, (String) null, (String) null);
        DataDrivenDBInputFormat.setInput(job, DateCol.class, DATE_TABLE, null, COL, COL);

        boolean ret = job.waitForCompletion(true);
        assertTrue("job failed", ret);

        // Check to see that we imported as much as we thought we did.
        assertEquals("Did not get all the records", 4, job.getCounters()
                .findCounter("org.apache.hadoop.mapred.Task$Counter", "REDUCE_OUTPUT_RECORDS").getValue());
    } finally {
        s.close();
    }
}

From source file:com.cloudera.sqoop.mapreduce.TestImportJob.java

License:Apache License

public void testDeleteTargetDir() throws Exception {
    // Make sure that if a MapReduce job to do the import fails due
    // to an IOException, we tell the user about it.

    // Create a table to attempt to import.
    createTableForColType("VARCHAR(32)", "'meep'");

    Configuration conf = new Configuration();

    // Make sure the output dir does not exist
    Path outputPath = new Path(new Path(getWarehouseDir()), getTableName());
    FileSystem fs = FileSystem.getLocal(conf);
    fs.delete(outputPath, true);
    assertTrue(!fs.exists(outputPath));

    String[] argv = getArgv(true, new String[] { "DATA_COL0" }, conf);
    argv = Arrays.copyOf(argv, argv.length + 1);
    argv[argv.length - 1] = "--delete-target-dir";

    Sqoop importer = new Sqoop(new ImportTool());
    try {
        int ret = Sqoop.runSqoop(importer, argv);
        assertTrue("Expected job to go through if target directory" + " does not exist.", 0 == ret);
        assertTrue(fs.exists(outputPath));
        // expecting one _SUCCESS file and one file containing data
        assertTrue("Expecting two files in the directory.", fs.listStatus(outputPath).length == 2);
        String[] output = getContent(conf, outputPath);
        assertEquals("Expected output and actual output should be same.", "meep", output[0]);

        ret = Sqoop.runSqoop(importer, argv);
        assertTrue("Expected job to go through if target directory exists.", 0 == ret);
        assertTrue(fs.exists(outputPath));
        // expecting one _SUCCESS file and one file containing data
        assertTrue("Expecting two files in the directory.", fs.listStatus(outputPath).length == 2);
        output = getContent(conf, outputPath);
        assertEquals("Expected output and actual output should be same.", "meep", output[0]);
    } catch (Exception e) {
        // In debug mode, ImportException is wrapped in RuntimeException.
        LOG.info("Got exceptional return (expected: ok). msg is: " + e);
    }
}

From source file:com.cloudera.sqoop.TestAppendUtils.java

License:Apache License

/**
 * Test for output path file-count increase, current files untouched, and a
 * correct new partition number.
 *
 * @throws IOException
 */
public void runAppendTest(ArrayList<String> args, Path outputPath) throws IOException {

    try {

        // ensure non-existing output dir for insert phase
        FileSystem fs = FileSystem.get(getConf());
        if (fs.exists(outputPath)) {
            fs.delete(outputPath, true);
        }

        // run Sqoop in INSERT mode
        String[] argv = args.toArray(new String[0]);
        runUncleanImport(argv);

        // get current file count
        FileStatus[] fileStatuses = listFiles(fs, outputPath);
        Arrays.sort(fileStatuses, new StatusPathComparator());
        int previousFileCount = fileStatuses.length;

        // get string image with all file creation dates
        String previousImage = getFileCreationTimeImage(fs, outputPath, previousFileCount);

        // get current last partition number
        Path lastFile = fileStatuses[fileStatuses.length - 1].getPath();
        int lastPartition = getFilePartition(lastFile);

        // run Sqoop in APPEND mode
        args.add("--append");
        argv = args.toArray(new String[0]);
        runUncleanImport(argv);

        // check directory file increase
        fileStatuses = listFiles(fs, outputPath);
        Arrays.sort(fileStatuses, new StatusPathComparator());
        int currentFileCount = fileStatuses.length;
        assertTrue("Output directory didn't got increased in file count ",
                currentFileCount > previousFileCount);

        // check previous files weren't modified, also works for partition
        // overlapping
        String currentImage = getFileCreationTimeImage(fs, outputPath, previousFileCount);
        assertEquals("Previous files to appending operation were modified", currentImage, previousImage);

        // check that exists at least 1 new correlative partition
        // let's use a different way than the code being tested
        Path newFile = fileStatuses[previousFileCount].getPath(); // there is a new bound now
        int newPartition = getFilePartition(newFile);
        assertTrue("New partition file isn't correlative", lastPartition + 1 == newPartition);

    } catch (Exception e) {
        LOG.error("Got Exception: " + StringUtils.stringifyException(e));
        fail(e.toString());
    }
}

From source file:com.cloudera.sqoop.TestExport.java

License:Apache License

/** Removes an existing table directory from the filesystem. */
private void removeTablePath() throws IOException {
    Configuration conf = new Configuration();
    if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
        conf.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
    }
    FileSystem fs = FileSystem.get(conf);
    fs.delete(getTablePath(), true);
}

From source file:com.cloudera.sqoop.TestIncrementalImport.java

License:Apache License

/**
 * Delete all files in a directory for a table.
 */
public void clearDir(String tableName) {
    try {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path warehouse = new Path(BaseSqoopTestCase.LOCAL_WAREHOUSE_DIR);
        Path tableDir = new Path(warehouse, tableName);
        fs.delete(tableDir, true);
    } catch (Exception e) {
        fail("Got unexpected exception: " + StringUtils.stringifyException(e));
    }
}

From source file:com.cloudera.sqoop.TestTargetDir.java

License:Apache License

/** Test that target-dir contains imported files. */
public void testTargetDir() throws IOException {

    try {
        String targetDir = getWarehouseDir() + "/tempTargetDir";

        ArrayList<String> args = getOutputArgv(true);
        args.add("--target-dir");
        args.add(targetDir);

        // delete target-dir if it exists; the import will recreate it
        FileSystem fs = FileSystem.get(getConf());
        Path outputPath = new Path(targetDir);
        if (fs.exists(outputPath)) {
            fs.delete(outputPath, true);
        }

        String[] argv = args.toArray(new String[0]);
        runImport(argv);

        ContentSummary summ = fs.getContentSummary(outputPath);

        assertTrue("There's no new imported files in target-dir", summ.getFileCount() > 0);

    } catch (Exception e) {
        LOG.error("Got Exception: " + StringUtils.stringifyException(e));
        fail(e.toString());
    }
}

From source file:com.cloudera.sqoop.testutil.BaseSqoopTestCase.java

License:Apache License

private void guaranteeCleanWarehouse() {
    if (isOnPhysicalCluster()) {
        Path warehousePath = new Path(this.getWarehouseDir());
        try {
            FileSystem fs = FileSystem.get(getConf());
            fs.delete(warehousePath, true);
        } catch (IOException e) {
            LOG.warn(e);
        }
    }
    File s = new File(getWarehouseDir());
    if (!s.delete()) {
        LOG.warn("Can't delete " + s.getPath());
    }
}

From source file:com.cloudera.sqoop.util.AppendUtils.java

License:Apache License

/**
 * Moves the imported files from the temporary directory to the specified
 * target-dir, renumbering partitions when files already exist there.
 */
public void append() throws IOException {

    SqoopOptions options = context.getOptions();
    FileSystem fs = FileSystem.get(options.getConf());
    Path tempDir = context.getDestination();

    // Try in this order: target-dir or warehouse-dir
    Path userDestDir = null;
    if (options.getTargetDir() != null) {
        userDestDir = new Path(options.getTargetDir());
    } else if (options.getWarehouseDir() != null) {
        userDestDir = new Path(options.getWarehouseDir(), context.getTableName());
    } else {
        userDestDir = new Path(context.getTableName());
    }

    int nextPartition = 0;

    if (!fs.exists(tempDir)) {
        // This occurs if there was no source (tmp) dir. This might happen
        // if the import was an HBase-target import, but the user specified
        // --append anyway. This is a warning, not an error.
        LOG.warn("Cannot append files to target dir; no such directory: " + tempDir);
        return;
    }

    // Create target directory.
    if (!fs.exists(userDestDir)) {
        LOG.info("Creating missing output directory - " + userDestDir.getName());
        fs.mkdirs(userDestDir);
        nextPartition = 0;
    } else {
        LOG.info("Appending to directory " + userDestDir.getName());
        // Get the right next partition for the imported files
        nextPartition = getNextPartition(fs, userDestDir);
    }

    // move files
    moveFiles(fs, tempDir, userDestDir, nextPartition);

    // delete temporary path
    LOG.debug("Deleting temporary folder " + tempDir.getName());
    fs.delete(tempDir, true);
}

From source file:com.cloudy.mapred.base.JobUtil.java

License:Apache License

public static void delete(Configuration conf, Path path) throws IOException {
    if (conf == null) {
        conf = new Configuration();
    }
    FileSystem fs = path.getFileSystem(conf);
    if (fs.exists(path)) {
        log.info("Deleting {}", path);
        fs.delete(path, true);
    }
}
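
A hypothetical call site for this helper, clearing an output directory before launching a job; the directory path is an illustrative assumption:

Configuration conf = new Configuration();
Path outputDir = new Path("/tmp/example-job-output"); // illustrative path
// Safe to call unconditionally: the helper checks fs.exists(path) first.
JobUtil.delete(conf, outputDir);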

From source file:com.constellio.sdk.tests.FactoriesTestFeatures.java

private void deleteFromHadoop(String user, String url) {
    // Validate first: System.setProperty throws a NullPointerException on a null value.
    if (url == null || user == null) {
        throw new RuntimeException("No config");
    }

    System.setProperty("HADOOP_USER_NAME", user);
    Configuration hadoopConfig = new Configuration();
    hadoopConfig.set("fs.defaultFS", url);
    hadoopConfig.set("hadoop.job.ugi", user);

    try {
        FileSystem hdfs = FileSystem.get(hadoopConfig);
        for (FileStatus file : hdfs.globStatus(new Path("*"))) {
            hdfs.delete(file.getPath(), true);
        }

    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}