Example usage for org.apache.hadoop.fs FileSystem delete

List of usage examples for org.apache.hadoop.fs FileSystem delete

Introduction

On this page you can find usage examples for org.apache.hadoop.fs FileSystem delete.

Prototype

public abstract boolean delete(Path f, boolean recursive) throws IOException;

Document

Delete a file or directory. If the path is a directory and recursive is true, the directory and all of its contents are deleted; if recursive is false, the call fails for a non-empty directory. Returns true if the delete succeeded.
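
Before the project examples below, here is a minimal, self-contained sketch of the call. The class name and the /tmp/example-output path are hypothetical, not taken from any of the examples.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Hypothetical output directory; replace with a real path.
        Path out = new Path("/tmp/example-output");

        // The second argument is 'recursive': true deletes a directory and
        // everything under it; false only deletes files or empty directories.
        if (fs.exists(out) && fs.delete(out, true)) {
            System.out.println("Deleted " + out);
        }
        fs.close();
    }
}

Most of the examples that follow use the same pattern: check whether the output path exists, then delete it recursively before writing new data.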

Usage

From source file: com.blackberry.logdriver.util.IndexLogs.java

License: Apache License

public static void main(String args[]) throws IOException, ParseException {
    // Create blank map for components
    Map<String, Map<String, Map<String, Map<String, Component>>>> data = new HashMap<String, Map<String, Map<String, Map<String, Component>>>>();
    List<String> unmergedCSVStrings = new ArrayList<String>();
    unmergedCSVStrings.add("DC,Service,Type,Component,Date,Hour\n");

    // Set the output format
    Boolean humanReadable = false;
    Boolean writeIndex = true;
    Boolean removeOldIndexes = false;
    Boolean forceRemove = false;
    Boolean confirmRemoval = false;

    for (int i = 0; i < args.length; i++) {
        if (args[i].matches("-t")) {
            humanReadable = true;
        } else if (args[i].matches("-n")) {
            writeIndex = false;
        } else if (args[i].matches("-r")) {
            removeOldIndexes = true;
        } else if (args[i].matches("-f")) {
            forceRemove = true;
        } else {
            System.out.println(
                    "Usage: indexlogs [-t -n -r -f]\n    -t      Print results to STDOUT in human-readable tree\n"
                            + "    -n      Don't write index files into HDFS\n"
                            + "    -r      Remove old index files from HDFS\n"
                            + "    -f      Force remove old index files (requires -r)");
            System.exit(0);
        }
    }

    // Set up HDFS filesystem
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Search the /service folder for matching paths
    if (!humanReadable && !writeIndex) {
        System.out.println("Warning: -n set without -t, not doing anything.\n");
        System.exit(0);
    }

    if (forceRemove && !removeOldIndexes) {
        System.out.println(
                "Warning: Asked to force-remove (-f) indexes without removing old index files (-r). Aborting.");
        System.exit(0);
    }

    //Confirm removal of old IndexLogs files
    if (removeOldIndexes) {
        if (!forceRemove) {
            Scanner reader = new Scanner(System.in);
            String input;
            boolean validAction = false;
            do {
                System.out.println("Delete *all* IndexLogs files from HDFS? [y/n]");
                input = reader.nextLine();
                if (input.startsWith("y")) {
                    validAction = true;
                    confirmRemoval = true;
                }

            } while (!validAction && !(input.startsWith("n")));
            reader.close();
        }
    }

    System.out.println("Indexing logs...");
    findComponents(data, unmergedCSVStrings, fs, new Path("/service"));

    // Remove old IndexLogs files
    if (confirmRemoval || forceRemove) {
        System.out.println("Removing all old IndexLogs Files from HDFS...");
        //WARNING: DO NOT CHANGE THIS UNLESS ABSOLUTELY NECESSARY.
        fs.delete(new Path("/service/.index/"), true);
    } else {
        System.out.println("Not removing old IndexLogs files.");
    }

    // Output the generated index
    if (humanReadable) {
        humanPrint(data);
        System.out.println("");
    }

    if (writeIndex) {
        long currentTime = System.currentTimeMillis() / 1000;
        FSDataOutputStream outputCSV = fs.create(new Path("/service/.index/logindex." + currentTime + ".csv"));
        writeCSV(data, outputCSV);
        outputCSV.close();
        FSDataOutputStream outputJSON = fs
                .create(new Path("/service/.index/logindex." + currentTime + ".json"));
        writeJSON(data, outputJSON);
        outputJSON.close();
        System.out.println("Index files written to /service/.index/logindex." + currentTime
                + ".csv and /service/.index/logindex." + currentTime + ".json");
        FSDataOutputStream unmergedCSV = fs
                .create(new Path("/service/.index/unmerged." + currentTime + ".csv"));
        Iterator<String> i = unmergedCSVStrings.iterator();
        while (i.hasNext()) {
            unmergedCSV.writeBytes(i.next());
        }
        unmergedCSV.close();

        System.out.println("Unmerged report written to /service/.index/unmerged." + currentTime + ".csv");
    }
}

From source file: com.chinamobile.bcbsp.client.BSPJobClient.java

License: Apache License

/**
 * Submit a new job to run.
 * @param job BSPJob
 * @return Review comments: (1) The content of submitJobDir is decided by the
 *         client. I think it is dangerous because two different clients may
 *         generate the same submitJobDir. Review time: 2011-11-30; Reviewer:
 *         Hongxu Zhang. Fix log: (1) In order to avoid the conflict, I use the
 *         jobId to generate the submitJobDir. Because the jobId is unique,
 *         this problem is solved. Fix time: 2011-12-04; Programmer: Zhigang
 *         Wang. Review comments: (2) Here, the client must submit relevant
 *         information about the job. There may be some exceptions during this
 *         process. When exceptions occur, the job should not be executed and
 *         the submitJobDir must be cleaned up. Review time: 2011-12-04;
 *         Reviewer: Hongxu Zhang. Fix log: (2) The process of submitting files
 *         has been surrounded by try-catch. The submitJobDir will be cleaned
 *         up in the catch block. Fix time: 2011-12-04; Programmer: Zhigang
 *         Wang.
 */
public RunningJob submitJobInternal(BSPJob job) {
    BSPJobID jobId = null;
    Path submitJobDir = null;
    try {
        jobId = jobSubmitClient.getNewJobId();
        submitJobDir = new Path(getSystemDir(), "submit_" + jobId.toString());
        Path submitJarFile = null;
        LOG.info("debug: job type is " + job.getJobType());
        if (Constants.USER_BC_BSP_JOB_TYPE_C.equals(job.getJobType())) {
            submitJarFile = new Path(submitJobDir, "jobC");
            LOG.info("debug:" + submitJarFile.toString());
        } else {
            LOG.info("debug: before  submitJarFile = new " + "Path(submitJobDir,job.jar);");
            submitJarFile = new Path(submitJobDir, "job.jar");
            LOG.info("debug:" + submitJarFile.toString());
        }
        Path submitJobFile = new Path(submitJobDir, "job.xml");
        Path submitSplitFile = new Path(submitJobDir, "job.split");
        // set this user's id in job configuration, so later job files can
        // be accessed using this user's id
        UnixUserGroupInformation ugi = getUGI(job.getConf());
        // Create a number of filenames in the BSPController's fs namespace
        FileSystem files = getFs();
        files.delete(submitJobDir, true);
        submitJobDir = files.makeQualified(submitJobDir);
        submitJobDir = new Path(submitJobDir.toUri().getPath());
        BSPFsPermission bspSysPerms = new BSPFspermissionImpl(2);
        FileSystem.mkdirs(files, submitJobDir, bspSysPerms.getFp());
        files.mkdirs(submitJobDir);
        short replication = (short) job.getInt("bsp.submit.replication", 10);
        String originalJarPath = null;
        LOG.info("debug: job type is " + job.getJobType());
        if (Constants.USER_BC_BSP_JOB_TYPE_C.equals(job.getJobType())) {
            LOG.info("debug: originalJarPath = job.getJobExe();" + job.getJobExe());
            originalJarPath = job.getJobExe();
            LOG.info("debug:" + submitJarFile.toString());
            job.setJobExe(submitJarFile.toString());
        } else {
            LOG.info("debug: jar");
            originalJarPath = job.getJar();
            job.setJar(submitJarFile.toString());
        }
        if (originalJarPath != null) {
            // copy jar to BSPController's fs
            // use jar name if job is not named.
            if ("".equals(job.getJobName())) {
                job.setJobName(new Path(originalJarPath).getName());
            }
            // job.setJar(submitJarFile.toString());
            fs.copyFromLocalFile(new Path(originalJarPath), submitJarFile);
            fs.setReplication(submitJarFile, replication);
            fs.setPermission(submitJarFile, new BSPFspermissionImpl(0).getFp());
        } else {
            LOG.warn("No job jar file set.  User classes may not be found. "
                    + "See BSPJob#setJar(String) or check Your jar file.");
        }
        // Set the user's name and working directory
        job.setUser(ugi.getUserName());
        if (ugi.getGroupNames().length > 0) {
            job.set("group.name", ugi.getGroupNames()[0]);
        }
        if (new BSPHdfsImpl().getWorkingDirectory() == null) {
            job.setWorkingDirectory(fs.getWorkingDirectory());
        }
        int maxClusterStaffs = jobSubmitClient.getClusterStatus(false).getMaxClusterStaffs();
        if (job.getNumPartition() == 0) {
            job.setNumPartition(maxClusterStaffs);
        }
        if (job.getNumPartition() > maxClusterStaffs) {
            job.setNumPartition(maxClusterStaffs);
        }
        job.setNumBspStaff(job.getNumPartition());
        int splitNum = 0;
        splitNum = writeSplits(job, submitSplitFile);
        if (splitNum > job.getNumPartition() && splitNum <= maxClusterStaffs) {
            job.setNumPartition(splitNum);
            job.setNumBspStaff(job.getNumPartition());
        }
        if (splitNum > maxClusterStaffs) {
            LOG.error("Sorry, the number of files is more than maxClusterStaffs:" + maxClusterStaffs);
            throw new IOException("Could not launch job");
        }
        job.set(Constants.USER_BC_BSP_JOB_SPLIT_FILE, submitSplitFile.toString());
        LOG.info("[Max Staff Number] " + maxClusterStaffs);
        LOG.info("The number of splits for the job is: " + splitNum);
        LOG.info("The number of staffs for the job is: " + job.getNumBspStaff());
        BSPFSDataOutputStream bspout = new BSPFSDataOutputStreamImpl(fs, submitJobFile,
                new BSPFspermissionImpl(0).getFp());
        try {
            job.writeXml(bspout.getOut());
        } finally {
            bspout.close();
        }
        // Now, actually submit the job (using the submit name)
        JobStatus status = jobSubmitClient.submitJob(jobId, submitJobFile.toString());
        if (status != null) {
            return new NetworkedJob(status);
        } else {
            throw new IOException("Could not launch job");
        }
    } catch (FileNotFoundException fnfE) {
        LOG.error("Exception has been catched in BSPJobClient--submitJobInternal !", fnfE);
        Fault f = new Fault(Fault.Type.SYSTEMSERVICE, Fault.Level.INDETERMINATE, "null", fnfE.toString());
        jobSubmitClient.recordFault(f);
        jobSubmitClient.recovery(jobId);
        try {
            FileSystem files = getFs();
            files.delete(submitJobDir, true);
        } catch (IOException e) {
            //LOG.error("Failed to cleanup the submitJobDir:" + submitJobDir);
            throw new RuntimeException("Failed to cleanup the submitJobDir", e);
        }
        return null;
    } catch (ClassNotFoundException cnfE) {
        LOG.error("Exception has been catched in BSPJobClient--submitJobInternal !", cnfE);
        Fault f = new Fault(Fault.Type.SYSTEMSERVICE, Fault.Level.WARNING, "null", cnfE.toString());
        jobSubmitClient.recordFault(f);
        jobSubmitClient.recovery(jobId);
        try {
            FileSystem files = getFs();
            files.delete(submitJobDir, true);
        } catch (IOException e) {
            //LOG.error("Failed to cleanup the submitJobDir:" + submitJobDir);
            throw new RuntimeException("Failed to cleanup the submitJobDir", e);
        }
        return null;
    } catch (InterruptedException iE) {
        LOG.error("Exception has been catched in BSPJobClient--submitJobInternal !", iE);
        Fault f = new Fault(Fault.Type.SYSTEMSERVICE, Fault.Level.CRITICAL, "null", iE.toString());
        jobSubmitClient.recordFault(f);
        jobSubmitClient.recovery(jobId);
        try {
            FileSystem files = getFs();
            files.delete(submitJobDir, true);
        } catch (IOException e) {
            //LOG.error("Failed to cleanup the submitJobDir:" + submitJobDir);
            throw new RuntimeException("Failed to cleanup the submitJobDir", e);
        }
        return null;
    } catch (Exception ioE) {
        LOG.error("Exception has been catched in BSPJobClient--submitJobInternal !", ioE);
        Fault f = new Fault(Fault.Type.DISK, Fault.Level.CRITICAL, "null", ioE.toString());
        jobSubmitClient.recordFault(f);
        jobSubmitClient.recovery(jobId);
        try {
            FileSystem files = getFs();
            files.delete(submitJobDir, true);
        } catch (IOException e) {
            //LOG.error("Failed to cleanup the submitJobDir:" + submitJobDir);
            throw new RuntimeException("Failed to cleanup the submitJobDir", e);
        }
        return null;
    }
}

From source file: com.cip.crane.agent.utils.TaskHelper.java

License: Open Source License

private void writeFileToHdfs(String srcFile, String destFile) throws IOException {
    File file = new File(srcFile);
    if (!file.exists()) {
        throw new FileNotFoundException("File not found");
    }
    byte[] buf = new byte[BUFFER_SIZE];
    FileInputStream input = new FileInputStream(file);
    FileSystem fs = FileSystem.get(URI.create(destFile), conf);
    Path destPath = new Path(destFile);
    if (fs.exists(destPath)) {
        fs.delete(destPath, true);
    }
    FSDataOutputStream hdfsoutput = fs.create(destPath, (short) 2);
    int num = input.read(buf);
    while (num != -1) { // keep copying until the end of the local file
        hdfsoutput.write(buf, 0, num); // write this chunk to HDFS
        hdfsoutput.flush();
        num = input.read(buf); // read the next chunk
    }
    input.close();
    hdfsoutput.close();
    fs.close();
}

From source file: com.citic.zxyjs.zwlscx.mapreduce.lib.input.HFileOutputFormatBase.java

License: Apache License

public RecordWriter<ImmutableBytesWritable, KeyValue> getRecordWriter(final TaskAttemptContext context)
        throws IOException, InterruptedException {
    // Get the path of the temporary output file
    final Path outputPath = FileOutputFormat.getOutputPath(context);
    final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath();
    final Path ignoreOutputPath = new Path(outputPath + "_ignore");

    final Configuration conf = context.getConfiguration();
    final FileSystem fs = outputdir.getFileSystem(conf);
    // These configs. are from hbase-*.xml
    final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE);
    // Invented config. Add to hbase-*.xml if other than default
    // compression.
    final String defaultCompression = conf.get("hfile.compression", Compression.Algorithm.NONE.getName());
    final boolean compactionExclude = conf.getBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
            false);

    if (fs.exists(ignoreOutputPath)) {
        fs.delete(ignoreOutputPath, true);
        LOG.info("Deleted " + ignoreOutputPath.toString() + " successfully.");
    }

    // create a map from column family to the compression algorithm
    final Map<byte[], String> compressionMap = createFamilyCompressionMap(conf);
    final Map<byte[], String> bloomTypeMap = createFamilyBloomMap(conf);
    final Map<byte[], String> blockSizeMap = createFamilyBlockSizeMap(conf);

    String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_CONF_KEY);
    final HFileDataBlockEncoder encoder;
    if (dataBlockEncodingStr == null) {
        encoder = NoOpDataBlockEncoder.INSTANCE;
    } else {
        try {
            encoder = new HFileDataBlockEncoderImpl(DataBlockEncoding.valueOf(dataBlockEncodingStr));
        } catch (IllegalArgumentException ex) {
            throw new RuntimeException("Invalid data block encoding type configured for the param "
                    + DATABLOCK_ENCODING_CONF_KEY + " : " + dataBlockEncodingStr);
        }
    }

    return new RecordWriter<ImmutableBytesWritable, KeyValue>() {
        // Map of families to writers and how much has been output on the
        // writer.
        private final Map<byte[], WriterLength> writers = new TreeMap<byte[], WriterLength>(
                Bytes.BYTES_COMPARATOR);
        private final FSDataOutputStream dos = fs.create(ignoreOutputPath);
        private byte[] previousRow = HConstants.EMPTY_BYTE_ARRAY;
        private final byte[] now = Bytes.toBytes(System.currentTimeMillis());
        private boolean rollRequested = false;

        public void write(ImmutableBytesWritable row, KeyValue kv) throws IOException {
            // null input == user explicitly wants to flush
            if (row == null && kv == null) {
                rollWriters();
                return;
            }

            byte[] rowKey = kv.getRow();
            long length = kv.getLength();
            byte[] family = kv.getFamily();

            if (ignore(kv)) {
                byte[] readBuf = rowKey;
                dos.write(readBuf, 0, readBuf.length);
                dos.write(Bytes.toBytes("\n"));
                return;
            }
            WriterLength wl = this.writers.get(family);

            // If this is a new column family, verify that the directory
            // exists
            if (wl == null) {
                Path path = null;
                path = new Path(outputdir, Bytes.toString(family));
                fs.mkdirs(path);
            }

            // If any of the HFiles for the column families has reached
            // maxsize, we need to roll all the writers
            if (wl != null && wl.written + length >= maxsize) {
                this.rollRequested = true;
            }

            // This can only happen once a row is finished though
            if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
                rollWriters();
            }

            // create a new HLog writer, if necessary
            if (wl == null || wl.writer == null) {
                wl = getNewWriter(family, conf);
            }

            // we now have the proper HLog writer. full steam ahead
            kv.updateLatestStamp(this.now);
            wl.writer.append(kv);
            wl.written += length;

            // Copy the row so we know when a row transition occurs.
            this.previousRow = rowKey;
        }

        private void rollWriters() throws IOException {
            for (WriterLength wl : this.writers.values()) {
                if (wl.writer != null) {
                    LOG.info("Writer=" + wl.writer.getPath()
                            + ((wl.written == 0) ? "" : ", wrote=" + wl.written));
                    close(wl.writer);
                }
                wl.writer = null;
                wl.written = 0;
            }
            this.rollRequested = false;
        }

        /*
         * Create a new StoreFile.Writer.
         * @param family
         * @return A WriterLength, containing a new StoreFile.Writer.
         * @throws IOException
         */
        private WriterLength getNewWriter(byte[] family, Configuration conf) throws IOException {
            WriterLength wl = new WriterLength();
            Path familydir = new Path(outputdir, Bytes.toString(family));
            String compression = compressionMap.get(family);
            compression = compression == null ? defaultCompression : compression;
            String bloomTypeStr = bloomTypeMap.get(family);
            BloomType bloomType = BloomType.NONE;
            if (bloomTypeStr != null) {
                bloomType = BloomType.valueOf(bloomTypeStr);
            }
            String blockSizeString = blockSizeMap.get(family);
            int blockSize = blockSizeString == null ? HConstants.DEFAULT_BLOCKSIZE
                    : Integer.parseInt(blockSizeString);
            Configuration tempConf = new Configuration(conf);
            tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
            wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs, blockSize)
                    .withOutputDir(familydir)
                    .withCompression(AbstractHFileWriter.compressionByName(compression))
                    .withBloomType(bloomType).withComparator(KeyValue.COMPARATOR).withDataBlockEncoder(encoder)
                    .withChecksumType(HStore.getChecksumType(conf))
                    .withBytesPerChecksum(HStore.getBytesPerChecksum(conf)).build();

            this.writers.put(family, wl);
            return wl;
        }

        private void close(final StoreFile.Writer w) throws IOException {
            if (w != null) {
                w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
                w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,
                        Bytes.toBytes(context.getTaskAttemptID().toString()));
                w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
                w.appendFileInfo(StoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude));
                w.appendTrackedTimestampsToMetadata();
                w.close();
            }
        }

        public void close(TaskAttemptContext c) throws IOException, InterruptedException {
            dos.flush();
            dos.close();
            for (WriterLength wl : this.writers.values()) {
                close(wl.writer);
            }
        }
    };
}

From source file: com.cloudera.castagna.logparser.mr.StatusCodesStats.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.printf("Usage: %s [generic options] <input> <output>\n", getClass().getName());
        ToolRunner.printGenericCommandUsage(System.err);
        return -1;
    }

    Configuration configuration = getConf();
    boolean useCompression = configuration.getBoolean(Constants.OPTION_USE_COMPRESSION,
            Constants.OPTION_USE_COMPRESSION_DEFAULT);

    if (useCompression) {
        configuration.setBoolean("mapred.compress.map.output", true);
        configuration.set("mapred.output.compression.type", "BLOCK");
        configuration.set("mapred.map.output.compression.codec", "org.apache.hadoop.io.compress.GzipCodec");
    }

    boolean overrideOutput = configuration.getBoolean(Constants.OPTION_OVERWRITE_OUTPUT,
            Constants.OPTION_OVERWRITE_OUTPUT_DEFAULT);
    FileSystem fs = FileSystem.get(new Path(args[1]).toUri(), configuration);
    if (overrideOutput) {
        fs.delete(new Path(args[1]), true);
    }

    Job job = Job.getInstance(configuration);
    job.setJobName(Constants.STATUS_CODES_STATS);
    job.setJarByClass(getClass());

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    job.setInputFormatClass(TextInputFormat.class);

    job.setMapperClass(StatusCodesStatsMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);

    job.setCombinerClass(StatusCodesStatsCombiner.class);

    job.setReducerClass(StatusCodesStatsReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    Utils.setReducers(job, configuration, log);

    job.setOutputFormatClass(TextOutputFormat.class);

    if (log.isDebugEnabled())
        Utils.log(job, log);

    return job.waitForCompletion(true) ? 0 : 1;
}

From source file: com.cloudera.castagna.logparser.mr.TranscodeLogs.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.printf("Usage: %s [generic options] <input> <output>\n", getClass().getName());
        ToolRunner.printGenericCommandUsage(System.err);
        return -1;
    }

    Configuration configuration = getConf();

    boolean overrideOutput = configuration.getBoolean(Constants.OPTION_OVERWRITE_OUTPUT,
            Constants.OPTION_OVERWRITE_OUTPUT_DEFAULT);
    FileSystem fs = FileSystem.get(new Path(args[1]).toUri(), configuration);
    if (overrideOutput) {
        fs.delete(new Path(args[1]), true);
    }

    Job job = Job.getInstance(configuration);
    job.setJobName(Constants.STATUS_CODES_STATS);
    job.setJarByClass(getClass());

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    job.setInputFormatClass(TextInputFormat.class);

    job.setMapperClass(TranscodeLogsMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);

    job.setNumReduceTasks(0);

    job.setOutputFormatClass(TextOutputFormat.class);

    if (log.isDebugEnabled())
        Utils.log(job, log);

    return job.waitForCompletion(true) ? 0 : 1;
}

From source file: com.cloudera.cdk.data.filesystem.FileSystemDatasetRepository.java

License: Apache License

@SuppressWarnings("deprecation")
@Override
public boolean delete(String name) {
    Preconditions.checkArgument(name != null, "Name can not be null");

    logger.debug("Deleting dataset:{}", name);

    final DatasetDescriptor descriptor;
    try {
        descriptor = metadataProvider.load(name);
    } catch (com.cloudera.cdk.data.NoSuchDatasetException ex) {
        return false;
    }

    boolean changed;
    try {
        // don't care about the return value here -- if it already doesn't exist
        // we still need to delete the data directory
        changed = metadataProvider.delete(name);
    } catch (MetadataProviderException ex) {
        throw new DatasetRepositoryException("Failed to delete descriptor for name:" + name, ex);
    }

    final Path dataLocation = new Path(descriptor.getLocation());
    final FileSystem fs = fsForPath(dataLocation, conf);

    try {
        if (fs.exists(dataLocation)) {
            if (fs.delete(dataLocation, true)) {
                changed = true;
            } else {
                throw new DatasetRepositoryException(
                        "Failed to delete dataset name:" + name + " location:" + dataLocation);
            }
        }
    } catch (IOException e) {
        throw new DatasetRepositoryException("Internal failure when removing location:" + dataLocation);
    }

    return changed;
}

From source file: com.cloudera.cdk.data.filesystem.FileSystemView.java

License: Apache License

private static boolean cleanlyDelete(FileSystem fs, Path root, Path dir) {
    try {
        boolean deleted = false;
        if (dir.isAbsolute()) {
            deleted = fs.delete(dir, true /* include any files */ );
        } else {
            // the path should be treated as relative to the root path
            Path absolute = new Path(root, dir);
            deleted = fs.delete(absolute, true /* include any files */ );
            // iterate up to the root, removing empty directories
            for (Path current = absolute.getParent(); !current.equals(root)
                    && !current.isRoot(); current = current.getParent()) {
                final FileStatus[] stats = fs.listStatus(current);
                if (stats == null || stats.length == 0) {
                    // dir is empty and should be removed
                    deleted = fs.delete(current, true) || deleted;
                } else {
                    // all parent directories will be non-empty
                    break;
                }
            }
        }
        return deleted;
    } catch (IOException ex) {
        throw new DatasetIOException("Could not cleanly delete path:" + dir, ex);
    }
}

From source file: com.cloudera.cdk.maven.plugins.DeployAppMojo.java

License: Apache License

public void execute() throws MojoExecutionException, MojoFailureException {
    try {
        Configuration conf = new Configuration();
        Path appPath = getAppPath();
        getLog().info("Deploying " + localApplicationFile + " to " + appPath);

        FileSystem destFileSystem = FileSystem.get(new URI(deployFileSystem), conf);
        if (destFileSystem.exists(appPath)) {
            if (!updateApplication) {
                throw new MojoExecutionException("Application already exists at " + appPath
                        + ". Use 'updateApplication' option to force deployment.");
            }
            boolean success = destFileSystem.delete(appPath, true);
            if (!success) {
                throw new MojoExecutionException("Error deleting existing application at " + appPath);
            }
        }
        boolean success = FileUtil.copy(localApplicationFile, destFileSystem, appPath, false, conf);
        if (!success) {
            throw new MojoExecutionException(
                    "Error creating parent directories " + "for deploying Oozie application");
        }
    } catch (URISyntaxException e) {
        throw new MojoExecutionException("Syntax error in 'deployFileSystem': " + deployFileSystem, e);
    } catch (IOException e) {
        throw new MojoExecutionException("Error deploying application", e);
    }
}

From source file: com.cloudera.crunch.impl.mr.MRPipeline.java

License: Open Source License

private void cleanup() {
    if (!outputTargets.isEmpty()) {
        LOG.warn("Not running cleanup while output targets remain");
        return;
    }
    try {
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(tempDirectory)) {
            fs.delete(tempDirectory, true);
        }
    } catch (IOException e) {
        LOG.info("Exception during cleanup", e);
    }
}