Example usage for org.apache.hadoop.fs FileSystem delete

Introduction

This page collects usage examples for org.apache.hadoop.fs.FileSystem#delete(Path, boolean).

Prototype

public abstract boolean delete(Path f, boolean recursive) throws IOException;

Document

Delete a file. If the path is a directory and recursive is true, the directory and all of its contents are deleted; if it is a non-empty directory and recursive is false, an IOException is thrown. Returns true if the delete succeeded and false otherwise (for example, when the path does not exist).
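
A minimal, self-contained sketch of the call (not drawn from the sources below; the paths are illustrative placeholders):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemDeleteExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path dir = new Path("/tmp/delete-example");
        FileSystem fs = dir.getFileSystem(conf);

        // recursive = true: removes the directory and everything under it;
        // returns false if the path did not exist.
        boolean deleted = fs.delete(dir, true);
        System.out.println("deleted " + dir + ": " + deleted);

        // recursive = false is only valid for files and empty directories;
        // calling it on a non-empty directory throws an IOException.
        fs.delete(new Path("/tmp/delete-example.txt"), false);
    }
}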

Usage

From source file: com.inmobi.messaging.consumer.databus.TestAbstractDatabusConsumer.java

License: Apache License

public void cleanup() throws IOException {
    testConsumer.close();
    for (Path p : rootDirs) {
        FileSystem fs = p.getFileSystem(conf);
        LOG.debug("Cleaning up the dir: " + p);
        fs.delete(p, true);
    }
    FileSystem lfs = new Path(chkpointPathPrefix).getFileSystem(conf);
    lfs.delete(new Path(chkpointPathPrefix).getParent(), true);
}

From source file: com.inmobi.messaging.consumer.databus.TestConsumerPartitionMinList.java

License: Apache License

@AfterTest
public void cleanUp() throws IOException {
    testConsumer.close();
    FileSystem fs = FileSystem.getLocal(new Configuration());
    fs.delete(new Path(chkpointPath).getParent(), true);
}

From source file: com.inmobi.messaging.consumer.hadoop.TestAbstractHadoopConsumer.java

License: Apache License

public void cleanup() throws IOException {
    FileSystem lfs = FileSystem.getLocal(conf);
    for (Path rootDir : rootDirs) {
        LOG.debug("Cleaning up the dir: " + rootDir.getParent());
        lfs.delete(rootDir.getParent(), true);
    }
    lfs.delete(new Path(chkpointPathPrefix).getParent(), true);
}

From source file: com.inmobi.messaging.consumer.hadoop.TestConsumerPartitionRetention.java

License: Apache License

@AfterTest
public void cleanup() throws Exception {
    FileSystem lfs = FileSystem.getLocal(conf);
    for (Path rootDir : rootDirs) {
        LOG.debug("Cleaning Up the dir: " + rootDir.getParent());
        lfs.delete(rootDir.getParent(), true);
    }
    lfs.delete(new Path(chkpointPath).getParent(), true);
}

From source file: com.inmobi.messaging.consumer.hadoop.TestConsumerPartitionStartTime.java

License: Apache License

@AfterTest
public void cleanup() throws Exception {
    FileSystem lfs = FileSystem.getLocal(conf);
    for (Path rootDir : rootDirs) {
        LOG.debug("Cleaning up the dir: " + rootDir.getParent());
        lfs.delete(rootDir.getParent(), true);
    }
    lfs.delete(new Path(chkpointPath).getParent(), true);
}

From source file: com.inmobi.messaging.consumer.hadoop.TestHadoopConsumerWithPartitionList.java

License: Apache License

@AfterTest
public void cleanup() throws IOException {
    FileSystem lfs = FileSystem.getLocal(conf);
    for (Path rootDir : rootDirs) {
        LOG.debug("Cleaning up the dir: " + rootDir);
        lfs.delete(rootDir.getParent(), true);
    }
    // delete checkpoint dir
    lfs.delete(new Path(ck1).getParent(), true);
}
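
The @AfterTest hooks above all delete scratch directories by hand. A hedged alternative (not used by these sources): FileSystem#deleteOnExit registers a path for recursive deletion when the file system is closed, so cleanup happens even if a test fails midway. A sketch with an illustrative path:

Configuration conf = new Configuration();
FileSystem lfs = FileSystem.getLocal(conf);
Path scratch = new Path("/tmp/consumer-test-scratch"); // illustrative placeholder
lfs.mkdirs(scratch);
lfs.deleteOnExit(scratch); // removed recursively when lfs is closed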

From source file: com.inmobi.messaging.consumer.util.HadoopUtil.java

License: Apache License

public static void setUpHadoopFiles(Path streamDirPrefix, Configuration conf, String[] files,
        String[] suffixDirs, Path[] finalFiles, boolean alternateEmptyFiles, Date minuteDirTimeStamp, int index,
        int startIndex) throws Exception {
    FileSystem fs = streamDirPrefix.getFileSystem(conf);
    Path rootDir = streamDirPrefix.getParent();
    Path tmpDataDir = new Path(rootDir, "data");
    boolean emptyFile = false;
    // setup data dirs
    if (files != null) {
        int i = startIndex;
        int j = index;
        for (String file : files) {
            if (alternateEmptyFiles && emptyFile) {
                MessageUtil.createEmptySequenceFile(file, fs, tmpDataDir, conf);
                emptyFile = false;
            } else {
                MessageUtil.createMessageSequenceFile(file, fs, tmpDataDir, i, conf);
                emptyFile = true;
                i += 100;
            }
            Path srcPath = new Path(tmpDataDir, file);
            Date commitTime = getCommitDateForFile(file, minuteDirTimeStamp);
            TestUtil.publishMissingPaths(fs, streamDirPrefix, lastCommitTime, commitTime);
            lastCommitTime = commitTime;
            Path targetDateDir = getTargetDateDir(streamDirPrefix, commitTime);
            List<Path> targetDirs = new ArrayList<Path>();
            if (suffixDirs != null) {
                for (String suffixDir : suffixDirs) {
                    targetDirs.add(new Path(targetDateDir, suffixDir));
                }
            } else {
                targetDirs.add(targetDateDir);
            }
            for (Path targetDir : targetDirs) {
                fs.mkdirs(targetDir);
                Path targetPath = new Path(targetDir, file);
                fs.copyFromLocalFile(srcPath, targetPath);
                LOG.info("Copied " + srcPath + " to " + targetPath);
                if (finalFiles != null) {
                    finalFiles[j] = targetPath;
                    j++;
                }
                Thread.sleep(1000);
            }
            fs.delete(srcPath, true);
        }
        TestUtil.publishLastPath(fs, streamDirPrefix, lastCommitTime);
    }
}

From source file: com.inmobi.messaging.consumer.util.HadoopUtil.java

License: Apache License

public static void setupHadoopCluster(Configuration conf, String[] files, String[] suffixDirs,
        Path[] finalFiles, Path finalDir, boolean withEmptyFiles, boolean createFilesInNextHour)
        throws Exception {
    FileSystem fs = finalDir.getFileSystem(conf);

    Path rootDir = finalDir.getParent();
    fs.delete(rootDir, true);
    Path tmpDataDir = new Path(rootDir, "data");
    fs.mkdirs(tmpDataDir);

    if (!createFilesInNextHour) {
        setUpHadoopFiles(finalDir, conf, files, suffixDirs, finalFiles, withEmptyFiles, null, 0, 0);
    } else {
        // start from one hour back, as we need files in two different hours.
        Calendar cal = Calendar.getInstance();
        cal.setTime(startCommitTime);
        cal.add(Calendar.HOUR_OF_DAY, -1);

        setUpHadoopFiles(finalDir, conf, files, suffixDirs, finalFiles, withEmptyFiles, cal.getTime(), 0, 0);
        // go to next hour
        cal.add(Calendar.HOUR_OF_DAY, 1);
        int index = files.length;
        // find the number of non-empty (i.e. data) files in one hour
        int numberOfNonEmptyFiles = withEmptyFiles ? (int) Math.ceil(index / 2.0) : index;
        int startIndex = numberOfNonEmptyFiles * 100;
        setUpHadoopFiles(finalDir, conf, files, suffixDirs, finalFiles, withEmptyFiles, cal.getTime(), index,
                startIndex);
    }
}
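
setupHadoopCluster resets its working tree with a recursive delete followed by mkdirs. That pattern is common enough to pull into a helper; a hedged sketch (the helper name is ours, not from the source):

// Reset a directory to an empty state, as setupHadoopCluster does with rootDir.
static void resetDir(FileSystem fs, Path dir) throws IOException {
    fs.delete(dir, true); // returns false if dir was absent, which is fine here
    if (!fs.mkdirs(dir)) {
        throw new IOException("could not recreate " + dir);
    }
}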

From source file: com.inmobi.messaging.consumer.util.TestUtil.java

License: Apache License

public static Path moveCollectorFile(FileSystem fs, String streamName, String collectorName,
        ClusterUtil cluster, Path collectorDir, String collectorfileName, StreamType streamType)
        throws Exception {
    Path targetFile = getTargetPath(fs, streamName, collectorName, cluster, collectorfileName, streamType);
    Path srcPath = copyCollectorFile(targetFile, cluster, collectorDir, collectorfileName);
    fs.delete(srcPath, true);
    return targetFile;
}
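
moveCollectorFile implements a move as copy plus delete: copyCollectorFile writes the target, then the source is removed. When source and target live on the same FileSystem, FileSystem#rename can do this in one step; a hedged sketch with illustrative paths:

// Sketch, assuming src and dst sit on the same FileSystem instance.
Path src = new Path("/streams/collector/file-00000");
Path dst = new Path("/streams/final/file-00000");
if (!fs.rename(src, dst)) {
    throw new IOException("rename failed: " + src + " -> " + dst);
}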

From source file: com.inmobi.messaging.consumer.util.TestUtil.java

License: Apache License

private static ClusterUtil setupCluster(String className, String testStream, PartitionId pid, String hdfsUrl,
        String[] collectorFiles, String[] emptyFiles, Path[] databusFiles, int numFilesToMoveToStreamLocal,
        int numFilesToMoveToStreams, String testRootDir) throws Exception {
    Set<String> sourceNames = new HashSet<String>();
    sourceNames.add(testStream);
    Map<String, String> clusterConf = new HashMap<String, String>();
    clusterConf.put("hdfsurl", hdfsUrl);
    clusterConf.put("jturl", "local");
    clusterConf.put("name", pid.getCluster());
    clusterConf.put("jobqueue", "default");

    ClusterUtil cluster = new ClusterUtil(clusterConf, new Path(testRootDir, className).toString(),
            sourceNames);

    // setup stream and collector dirs
    FileSystem fs = FileSystem.get(cluster.getHadoopConf());
    Path collectorDir = getCollectorDir(cluster, testStream, pid.getCollector());
    fs.delete(collectorDir, true);
    fs.delete(new Path(cluster.getLocalFinalDestDirRoot()), true);
    fs.delete(new Path(cluster.getFinalDestDirRoot()), true);
    fs.mkdirs(collectorDir);

    setUpFiles(cluster, pid.getCollector(), collectorFiles, emptyFiles, databusFiles,
            numFilesToMoveToStreamLocal, numFilesToMoveToStreams);

    return cluster;
}
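
One caveat that applies to every snippet on this page: FileSystem#delete removes data immediately and bypasses the HDFS trash, which is exactly what test cleanup wants. For user-facing deletions that should honor trash, org.apache.hadoop.fs.Trash offers a trash-aware alternative; a hedged sketch reusing the fs, collectorDir, and conf from the snippet above:

import org.apache.hadoop.fs.Trash;

// Moves the path into the user's trash directory instead of deleting it;
// returns false if the trash is disabled.
boolean moved = Trash.moveToAppropriateTrash(fs, collectorDir, conf);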