Example usage for org.apache.hadoop.fs FileSystem mkdirs

List of usage examples for org.apache.hadoop.fs FileSystem mkdirs

Introduction

On this page you can find usage examples for org.apache.hadoop.fs.FileSystem#mkdirs.

Prototype

public boolean mkdirs(Path f) throws IOException 

Document

Call #mkdirs(Path, FsPermission) with default permission.
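
Before the excerpts from real projects below, here is a minimal, self-contained sketch of the call itself; the configuration and path are illustrative assumptions, not taken from any source on this page. Like mkdir -p, mkdirs creates any missing parent directories and returns true when the directory exists on return (including when it already existed).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class MkdirsExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        //hypothetical target directory, used only for illustration
        Path dir = new Path("/tmp/mkdirs-example");

        //creates the directory and any missing parents with default permission
        if (!fs.mkdirs(dir)) {
            throw new IOException("Cannot create directory " + dir);
        }

        //the two-argument overload applies an explicit permission instead:
        //fs.mkdirs(dir, new FsPermission((short) 0755));
    }
}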

Usage

From source file: com.asakusafw.runtime.directio.hadoop.HadoopDataSourceCore.java

License: Apache License

@Override
public void setupAttemptOutput(OutputAttemptContext context) throws IOException, InterruptedException {
    if (profile.isOutputStreaming() == false && isLocalAttemptOutput() == false) {
        LOG.warn(MessageFormat.format(
                "Streaming output is disabled but the local temporary directory ({1}) is not defined (id={0})",
                profile.getId(), HadoopDataSourceUtil.KEY_LOCAL_TEMPDIR));
    }
    if (isLocalAttemptOutput()) {
        FileSystem fs = profile.getLocalFileSystem();
        Path attempt = getLocalAttemptOutput(context);
        if (LOG.isDebugEnabled()) {
            LOG.debug(MessageFormat.format("Create local attempt area (id={0}, path={1})", //$NON-NLS-1$
                    profile.getId(), attempt));
        }
        fs.mkdirs(attempt);
    } else {
        FileSystem fs = profile.getFileSystem();
        Path attempt = getAttemptOutput(context);
        if (LOG.isDebugEnabled()) {
            LOG.debug(MessageFormat.format("Create attempt area (id={0}, path={1})", //$NON-NLS-1$
                    profile.getId(), attempt));
        }
        fs.mkdirs(attempt);
    }
}

From source file: com.asakusafw.runtime.directio.hadoop.HadoopDataSourceCore.java

License: Apache License

@Override
public void setupTransactionOutput(OutputTransactionContext context) throws IOException, InterruptedException {
    if (profile.isOutputStaging()) {
        FileSystem fs = profile.getFileSystem();
        Path staging = getStagingOutput(context);
        if (LOG.isDebugEnabled()) {
            LOG.debug(MessageFormat.format("Create staging area (id={0}, path={1})", //$NON-NLS-1$
                    profile.getId(), staging));
        }
        fs.mkdirs(staging);
    }
}

From source file: com.asakusafw.runtime.directio.hadoop.HadoopDataSourceUtil.java

License: Apache License

private static void move(Counter counter, FileSystem fromFs, Path from, FileSystem toFs, Path to,
        boolean fromLocal) throws IOException {
    if (counter == null) {
        throw new IllegalArgumentException("counter must not be null"); //$NON-NLS-1$
    }
    if (fromFs == null) {
        throw new IllegalArgumentException("fromFs must not be null"); //$NON-NLS-1$
    }
    if (from == null) {
        throw new IllegalArgumentException("from must not be null"); //$NON-NLS-1$
    }
    if (toFs == null) {
        throw new IllegalArgumentException("toFs must not be null"); //$NON-NLS-1$
    }
    if (to == null) {
        throw new IllegalArgumentException("to must not be null"); //$NON-NLS-1$
    }
    if (fromLocal && isLocalPath(from) == false) {
        throw new IllegalArgumentException("from must be on local file system"); //$NON-NLS-1$
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(MessageFormat.format("Start moving files (from={0}, to={1})", //$NON-NLS-1$
                from, to));
    }
    Path source = fromFs.makeQualified(from);
    Path target = toFs.makeQualified(to);
    List<Path> list = createFileListRelative(counter, fromFs, source);
    if (list.isEmpty()) {
        return;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(MessageFormat.format("Process moving files (from={0}, to={1}, count={2})", //$NON-NLS-1$
                from, to, list.size()));
    }
    Set<Path> directoryCreated = new HashSet<>();
    for (Path path : list) {
        Path sourceFile = new Path(source, path);
        Path targetFile = new Path(target, path);
        if (LOG.isTraceEnabled()) {
            FileStatus stat = fromFs.getFileStatus(sourceFile);
            LOG.trace(MessageFormat.format("Moving file (from={0}, to={1}, size={2})", //$NON-NLS-1$
                    sourceFile, targetFile, stat.getLen()));
        }
        try {
            FileStatus stat = toFs.getFileStatus(targetFile);
            if (LOG.isDebugEnabled()) {
                LOG.debug(MessageFormat.format("Deleting file: {0}", //$NON-NLS-1$
                        targetFile));
            }
            if (FileSystemCompatibility.isDirectory(stat)) {
                toFs.delete(targetFile, true);
            } else {
                toFs.delete(targetFile, false);
            }
        } catch (FileNotFoundException e) {
            Path targetParent = targetFile.getParent();
            if (directoryCreated.contains(targetParent) == false) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug(MessageFormat.format("Creating directory: {0}", //$NON-NLS-1$
                            targetParent));
                }
                toFs.mkdirs(targetParent);
                directoryCreated.add(targetParent);
            }
        }
        counter.add(1);
        if (fromLocal) {
            toFs.moveFromLocalFile(sourceFile, targetFile);
        } else {
            boolean succeed = toFs.rename(sourceFile, targetFile);
            if (succeed == false) {
                throw new IOException(
                        MessageFormat.format("Failed to move file (from={0}, to={1})", sourceFile, targetFile));
            }
        }
        counter.add(1);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(MessageFormat.format("Finish moving files (from={0}, to={1}, count={2})", //$NON-NLS-1$
                from, to, list.size()));
    }
}

From source file: com.asakusafw.testdriver.testing.dsl.SimpleBatchAction.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    MacSnappyJavaWorkaround.install();
    FileSystem fs = FileSystem.get(getConf());
    fs.mkdirs(new Path(SimpleExporter.DIRECTORY));
    Path inputDir = new Path(SimpleImporter.DIRECTORY);
    int index = 0;
    for (FileStatus input : fs.listStatus(inputDir)) {
        Path output = new Path(SimpleExporter.OUTPUT_PREFIX + index++);
        process(input.getPath(), output);
    }
    extra();
    return 0;
}

From source file: com.awcoleman.StandaloneJava.AvroCombinerByBlock.java

License: Apache License

public AvroCombinerByBlock(String inDirStr, String outDirStr, String handleExisting) throws IOException {

    //handle both an output directory and an output filename (ending with .avro)
    String outputFilename = DEFAULTOUTPUTFILENAME;
    if (outDirStr.endsWith(".avro")) {
        isOutputNameSpecifiedAndAFile = true;
        //String[] outputParts = outDirStr.split(":?\\\\");
        String[] outputParts = outDirStr.split("/");

        outputFilename = outputParts[outputParts.length - 1];

        //remove outputFilename from outDirStr to get new outDirStr which is just directory (and trailing /)
        outDirStr = outDirStr.replaceAll(Pattern.quote(outputFilename), "");
        outDirStr = outDirStr.substring(0, outDirStr.length() - (outDirStr.endsWith("/") ? 1 : 0));
    }

    //Get block size - not needed
    //long hdfsBlockSize = getBlockSize();
    //System.out.println("HDFS FS block size: "+hdfsBlockSize);

    //Get list of input files
    ArrayList<FileStatus> inputFileList = new ArrayList<FileStatus>();

    Configuration conf = new Configuration();
    conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));
    conf.set("dfs.replication", "1"); //see http://stackoverflow.com/questions/24548699/how-to-append-to-an-hdfs-file-on-an-extremely-small-cluster-3-nodes-or-less

    FileSystem hdfs = null;
    try {
        hdfs = FileSystem.get(conf);
    } catch (java.io.IOException ioe) {
        System.out.println("Error opening HDFS filesystem. Exiting. Error message: " + ioe.getMessage());
        System.exit(1);
    }
    if (hdfs.getStatus() == null) {
        System.out.println("Unable to contact HDFS filesystem. Exiting.");
        System.exit(1);
    }

    //Check if input and output dirs exist
    Path inDir = new Path(inDirStr);
    Path outDir = new Path(outDirStr);
    if (!(hdfs.exists(inDir) && hdfs.isDirectory(inDir))) {
        System.out.println("Input directory ( " + inDirStr + " ) not found or is not directory. Exiting.");
        System.exit(1);
    }

    if (!(hdfs.exists(outDir) && hdfs.isDirectory(outDir))) {
        if (hdfs.exists(outDir)) { //outDir exists and is a symlink or file, must die
            System.out.println("Requested output directory name ( " + outDirStr
                    + " ) exists but is not a directory. Exiting.");
            System.exit(1);
        } else {
            hdfs.mkdirs(outDir);
        }
    }

    RemoteIterator<LocatedFileStatus> fileStatusListIterator = hdfs.listFiles(inDir, true);
    while (fileStatusListIterator.hasNext()) {
        LocatedFileStatus fileStatus = fileStatusListIterator.next();

        if (fileStatus.isFile() && !fileStatus.getPath().getName().equals("_SUCCESS")) {
            inputFileList.add((FileStatus) fileStatus);
        }
    }

    if (inputFileList.size() <= 1 && !isOutputNameSpecifiedAndAFile) { //If an output file is specified assume we just want a rename.
        System.out.println("Only one or zero files found in input directory ( " + inDirStr + " ). Exiting.");
        System.exit(1);
    }

    //Get Schema and Compression Codec from seed file since we need it for the writer
    Path firstFile = inputFileList.get(0).getPath();
    FsInput fsin = new FsInput(firstFile, conf);
    DataFileReader<Object> dfrFirstFile = new DataFileReader<Object>(fsin, new GenericDatumReader<Object>());
    Schema fileSchema = dfrFirstFile.getSchema();
    String compCodecName = dfrFirstFile.getMetaString("avro.codec");
    //compCodecName should be null, deflate, snappy, or bzip2
    if (compCodecName == null) {
        compCodecName = "deflate"; //set to deflate even though original is no compression
    }
    dfrFirstFile.close();

    //Create Empty HDFS file in output dir
    String seedFileStr = outDirStr + "/" + outputFilename;
    Path seedFile = new Path(seedFileStr);
    FSDataOutputStream hdfsdos = null;
    try {
        hdfsdos = hdfs.create(seedFile, false);
    } catch (org.apache.hadoop.fs.FileAlreadyExistsException faee) {
        if (handleExisting.equals("overwrite")) {
            hdfs.delete(seedFile, false);
            hdfsdos = hdfs.create(seedFile, false);
        } else if (handleExisting.equals("append")) {
            hdfsdos = hdfs.append(seedFile);
        } else {
            System.out
                    .println("File " + seedFileStr + " exists and will not overwrite. handleExisting is set to "
                            + handleExisting + ". Exiting.");
            System.exit(1);
        }
    }
    if (hdfsdos == null) {
        System.out.println("Unable to create or write to output file ( " + seedFileStr
                + " ). handleExisting is set to " + handleExisting + ". Exiting.");
        System.exit(1);
    }

    //Append other files
    GenericDatumWriter<Object> gdw = new GenericDatumWriter<>(fileSchema);
    DataFileWriter<Object> dfwBase = new DataFileWriter<>(gdw);
    //Set compression to that found in the first file
    dfwBase.setCodec(CodecFactory.fromString(compCodecName));

    DataFileWriter<Object> dfw = dfwBase.create(fileSchema, hdfsdos);
    for (FileStatus thisFileStatus : inputFileList) {

        //_SUCCESS files are 0 bytes
        if (thisFileStatus.getLen() == 0) {
            continue;
        }

        FsInput fsin1 = new FsInput(thisFileStatus.getPath(), conf);
        DataFileReader<Object> dfr = new DataFileReader<>(fsin1, new GenericDatumReader<Object>());

        dfw.appendAllFrom(dfr, false);

        dfr.close();
    }

    dfw.close();
    dfwBase.close();

}

From source file: com.awcoleman.StandaloneJava.AvroCombinerByRecord.java

License: Apache License

public AvroCombinerByRecord(String inDirStr, String outDirStr) throws IOException {

    //Get list of input files
    ArrayList<FileStatus> inputFileList = new ArrayList<FileStatus>();

    Configuration conf = new Configuration();
    conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));

    FileSystem hdfs = FileSystem.get(conf);

    //Check if input and output dirs exist
    Path inDir = new Path(inDirStr);
    Path outDir = new Path(outDirStr);
    if (!(hdfs.exists(inDir) && hdfs.isDirectory(inDir))) {
        System.out.println("Input directory ( " + inDirStr + " ) not found or is not directory. Exiting.");
        System.exit(1);
    }

    if (!(hdfs.exists(outDir) && hdfs.isDirectory(outDir))) {
        if (hdfs.exists(outDir)) { //outDir exists and is a symlink or file, must die
            System.out.println("Requested output directory name ( " + outDirStr
                    + " ) exists but is not a directory. Exiting.");
            System.exit(1);
        } else {
            hdfs.mkdirs(outDir);
        }
    }

    RemoteIterator<LocatedFileStatus> fileStatusListIterator = hdfs.listFiles(inDir, true);
    while (fileStatusListIterator.hasNext()) {
        LocatedFileStatus fileStatus = fileStatusListIterator.next();

        if (fileStatus.isFile()) {
            inputFileList.add((FileStatus) fileStatus);
        }
    }

    if (inputFileList.size() <= 1) {
        System.out.println("Only one or zero files found in input directory ( " + inDirStr + " ). Exiting.");
        System.exit(1);
    }

    //Get Schema and Compression Codec from seed file since we need it for the writer
    Path firstFile = inputFileList.get(0).getPath();
    FsInput fsin = new FsInput(firstFile, conf);
    DataFileReader<Object> dfrFirstFile = new DataFileReader<Object>(fsin, new GenericDatumReader<Object>());
    Schema fileSchema = dfrFirstFile.getSchema();
    String compCodecName = dfrFirstFile.getMetaString("avro.codec");
    if (compCodecName == null) {
        compCodecName = "null"; //uncompressed input; "null" is Avro's no-compression codec name
    }
    dfrFirstFile.close();

    //Create Empty HDFS file in output dir
    Path seedFile = new Path(outDirStr + "/combinedByRecord.avro");
    FSDataOutputStream hdfsdos = hdfs.create(seedFile, false);

    //Append other files
    GenericDatumWriter<Object> gdw = new GenericDatumWriter<>(fileSchema);
    DataFileWriter<Object> dfwBase = new DataFileWriter<>(gdw);
    //Set compression to that found in the first file
    dfwBase.setCodec(CodecFactory.fromString(compCodecName));

    DataFileWriter<Object> dfw = dfwBase.create(fileSchema, hdfsdos);

    for (FileStatus thisFileStatus : inputFileList) {

        DataFileStream<Object> avroStream = null;
        FSDataInputStream inStream = hdfs.open(thisFileStatus.getPath());
        GenericDatumReader<Object> reader = new GenericDatumReader<Object>();
        avroStream = new DataFileStream<Object>(inStream, reader);

        long recordCounter = 0;
        while (avroStream.hasNext()) {
            dfw.append(avroStream.next());

            recordCounter++;
        }
        avroStream.close();
        inStream.close();

        System.out.println("Appended " + recordCounter + " records from " + thisFileStatus.getPath().getName()
                + " to " + seedFile.getName());
    }

    dfw.close();
    dfwBase.close();
}

From source file: com.benchmark.mapred.dancing.DistributedPentomino.java

License: Apache License

/**
 * Create the input file with all of the possible combinations of the
 * given depth.
 * @param fs the filesystem to write into
 * @param dir the directory to write the input file into
 * @param pent the puzzle 
 * @param depth the depth to explore when generating prefixes
 */
private static void createInputDirectory(FileSystem fs, Path dir, Pentomino pent, int depth)
        throws IOException {
    fs.mkdirs(dir);
    List<int[]> splits = pent.getSplits(depth);
    PrintStream file = new PrintStream(new BufferedOutputStream(fs.create(new Path(dir, "part1")), 64 * 1024));
    for (int[] prefix : splits) {
        for (int i = 0; i < prefix.length; ++i) {
            if (i != 0) {
                file.print(',');
            }
            file.print(prefix[i]);
        }
        file.print('\n');
    }
    file.close();
}

From source file: com.benchmark.mapred.PiEstimator.java

License: Apache License

/**
 * Run a map/reduce job for estimating Pi.
 *
 * @return the estimated value of Pi
 */
public static BigDecimal estimate(int numMaps, long numPoints, JobConf jobConf) throws IOException {
    //setup job conf
    jobConf.setJobName(PiEstimator.class.getSimpleName());

    jobConf.setInputFormat(SequenceFileInputFormat.class);

    jobConf.setOutputKeyClass(BooleanWritable.class);
    jobConf.setOutputValueClass(LongWritable.class);
    jobConf.setOutputFormat(SequenceFileOutputFormat.class);

    jobConf.setMapperClass(PiMapper.class);
    jobConf.setNumMapTasks(numMaps);

    jobConf.setReducerClass(PiReducer.class);
    jobConf.setNumReduceTasks(1);

    // turn off speculative execution, because DFS doesn't handle
    // multiple writers to the same file.
    jobConf.setSpeculativeExecution(false);

    //setup input/output directories
    //final Path inDir = new Path(TMP_DIR, "in");
    final Path inDir = new Path("/home/hadoop1/tmp_dir", "in");
    System.out.println("inDir =" + inDir.toString());
    //final Path outDir = new Path(TMP_DIR, "out");
    final Path outDir = new Path("/home/hadoop1/tmp_dir", "out");
    System.out.println("outDir =" + outDir.toString());
    FileInputFormat.setInputPaths(jobConf, inDir);
    FileOutputFormat.setOutputPath(jobConf, outDir);

    final FileSystem fs = FileSystem.get(jobConf);
    if (fs.exists(TMP_DIR)) {
        throw new IOException(
                "Tmp directory " + fs.makeQualified(TMP_DIR) + " already exists.  Please remove it first.");
    }
    if (!fs.mkdirs(inDir)) {
        throw new IOException("Cannot create input directory " + inDir);
    }

    try {
        //generate an input file for each map task
        for (int i = 0; i < numMaps; ++i) {
            final Path file = new Path(inDir, "part" + i);
            final LongWritable offset = new LongWritable(i * numPoints);
            final LongWritable size = new LongWritable(numPoints);
            final SequenceFile.Writer writer = SequenceFile.createWriter(fs, jobConf, file, LongWritable.class,
                    LongWritable.class, CompressionType.NONE);
            try {
                writer.append(offset, size);
            } finally {
                writer.close();
            }
            System.out.println("Wrote input for Map #" + i);
        }

        //start a map/reduce job
        System.out.println("Starting Job");
        final long startTime = System.currentTimeMillis();
        JobClient.runJob(jobConf);
        final double duration = (System.currentTimeMillis() - startTime) / 1000.0;
        System.out.println("Job Finished in " + duration + " seconds");

        //read outputs
        Path inFile = new Path(outDir, "reduce-out");
        LongWritable numInside = new LongWritable();
        LongWritable numOutside = new LongWritable();
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, inFile, jobConf);
        try {
            reader.next(numInside, numOutside);
        } finally {
            reader.close();
        }

        //compute estimated value
        return BigDecimal.valueOf(4).setScale(20).multiply(BigDecimal.valueOf(numInside.get()))
                .divide(BigDecimal.valueOf(numMaps)).divide(BigDecimal.valueOf(numPoints));
    } finally {
        fs.delete(TMP_DIR, true);
    }
}

From source file: com.bigdog.hadoop.hdfs.HDFS_Test.java

public void mkdir(String dir) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    fs.mkdirs(new Path(dir));

    fs.close();
}

From source file: com.blackberry.logdriver.LockedFs.java

License: Apache License

@SuppressWarnings("deprecation")
public void move(Configuration conf, String[] from, String to) throws IOException {
    FileSystem fs = FileSystem.get(conf);

    List<FileStatus> fromList = new ArrayList<FileStatus>();
    for (String s : from) {
        FileStatus[] statuses = fs.globStatus(new Path(s));
        if (statuses == null) {
            continue;
        }
        for (FileStatus status : statuses) {
            fromList.add(status);
        }
    }

    Path toPath = new Path(to);
    Boolean toExists = fs.exists(toPath);
    FileStatus toFileStatus = null;
    if (toExists) {
        toFileStatus = fs.getFileStatus(toPath);
    }

    // If there is no from, that's a problem.
    if (fromList.isEmpty()) {
        throw new IOException("No input files found");
    }

    // If the to exists, and is a file, that's a problem too.
    if (toExists && !toFileStatus.isDir()) {
        throw new IOException("Destination file exists:" + to);
    }

    // If the destination exists, and is a directory, then ensure that none of
    // the from list names will clash with existing contents of the directory.
    if (toExists && toFileStatus.isDir()) {
        for (FileStatus fromStatus : fromList) {
            String name = fromStatus.getPath().getName();
            if (fs.exists(new Path(toPath, name))) {
                throw new IOException("Destination file exists:" + to + "/" + name);
            }
        }
    }

    // If the destination doesn't exist, but it ends with a slash, then create
    // it as a directory.
    if (!toExists && to.endsWith("/")) {
        fs.mkdirs(toPath);
        toFileStatus = fs.getFileStatus(toPath);
        toExists = true;
    }

    // If the destination doesn't exist, and there is more than one 'from', then
    // create a directory.
    if (!toExists && fromList.size() > 1) {
        fs.mkdirs(toPath);
        toFileStatus = fs.getFileStatus(toPath);
    }

    // If there was only one from, then just rename it to to
    if (fromList.size() == 1) {
        fs.mkdirs(toPath.getParent());
        fs.rename(fromList.get(0).getPath(), toPath);
    }

    // If there was more than one from, then for each file in the from list,
    // move it to the to directory.
    if (fromList.size() > 1) {
        for (FileStatus fromStatus : fromList) {
            String name = fromStatus.getPath().getName();
            fs.rename(fromStatus.getPath(), new Path(toPath, name));
        }
    }
}