Example usage for org.apache.hadoop.fs FileSystem get

Introduction

This page collects usage examples for org.apache.hadoop.fs.FileSystem.get(URI, Configuration).

Prototype

public static FileSystem get(URI uri, Configuration conf) throws IOException 

Document

Get a FileSystem for this URI's scheme and authority.
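
Before the collected examples, a minimal sketch of the call itself. The URI scheme (plus authority, if present) selects the FileSystem implementation. "namenode:8020" below is a placeholder host, not something from this page, and the hdfs:// line needs a resolvable, reachable NameNode:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class GetFileSystemSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // file:/// has no authority: this returns the local file system.
        FileSystem local = FileSystem.get(URI.create("file:///"), conf);
        System.out.println(local.getUri());
        // hdfs://namenode:8020 selects DistributedFileSystem for that authority
        // (placeholder host: fails unless a NameNode is actually reachable there).
        FileSystem hdfs = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
        System.out.println(hdfs.getUri());
    }
}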

Usage

From source file: cmd.stats.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.printf("Usage: %s [generic options] <input> <output>\n", getClass().getName());
        ToolRunner.printGenericCommandUsage(System.err);
        return -1;
    }

    Configuration configuration = getConf();
    boolean overrideOutput = configuration.getBoolean(Constants.OPTION_OVERRIDE_OUTPUT,
            Constants.OPTION_OVERRIDE_OUTPUT_DEFAULT);

    FileSystem fs = FileSystem.get(new Path(args[1]).toUri(), configuration);
    if (overrideOutput) {
        fs.delete(new Path(args[1]), true);
    }

    Tool stats = new StatsDriver(configuration);
    stats.run(new String[] { args[0], args[1] });

    return 0;
}

From source file: cmd.tdbloader4.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.printf("Usage: %s [generic options] <input> <output>\n", getClass().getName());
        ToolRunner.printGenericCommandUsage(System.err);
        return -1;
    }

    Configuration configuration = getConf();
    configuration.set(Constants.RUN_ID, String.valueOf(System.currentTimeMillis()));
    boolean overrideOutput = configuration.getBoolean(Constants.OPTION_OVERRIDE_OUTPUT,
            Constants.OPTION_OVERRIDE_OUTPUT_DEFAULT);
    boolean copyToLocal = configuration.getBoolean(Constants.OPTION_COPY_TO_LOCAL,
            Constants.OPTION_COPY_TO_LOCAL_DEFAULT);
    boolean verify = configuration.getBoolean(Constants.OPTION_VERIFY, Constants.OPTION_VERIFY_DEFAULT);
    boolean runLocal = configuration.getBoolean(Constants.OPTION_RUN_LOCAL, Constants.OPTION_RUN_LOCAL_DEFAULT);

    FileSystem fs = FileSystem.get(new Path(args[1]).toUri(), configuration);
    if (overrideOutput) {
        fs.delete(new Path(args[1]), true);
        fs.delete(new Path(args[1] + OUTPUT_PATH_POSTFIX_1), true);
        fs.delete(new Path(args[1] + OUTPUT_PATH_POSTFIX_2), true);
        fs.delete(new Path(args[1] + OUTPUT_PATH_POSTFIX_3), true);
        fs.delete(new Path(args[1] + OUTPUT_PATH_POSTFIX_4), true);
    }

    if ((copyToLocal) || (runLocal)) {
        File path = new File(args[1]);
        path.mkdirs();
    }

    Tool first = new FirstDriver(configuration);
    int status = first.run(new String[] { args[0], args[1] + OUTPUT_PATH_POSTFIX_1 });
    if (status != 0) {
        return status;
    }

    createOffsetsFile(fs, args[1] + OUTPUT_PATH_POSTFIX_1, args[1] + OUTPUT_PATH_POSTFIX_1);
    Path offsets = new Path(args[1] + OUTPUT_PATH_POSTFIX_1, Constants.OFFSETS_FILENAME);
    DistributedCache.addCacheFile(offsets.toUri(), configuration);

    Tool second = new SecondDriver(configuration);
    status = second.run(new String[] { args[0], args[1] + OUTPUT_PATH_POSTFIX_2 });
    if (status != 0) {
        return status;
    }

    Tool third = new ThirdDriver(configuration);
    status = third.run(new String[] { args[1] + OUTPUT_PATH_POSTFIX_2, args[1] + OUTPUT_PATH_POSTFIX_3 });
    if (status != 0) {
        return status;
    }

    Tool fourth = new FourthDriver(configuration);
    status = fourth.run(new String[] { args[1] + OUTPUT_PATH_POSTFIX_3, args[1] + OUTPUT_PATH_POSTFIX_4 });
    if (status != 0) {
        return status;
    }

    if (copyToLocal) {
        Tool download = new download(configuration);
        download.run(
                new String[] { args[1] + OUTPUT_PATH_POSTFIX_2, args[1] + OUTPUT_PATH_POSTFIX_4, args[1] });
    }

    if (verify) {
        DatasetGraphTDB dsgMem = load(args[0]);
        Location location = new Location(args[1]);

        if (!copyToLocal) {
            // TODO: this is a sort of a cheat and it could go away (if it turns out to be too slow)!
            download.fixNodeTable2(location);
        }

        DatasetGraphTDB dsgDisk = SetupTDB.buildDataset(location);
        boolean isomorphic = isomorphic(dsgMem, dsgDisk);
        System.out.println("> " + isomorphic);
    }

    return status;
}

From source file: cn.lhfei.hadoop.ch03.FileCopyWithProgress.java

License: Apache License

public static void main(String[] args) {

    String localSrc = args[0];
    String dst = args[1];
    FileSystem fs = null;
    InputStream in = null;
    OutputStream out = null;

    try {
        Configuration conf = new Configuration();
        fs = FileSystem.get(URI.create(dst), conf); // the FileSystem must match the destination URI, not the local source path
        in = new BufferedInputStream(new FileInputStream(localSrc));
        out = fs.create(new Path(dst), new Progressable() {
            @Override
            public void progress() {
                log.info("... ...");
            }
        });

        IOUtils.copyBytes(in, out, 4096, true);

    } catch (FileNotFoundException e) {
        // e.printStackTrace();
        log.error(e.getMessage(), e);
    } catch (IOException e) {
        // e.printStackTrace();
        log.error(e.getMessage(), e);
    } finally {
        IOUtils.closeStream(in);
        IOUtils.closeStream(out);
    }
}
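
The Progressable callback gives the client a hook for progress reporting: when writing to HDFS, progress() is invoked each time a packet (roughly 64 KB) is flushed to the datanode pipeline.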

From source file: cn.lhfei.hadoop.ch03.FileSystemCat.java

License: Apache License

public static void main(String[] args) {

    Logger log = LoggerFactory.getLogger(FileSystemCat.class);

    String uri = args[0];
    Configuration conf = new Configuration();

    FileSystem fs = null;
    InputStream in = null;

    try {
        fs = FileSystem.get(URI.create(uri), conf);
        in = fs.open(new Path(uri));
        IOUtils.copyBytes(in, System.out, 4096, false);

    } catch (IOException e) {
        log.error(e.getMessage(), e);
    } finally {
        IOUtils.closeStream(in);
    }
}
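
The examples on this page release streams in a finally block via IOUtils.closeStream. Since fs.open returns an FSDataInputStream, which implements java.io.Closeable, the same cat can also be written with try-with-resources; a sketch, not taken from the original source:

import java.io.InputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class FileSystemCatTryWithResources {
    public static void main(String[] args) throws Exception {
        String uri = args[0];
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(uri), conf);
        // The stream is closed automatically, even if copyBytes throws;
        // fs is a cached, shared instance and is deliberately left open.
        try (InputStream in = fs.open(new Path(uri))) {
            IOUtils.copyBytes(in, System.out, 4096, false);
        }
    }
}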

From source file: cn.lhfei.hadoop.ch03.FileSystemDoubleCat.java

License: Apache License

public static void main(String[] args) {

    String uri = args[0];
    FSDataInputStream in = null;
    FileSystem fs = null;
    Configuration conf = new Configuration();

    try {
        fs = FileSystem.get(URI.create(uri), conf);
        in = fs.open(new Path(uri));

        IOUtils.copyBytes(in, System.out, 4096, false);
        in.seek(0L); // go back to the start of the file

        IOUtils.copyBytes(in, System.out, 4096, false);

    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        IOUtils.closeStream(in); // release the stream even if a copy fails
    }
}
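
The second copy works because fs.open returns an FSDataInputStream, which implements Seekable: unlike java.io streams, it can move backwards to an absolute offset, although a seek is a relatively expensive operation.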

From source file: cn.lhfei.hadoop.ch03.ListStatus.java

License: Apache License

public static void main(String[] args) {

    String uri = args[0];
    Configuration conf = new Configuration();
    FileSystem fs = null;

    try {
        fs = FileSystem.get(URI.create(uri), conf);

        Path[] paths = new Path[args.length];
        for (int i = 0; i < paths.length; i++) {
            paths[i] = new Path(args[i]);
        }

        FileStatus[] status = fs.listStatus(paths);
        Path[] listPath = FileUtil.stat2Paths(status);

        for (Path p : listPath) {
            log.info(p.toString());
        }

    } catch (IOException e) {
        e.printStackTrace();
    }
}

From source file: cn.lhfei.hadoop.ch04.FileDecompressor.java

License: Apache License

/**
 * use case: % hadoop FileDecompressor file.gz
 * @param args
 */
public static void main(String[] args) {
    FileSystem fs = null;
    String uri = args[0];
    Path inputPath = null;
    Configuration conf = new Configuration();
    CompressionCodecFactory factory = null;

    InputStream in = null;
    OutputStream out = null;

    try {
        fs = FileSystem.get(URI.create(uri), conf);
        inputPath = new Path(uri);
        factory = new CompressionCodecFactory(conf);
        CompressionCodec codec = factory.getCodec(inputPath);
        if (codec == null) {
            System.err.println("No codec found for " + uri);
            System.exit(1);
        }

        String outputUri = CompressionCodecFactory.removeSuffix(uri, codec.getDefaultExtension());

        in = codec.createInputStream(fs.open(inputPath));
        out = fs.create(new Path(outputUri));

        IOUtils.copyBytes(in, out, conf);

    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        IOUtils.closeStream(in);
        IOUtils.closeStream(out);
    }
}
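
For an input such as file.gz, CompressionCodecFactory.getCodec infers GzipCodec from the .gz extension, and removeSuffix strips the codec's default extension, so the decompressed output is written to file.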

From source file: cn.lhfei.hadoop.ch04.MapFileFixer.java

License: Apache License

public static void main(String[] args) throws Exception {
    String mapUri = args[0];

    Configuration conf = new Configuration();

    FileSystem fs = FileSystem.get(URI.create(mapUri), conf);
    Path map = new Path(mapUri);
    Path mapData = new Path(map, MapFile.DATA_FILE_NAME);

    // Get key and value types from data sequence file
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, mapData, conf);

    Class keyClass = reader.getKeyClass();
    Class valueClass = reader.getValueClass();
    reader.close();

    // Create the map file index file
    long entries = MapFile.fix(fs, map, keyClass, valueClass, false, conf);
    System.out.printf("Created MapFile %s with %d entries\n", map, entries);
}
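
MapFile.fix rebuilds the MapFile's index from the keys in its data file; the fifth argument (false) turns dry-run mode off, so the index is actually written rather than just checked.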

From source file: cn.lhfei.hadoop.ch04.MapFileWriteDemo.java

License: Apache License

public static void main(String[] args) {
    String uri = args[0];
    Configuration conf = new Configuration();
    FileSystem fs = null;

    IntWritable key = new IntWritable();
    Text value = new Text();
    MapFile.Writer writer = null;
    try {
        fs = FileSystem.get(URI.create(uri), conf);
        /* Older, deprecated constructor kept for reference:
        writer = new MapFile.Writer(conf, fs, uri, key.getClass(),
              value.getClass()); */

        writer = new MapFile.Writer(conf, new Path(uri), Writer.keyClass(key.getClass()),
                Writer.valueClass(value.getClass()));

        for (int i = 0; i < 1024; i++) {
            key.set(i + 1);
            value.set(DATA[i % DATA.length]);
            writer.append(key, value);
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        IOUtils.closeStream(writer);
    }
}
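
MapFile.Writer.append requires keys in sorted order (an out-of-order key raises an IOException); the monotonically increasing IntWritable keys in the loop satisfy that.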

From source file: cn.lhfei.hadoop.ch04.SequenceFileReadDemo.java

License: Apache License

public static void main(String[] args) {
    String uri = args[0];
    Configuration conf = new Configuration();
    FileSystem fs = null;
    Path path = new Path(uri);

    SequenceFile.Reader reader = null;

    try {
        fs = FileSystem.get(URI.create(uri), conf);

        reader = new SequenceFile.Reader(fs, path, conf);
        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
        Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
        long position = reader.getPosition();
        while (reader.next(key, value)) {
            String syncSeen = reader.syncSeen() ? "*" : "";
            System.out.printf("[%s%s]\t%s\t%s\n", position, syncSeen, key, value);
            position = reader.getPosition(); // beginning of next record
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        IOUtils.closeStream(reader);
    }

}
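
The asterisk printed when syncSeen() returns true marks records that immediately follow a sync point: markers a SequenceFile embeds so that readers can resynchronize on a record boundary from an arbitrary position.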