Example usage for org.apache.hadoop.fs FileSystem get

List of usage examples for org.apache.hadoop.fs FileSystem get

Introduction

On this page you can find an example usage of org.apache.hadoop.fs.FileSystem.get.

Prototype

public static FileSystem get(URI uri, Configuration conf) throws IOException 

Source Link

Document

Get a FileSystem for this URI's scheme and authority.

Usage

From source file:CloseHangTest.java

License:Apache License

public static void main(String[] args) throws Exception {
    // The target HDFS URI is required as the sole command-line argument.
    if (args.length < 1) {
        System.out.println("CloseHangTest: must supply the HDFS uri.");
        System.exit(1);
    }
    final String uri = args[0];
    final Configuration configuration = new Configuration();
    final FileSystem fileSystem = FileSystem.get(new URI(uri), configuration);

    // Hand the filesystem to the upload manager and watch its workers.
    final UploadManager uploadManager = new UploadManager(fileSystem);
    uploadManager.startWorkers();
    uploadManager.monitor();
}

From source file:DisplayClustering.java

License:Apache License

/**
 * Writes every vector in SAMPLE_DATA to a sequence file at {@code output},
 * keyed "sample_0", "sample_1", ... in iteration order.
 */
protected static void writeSampleData(Path output) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fileSystem = FileSystem.get(output.toUri(), conf);
    SequenceFile.Writer writer = new SequenceFile.Writer(fileSystem, conf, output, Text.class,
            VectorWritable.class);
    try {
        int index = 0;
        for (VectorWritable vector : SAMPLE_DATA) {
            writer.append(new Text("sample_" + index), vector);
            index++;
        }
    } finally {
        // Guava helper: close the writer, rethrowing any IOException.
        Closeables.close(writer, false);
    }
}

From source file:DisplayClustering.java

License:Apache License

/**
 * Loads every clusters file found under {@code output} (as selected by
 * ClustersFilter) and appends each file's cluster list to CLUSTERS.
 */
protected static void loadClustersWritable(Path output) throws IOException {
    Configuration configuration = new Configuration();
    FileSystem fileSystem = FileSystem.get(output.toUri(), configuration);
    FileStatus[] parts = fileSystem.listStatus(output, new ClustersFilter());
    for (FileStatus part : parts) {
        CLUSTERS.add(readClustersWritable(part.getPath()));
    }
}

From source file:alluxio.checker.MapReduceIntegrationChecker.java

License:Apache License

/**
 * Creates the HDFS filesystem to store output files and removes any
 * output left over from a previous run.
 *
 * @param conf Hadoop configuration
 * @throws Exception if the filesystem cannot be created or the stale
 *         output cannot be deleted
 */
private void createHdfsFilesystem(Configuration conf) throws Exception {
    // FileSystem.getDefaultUri falls back to the local filesystem when
    // fs.defaultFS is unset; URI.create(conf.get("fs.defaultFS")) would
    // throw a NullPointerException in that case.
    mFileSystem = FileSystem.get(FileSystem.getDefaultUri(conf), conf);
    mOutputFilePath = new Path("./MapReduceOutputFile");
    // Delete stale output recursively so the new job can write afresh.
    if (mFileSystem.exists(mOutputFilePath)) {
        mFileSystem.delete(mOutputFilePath, true);
    }
}

From source file:alluxio.yarn.YarnUtils.java

License:Apache License

/**
 * Creates a local resource for a file on HDFS.
 *
 * @param yarnConf YARN configuration
 * @param resource the path to a resource file on HDFS
 * @throws IOException if the file can not be found on HDFS
 * @return the created local resource
 */
public static LocalResource createLocalResourceOfFile(YarnConfiguration yarnConf, String resource)
        throws IOException {
    Path resourcePath = new Path(resource);

    // Look up the file's metadata on whatever filesystem the path names.
    FileSystem fileSystem = FileSystem.get(resourcePath.toUri(), yarnConf);
    FileStatus status = fileSystem.getFileStatus(resourcePath);

    // Describe the file as a public, plain-file local resource for YARN.
    LocalResource localResource = Records.newRecord(LocalResource.class);
    localResource.setResource(ConverterUtils.getYarnUrlFromPath(resourcePath));
    localResource.setSize(status.getLen());
    localResource.setTimestamp(status.getModificationTime());
    localResource.setType(LocalResourceType.FILE);
    localResource.setVisibility(LocalResourceVisibility.PUBLIC);
    return localResource;
}

From source file:at.illecker.hadoop.rootbeer.examples.matrixmultiplication.DistributedRowMatrix.java

License:Apache License

@Override
public void setConf(Configuration conf) {
    this.conf = conf;
    try {
        // Qualify both paths against the filesystem that owns inputPath.
        FileSystem fileSystem = FileSystem.get(inputPath.toUri(), conf);
        rowPath = fileSystem.makeQualified(inputPath);
        outputTmpBasePath = fileSystem.makeQualified(outputTmpPath);
        keepTempFiles = conf.getBoolean(KEEP_TEMP_FILES, false);
    } catch (IOException e) {
        // setConf cannot declare a checked exception, so surface filesystem
        // failures as an unchecked state error.
        throw new IllegalStateException(e);
    }
}

From source file:at.illecker.hama.hybrid.examples.matrixmultiplication.util.DistributedRowMatrix.java

License:Apache License

@Override
public void setConf(Configuration conf) {
    this.conf = conf;
    try {
        // Qualify both paths against the filesystem that owns inputPath.
        FileSystem fileSystem = FileSystem.get(inputPath.toUri(), conf);
        rowPath = fileSystem.makeQualified(inputPath);
        outputTmpBasePath = fileSystem.makeQualified(outputTmpPath);
    } catch (IOException e) {
        // setConf cannot declare a checked exception, so surface filesystem
        // failures as an unchecked state error.
        throw new IllegalStateException(e);
    }
}

From source file:azkaban.jobtype.ReportalPrestoRunner.java

License:Apache License

/** Decrypts {@code encrypted} using the key file at {@code keyPath} on the local filesystem. */
private String decrypt(final String encrypted, final String keyPath) throws IOException {
    final Configuration conf = new Configuration();
    final FileSystem localFs = FileSystem.get(URI.create("file:///"), conf);
    return new Decryptions().decrypt(encrypted, keyPath, localFs);
}

From source file:azkaban.jobtype.TestWhitelist.java

License:Open Source License

@Before
public void setup() throws IOException, URISyntaxException {
    // Write the whitelist entries, one per line, into a throwaway temp file.
    temp = File.createTempFile(TestWhitelist.class.getSimpleName(), null);
    temp.deleteOnExit();

    // Write UTF-8 explicitly; the previous FileWriter relied on the platform
    // default charset (the reason for the old @SuppressWarnings annotation).
    try (BufferedWriter bw = new BufferedWriter(new java.io.OutputStreamWriter(
            new java.io.FileOutputStream(temp), java.nio.charset.StandardCharsets.UTF_8))) {
        for (String s : whitelisted) {
            bw.write(s);
            bw.newLine();
        }
    }

    // Build the whitelist from the temp file via the local filesystem.
    FileSystem fs = FileSystem.get(new URI("file:///"), new Configuration());
    whitelist = new Whitelist(temp.getAbsolutePath(), fs);
}

From source file:batch.BatchScan2Html.java

License:Apache License

/**
 * Scans the Accumulo table and writes its contents to HDFS as an HTML table,
 * reporting crude throughput as bytes are written.
 *
 * @throws IOException if HDFS access or writing fails
 * @throws URISyntaxException if the hard-coded HDFS URI is malformed
 */
public static void writeAccumuloTableToHdfsAsHtml() throws IOException, URISyntaxException {
    Configuration configuration = new Configuration();
    //TODO add options for URI and output Path
    FileSystem hdfs = FileSystem.get(new URI("hdfs://n001:54310"), configuration);
    try {
        Path file = new Path("hdfs://n001:54310/s2013/batch/table.html");
        //TODO add option to override file default: true
        if (hdfs.exists(file)) {
            hdfs.delete(file, true);
        }
        startTime = System.currentTimeMillis();
        OutputStream os = hdfs.create(file, new Progressable() {
            public void progress() {
                // TODO add a better progress descriptor
                crudeRunTime = System.currentTimeMillis() - startTime;
                out.println("...bytes written: [ " + bytesWritten + " ]");
                // Guard against division by zero on a sub-millisecond first
                // callback; multiply before dividing to keep precision.
                if (crudeRunTime > 0) {
                    out.println("...bytes / second: [ " + (bytesWritten * 1000 / crudeRunTime) + " ]");
                }
            }
        });
        // try-with-resources: the writer (and the underlying stream) is
        // closed even when writing throws; the old code leaked both on error.
        try (BufferedWriter br = new BufferedWriter(new OutputStreamWriter(os, "UTF-8"))) {
            //  TODO add option for table id { example }
            writeHtmlTableHeader(br, "example", new ArrayList<String>(Arrays.asList("Row ID", "Column Family",
                    "Column Qualifier", "Column Visibility", "Timestamp", "Value")));
            writeHtmlTableBody(br);
        }
        // Print totals after the writer is closed so buffered bytes are
        // flushed and counted.
        out.println("Total bytes written: " + bytesWritten);
        out.println("Total crude time: " + crudeRunTime / 1000);
    } finally {
        // Always release the filesystem handle, even if writing failed.
        hdfs.close();
    }
}