Example usage for org.apache.hadoop.fs FileSystem FS_DEFAULT_NAME_KEY

Introduction

This page collects example usages of the org.apache.hadoop.fs.FileSystem constant FS_DEFAULT_NAME_KEY, drawn from open-source projects.

Prototype

public static final String FS_DEFAULT_NAME_KEY

FS_DEFAULT_NAME_KEY holds the name of the configuration property that selects the default file system. In current Hadoop releases its value is "fs.defaultFS" (the deprecated legacy name is "fs.default.name"), and the default setting is FileSystem.DEFAULT_FS, i.e. "file:///".

Usage
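
Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: set FS_DEFAULT_NAME_KEY on a Configuration and obtain the matching FileSystem. The class name DefaultFsExample is illustrative only, and the sketch deliberately uses the local file:/// default so it runs without a cluster; a real deployment would set an hdfs:// URI instead.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class DefaultFsExample {
    public static void main(String[] args) throws IOException {
        // FS_DEFAULT_NAME_KEY names the default-file-system property
        // ("fs.defaultFS"). FileSystem.DEFAULT_FS ("file:///") keeps this
        // sketch runnable without a live HDFS cluster.
        Configuration conf = new Configuration(true);
        conf.set(FileSystem.FS_DEFAULT_NAME_KEY, FileSystem.DEFAULT_FS);
        FileSystem fs = FileSystem.get(conf);
        System.out.println("Default file system: " + fs.getUri());
    }
}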

From source file:com.ambimmort.webos.plugins.vfs4hdfs.HdfsFileSystem.java

License:Apache License
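
This Apache Commons VFS provider opens its HDFS connection lazily: the first call to resolveFile() sets FS_DEFAULT_NAME_KEY to the root URI of the requested file name, then obtains the file system via FileSystem.get(conf).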

/**
 * @see org.apache.commons.vfs2.provider.AbstractFileSystem#resolveFile(org.apache.commons.vfs2.FileName)
 */
@Override
public FileObject resolveFile(final FileName name) throws FileSystemException {

    synchronized (this) {
        if (null == this.fs) {
            final String hdfsUri = name.getRootURI();
            final Configuration conf = new Configuration(true);
            conf.set(FileSystem.FS_DEFAULT_NAME_KEY, hdfsUri);
            try {
                fs = FileSystem.get(conf);
            } catch (final IOException e) {
                log.error("Error connecting to filesystem " + hdfsUri, e);
                throw new FileSystemException("Error connecting to filesystem " + hdfsUri, e);
            }
        }
    }

    final boolean useCache = null != getContext().getFileSystemManager().getFilesCache();
    FileObject file;
    if (useCache) {
        file = this.getFileFromCache(name);
    } else {
        file = null;
    }
    if (null == file) {
        String path = null;
        try {
            path = URLDecoder.decode(name.getPath(), "UTF-8");
        } catch (final UnsupportedEncodingException e) {
            path = name.getPath();
        }
        final Path filePath = new Path(path);
        file = new HdfsFileObject((AbstractFileName) name, this, fs, filePath);
        if (useCache) {
            this.putFileToCache(file);
        }
    }
    // Resync the file information if requested.
    if (getFileSystemManager().getCacheStrategy().equals(CacheStrategy.ON_RESOLVE)) {
        file.refresh();
    }
    return file;
}

From source file:com.asakusafw.operation.tools.directio.conf.ConfigurationListCommandTest.java

License:Apache License
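
This test points FS_DEFAULT_NAME_KEY at an unresolvable scheme and checks that the Direct I/O "configuration list -v" command still completes without an error.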

/**
 * w/ invalid file system.
 */
@Test
public void invalid_filesystem() {
    getConf().set(FileSystem.FS_DEFAULT_NAME_KEY, "invalid:///");
    addEntry("root", "/", "here");
    invoke("configuration", "list", "-v");
    // w/o error
}

From source file:com.asakusafw.operation.tools.directio.conf.ConfigurationSystemCommandTest.java

License:Apache License
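
Same setup as above, but exercising the "configuration system" command: an invalid default file system must not make the command fail.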

/**
 * w/ invalid file system.
 */
@Test
public void invalid_filesystem() {
    getConf().set(FileSystem.FS_DEFAULT_NAME_KEY, "invalid:///");
    addEntry("root", "/", "here");
    invoke("configuration", "system");
    // w/o error
}

From source file:com.asakusafw.operation.tools.directio.conf.ConfigurationSystemCommandTest.java

License:Apache License
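
Here the default file system is invalid but the Direct I/O system directory (HadoopDataSourceUtil.KEY_SYSTEM_DIR) points at a valid file: location; the "configuration system" command should still succeed.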

/**
 * w/ invalid source conf but system dir is valid.
 */
@Test
public void invalid_pair() {
    getConf().set(FileSystem.FS_DEFAULT_NAME_KEY, "invalid:///");
    getConf().set(HadoopDataSourceUtil.KEY_SYSTEM_DIR, "file:///testing");
    addEntry("invalid", "/", "testing");
    invoke("configuration", "system");
    // w/o error
}

From source file:com.datascience.hadoop.CsvHelper.java

License:Apache License
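
This helper builds a Configuration for reading CSV files from the local file system. Note that it sets both the deprecated "fs.default.name" key and FS_DEFAULT_NAME_KEY to "file:///" (FileSystem.DEFAULT_FS), covering the old and new property names.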

public Configuration buildConfiguration(String delimiter, String skipHeader, String recordSeparator,
        String[] columns) {
    Configuration conf = new Configuration();
    conf.set("fs.default.name", "file:///");
    conf.set(CsvInputFormat.CSV_READER_DELIMITER, delimiter);
    conf.set(CsvInputFormat.CSV_READER_SKIP_HEADER, skipHeader);
    conf.set(CsvInputFormat.CSV_READER_RECORD_SEPARATOR, recordSeparator);
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, FileSystem.DEFAULT_FS);
    conf.setStrings(CsvInputFormat.CSV_READER_COLUMNS, columns);
    conf.set("io.compression.codecs",
            "org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.DeflateCodec,org.apache.hadoop.io.compress.SnappyCodec,org.apache.hadoop.io.compress.Lz4Codec");
    return conf;
}

From source file:com.datos.vfs.provider.hdfs.HdfsFileSystem.java

License:Apache License
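
This variant of the VFS HDFS provider performs the same lazy initialization as the first example, but it also merges any additional configuration resources (names, paths, URLs, an input stream, or a whole Configuration) supplied through HdfsFileSystemConfigBuilder before opening the file system.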

/**
 * Resolve FileName into FileObject.
 * @param name The name of a file on the HdfsFileSystem.
 * @return resolved FileObject.
 * @throws FileSystemException if an error occurred.
 */
@Override
public FileObject resolveFile(final FileName name) throws FileSystemException {
    synchronized (this) {
        if (this.fs == null) {
            final String hdfsUri = name.getRootURI();
            final HdfsFileSystemConfigBuilder builder = HdfsFileSystemConfigBuilder.getInstance();
            final FileSystemOptions options = getFileSystemOptions();
            final String[] configNames = builder.getConfigNames(options);
            final Path[] configPaths = builder.getConfigPaths(options);
            final URL[] configURLs = builder.getConfigURLs(options);
            final InputStream configStream = builder.getConfigInputStream(options);
            final Configuration configConfiguration = builder.getConfigConfiguration(options);

            final Configuration conf = new Configuration(true);
            conf.set(FileSystem.FS_DEFAULT_NAME_KEY, hdfsUri);

            // Load any alternate configuration parameters that may have been specified
            // no matter where they might come from
            if (configNames != null) {
                for (String configName : configNames) {
                    log.debug("Adding HDFS configuration resource: " + configName);
                    conf.addResource(configName);
                }
            }
            if (configPaths != null) {
                for (Path path : configPaths) {
                    log.debug("Adding HDFS configuration path: " + path);
                    conf.addResource(path);
                }
            }
            if (configURLs != null) {
                for (URL url : configURLs) {
                    log.debug("Adding HDFS configuration URL: " + url);
                    conf.addResource(url);
                }
            }
            if (configStream != null) {
                log.debug("Adding HDFS configuration stream");
                conf.addResource(configStream);
            }
            if (configConfiguration != null) {
                log.debug("Adding HDFS configuration object");
                conf.addResource(configConfiguration);
            }

            try {
                fs = FileSystem.get(conf);
            } catch (final IOException e) {
                log.error("Error connecting to filesystem " + hdfsUri, e);
                throw new FileSystemException("Error connecting to filesystem " + hdfsUri, e);
            }
        }
    }

    final boolean useCache = null != getContext().getFileSystemManager().getFilesCache();
    FileObject file;
    if (useCache) {
        file = this.getFileFromCache(name);
    } else {
        file = null;
    }
    if (null == file) {
        String path = null;
        try {
            path = URLDecoder.decode(name.getPath(), "UTF-8");
        } catch (final UnsupportedEncodingException e) {
            path = name.getPath();
        }
        final Path filePath = new Path(path);
        file = new HdfsFileObject((AbstractFileName) name, this, fs, filePath);
        if (useCache) {
            this.putFileToCache(file);
        }
    }
    // Resync the file information if requested.
    if (getFileSystemManager().getCacheStrategy().equals(CacheStrategy.ON_RESOLVE)) {
        file.refresh();
    }
    return file;
}

From source file:com.mycompany.app.TestStagingDirectoryPermissions.java

License:Apache License
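
This test starts a MiniDFSCluster with a restrictive umask (0077), sets FS_DEFAULT_NAME_KEY to the mini cluster's URI, submits a MapReduce job through a MiniMRClientCluster, and asserts that the staged job.xml nevertheless remains group- and world-readable.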

@Test
public void perms() throws IOException, InterruptedException {
    MiniDFSCluster minidfs = null;
    FileSystem fs = null;
    MiniMRClientCluster minimr = null;
    try {
        Configuration conf = new Configuration(true);
        conf.set("fs.permission.umask-mode", "0077");
        minidfs = new MiniDFSCluster.Builder(conf).build();
        minidfs.waitActive();

        fs = minidfs.getFileSystem();
        conf.set(FileSystem.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
        Path p = path("/in");
        fs.mkdirs(p);

        FSDataOutputStream os = fs.create(new Path(p, "input.txt"));
        os.write("hello!".getBytes("UTF-8"));
        os.close();

        String user = UserGroupInformation.getCurrentUser().getUserName();
        Path home = new Path("/User/" + user);
        fs.mkdirs(home);
        minimr = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);
        JobConf job = new JobConf(minimr.getConfig());

        job.setJobName("PermsTest");
        JobClient client = new JobClient(job);
        FileInputFormat.addInputPath(job, p);
        FileOutputFormat.setOutputPath(job, path("/out"));
        job.setInputFormat(TextInputFormat.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        job.setMapperClass(MySleepMapper.class);

        job.setNumReduceTasks(1);
        RunningJob submittedJob = client.submitJob(job);

        // Sleep for a bit to let localization finish
        System.out.println("Sleeping...");
        Thread.sleep(3 * 1000L);
        System.out.println("Done sleeping...");
        assertFalse(UserGroupInformation.isSecurityEnabled());

        Path stagingRoot = path("/tmp/hadoop-yarn/staging/" + user + "/.staging/");
        assertTrue(fs.exists(stagingRoot));
        assertEquals(1, fs.listStatus(stagingRoot).length);
        Path staging = fs.listStatus(stagingRoot)[0].getPath();
        Path jobXml = path(staging + "/job.xml");

        assertTrue(fs.exists(jobXml));

        FileStatus fileStatus = fs.getFileStatus(jobXml);
        System.out.println("job.xml permission = " + fileStatus.getPermission());
        assertTrue(fileStatus.getPermission().getOtherAction().implies(FsAction.READ));
        assertTrue(fileStatus.getPermission().getGroupAction().implies(FsAction.READ));

        submittedJob.waitForCompletion();
    } finally {
        if (minimr != null) {
            minimr.stop();
        }
        if (fs != null) {
            fs.close();
        }
        if (minidfs != null) {
            minidfs.shutdown(true);
        }
    }
}

From source file:com.splicemachine.hbase.MockSnapshot.java

License:Apache License
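
A test helper that sets FS_DEFAULT_NAME_KEY to a local file: URI, obtains the local file system, and writes a single byte to the given path.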

public static void createFile(Path p) throws IOException {
    Configuration conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///tmp");
    FileSystem fs = FileSystem.getLocal(conf);

    FSDataOutputStream dos = fs.create(p, true);
    dos.write(0);
    dos.flush();
    dos.close();
}

From source file:com.splicemachine.hbase.MockSnapshot.java

License:Apache License
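
The matching cleanup helper: with the same local default file system, it resolves the file system from the Path itself and deletes the path recursively.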

public static void deleteFile(Path p) throws IOException {
    Configuration conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///tmp");
    FileSystem fs = p.getFileSystem(conf);
    fs.delete(p, true);
}

From source file:com.splicemachine.hbase.SnapshotUtilsTest.java

License:Apache License
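
An HBase snapshot test that sets FS_DEFAULT_NAME_KEY to the HBase root directory URI before building an HFileLink path. The body is an unfinished placeholder: it fails with Assert.fail("IMPLEMENT"), and the intended column-family assertions are left commented out.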

@Test
public void getColumnFamilyTest() throws IOException {

    Configuration conf = new Configuration();
    Path rootDir = new Path("hdfs://localhost/hbase");
    FSUtils.setRootDir(conf, rootDir);
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, rootDir.toString());

    String table = "TABLE_A";
    String region = "a60772afe8c4aa3355360d3a6de0b292";
    String family = "fam_a";
    String hfile = "9fb67500d79a43e79b01da8d5d3017a4";
    Path linkPath = createPath(table, region, family, hfile);
    Assert.fail("IMPLEMENT");
    //      HFileLink link = SnapshotUtilsImpl.newLink(conf, linkPath);
    //      SnapshotUtilsImpl sui = (SnapshotUtilsImpl)SnapshotUtilsFactory.snapshotUtils;
    //      assertTrue( new String(sui.getColumnFamily(link)).equals(family));
}