Example usage for org.apache.hadoop.fs FileSystem getDefaultUri

List of usage examples for org.apache.hadoop.fs FileSystem getDefaultUri

Introduction

On this page you can find example usages for org.apache.hadoop.fs.FileSystem.getDefaultUri.

Prototype

public static URI getDefaultUri(Configuration conf) 

Document

Get the default FileSystem URI from a configuration.
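
Before the examples, a minimal sketch of the call itself; the hdfs://namenode:8020/ value in the comment is an assumed illustration, not taken from the sources below:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class DefaultUriExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Resolves fs.defaultFS (fs.default.name in older releases),
        // e.g. hdfs://namenode:8020/; falls back to file:/// when unset.
        URI uri = FileSystem.getDefaultUri(conf);
        System.out.println("Default FileSystem URI: " + uri);
    }
}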

Usage

From source file: org.elasticsearch.repositories.hdfs.HdfsRepository.java

License: Apache License

private FileSystem initFileSystem(RepositorySettings repositorySettings) throws IOException {
    Configuration cfg = new Configuration(repositorySettings.settings().getAsBoolean("load_defaults",
            componentSettings.getAsBoolean("load_defaults", true)));

    String confLocation = repositorySettings.settings().get("conf_location",
            componentSettings.get("conf_location"));
    if (Strings.hasText(confLocation)) {
        for (String entry : Strings.commaDelimitedListToStringArray(confLocation)) {
            addConfigLocation(cfg, entry.trim());
        }
    }

    Map<String, String> map = componentSettings.getByPrefix("conf.").getAsMap();
    for (Entry<String, String> entry : map.entrySet()) {
        cfg.set(entry.getKey(), entry.getValue());
    }

    UserGroupInformation.setConfiguration(cfg);

    String uri = repositorySettings.settings().get("uri", componentSettings.get("uri"));
    URI actualUri = (uri != null ? URI.create(uri) : FileSystem.getDefaultUri(cfg));
    String user = repositorySettings.settings().get("user", componentSettings.get("user"));

    try {
        // disable FS cache
        String disableFsCache = String.format("fs.%s.impl.disable.cache", actualUri.getScheme());
        cfg.setBoolean(disableFsCache, true);
        return (user != null ? FileSystem.get(actualUri, cfg, user) : FileSystem.get(actualUri, cfg));
    } catch (Exception ex) {
        throw new ElasticsearchGenerationException(
                String.format("Cannot create Hdfs file-system for uri [%s]", actualUri), ex);
    }
}

From source file: org.hypertable.DfsBroker.hadoop.HadoopBroker.java

License: Open Source License

public HadoopBroker(Comm comm, Properties props) throws IOException {
    String str;

    str = props.getProperty("verbose");
    if (str != null && str.equalsIgnoreCase("true"))
        mVerbose = true;
    else
        mVerbose = false;

    str = props.getProperty("HdfsBroker.Hadoop.ConfDir");
    if (str != null) {
        if (mVerbose)
            System.out.println("HdfsBroker.Hadoop.ConfDir=" + str);
        try {
            readHadoopConfig(str);
        } catch (Exception e) {
            log.severe("Failed to parse HdfsBroker.HdfsSite.xml(" + str + ")");
            e.printStackTrace();
            System.exit(1);
        }
    }

    // settings from the hadoop configuration are overwritten by values
    // from the configuration file
    str = props.getProperty("HdfsBroker.dfs.replication");
    if (str != null)
        mConf.setInt("dfs.replication", Integer.parseInt(str));

    str = props.getProperty("HdfsBroker.dfs.client.read.shortcircuit");
    if (str != null) {
        if (str.equalsIgnoreCase("true"))
            mConf.setBoolean("dfs.client.read.shortcircuit", true);
        else
            mConf.setBoolean("dfs.client.read.shortcircuit", false);
    }

    str = props.getProperty("HdfsBroker.fs.default.name");
    if (str != null) {
        mConf.set("fs.default.name", str);
    } else {
        // make sure that we have the fs.default.name property
        if (mConf.get("fs.default.name") == null || mConf.get("fs.default.name").equals("file:///")) {
            log.severe("Neither HdfsBroker.fs.default.name nor " + "HdfsBroker.Hadoop.ConfDir was specified.");
            System.exit(1);
        }
    }

    if (mVerbose) {
        System.out.println("HdfsBroker.dfs.client.read.shortcircuit="
                + mConf.getBoolean("dfs.client.read.shortcircuit", false));
        System.out.println("HdfsBroker.dfs.replication=" + mConf.getInt("dfs.replication", -1));
        System.out.println("HdfsBroker.Server.fs.default.name=" + mConf.get("fs.default.name"));
    }

    mConf.set("dfs.client.buffer.dir", "/tmp");
    mConf.setInt("dfs.client.block.write.retries", 3);
    mConf.setBoolean("fs.automatic.close", false);

    try {
        mFilesystem = FileSystem.get(mConf);
        mFilesystem.initialize(FileSystem.getDefaultUri(mConf), mConf);
        mFilesystem_noverify = newInstanceFileSystem();
        mFilesystem_noverify.setVerifyChecksum(false);
    } catch (Exception e) {
        log.severe("ERROR: Unable to establish connection to HDFS: " + e);
        System.exit(1);
    }
}

From source file: org.hypertable.DfsBroker.hadoop.HadoopBroker.java

License: Open Source License

/**
 * Returns a brand new instance of the FileSystem
 *
 * @return A new instance of the filesystem
 */
private FileSystem newInstanceFileSystem() throws IOException {
    URI uri = FileSystem.getDefaultUri(mConf);
    Class<?> clazz = FileSystem.getFileSystemClass(uri.getScheme(), mConf);
    if (clazz == null)
        throw new IOException("HdfsBroker: No FileSystem for scheme: " + uri.getScheme());
    FileSystem fs = (FileSystem) ReflectionUtils.newInstance(clazz, mConf);
    fs.initialize(uri, mConf);
    return fs;
}

From source file: org.hypertable.FsBroker.hadoop.HadoopBroker.java

License: Open Source License

public HadoopBroker(Comm comm, Properties props) throws IOException {
    String str;

    str = props.getProperty("verbose");
    if (str != null && str.equalsIgnoreCase("true"))
        mVerbose = true;
    else
        mVerbose = false;

    str = props.getProperty("HdfsBroker.Hadoop.ConfDir");
    if (str != null) {
        if (mVerbose)
            System.out.println("HdfsBroker.Hadoop.ConfDir=" + str);
        try {
            readHadoopConfig(str);
        } catch (Exception e) {
            log.severe("Failed to parse HdfsBroker.HdfsSite.xml(" + str + ")");
            e.printStackTrace();
            System.exit(1);
        }
    }

    // settings from the hadoop configuration are overwritten by values
    // from the configuration file
    str = props.getProperty("HdfsBroker.dfs.replication");
    if (str != null)
        mConf.setInt("dfs.replication", Integer.parseInt(str));

    str = props.getProperty("HdfsBroker.dfs.client.read.shortcircuit");
    if (str != null) {
        if (str.equalsIgnoreCase("true"))
            mConf.setBoolean("dfs.client.read.shortcircuit", true);
        else
            mConf.setBoolean("dfs.client.read.shortcircuit", false);
    }

    str = props.getProperty("HdfsBroker.fs.default.name");
    if (str != null) {
        mConf.set("fs.default.name", str);
    } else {
        // make sure that we have the fs.default.name property
        if (mConf.get("fs.default.name") == null || mConf.get("fs.default.name").equals("file:///")) {
            log.severe("Neither HdfsBroker.fs.default.name nor " + "HdfsBroker.Hadoop.ConfDir was specified.");
            System.exit(1);
        }
    }

    if (mVerbose) {
        System.out.println("HdfsBroker.dfs.client.read.shortcircuit="
                + mConf.getBoolean("dfs.client.read.shortcircuit", false));
        System.out.println("HdfsBroker.dfs.replication=" + mConf.getInt("dfs.replication", -1));
        System.out.println("HdfsBroker.Server.fs.default.name=" + mConf.get("fs.default.name"));
    }

    mConf.set("dfs.client.buffer.dir", "/tmp");
    mConf.setInt("dfs.client.block.write.retries", 3);
    mConf.setBoolean("fs.automatic.close", false);

    try {
        mFilesystem = FileSystem.get(mConf);
        mFilesystem.initialize(FileSystem.getDefaultUri(mConf), mConf);
        mFilesystem_noverify = newInstanceFileSystem(mConf);
        mFilesystem_noverify.setVerifyChecksum(false);
    } catch (Exception e) {
        log.severe("ERROR: Unable to establish connection to HDFS.");
        System.exit(1);
    }
}

From source file: org.hypertable.FsBroker.hadoop.HadoopBroker.java

License: Open Source License

/**
 * Returns a brand new instance of the FileSystem. It does not use
 * the FileSystem.Cache. In newer versions of HDFS, we can directly
 * invoke FileSystem.newInstance(Configuration).
 *
 * @param conf Configuration
 * @return A new instance of the filesystem
 */
private static FileSystem newInstanceFileSystem(Configuration conf) throws IOException {
    URI uri = FileSystem.getDefaultUri(conf);
    Class<?> clazz = conf.getClass("fs." + uri.getScheme() + ".impl", null);
    if (clazz == null) {
        throw new IOException("No FileSystem for scheme: " + uri.getScheme());
    }
    FileSystem fs = (FileSystem) ReflectionUtils.newInstance(clazz, conf);
    fs.initialize(uri, conf);
    return fs;
}
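
As the comment above notes, newer Hadoop releases expose this pattern directly; a minimal sketch of the equivalent call, assuming a version that provides FileSystem.newInstance:

    // Equivalent to the reflection-based helper above on newer Hadoop
    // versions; newInstance likewise bypasses the FileSystem cache.
    FileSystem fs = FileSystem.newInstance(FileSystem.getDefaultUri(conf), conf);
    fs.setVerifyChecksum(false);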

From source file: org.kiji.examples.phonebook.IntegrationTestPhonebookImporter.java

License: Apache License

/**
 * Generates a random HDFS path.
 *
 * @param prefix Prefix for the random file name.
 * @return a random HDFS path.
 * @throws Exception on error.
 */
private Path makeRandomHdfsPath(String prefix) throws Exception {
    Preconditions.checkNotNull(mFS);
    final Path base = new Path(FileSystem.getDefaultUri(mConf));
    final Random random = new Random(System.nanoTime());
    return new Path(base, String.format("/%s-%s", prefix, random.nextLong()));
}
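
A hypothetical call, assuming the default URI in mConf is hdfs://namenode:8020 (an illustrative value, as is the prefix):

    // Might yield a path such as hdfs://namenode:8020/phonebook-import-42...
    Path tempPath = makeRandomHdfsPath("phonebook-import");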

From source file: org.pentaho.di.job.entries.sqoop.SqoopUtils.java

License: Apache License

/**
 * Configure a {@link SqoopConfig}'s Namenode and Jobtracker connection information based off a Hadoop Configuration's
 * settings. These properties are parsed from {@code fs.default.name} and {@code mapred.job.tracker} properties.
 *
 * @param config Sqoop configuration to update
 * @param c      Hadoop configuration to parse connection information from
 */
public static void configureConnectionInformation(SqoopConfig config, Configuration c) {
    URI namenode = FileSystem.getDefaultUri(c);
    if (namenode != null) {
        config.setNamenodeHost(namenode.getHost());
        if (namenode.getPort() != -1) {
            config.setNamenodePort(String.valueOf(namenode.getPort()));
        }
    }

    if (!"local".equals(c.get("mapred.job.tracker", "local"))) {
        InetSocketAddress jobtracker = JobTracker.getAddress(c);
        config.setJobtrackerHost(jobtracker.getHostName());
        config.setJobtrackerPort(String.valueOf(jobtracker.getPort()));
    }
}
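
A sketch of the effect; the fs.default.name value and the no-arg SqoopConfig construction are both assumptions for illustration:

    Configuration c = new Configuration();
    c.set("fs.default.name", "hdfs://namenode:8020"); // assumed example value
    SqoopConfig config = new SqoopConfig();           // hypothetical construction
    SqoopUtils.configureConnectionInformation(config, c);
    // config now carries namenode host "namenode" and port "8020";
    // mapred.job.tracker defaults to "local", so the jobtracker fields stay unset.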

From source file: org.smartfrog.services.hadoop.operations.utils.DfsUtils.java

License: Open Source License

public static FileSystem createInstance(URI uri, Configuration conf) throws IOException {
    String scheme = uri.getScheme();
    if (scheme == null) {
        scheme = FileSystem.getDefaultUri(conf).getScheme();
    }
    conf.setBoolean("fs." + scheme + ".impl.disable.cache", true);
    FileSystem dfs = FileSystem.get(uri, conf);
    return dfs;
}

From source file: org.smartfrog.services.hadoop.operations.utils.DfsUtils.java

License: Open Source License

/**
 * This loads but does not initialise a filesystem.
 *
 * @param conf configuration
 * @param uri URI of the filesystem
 * @return an instance of that filesystem
 * @throws IOException if there is no filesystem of that type
 */
public static FileSystem loadFS(final Configuration conf, final URI uri) throws IOException {
    String scheme = uri.getScheme();
    String authority = uri.getAuthority();

    if (scheme == null) { // no scheme: use default FS
        return FileSystem.get(conf);
    }

    if (authority == null) { // no authority
        URI defaultUri = FileSystem.getDefaultUri(conf);
        if (scheme.equals(defaultUri.getScheme()) // if scheme matches default
                && defaultUri.getAuthority() != null) { // & default has authority
            return loadFS(conf, defaultUri); // return default
        }
    }

    String filesystemProp = "fs." + uri.getScheme() + ".impl";
    String implclass = conf.get(filesystemProp);
    Class<?> clazz = conf.getClass(filesystemProp, null);
    FileSystem.LOG.debug("Creating filesystem for " + uri + ", implementation is " + implclass);
    if (clazz == null) {
        throw new IOException(
                "No FileSystem for scheme: " + uri.getScheme() + " and configuration option " + filesystemProp);
    }
    try {
        FileSystem fs = (FileSystem) ReflectionUtils.newInstance(clazz, conf);
        return fs;
    } catch (RuntimeException e) {
        throw new IOException(
                "Failed to create an instance of " + implclass + " to process " + uri.getScheme() + " : " + e,
                e);
    }
}
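
A hypothetical caller, remembering that loadFS deliberately skips initialization; the hdfs://namenode:8020/ URI is an assumed example:

    Configuration conf = new Configuration();
    URI uri = URI.create("hdfs://namenode:8020/"); // assumed example URI
    FileSystem fs = DfsUtils.loadFS(conf, uri);
    fs.initialize(uri, conf); // the caller must initialize before use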

From source file: org.springframework.data.hadoop.fs.FileSystemFactoryBean.java

License: Apache License

public void afterPropertiesSet() throws Exception {
    Configuration cfg = (configuration != null ? configuration : new Configuration(true));
    if (uri == null) {
        uri = FileSystem.getDefaultUri(cfg);
    }
    if (StringUtils.hasText(user)) {
        fs = FileSystem.get(uri, cfg, user);
    } else {
        fs = FileSystem.get(uri, cfg);
    }
}