Example usage for org.apache.hadoop.fs FileSystem getDefaultUri

Introduction

This page collects usage examples for org.apache.hadoop.fs.FileSystem.getDefaultUri from open-source projects.

Prototype

public static URI getDefaultUri(Configuration conf) 

Document

Get the default FileSystem URI from a configuration.
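
As a minimal sketch of the call itself: getDefaultUri reads the fs.defaultFS key from the configuration, so overriding that key changes the returned URI. The hdfs://namenode:8020 address below is a hypothetical example value, not one taken from the sources on this page.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class GetDefaultUriExample {
    public static void main(String[] args) {
        // With no overrides, the URI comes from fs.defaultFS in the loaded
        // *-site.xml resources, falling back to file:///.
        Configuration conf = new Configuration();
        System.out.println("Default: " + FileSystem.getDefaultUri(conf));

        // Point the configuration at a (hypothetical) namenode address;
        // getDefaultUri now reflects the override.
        conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://namenode:8020");
        System.out.println("Override: " + FileSystem.getDefaultUri(conf));

        // FileSystem.setDefaultUri is the companion setter for the same key.
        FileSystem.setDefaultUri(conf, URI.create("hdfs://namenode:8020"));
    }
}

Note that getDefaultUri only reads the configuration; it does not contact the filesystem, so it is safe to call before any FileSystem instance is created.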

Usage

From source file: org.apache.ambari.fast_hdfs_resource.Runner.java

License: Apache License

public static void main(String[] args) throws IOException, URISyntaxException {
    // 1 - Check arguments
    if (args.length != 1) {
        System.err.println(
                "Incorrect number of arguments. Please provide:\n" + "1) Path to json file\n" + "Exiting...");
        System.exit(1);
    }

    // 2 - Check if json-file exists
    final String jsonFilePath = args[0];
    File file = new File(jsonFilePath);

    if (!file.isFile()) {
        System.err.println("File " + jsonFilePath + " doesn't exist.\nExiting...");
        System.exit(1);
    }

    Gson gson = new Gson();
    Resource[] resources = null;
    FileSystem dfs = null;

    try {
        Configuration conf = new Configuration();
        dfs = FileSystem.get(conf);

        // 3 - Load data from JSON
        resources = (Resource[]) gson.fromJson(new FileReader(jsonFilePath), Resource[].class);

        // 4 - Connect to HDFS
        System.out.println("Using filesystem uri: " + FileSystem.getDefaultUri(conf).toString());
        dfs.initialize(FileSystem.getDefaultUri(conf), conf);

        for (Resource resource : resources) {
            System.out.println("Creating: " + resource);

            Resource.checkResourceParameters(resource, dfs);

            Path pathHadoop = new Path(resource.getTarget());
            if (!resource.isManageIfExists() && dfs.exists(pathHadoop)) {
                System.out.println("Skipping the operation for not managed DFS directory "
                        + resource.getTarget() + " since immutable_paths contains it.");
                continue;
            }

            if (resource.getAction().equals("create")) {
                // 5 - Create
                Resource.createResource(resource, dfs, pathHadoop);
                Resource.setMode(resource, dfs, pathHadoop);
                Resource.setOwner(resource, dfs, pathHadoop);
            } else if (resource.getAction().equals("delete")) {
                // 6 - Delete
                dfs.delete(pathHadoop, true);
            }
        }
    } catch (Exception e) {
        System.out.println("Exception occurred, Reason: " + e.getMessage());
        e.printStackTrace();
    } finally {
        // Guard against a NullPointerException when FileSystem.get() itself failed.
        if (dfs != null) {
            dfs.close();
        }
    }

    System.out.println("All resources created.");
}

From source file: org.apache.ambari.servicemonitor.clients.LsDir.java

License: Apache License

@Override
protected void init(CommandLine commandLine) throws IOException {
    super.init(commandLine);
    dirName = commandLine.getOptionValue("dir");
    info("Working with " + FileSystem.getDefaultUri(getConf()) + " directory: " + dirName);
}

From source file: org.apache.ambari.servicemonitor.Monitor.java

License: Apache License

/**
 * Execute the monitor. This method does not exit except by throwing exceptions or by calling System.exit().
 * @throws IOException problems
 * @throws ExitMainException an explicit exit exception
 */
public void execMonitor(Reporter reporter) throws IOException {

    Configuration conf = getConf();
    int probeInterval = conf.getInt(MONITOR_PROBE_INTERVAL, PROBE_INTERVAL_DEFAULT);
    int reportInterval = conf.getInt(MONITOR_REPORT_INTERVAL, REPORT_INTERVAL_DEFAULT);
    int probeTimeout = conf.getInt(MONITOR_PROBE_TIMEOUT, PROBE_TIMEOUT_DEFAULT);
    int bootstrapTimeout = conf.getInt(MONITOR_BOOTSTRAP_TIMEOUT, BOOTSTRAP_TIMEOUT_DEFAULT);

    boolean krb5Enabled = conf.getBoolean(MONITOR_KRB5_ENABLED, MONITOR_DEFAULT_KRB5_ENABLED);
    String krb5Principal = conf.get(MONITOR_KRB5_PRINCIPAL, MONITOR_DEFAULT_KRB5_PRINCIPAL);
    String krb5Keytab = conf.get(MONITOR_KRB5_KEYTAB, MONITOR_DEFAULT_KRB5_KEYTAB);

    if (LOG.isInfoEnabled()) {
        LOG.info("krb5Enabled = " + krb5Enabled + ", krb5Principal = " + krb5Principal + ", krb5Keyab = "
                + krb5Keytab);
    }
    if (krb5Enabled) {
        UserGroupInformation.loginUserFromKeytab(krb5Principal, krb5Keytab);
        UserGroupInformation.getLoginUser();
    }

    List<Probe> probes = new ArrayList<Probe>();
    if (conf.getBoolean(PORT_PROBE_ENABLED, false)) {

        String probeHost = conf.get(PORT_PROBE_HOST, DEFAULT_PROBE_HOST);

        int probePort = conf.getInt(PORT_PROBE_PORT, DEFAULT_PROBE_PORT);

        if (probePort == -1) {
            URI fsURI = FileSystem.getDefaultUri(conf);
            probePort = fsURI.getPort();
            validateParam(probePort == -1, "No port value in " + fsURI);
        }

        PortProbe portProbe = PortProbe.createPortProbe(new Configuration(conf), probeHost, probePort);
        probes.add(portProbe);
    } else {
        LOG.debug("port probe disabled");
    }

    if (conf.getBoolean(PID_PROBE_ENABLED, false)) {
        Probe probe = PidLiveProbe.createProbe(new Configuration(conf));
        probes.add(probe);
        LOG.debug("Pid probe enabled: " + probe.toString());
    } else {
        LOG.debug("Pid probe disabled");
    }

    if (conf.getBoolean(WEB_PROBE_ENABLED, false)) {
        HttpProbe httpProbe = HttpProbe.createHttpProbe(new Configuration(conf));
        probes.add(httpProbe);
    } else {
        LOG.debug("HTTP probe disabled");
    }

    if (conf.getBoolean(LS_PROBE_ENABLED, false)) {
        String path = conf.get(LS_PROBE_PATH, LS_PROBE_DEFAULT);
        DfsListProbe lsProbe = new DfsListProbe(new Configuration(conf), path);
        probes.add(lsProbe);
    } else {
        LOG.debug("ls probe disabled");
    }

    if (conf.getBoolean(JT_PROBE_ENABLED, false)) {
        Probe jtProbe = new JTClusterStatusProbe(new Configuration(conf));
        probes.add(jtProbe);
    } else {
        LOG.debug("JT probe disabled");
    }

    List<Probe> dependencyProbes = new ArrayList<Probe>(1);

    if (conf.getBoolean(MONITOR_DEPENDENCY_DFSLIVE, false)) {
        //there's a dependency on DFS
        //add a monitor for it
        LOG.info("Adding a dependency on HDFS being live");
        dependencyProbes.add(new DfsSafeModeProbe(new Configuration(conf), true));
    }

    reportingLoop = new ReportingLoop(name, reporter, probes, dependencyProbes, probeInterval, reportInterval,
            probeTimeout, bootstrapTimeout);

    if (!reportingLoop.startReporting()) {
        throw new ExitMainException(name + ": failed to start monitoring with reporter " + reporter);
    }
    //start reporting, either in a background thread
    //or here, directly in the main thread
    reportingLoop.run();
}

From source file: org.apache.ambari.servicemonitor.probes.DfsListProbe.java

License: Apache License

public DfsListProbe(Configuration conf, String path) throws IOException {
    super("DfsListProbe " + FileSystem.getDefaultUri(conf) + path, conf);
    //make sure the probe doesn't block, regardless of
    //any site configurations
    DFSUtils.makeDfsCallsNonBlocking(conf);
    fsURI = DFSUtils.getHDFSUri(conf);
    LOG.info(getName());
    this.path = path;
}

From source file: org.apache.ambari.servicemonitor.utils.DFSUtils.java

License: Apache License

/**
 * Get an HDFS URI.
 * @param conf configuration
 * @return the URI
 * @throws IOException if the default filesystem is not HDFS
 */
public static URI getHDFSUri(Configuration conf) throws IOException {
    URI uri = FileSystem.getDefaultUri(conf);
    if (!uri.getScheme().equals("hdfs")) {
        throw new IOException("Filesystem is not HDFS " + uri);
    }
    return uri;
}

From source file: org.apache.drill.exec.work.batch.FileTest.java

License: Apache License

public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "sync:///");
    System.out.println(FileSystem.getDefaultUri(conf));
    FileSystem fs = FileSystem.get(conf);
    //    FileSystem fs = new LocalSyncableFileSystem(conf);
    Path path = new Path("/tmp/testFile");
    FSDataOutputStream out = fs.create(path);
    byte[] s = "hello world".getBytes();
    out.write(s);
    out.sync();
    //    out.close();
    FSDataInputStream in = fs.open(path);
    byte[] bytes = new byte[s.length];
    in.read(bytes);
    System.out.println(new String(bytes));
    File file = new File("/tmp/testFile");
    FileOutputStream fos = new FileOutputStream(file);
    FileInputStream fis = new FileInputStream(file);
    fos.write(s);
    fos.getFD().sync();
    fis.read(bytes);
    System.out.println(new String(bytes));
    out = fs.create(new Path("/tmp/file"));
    for (int i = 0; i < 100; i++) {
        bytes = new byte[256 * 1024];
        Stopwatch watch = Stopwatch.createStarted();
        out.write(bytes);
        out.sync();
        long t = watch.elapsed(TimeUnit.MILLISECONDS);
        System.out.printf("Elapsed: %d. Rate %d.\n", t, (long) ((long) bytes.length * 1000L / t));
    }
}

From source file: org.apache.drill.yarn.core.DfsFacade.java

License: Apache License

/**
 * Lazy loading of YARN configuration since it takes a long time to load.
 * (YARN provides no caching, sadly.)
 */

private void loadYarnConfig() {
    if (yarnConf == null) {
        yarnConf = new YarnConfiguration();
        // On some distributions, lack of proper configuration causes
        // DFS to default to the local file system. So, a local file
        // system generally means that the config is wrong, or running
        // the wrong build of Drill for the user's environment.
        URI fsUri = FileSystem.getDefaultUri(yarnConf);
        if (fsUri.toString().startsWith("file:/")) {
            System.err.println("Warning: Default DFS URI is for a local file system: " + fsUri);
        }
    }
}

From source file: org.apache.ignite.hadoop.fs.BasicHadoopFileSystemFactory.java

License: Apache License

/** {@inheritDoc} */
@Override
public void start() throws IgniteException {
    cfg = HadoopUtils.safeCreateConfiguration();

    if (cfgPaths != null) {
        for (String cfgPath : cfgPaths) {
            if (cfgPath == null)
                throw new NullPointerException(
                        "Configuration path cannot be null: " + Arrays.toString(cfgPaths));
            else {
                URL url = U.resolveIgniteUrl(cfgPath);

                if (url == null) {
                    // If a configuration path is given, it should be resolvable:
                    throw new IgniteException("Failed to resolve secondary file system configuration path "
                            + "(ensure that it exists locally and you have read access to it): " + cfgPath);
                }

                cfg.addResource(url);
            }
        }
    }

    // If secondary fs URI is not given explicitly, try to get it from the configuration:
    if (uri == null)
        fullUri = FileSystem.getDefaultUri(cfg);
    else {
        try {
            fullUri = new URI(uri);
        } catch (URISyntaxException use) {
            throw new IgniteException("Failed to resolve secondary file system URI: " + uri);
        }
    }

    if (usrNameMapper != null && usrNameMapper instanceof LifecycleAware)
        ((LifecycleAware) usrNameMapper).start();
}

From source file: org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemCacheUtils.java

License: Apache License

/**
 * Common method to get the V1 file system in MapRed engine.
 * It gets the filesystem for the user specified in the
 * configuration with {@link MRJobConfig#USER_NAME} property.
 * The file systems are created and cached in the given map upon first request.
 *
 * @param uri The file system URI.
 * @param cfg The configuration.
 * @param map The caching map.
 * @return The file system.
 * @throws IOException On error.
 */
public static FileSystem fileSystemForMrUserWithCaching(@Nullable URI uri, Configuration cfg,
        HadoopLazyConcurrentMap<FsCacheKey, FileSystem> map) throws IOException {
    assert map != null;
    assert cfg != null;

    final String usr = getMrHadoopUser(cfg);

    assert usr != null;

    if (uri == null)
        uri = FileSystem.getDefaultUri(cfg);

    final FileSystem fs;

    try {
        final FsCacheKey key = new FsCacheKey(uri, usr, cfg);

        fs = map.getOrCreate(key);
    } catch (IgniteException ie) {
        throw new IOException(ie);
    }

    assert fs != null;
    assert !(fs instanceof IgniteHadoopFileSystem) || F.eq(usr, ((IgniteHadoopFileSystem) fs).user());

    return fs;
}

From source file: org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemCacheUtils.java

License: Apache License

/**
 * Takes the FS URI using logic similar to the three-argument FileSystem#get(URI, Configuration, String).
 * @param uri0 The URI.
 * @param cfg The cfg.
 * @return Correct URI.
 */
private static URI fixUri(URI uri0, Configuration cfg) {
    if (uri0 == null)
        return FileSystem.getDefaultUri(cfg);

    String scheme = uri0.getScheme();
    String authority = uri0.getAuthority();

    if (authority == null) {
        URI dfltUri = FileSystem.getDefaultUri(cfg);

        if (scheme == null || (scheme.equals(dfltUri.getScheme()) && dfltUri.getAuthority() != null))
            return dfltUri;
    }

    return uri0;
}
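
As a closing illustration, here is a hedged sketch of how the defaulting logic above resolves a few inputs. It inlines a copy of the fixUri method from the listing above, and uses hdfs://nn:8020 as a hypothetical default filesystem, not an address taken from the sources.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class FixUriDemo {
    // Local copy of the fixUri logic shown above, for demonstration only.
    private static URI fixUri(URI uri0, Configuration cfg) {
        if (uri0 == null)
            return FileSystem.getDefaultUri(cfg);

        String scheme = uri0.getScheme();
        String authority = uri0.getAuthority();

        if (authority == null) {
            URI dfltUri = FileSystem.getDefaultUri(cfg);

            if (scheme == null || (scheme.equals(dfltUri.getScheme()) && dfltUri.getAuthority() != null))
                return dfltUri;
        }

        return uri0;
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://nn:8020"); // hypothetical default

        System.out.println(fixUri(null, conf));                              // hdfs://nn:8020
        System.out.println(fixUri(URI.create("hdfs:///tmp"), conf));         // hdfs://nn:8020 (falls back to the default FS)
        System.out.println(fixUri(URI.create("file:///tmp"), conf));         // file:///tmp (different scheme, kept)
        System.out.println(fixUri(URI.create("hdfs://other:9000/x"), conf)); // hdfs://other:9000/x (fully qualified, kept)
    }
}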