Example usage for org.apache.hadoop.fs FileSystem getDefaultUri

Introduction

This page lists example usages of org.apache.hadoop.fs.FileSystem#getDefaultUri drawn from open-source projects.

Prototype

public static URI getDefaultUri(Configuration conf) 

Document

Get the default FileSystem URI from a configuration.
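
For reference, here is a minimal standalone sketch of calling the method; the fs.defaultFS value is a hypothetical example and would normally come from core-site.xml.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class DefaultUriExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Hypothetical default; in a real deployment this is read from core-site.xml.
        conf.set("fs.defaultFS", "hdfs://namenode:8020");
        // Returns the URI configured as the default FileSystem (fs.defaultFS).
        URI defaultUri = FileSystem.getDefaultUri(conf);
        System.out.println("Default FileSystem URI: " + defaultUri);
    }
}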

Usage

From source file:de.tudarmstadt.ukp.dkpro.bigdata.io.hadoop.HdfsResourceLoader.java

License:Apache License

/**
 * Constructs a new <code>HdfsResourceLoader</code> instance.
 *
 * @param config
 *            Hadoop configuration to use.
 * @param uri
 *            Hadoop file system URI.
 * @param user
 *            Hadoop user for accessing the file system.
 */
@SuppressWarnings("resource")
public HdfsResourceLoader(Configuration config, URI uri, String user) {
    internalFS = true;
    FileSystem tempFS = null;

    try {
        if (uri == null) {
            uri = FileSystem.getDefaultUri(config);
        }
        tempFS = (user != null ? FileSystem.get(uri, config, user) : FileSystem.get(uri, config));
    } catch (Exception ex) {
        tempFS = null;
        throw new IllegalStateException("Cannot create filesystem", ex);
    } finally {
        fs = tempFS;
    }
}

From source file:fi.aalto.seqpig.filter.CoordinateFilter.java

License:Open Source License

public CoordinateFilter(String samfileheaderfilename, String regions_str) {
    String str = "";
    this.samfileheader = "";

    this.regions_str = regions_str;

    try {
        Configuration conf = UDFContext.getUDFContext().getJobConf();

        if (conf == null) {
            decodeSAMFileHeader();
            return;
        }

        FileSystem fs;

        try {
            if (FileSystem.getDefaultUri(conf) == null || FileSystem.getDefaultUri(conf).toString().isEmpty())
                fs = FileSystem.get(new URI("hdfs://"), conf);
            else
                fs = FileSystem.get(conf);
        } catch (Exception e) {
            fs = FileSystem.get(new URI("hdfs://"), conf);
            System.out.println("WARNING: problems with filesystem config?");
            System.out.println("exception was: " + e.toString());
        }

        BufferedReader in = new BufferedReader(new InputStreamReader(
                fs.open(new Path(fs.getHomeDirectory(), new Path(samfileheaderfilename)))));

        while (true) {
            str = in.readLine();

            if (str == null)
                break;
            else
                this.samfileheader += str + "\n";
        }

        in.close();
    } catch (Exception e) {
        System.out.println("ERROR: could not read BAM header from file " + samfileheaderfilename);
        System.out.println("exception was: " + e.toString());
    }

    try {
        Base64 codec = new Base64();
        Properties p = UDFContext.getUDFContext().getUDFProperties(this.getClass());

        ByteArrayOutputStream bstream = new ByteArrayOutputStream();
        ObjectOutputStream ostream = new ObjectOutputStream(bstream);
        ostream.writeObject(this.samfileheader);
        ostream.close();
        String datastr = codec.encodeBase64String(bstream.toByteArray());
        p.setProperty("samfileheader", datastr);
        p.setProperty("regionsstr", regions_str);
    } catch (Exception e) {
        System.out.println("ERROR: Unable to store SAMFileHeader in CoordinateFilter!");
    }

    this.samfileheader_decoded = getSAMFileHeader();
    populateRegions();
}

From source file:fuse4j.hadoopfs.HdfsClientImpl.java

License:Apache License

/**
 * @return an HDFS filesystem
 * @throws InterruptedException
 * @throws IOException
 */
synchronized private FileSystem getDfs(int uid) throws Exception, InterruptedException {
    String user = this.userCache.getUsername(uid);
    //TODO: cache connections to NN per user
    FileSystem dfs = this.fsCache.get(uid);
    if (dfs == null) {
        dfs = FileSystem.get(FileSystem.getDefaultUri(conf), conf, user);
        this.fsCache.put(uid, dfs);
    }
    return dfs;
}
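
The same pattern in isolation, without the per-user cache fields used above. This is a sketch only; "alice" is a hypothetical user name.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class PerUserFileSystemExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Open a FileSystem on the default URI on behalf of a specific user.
        FileSystem fs = FileSystem.get(FileSystem.getDefaultUri(conf), conf, "alice");
        System.out.println("Working directory: " + fs.getWorkingDirectory());
        fs.close();
    }
}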

From source file:gobblin.util.filesystem.FileSystemFactory.java

License:Apache License

/**
 * Equivalent to {@link FileSystem#get(Configuration)}, but uses the input {@link SharedResourcesBroker} to configure
 * add-ons to the {@link FileSystem} (e.g. throttling, instrumentation).
 */
public static <S extends ScopeType<S>> FileSystem get(Configuration configuration,
        SharedResourcesBroker<S> broker) throws IOException {
    return get(FileSystem.getDefaultUri(configuration), configuration, broker);
}

From source file:gobblin.util.filesystem.FileSystemKey.java

License:Apache License

private URI resolveURI(URI uri, Configuration configuration) {
    String scheme = uri.getScheme();
    String authority = uri.getAuthority();

    if (scheme == null && authority == null) { // use default FS
        return FileSystem.getDefaultUri(configuration);
    }

    if (scheme != null && authority == null) { // no authority
        URI defaultUri = FileSystem.getDefaultUri(configuration);
        if (scheme.equals(defaultUri.getScheme()) // if scheme matches default
                && defaultUri.getAuthority() != null) { // & default has authority
            return defaultUri; // return default
        }
    }

    try {
        return new URI(scheme, Strings.nullToEmpty(authority), "/", null, null);
    } catch (URISyntaxException use) {
        // This should never happen
        throw new RuntimeException(use);
    }
}
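
To see what these rules produce, here is a rough standalone sketch of the same resolution logic; the helper class and the hdfs://namenode:8020 default are assumptions for illustration, not part of Gobblin.

import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class ResolveUriDemo {

    // Simplified version of the resolution rules shown above.
    static URI resolve(URI uri, Configuration conf) throws URISyntaxException {
        String scheme = uri.getScheme();
        String authority = uri.getAuthority();
        URI defaultUri = FileSystem.getDefaultUri(conf);
        if (scheme == null && authority == null) {
            return defaultUri; // no scheme, no authority -> default FS
        }
        if (scheme != null && authority == null && scheme.equals(defaultUri.getScheme())
                && defaultUri.getAuthority() != null) {
            return defaultUri; // scheme matches default and default has an authority
        }
        return new URI(scheme, authority == null ? "" : authority, "/", null, null);
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://namenode:8020"); // hypothetical default
        System.out.println(resolve(new URI("/user/data"), conf));        // falls back to the default URI
        System.out.println(resolve(new URI("hdfs:///user/data"), conf)); // scheme matches default -> default URI
        System.out.println(resolve(new URI("file:///tmp"), conf));       // keeps its own scheme
    }
}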

From source file:gobblin.util.ProxiedFileSystemCache.java

License:Apache License

/**
 * Gets a {@link FileSystem} that can perform any operations allowed by the specified userNameToProxyAs.
 *
 * @param userNameToProxyAs The name of the user the super user should proxy as
 * @param properties {@link java.util.Properties} containing initialization properties.
 * @param conf The {@link Configuration} for the {@link FileSystem} that should be created.
 * @return a {@link FileSystem} that can execute commands on behalf of the specified userNameToProxyAs
 * @throws IOException
 * @deprecated use {@link #fromProperties}
 */
@Deprecated
public static FileSystem getProxiedFileSystem(@NonNull final String userNameToProxyAs, Properties properties,
        Configuration conf) throws IOException {
    return getProxiedFileSystem(userNameToProxyAs, properties, FileSystem.getDefaultUri(conf), conf);
}

From source file:gobblin.util.ProxiedFileSystemCache.java

License:Apache License

private static URI resolveUri(URI uri, Configuration configuration, FileSystem fileSystem) throws IOException {
    if (uri != null) {
        return uri;
    }
    if (fileSystem != null) {
        return fileSystem.getUri();
    }
    if (configuration != null) {
        return FileSystem.getDefaultUri(configuration);
    }
    throw new IOException("FileSystem URI could not be determined from available inputs.");
}

From source file:io.hops.experiments.benchmarks.blockreporting.TinyDatanodes.java

License:Apache License

public TinyDatanodes(Configuration conf, String baseDir, int numOfDataNodes, int blocksPerReport,
        int blocksPerFile, int filesPerDirectory, int replication, int blockSize, int slaveId,
        String databaseConnection, BenchMarkFileSystemName fsName) throws IOException, Exception {
    this.baseDir = baseDir;
    this.nrDatanodes = numOfDataNodes;
    this.blocksPerReport = blocksPerReport;
    this.blocksPerFile = blocksPerFile;
    this.filesPerDirectory = filesPerDirectory;
    this.replication = (short) replication;
    this.blockSize = blockSize;
    this.datanodes = new TinyDatanode[nrDatanodes];
    conf.set(ConfigKeys.DFS_NAMENODE_SELECTOR_POLICY_KEY, "ROUND_ROBIN");

    nameNodeSelector = NameNodeSelectorFactory.getSelector(fsName, conf, FileSystem.getDefaultUri(conf));
    machineName = InetAddress.getLocalHost().getHostName();
    this.helper = new TinyDatanodesHelper(slaveId, databaseConnection);
}

From source file:org.apache.accumulo.server.init.Initialize.java

License:Apache License

static boolean checkInit(Configuration conf, VolumeManager fs, SiteConfiguration sconf) throws IOException {
    @SuppressWarnings("deprecation")
    String fsUri = sconf.get(Property.INSTANCE_DFS_URI);
    if (fsUri.equals(""))
        fsUri = FileSystem.getDefaultUri(conf).toString();
    log.info("Hadoop Filesystem is " + fsUri);
    log.info("Accumulo data dirs are "
            + Arrays.asList(VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance())));
    log.info("Zookeeper server is " + sconf.get(Property.INSTANCE_ZK_HOST));
    log.info(
            "Checking if Zookeeper is available. If this hangs, then you need to make sure zookeeper is running");
    if (!zookeeperAvailable()) {
        // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j compatibility
        log.error("FATAL Zookeeper needs to be up and running in order to init. Exiting ...");
        return false;
    }
    if (sconf.get(Property.INSTANCE_SECRET).equals(Property.INSTANCE_SECRET.getDefaultValue())) {
        ConsoleReader c = getConsoleReader();
        c.beep();
        c.println();
        c.println();
        c.println(
                "Warning!!! Your instance secret is still set to the default, this is not secure. We highly recommend you change it.");
        c.println();
        c.println();
        c.println("You can change the instance secret in accumulo by using:");
        c.println("   bin/accumulo " + org.apache.accumulo.server.util.ChangeSecret.class.getName());
        c.println(
                "You will also need to edit your secret in your configuration file by adding the property instance.secret to your conf/accumulo-site.xml. "
                        + "Without this accumulo will not operate correctly");
    }
    try {
        if (isInitialized(fs)) {
            printInitializeFailureMessages(sconf);
            return false;
        }
    } catch (IOException e) {
        throw new IOException("Failed to check if filesystem already initialized", e);
    }

    return true;
}
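
The fallback pattern used above, reduced to plain Hadoop configuration calls; my.custom.fs.uri is a hypothetical property name standing in for Accumulo's instance URI setting.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class FsUriFallbackExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Prefer an explicitly configured URI; otherwise fall back to the default FileSystem URI.
        String fsUri = conf.get("my.custom.fs.uri", "");
        if (fsUri.isEmpty()) {
            fsUri = FileSystem.getDefaultUri(conf).toString();
        }
        System.out.println("Hadoop Filesystem is " + fsUri);
    }
}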

From source file:org.apache.accumulo.server.util.Initialize.java

License:Apache License

public static boolean doInit(Opts opts, Configuration conf, VolumeManager fs) throws IOException {
    if (!ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_DFS_URI).equals(""))
        log.info("Hadoop Filesystem is "
                + ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_DFS_URI));
    else
        log.info("Hadoop Filesystem is " + FileSystem.getDefaultUri(conf));

    log.info("Accumulo data dirs are " + Arrays.asList(ServerConstants.getBaseDirs()));
    log.info(
            "Zookeeper server is " + ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_ZK_HOST));
    log.info(
            "Checking if Zookeeper is available. If this hangs, then you need to make sure zookeeper is running");
    if (!zookeeperAvailable()) {
        log.fatal("Zookeeper needs to be up and running in order to init. Exiting ...");
        return false;
    }
    if (ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_SECRET)
            .equals(Property.INSTANCE_SECRET.getDefaultValue())) {
        ConsoleReader c = getConsoleReader();
        c.beep();
        c.println();
        c.println();
        c.println(
                "Warning!!! Your instance secret is still set to the default, this is not secure. We highly recommend you change it.");
        c.println();
        c.println();
        c.println("You can change the instance secret in accumulo by using:");
        c.println("   bin/accumulo " + org.apache.accumulo.server.util.ChangeSecret.class.getName()
                + " oldPassword newPassword.");
        c.println(
                "You will also need to edit your secret in your configuration file by adding the property instance.secret to your conf/accumulo-site.xml. Without this accumulo will not operate correctly");
    }

    try {
        if (isInitialized(fs)) {
            log.fatal("It appears this location was previously initialized, exiting ... ");
            return false;
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    // prompt user for instance name and root password early, so that if they
    // abort we don't leave an inconsistent HDFS/ZooKeeper structure
    String instanceNamePath;
    try {
        instanceNamePath = getInstanceNamePath(opts);
    } catch (Exception e) {
        log.fatal("Failed to talk to zookeeper", e);
        return false;
    }
    opts.rootpass = getRootPassword(opts);
    return initialize(opts, instanceNamePath, fs);
}