Example usage for org.apache.hadoop.conf Configuration addResource

Introduction

On this page you can find example usages of org.apache.hadoop.conf.Configuration.addResource.

Prototype

public void addResource(Configuration conf) 

Document

Add a configuration resource.
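
Configuration.addResource is heavily overloaded: besides the Configuration variant above it accepts a classpath resource name (String), a URL, a Hadoop Path, or an InputStream, and in every case the XML is parsed lazily, on first property access. Below is a minimal, self-contained sketch; the resource name my-site.xml is hypothetical:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;

public class AddResourceExample {
    public static void main(String[] args) {
        // false = skip the default resources (core-default.xml, core-site.xml).
        Configuration conf = new Configuration(false);

        // Register a classpath resource by name (hypothetical file); in quiet
        // mode (the default) a missing classpath resource is skipped silently.
        conf.addResource("my-site.xml");

        // Register an in-memory XML document via the InputStream overload.
        String xml = "<?xml version=\"1.0\"?><configuration>"
                + "<property><name>demo.key</name><value>demo</value></property>"
                + "</configuration>";
        conf.addResource(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));

        // Resources are parsed lazily; this first get() triggers the load.
        System.out.println(conf.get("demo.key")); // prints: demo
    }
}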

Usage

From source file:org.apache.hoya.providers.HoyaProviderFactory.java

License:Apache License

public static Configuration loadHoyaConfiguration() {
    Configuration conf = new Configuration();
    conf.addResource(HoyaKeys.HOYA_XML);
    return conf;
}
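
Note: addResource(String) registers HoyaKeys.HOYA_XML by its classpath resource name; nothing is read until a property is first accessed, and in Configuration's default quiet mode a resource missing from the classpath is skipped silently rather than reported.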

From source file:org.apache.hoya.tools.ConfigHelper.java

License:Apache License

/**
 * Load a configuration from ANY FS path. The normal Configuration
 * loader only works with file:// URIs
 * @param fs filesystem
 * @param path path
 * @return a loaded resource
 * @throws IOException
 */
public static Configuration loadConfiguration(FileSystem fs, Path path) throws IOException {
    int len = (int) fs.getLength(path);
    byte[] data = new byte[len];
    FSDataInputStream in = fs.open(path);
    try {
        in.readFully(0, data);
    } finally {
        // Close whether or not the read succeeded; a failed read now
        // propagates its IOException instead of being silently swallowed.
        in.close();
    }
    ByteArrayInputStream in2 = new ByteArrayInputStream(data);
    Configuration conf1 = new Configuration(false);
    conf1.addResource(in2);
    // Clone into a fresh Configuration, re-attributing every key to the file's path as its source
    Configuration conf2 = new Configuration(false);
    String src = path.toString();
    for (Map.Entry<String, String> entry : conf1) {
        String key = entry.getKey();
        String value = entry.getValue();
        conf2.set(key, value, src);
    }
    return conf2;
}
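
A hedged usage sketch of the helper above; the namenode address, HDFS path, and property name are all hypothetical:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hoya.tools.ConfigHelper;

public class LoadRemoteConfExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical namenode address and remote configuration path.
        FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020/"), new Configuration());
        Configuration remote = ConfigHelper.loadConfiguration(fs, new Path("/apps/hoya/cluster.xml"));
        // Thanks to the re-attribution loop, every key reports the HDFS path as its source.
        System.out.println(remote.get("hoya.app.name"));
    }
}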

From source file:org.apache.hoya.tools.ConfigHelper.java

License:Apache License

/**
 * Load a Hadoop configuration from a local file.
 * @param file file to load
 * @return a configuration which hasn't actually had the load triggered
 * yet.
 * @throws FileNotFoundException file is not there
 * @throws IOException any other IO problem
 */
public static Configuration loadConfFromFile(File file) throws IOException {
    if (!file.exists()) {
        throw new FileNotFoundException("File not found: " + file.getAbsoluteFile());
    }
    Configuration conf = new Configuration(false);
    try {
        conf.addResource(file.toURI().toURL());
    } catch (MalformedURLException e) {
        //should never happen...
        throw new IOException("File " + file.toURI() + " doesn't have a valid URL");
    }
    return conf;
}
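
As the javadoc stresses, the returned Configuration has not actually been loaded yet: the file's XML is parsed on the first property access, so a malformed file fails at that first get(), not inside loadConfFromFile.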

From source file:org.apache.hoya.tools.ConfigHelper.java

License:Apache License

/**
 * Looks for the config under $confdir/$templateFilename; if not there,
 * loads it from /conf/templateFile.
 * The property {@link HoyaXmlConfKeys#KEY_TEMPLATE_ORIGIN} is set to the
 * origin to help debug what's happening.
 * @param fs Filesystem
 * @param templatePath HDFS path for template
 * @param fallbackResource resource to fall back on, or "" for no fallback
 * @return loaded conf
 * @throws IOException IO problems
 * @throws FileNotFoundException if the path doesn't have a file and there
 * was no fallback.
 */
public static Configuration loadTemplateConfiguration(FileSystem fs, Path templatePath, String fallbackResource)
        throws IOException {
    Configuration conf = null;
    String origin;
    if (fs.exists(templatePath)) {
        log.debug("Loading template configuration {}", templatePath);
        conf = loadConfiguration(fs, templatePath);
        origin = templatePath.toString();
    } else {
        if (fallbackResource.isEmpty()) {
            throw new FileNotFoundException("No config file found at " + templatePath);
        }
        log.debug("Template {} not found" + " -reverting to classpath resource {}", templatePath,
                fallbackResource);
        conf = new Configuration(false);
        conf.addResource(fallbackResource);
        origin = "Resource " + fallbackResource;
    }
    // Force the lazy resource load with a get...
    conf.get(HoyaXmlConfKeys.KEY_TEMPLATE_ORIGIN);
    // ...then record the origin so callers can see where the config came from.
    conf.set(HoyaXmlConfKeys.KEY_TEMPLATE_ORIGIN, origin);
    return conf;
}

From source file:org.apache.hoya.tools.ConfigHelper.java

License:Apache License

/**
 * Load a configuration from a resource on this classpath.
 * If the resource is not found, an empty configuration is returned.
 * @param resource the resource name
 * @return the loaded configuration.
 */
public static Configuration loadFromResource(String resource) {
    Configuration conf = new Configuration(false);
    URL resURL = ConfigHelper.class.getClassLoader().getResource(resource);
    if (resURL != null) {
        log.debug("loaded resources from {}", resURL);
        conf.addResource(resource);
    } else {
        log.debug("failed to find {} on the classpath", resource);
    }
    return conf;
}
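
Note the classpath lookup happens twice here: once through getResource to decide whether the resource exists, and again inside addResource when the configuration is eventually loaded, since the name rather than the resolved URL is what gets registered.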

From source file:org.apache.hoya.tools.ConfigHelper.java

License:Apache License

/**
 * Load a resource that must be there.
 * @param resource the resource name
 * @return the loaded configuration
 * @throws FileNotFoundException if the resource is missing
 */
public static Configuration loadMandatoryResource(String resource) throws FileNotFoundException {
    Configuration conf = new Configuration(false);
    URL resURL = ConfigHelper.class.getClassLoader().getResource(resource);
    if (resURL != null) {
        log.debug("loaded resources from {}", resURL);
        conf.addResource(resource);
    } else {
        throw new FileNotFoundException(resource);
    }
    return conf;
}

From source file:org.apache.hoya.tools.ConfigHelperTest.java

License:Apache License

private static Configuration loadConf(String s) {
    Configuration conf = new Configuration(false);
    conf.addResource(s);
    // size() forces the lazy resource load, so this asserts the file was found and parsed.
    assertTrue("loaded no properties from " + s, conf.size() > 0);
    return conf;
}

From source file:org.apache.ignite.igfs.hadoop.v1.IgfsHadoopFileSystem.java

License:Apache License

/** {@inheritDoc} */
@Override
public void initialize(URI name, Configuration cfg) throws IOException {
    enterBusy();

    try {
        if (rmtClient != null)
            throw new IOException("File system is already initialized: " + rmtClient);

        A.notNull(name, "name");
        A.notNull(cfg, "cfg");

        super.initialize(name, cfg);

        setConf(cfg);

        String disableCacheName = String.format("fs.%s.impl.disable.cache", name.getScheme());

        cacheEnabled = !cfg.getBoolean(disableCacheName, false);

        mgmt = cfg.getBoolean(IGFS_MANAGEMENT, false);

        if (!IGFS_SCHEME.equals(name.getScheme()))
            throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME
                    + "://[name]/[optional_path], actual=" + name + ']');

        uri = name;

        uriAuthority = uri.getAuthority();

        setUser(cfg.get(MRJobConfig.USER_NAME, DFLT_USER_NAME));

        // Override sequential reads before prefetch if needed.
        seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);

        if (seqReadsBeforePrefetch > 0)
            seqReadsBeforePrefetchOverride = true;

        // In GG replication factor is controlled by data cache affinity.
        // We use replication factor to force the whole file to be stored on local node.
        dfltReplication = (short) cfg.getInt("dfs.replication", 3);

        // Get file colocation control flag.
        colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
        preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);

        // Get log directory.
        String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);

        File logDirFile = U.resolveIgnitePath(logDirCfg);

        String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;

        rmtClient = new IgfsHadoopWrapper(uriAuthority, logDir, cfg, LOG);

        // Handshake.
        IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);

        igfsGrpBlockSize = handshake.blockSize();

        IgfsPaths paths = handshake.secondaryPaths();

        // Initialize client logger.
        Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);

        if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
            // Initiate client logger.
            if (logDir == null)
                throw new IOException("Failed to resolve log directory: " + logDirCfg);

            Integer batchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority,
                    DFLT_IGFS_LOG_BATCH_SIZE);

            clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
        } else
            clientLog = IgfsLogger.disabledLogger();

        modeRslvr = new IgfsModeResolver(paths.defaultMode(), paths.pathModes());

        boolean initSecondary = paths.defaultMode() == PROXY;

        if (paths.pathModes() != null && !paths.pathModes().isEmpty()) {
            for (T2<IgfsPath, IgfsMode> pathMode : paths.pathModes()) {
                IgfsMode mode = pathMode.getValue();

                initSecondary |= mode == PROXY;
            }
        }

        if (initSecondary) {
            Map<String, String> props = paths.properties();

            String secUri = props.get(IgfsHadoopFileSystemWrapper.SECONDARY_FS_URI);
            String secConfPath = props.get(IgfsHadoopFileSystemWrapper.SECONDARY_FS_CONFIG_PATH);

            if (secConfPath == null)
                throw new IOException("Failed to connect to the secondary file system because configuration "
                        + "path is not provided.");

            if (secUri == null)
                throw new IOException(
                        "Failed to connect to the secondary file system because URI is not provided.");

            try {
                secondaryUri = new URI(secUri);

                URL secondaryCfgUrl = U.resolveIgniteUrl(secConfPath);

                Configuration conf = new Configuration();

                if (secondaryCfgUrl != null)
                    conf.addResource(secondaryCfgUrl);

                String prop = String.format("fs.%s.impl.disable.cache", secondaryUri.getScheme());

                conf.setBoolean(prop, true);

                secondaryFs = FileSystem.get(secondaryUri, conf);
            } catch (URISyntaxException ignore) {
                if (!mgmt)
                    throw new IOException("Failed to resolve secondary file system URI: " + secUri);
                else
                    LOG.warn(
                            "Visor failed to create secondary file system (operations on paths with PROXY mode "
                                    + "will have no effect).");
            } catch (IOException e) {
                if (!mgmt)
                    throw new IOException("Failed to connect to the secondary file system: " + secUri, e);
                else
                    LOG.warn(
                            "Visor failed to create secondary file system (operations on paths with PROXY mode "
                                    + "will have no effect): " + e.getMessage());
            }
        }
    } finally {
        leaveBusy();
    }
}
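
The secondary file system branch above shows a recurring pattern around addResource(URL): load site configuration from an explicit URL, then disable the FileSystem cache for that scheme so that FileSystem.get actually honors the freshly built Configuration. A minimal sketch of just that pattern, with a hypothetical URL and URI:

import java.net.URI;
import java.net.URL;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class SecondaryFsExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical location of the secondary file system's site config.
        URL cfgUrl = new URL("file:///etc/hadoop/conf/secondary-site.xml");
        conf.addResource(cfgUrl);

        URI secondaryUri = URI.create("hdfs://secondary:8020/");
        // Bypass the FileSystem cache so this Configuration is actually used.
        conf.setBoolean(String.format("fs.%s.impl.disable.cache", secondaryUri.getScheme()), true);

        FileSystem secondaryFs = FileSystem.get(secondaryUri, conf);
        System.out.println(secondaryFs.getUri());
    }
}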

From source file:org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem.java

License:Apache License

/**
 * @param name URI passed to constructor.
 * @param cfg Configuration passed to constructor.
 * @throws IOException If initialization failed.
 */
private void initialize(URI name, Configuration cfg) throws IOException {
    enterBusy();

    try {
        if (rmtClient != null)
            throw new IOException("File system is already initialized: " + rmtClient);

        A.notNull(name, "name");
        A.notNull(cfg, "cfg");

        if (!IGFS_SCHEME.equals(name.getScheme()))
            throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME
                    + "://[name]/[optional_path], actual=" + name + ']');

        uriAuthority = name.getAuthority();

        // Override sequential reads before prefetch if needed.
        seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);

        if (seqReadsBeforePrefetch > 0)
            seqReadsBeforePrefetchOverride = true;

        // In GG replication factor is controlled by data cache affinity.
        // We use replication factor to force the whole file to be stored on local node.
        dfltReplication = (short) cfg.getInt("dfs.replication", 3);

        // Get file colocation control flag.
        colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
        preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);

        // Get log directory.
        String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);

        File logDirFile = U.resolveIgnitePath(logDirCfg);

        String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;

        rmtClient = new IgfsHadoopWrapper(uriAuthority, logDir, cfg, LOG);

        // Handshake.
        IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);

        grpBlockSize = handshake.blockSize();

        IgfsPaths paths = handshake.secondaryPaths();

        Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);

        if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
            // Initiate client logger.
            if (logDir == null)
                throw new IOException("Failed to resolve log directory: " + logDirCfg);

            Integer batchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority,
                    DFLT_IGFS_LOG_BATCH_SIZE);

            clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
        } else
            clientLog = IgfsLogger.disabledLogger();

        modeRslvr = new IgfsModeResolver(paths.defaultMode(), paths.pathModes());

        boolean initSecondary = paths.defaultMode() == PROXY;

        if (paths.pathModes() != null) {
            for (T2<IgfsPath, IgfsMode> pathMode : paths.pathModes()) {
                IgfsMode mode = pathMode.getValue();

                initSecondary |= mode == PROXY;
            }
        }

        if (initSecondary) {
            Map<String, String> props = paths.properties();

            String secUri = props.get(IgfsHadoopFileSystemWrapper.SECONDARY_FS_URI);
            String secConfPath = props.get(IgfsHadoopFileSystemWrapper.SECONDARY_FS_CONFIG_PATH);

            if (secConfPath == null)
                throw new IOException("Failed to connect to the secondary file system because configuration "
                        + "path is not provided.");

            if (secUri == null)
                throw new IOException(
                        "Failed to connect to the secondary file system because URI is not provided.");

            try {
                secondaryUri = new URI(secUri);

                URL secondaryCfgUrl = U.resolveIgniteUrl(secConfPath);

                if (secondaryCfgUrl == null)
                    throw new IOException("Failed to resolve secondary file system config URL: " + secConfPath);

                Configuration conf = new Configuration();

                conf.addResource(secondaryCfgUrl);

                String prop = String.format("fs.%s.impl.disable.cache", secondaryUri.getScheme());

                conf.setBoolean(prop, true);

                secondaryFs = AbstractFileSystem.get(secondaryUri, conf);
            } catch (URISyntaxException ignore) {
                throw new IOException("Failed to resolve secondary file system URI: " + secUri);
            } catch (IOException e) {
                throw new IOException("Failed to connect to the secondary file system: " + secUri, e);
            }
        }
    } finally {
        leaveBusy();
    }
}

From source file:org.apache.ignite.igfs.HadoopIgfsDualAbstractSelfTest.java

License:Apache License

/**
 * Check how prefetch override works.
 *
 * @throws Exception If failed.
 */
public void testOpenPrefetchOverride() throws Exception {
    create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));

    // Write enough data to the secondary file system.
    final int blockSize = IGFS_BLOCK_SIZE;

    IgfsOutputStream out = igfsSecondary.append(FILE, false);

    int totalWritten = 0;

    while (totalWritten < blockSize * 2 + chunk.length) {
        out.write(chunk);

        totalWritten += chunk.length;
    }

    out.close();

    awaitFileClose(igfsSecondary.asSecondary(), FILE);

    // Instantiate file system with overridden "seq reads before prefetch" property.
    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));

    int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;

    cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs:grid@"), seqReads);

    FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);

    // Read the first two blocks.
    Path fsHome = new Path(PRIMARY_URI);
    Path dir = new Path(fsHome, DIR.name());
    Path subdir = new Path(dir, SUBDIR.name());
    Path file = new Path(subdir, FILE.name());

    FSDataInputStream fsIn = fs.open(file);

    final byte[] readBuf = new byte[blockSize * 2];

    fsIn.readFully(0, readBuf, 0, readBuf.length);

    // Wait for a while for prefetch to finish (if any).
    IgfsMetaManager meta = igfs.context().meta();

    IgfsFileInfo info = meta.info(meta.fileId(FILE));

    IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);

    GridCache<IgfsBlockKey, byte[]> dataCache = igfs.context().kernalContext().cache()
            .cache(igfs.configuration().getDataCacheName());

    for (int i = 0; i < 10; i++) {
        if (dataCache.containsKey(key))
            break;
        else
            U.sleep(100);
    }

    fsIn.close();

    // Remove the file from the secondary file system.
    igfsSecondary.delete(FILE, false);

    // Try reading the third block. Should fail.
    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            IgfsInputStream in0 = igfs.open(FILE);

            in0.seek(blockSize * 2);

            try {
                in0.read(readBuf);
            } finally {
                U.closeQuiet(in0);
            }

            return null;
        }
    }, IOException.class, "Failed to read data due to secondary file system exception: /dir/subdir/file");
}
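
The override in this test works because a value written with set()/setInt() takes precedence over values supplied by previously added resources. A minimal sketch of that behavior; the property name and in-memory XML are made up and stand in for the real PRIMARY_CFG resource:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;

public class ResourceOverrideExample {
    public static void main(String[] args) {
        Configuration cfg = new Configuration(false);
        String xml = "<?xml version=\"1.0\"?><configuration>"
                + "<property><name>seq.reads.before.prefetch</name><value>1</value></property>"
                + "</configuration>";
        cfg.addResource(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));
        // The programmatic write wins over the value from the added resource.
        cfg.setInt("seq.reads.before.prefetch", 3);
        System.out.println(cfg.getInt("seq.reads.before.prefetch", -1)); // prints: 3
    }
}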