Example usage for org.apache.hadoop.conf Configuration iterator

Introduction

This page collects usage examples for the org.apache.hadoop.conf.Configuration#iterator() method.

Prototype

@Override
public Iterator<Map.Entry<String, String>> iterator() 

Document

Get an Iterator to go through the list of String key-value pairs in the configuration.
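
Because Configuration implements Iterable<Map.Entry<String, String>>, the returned iterator can be consumed explicitly or through an enhanced for loop. A minimal, self-contained sketch (the property name and value below are placeholders):

import java.util.Map;

import org.apache.hadoop.conf.Configuration;

public class ConfigurationIteratorDemo {
    public static void main(String[] args) {
        // false: do not load the default resources (core-default.xml, core-site.xml).
        Configuration conf = new Configuration(false);
        conf.set("example.key", "example.value"); // placeholder entry

        // Configuration implements Iterable, so its iterator drives a for-each loop.
        for (Map.Entry<String, String> entry : conf) {
            System.out.println(entry.getKey() + " = " + entry.getValue());
        }
    }
}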

Usage

From source file: org.mrgeo.data.DataProviderFactory.java

License: Apache License

public static ProviderProperties loadProviderPropertiesFromConfig(Configuration conf) {
    // Tell each data provider to load their config settings from the Configuration.
    // This is the inverse operation to saveProviderPropertiesToConfig.
    Iterator<Map.Entry<String, String>> iter = conf.iterator();
    Map<String, String> configSettings = new HashMap<String, String>();
    int prefixLen = DATA_PROVIDER_CONFIG_PREFIX.length();
    while (iter.hasNext()) {
        Map.Entry<String, String> entry = iter.next();
        if (entry.getKey().startsWith(DATA_PROVIDER_CONFIG_PREFIX)) {
            configSettings.put(entry.getKey().substring(prefixLen), entry.getValue());
        }
    }
    setConfigurationForProviders(configSettings);

    String userName = conf.get(PROVIDER_PROPERTY_USER_NAME, "");
    List<String> roles = new ArrayList<String>();
    String strRoles = conf.get(PROVIDER_PROPERTY_USER_ROLES, "");
    if (strRoles != null && !strRoles.isEmpty()) {
        String[] separated = strRoles.split(",");
        for (String r : separated) {
            roles.add(r);
        }
    }
    return new ProviderProperties(userName, roles);
}
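
The prefix filter above means the matching save operation only has to prepend DATA_PROVIDER_CONFIG_PREFIX to each setting's name. A sketch of the shape such an inverse takes (the helper name and signature here are illustrative, not MrGeo's actual saveProviderPropertiesToConfig):

// Illustrative sketch of the inverse operation: store each setting under a key
// built from the shared prefix so loadProviderPropertiesFromConfig can recover it.
static void saveConfigSettings(Configuration conf, Map<String, String> settings) {
    for (Map.Entry<String, String> entry : settings.entrySet()) {
        conf.set(DATA_PROVIDER_CONFIG_PREFIX + entry.getKey(), entry.getValue());
    }
}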

From source file: org.sf.xrime.algorithms.GraphAlgorithmContext.java

License: Apache License

/**
 * Set the source graph of this execution context.
 * @param source source graph.
 */
public void setSource(Graph source) {
    this.source = source;

    // Temp configuration for copy, since we need to clear the entries for the source
    // graph, and this version of Configuration does not provide a remove method.
    Configuration temp_conf = new Configuration(this);
    Iterator<Map.Entry<String, String>> iterator = temp_conf.iterator();
    // Clear this context.
    clear();
    // Deal with each entry.
    while (iterator.hasNext()) {
        Map.Entry<String, String> entry = iterator.next();
        String key = entry.getKey();
        if (key.endsWith(sourceGraphPostfix)) {
            // Skip this entry.
            continue;
        } else {
            // Copy this entry.
            this.set(entry.getKey(), entry.getValue());
        }
    }

    storeGraph(source, this, sourceGraphPostfix);
}
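
Newer Hadoop releases (0.23 and later) do expose Configuration.unset(String), so where that method is available the copy-clear-repopulate sequence above, and the identical one in setDestination below, can be replaced by removing the matching keys directly. A sketch under that assumption:

// Collect the keys first to avoid mutating the configuration mid-iteration.
List<String> obsolete = new ArrayList<String>();
for (Map.Entry<String, String> entry : this) {
    if (entry.getKey().endsWith(sourceGraphPostfix)) {
        obsolete.add(entry.getKey());
    }
}
for (String key : obsolete) {
    unset(key);
}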

From source file: org.sf.xrime.algorithms.GraphAlgorithmContext.java

License: Apache License

/**
 * Set the destination graph of this execution context.
 * @param destination the destination graph.
 */
public void setDestination(Graph destination) {
    this.destination = destination;

    // Temp configuration for copy, since we need to clear the entries for the destination
    // graph, and this version of Configuration does not provide a remove method.
    Configuration temp_conf = new Configuration(this);
    Iterator<Map.Entry<String, String>> iterator = temp_conf.iterator();
    // Clear this context.
    clear();
    // Deal with each entry.
    while (iterator.hasNext()) {
        Map.Entry<String, String> entry = iterator.next();
        String key = entry.getKey();
        if (key.endsWith(destinationGraphPostfix)) {
            // Skip this entry.
            continue;
        } else {
            // Copy this entry.
            this.set(entry.getKey(), entry.getValue());
        }
    }

    storeGraph(destination, this, destinationGraphPostfix);
}
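
Since setSource and setDestination differ only in the postfix they filter on, the shared copy-and-clear logic could be factored into a single private helper, for example (a hypothetical refactoring, not part of the original class):

// Hypothetical helper: drop every entry whose key ends with the given postfix.
private void removeEntriesWithPostfix(String postfix) {
    Configuration temp_conf = new Configuration(this);
    clear();
    for (Map.Entry<String, String> entry : temp_conf) {
        if (!entry.getKey().endsWith(postfix)) {
            this.set(entry.getKey(), entry.getValue());
        }
    }
}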

From source file: org.sf.xrime.algorithms.GraphAlgorithmContext.java

License: Apache License

/**
 * Read the settings identified by the specified postfix from the given Hadoop
 * configuration object and store them in the specified properties.
 * @param properties the properties to write to.
 * @param conf the Hadoop configuration object to read from.
 * @param postfix the postfix identifying the settings involved.
 */
private static void readProperties(Properties properties, Configuration conf, String postfix) {
    if (postfix == null || conf == null || properties == null) {
        return;
    }

    Iterator<Map.Entry<String, String>> iterator = conf.iterator();

    while (iterator.hasNext()) {
        Map.Entry<String, String> entry = iterator.next();
        String key = entry.getKey();
        if (key.endsWith(postfix)) {
            properties.setProperty(key.substring(0, key.length() - postfix.length()), entry.getValue());
        }
    }
}
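
A quick illustration of the postfix convention this helper expects (the key and postfix values are invented for the example):

Configuration conf = new Configuration(false);
conf.set("vertex.count.source", "100");  // ends with the postfix ".source"
conf.set("mapreduce.job.name", "demo");  // no postfix; ignored

Properties props = new Properties();
readProperties(props, conf, ".source");
// props now maps "vertex.count" to "100" (the postfix is stripped from the key).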

From source file: org.shaf.core.util.ConfigUtils.java

License: Apache License

/**
 * Exports properties with the specified pattern from {@link Configuration
 * Hadoop configuration} object to the {@link ProcessConfiguration process
 * configuration} object.
 * 
 * @param source
 *            the Hadoop configuration object.
 * @param target
 *            the process configuration object.
 * @param pattern
 *            the pattern for selecting properties.
 */
public final static void export(final Configuration source, final ProcessConfiguration target,
        final String pattern) {
    Iterator<Entry<String, String>> entries = source.iterator();
    while (entries.hasNext()) {
        Entry<String, String> entry = entries.next();
        if (Pattern.matches(pattern, entry.getKey())) {
            target.addProperty(entry.getKey(), entry.getValue());

            LOG.trace("Exported property from Hadoop to process configuration: " + entry.getKey()
                    + "=" + entry.getValue());
        }
    }
}
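
One detail worth noting: Pattern.matches(String, CharSequence) recompiles the regular expression on every call. For large configurations, compiling the pattern once outside the loop avoids the repeated work (a sketch):

Pattern compiled = Pattern.compile(pattern);
for (Entry<String, String> entry : source) {
    if (compiled.matcher(entry.getKey()).matches()) {
        target.addProperty(entry.getKey(), entry.getValue());
    }
}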

From source file: org.talend.components.hadoopcluster.runtime.configuration.HadoopCMClusterService.java

License: Open Source License

private Configuration filterByBlacklist(Configuration originalConf, List<String> blacklist) {
    if (blacklist != null && blacklist.size() > 0) {
        Configuration filteredConf = new Configuration(false);
        Iterator<Entry<String, String>> iterator = originalConf.iterator();
        while (iterator.hasNext()) {
            Entry<String, String> next = iterator.next();
            if (blacklist.contains(next.getKey())) {
                continue;
            }
            filteredConf.set(next.getKey(), next.getValue());
        }
        originalConf = filteredConf;
    }
    return originalConf;
}
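
Two details here are easy to miss: new Configuration(false) skips loading the default resources, so the filtered copy holds only the entries copied over explicitly, and List.contains is a linear scan. A variant that wraps the blacklist in a java.util.HashSet for constant-time lookups (a sketch, same behavior otherwise):

Set<String> blocked = new HashSet<>(blacklist); // O(1) membership checks
Configuration filteredConf = new Configuration(false);
for (Entry<String, String> next : originalConf) {
    if (!blocked.contains(next.getKey())) {
        filteredConf.set(next.getKey(), next.getValue());
    }
}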

From source file: org.talend.repository.hadoopcluster.conf.RetrieveLocalConfsService.java

License: Open Source License

private Map<String, String> configurationToMap(Configuration configuration) {
    Map<String, String> cMap = new HashMap<>();
    Iterator<Entry<String, String>> confsIter = configuration.iterator();
    while (confsIter.hasNext()) {
        Entry<String, String> confsEntry = confsIter.next();
        cMap.put(confsEntry.getKey(), confsEntry.getValue());
    }
    return cMap;
}
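
Since Configuration implements Iterable<Map.Entry<String, String>>, the same conversion can be written with an enhanced for loop, with no explicit iterator (an equivalent sketch):

private Map<String, String> configurationToMap(Configuration configuration) {
    Map<String, String> cMap = new HashMap<>();
    for (Entry<String, String> confsEntry : configuration) {
        cMap.put(confsEntry.getKey(), confsEntry.getValue());
    }
    return cMap;
}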

From source file: org.trustedanalytics.atk.giraph.io.titan.GiraphToTitanGraphFactory.java

License: Apache License

/**
 * Create a Titan BaseConfiguration from the Giraph configuration.
 *
 * @param hadoopConfig the Giraph configuration
 * @param prefix the prefix to strip for Titan
 * @return the resulting BaseConfiguration
 */
public static BaseConfiguration createTitanBaseConfiguration(Configuration hadoopConfig, String prefix) {

    BaseConfiguration titanConfig = new BaseConfiguration();
    Iterator<Map.Entry<String, String>> itty = hadoopConfig.iterator();

    while (itty.hasNext()) {
        Map.Entry<String, String> entry = itty.next();
        String key = entry.getKey();
        String value = entry.getValue();

        if (key.startsWith(prefix)) {
            titanConfig.setProperty(key.substring(prefix.length() + 1), value);
        }
    }
    return titanConfig;
}
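
The prefix.length() + 1 in the substring call skips the separator character that follows the prefix as well. For example, assuming a prefix of "giraph.titan" (an illustrative value), the key "giraph.titan.storage.backend" would be stored in the Titan configuration as "storage.backend". The next example relies on the same convention.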

From source file: org.trustedanalytics.atk.graphbuilder.titan.cache.TitanHadoopCacheConfiguration.java

License: Apache License

/**
 * Create Titan configuration from Hadoop configuration
 *
 * @param hadoopConfig Hadoop configuration
 * @return Titan configuration
 */
private SerializableBaseConfiguration createTitanConfiguration(Configuration hadoopConfig) {
    SerializableBaseConfiguration titanConfig = new SerializableBaseConfiguration();
    Iterator<Map.Entry<String, String>> itty = hadoopConfig.iterator();

    while (itty.hasNext()) {
        Map.Entry<String, String> entry = itty.next();
        String key = entry.getKey();
        String value = entry.getValue();

        if (key.startsWith(TITAN_HADOOP_PREFIX)) {
            titanConfig.setProperty(key.substring(TITAN_HADOOP_PREFIX.length() + 1), value);
        }
    }
    return titanConfig;
}

From source file: root.input.InputJob.java

License: Apache License

/**
 * Writes a timestamp, along with the job's CONF_PREFIX settings, to a file.
 *
 * @param conf the configuration object to work with
 * @param filename the location to write to
 * @param time the timestamp to write
 * @return true if the write succeeded, false otherwise
 */
protected boolean writeTimestamp(Configuration conf, String filename, long time) {

    boolean success = true;

    try {
        URI localURI = new URI(conf.get("fs.default.name"));
        FileSystem localFS = FileSystem.get(localURI, conf);

        Path timestamp = new Path(filename);

        if (localFS.exists(timestamp)) {
            localFS.delete(timestamp, true);
        }

        FSDataOutputStream out = localFS.create(timestamp);

        out.writeBytes("HAP duration: " + time + "\n");

        Iterator<Map.Entry<String, String>> iter = conf.iterator();
        while (iter.hasNext()) {
            Map.Entry<String, String> entry = iter.next();
            String key = entry.getKey();
            String val = entry.getValue();

            if (key.startsWith(CONF_PREFIX)) {
                key = key.substring(CONF_PREFIX.length());
                out.writeBytes(key);
                out.writeBytes("\t");
                out.writeBytes(val);
                out.writeBytes("\n");
            }
        }

        out.flush();
        out.close();

        localFS.close();
    } catch (IOException e) {
        System.err.println(e);
        success = false;
    } catch (URISyntaxException e) {
        System.err.println(e);
        success = false;
    }

    return success;
}
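
If this method were adapted for current Hadoop versions, two details would change: the fs.default.name key is deprecated in favor of fs.defaultFS (FileSystem.get(conf) resolves either), and the stream and file system are only closed on the success path above. A sketch of the write portion using try-with-resources, under those assumptions:

Path timestamp = new Path(filename);
try (FileSystem fs = FileSystem.get(conf);
        FSDataOutputStream out = fs.create(timestamp, true)) { // true: overwrite
    out.writeBytes("HAP duration: " + time + "\n");
    for (Map.Entry<String, String> entry : conf) {
        if (entry.getKey().startsWith(CONF_PREFIX)) {
            out.writeBytes(entry.getKey().substring(CONF_PREFIX.length())
                    + "\t" + entry.getValue() + "\n");
        }
    }
} catch (IOException e) {
    System.err.println(e);
    return false;
}
return true;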