Example usage for org.apache.hadoop.conf Configuration getStringCollection

Introduction

On this page you can find example usage for org.apache.hadoop.conf.Configuration.getStringCollection.

Prototype

public Collection<String> getStringCollection(String name) 

Document

Get the comma-delimited values of the name property as a collection of Strings.
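
A minimal, self-contained sketch of the call itself (the property name example.hosts is illustrative, not taken from the examples below):

import java.util.Collection;

import org.apache.hadoop.conf.Configuration;

public class GetStringCollectionDemo {
    public static void main(String[] args) {
        // 'false' skips loading the default resource files.
        Configuration conf = new Configuration(false);
        conf.set("example.hosts", "host1,host2,host3"); // comma-delimited value
        Collection<String> hosts = conf.getStringCollection("example.hosts");
        System.out.println(hosts); // [host1, host2, host3]
        // An unset property yields an empty collection, not null.
        System.out.println(conf.getStringCollection("example.missing").isEmpty()); // true
    }
}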

Usage

From source file:com.moz.fiji.hadoop.configurator.ConfigurationMethod.java

License:Apache License

/**
 * Calls an object's method with the value read from a Configuration instance.
 *
 * @param instance The object to populate.
 * @param conf The configuration to read from.
 * @throws IllegalAccessException If the method cannot be called on the object.
 * @throws HadoopConfigurationException If there is a problem with the annotation definition.
 */
public void call(Object instance, Configuration conf) throws IllegalAccessException {
    final String key = getKey();
    if (null == key) {
        throw new HadoopConfigurationException("Missing 'key' attribute of @HadoopConf on "
                + instance.getClass().getName() + "." + mMethod.getName());
    }

    if (!mMethod.isAccessible()) {
        mMethod.setAccessible(true);
    }

    final Class<?>[] parameterTypes = mMethod.getParameterTypes();
    if (1 != parameterTypes.length) {
        throw new HadoopConfigurationException(
                "Methods annotated with @HadoopConf must have exactly one parameter: "
                        + instance.getClass().getName() + "." + mMethod.getName());
    }

    final Class<?> parameterType = parameterTypes[0];

    try {
        try {
            if (boolean.class == parameterType) {
                mMethod.invoke(instance, conf.getBoolean(key, Boolean.parseBoolean(getDefault())));
            } else if (float.class == parameterType) {
                mMethod.invoke(instance, conf.getFloat(key, Float.parseFloat(getDefault())));
            } else if (double.class == parameterType) {
                mMethod.invoke(instance, conf.getDouble(key, Double.parseDouble(getDefault())));
            } else if (int.class == parameterType) {
                mMethod.invoke(instance, conf.getInt(key, Integer.parseInt(getDefault())));
            } else if (long.class == parameterType) {
                mMethod.invoke(instance, conf.getLong(key, Long.parseLong(getDefault())));
            } else if (parameterType.isAssignableFrom(String.class)) {
                mMethod.invoke(instance, conf.get(key, getDefault()));
            } else if (parameterType.isAssignableFrom(Collection.class)) {
                mMethod.invoke(instance, conf.getStringCollection(key));
            } else if (String[].class == parameterType) {
                mMethod.invoke(instance, new Object[] { conf.getStrings(key) });
            } else {
                throw new HadoopConfigurationException(
                        "Unsupported method parameter type annotated by @HadoopConf: "
                                + instance.getClass().getName() + "." + mMethod.getName());
            }
        } catch (NumberFormatException e) {
            // NOTE: reached when getDefault() cannot be parsed as a number.
            // Because the default argument above is evaluated eagerly, this
            // happens even when the key is set in the configuration; invoking
            // with the raw String default only succeeds for String-compatible
            // parameter types.
            mMethod.invoke(instance, getDefault());
        }
    } catch (InvocationTargetException e) {
        throw new HadoopConfigurationException(e);
    }
}
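
For context, a hypothetical class that call() could populate; the @HadoopConf attribute name key is inferred from the getKey() call above and is an assumption, not verified against the library source:

// Hypothetical target class for call(); the 'key' attribute is assumed.
public class WorkerSettings {
    private Collection<String> mHosts;

    @HadoopConf(key = "worker.hosts")
    public void setHosts(Collection<String> hosts) {
        // Matches the Collection branch above, so the value arrives via
        // conf.getStringCollection("worker.hosts").
        mHosts = hosts;
    }
}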

From source file:com.moz.fiji.hadoop.configurator.ConfigurationVariable.java

License:Apache License

/**
 * Populates an object's field with the value read from a Configuration instance.
 *
 * @param instance The object to populate.
 * @param conf The configuration to read from.
 * @throws IllegalAccessException If the field cannot be set on the object.
 * @throws HadoopConfigurationException If there is a problem with the annotation definition.
 */
public void setValue(Object instance, Configuration conf) throws IllegalAccessException {
    final String key = getKey();
    if (null == key) {
        throw new HadoopConfigurationException("Missing 'key' attribute of @HadoopConf on "
                + instance.getClass().getName() + "." + mField.getName());
    }
    if (null == conf.get(key) && mAnnotation.defaultValue().isEmpty()) {
        // Nothing set in the configuration, and no default value
        // specified. Just leave the field alone.
        return;
    }

    if (!mField.isAccessible()) {
        mField.setAccessible(true);
    }

    try {
        if (boolean.class == mField.getType()) {
            mField.setBoolean(instance, conf.getBoolean(key, getDefaultBoolean(instance)));
        } else if (float.class == mField.getType()) {
            mField.setFloat(instance, conf.getFloat(key, getDefaultFloat(instance)));
        } else if (double.class == mField.getType()) {
            mField.setDouble(instance, conf.getDouble(key, getDefaultDouble(instance)));
        } else if (int.class == mField.getType()) {
            mField.setInt(instance, conf.getInt(key, getDefaultInt(instance)));
        } else if (long.class == mField.getType()) {
            mField.setLong(instance, conf.getLong(key, getDefaultLong(instance)));
        } else if (mField.getType().isAssignableFrom(String.class)) {
            mField.set(instance, conf.get(key, getDefaultString(instance)));
        } else if (mField.getType().isAssignableFrom(Collection.class)) {
            mField.set(instance, conf.getStringCollection(key));
        } else if (String[].class == mField.getType()) {
            mField.set(instance, conf.getStrings(key));
        } else {
            throw new HadoopConfigurationException("Unsupported field type annotated by @HadoopConf: "
                    + instance.getClass().getName() + "." + mField.getName());
        }
    } catch (NumberFormatException e) {
        // That's okay. The default value for the field will be kept.
    }
}
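
The field-based counterpart to the sketch above, with the same assumed annotation attribute:

// Hypothetical target class for setValue().
public class JobSettings {
    // Populated by the Collection branch of setValue() via
    // conf.getStringCollection("job.input.hosts").
    @HadoopConf(key = "job.input.hosts")
    private Collection<String> mInputHosts;
}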

From source file:com.splicemachine.mrio.api.SpliceTableMapReduceUtil.java

License:Apache License

/**
 * Add the jars containing the given classes to the job's configuration
 * such that JobClient will ship them to the cluster and add them to
 * the DistributedCache.
 */
public static void addDependencyJars(Configuration conf, Class... classes) throws IOException {

    FileSystem localFs = FileSystem.getLocal(conf);

    Set<String> jars = new HashSet<String>();

    // Add jars that are already in the tmpjars variable
    jars.addAll(conf.getStringCollection("tmpjars"));

    // Add jars containing the specified classes
    for (Class clazz : classes) {
        if (clazz == null)
            continue;

        String pathStr = findOrCreateJar(clazz);
        if (pathStr == null) {
            LOG.warn("Could not find jar for class " + clazz + " in order to ship it to the cluster.");
            continue;
        }
        Path path = new Path(pathStr);
        if (!localFs.exists(path)) {
            LOG.warn("Could not validate jar file " + path + " for class " + clazz);
            continue;
        }
        jars.add(path.makeQualified(localFs).toString());
    }
    if (jars.isEmpty())
        return;

    conf.set("tmpjars", StringUtils.arrayToString(jars.toArray(new String[0])));
}
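
A hypothetical call site; MyMapper and MyReducer stand in for real job classes:

Configuration conf = new Configuration();
SpliceTableMapReduceUtil.addDependencyJars(conf, MyMapper.class, MyReducer.class);
// "tmpjars" now also lists the qualified local paths of the jars
// containing MyMapper and MyReducer.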

From source file:com.yunrang.hadoop.app.utils.CustomizedUtil.java

License:Apache License

/**
 * Returns a classpath string built from the content of the "tmpjars" value
 * in {@code conf}. Also exposed to shell scripts via `bin/hbase mapredcp`.
 */
public static String buildDependencyClasspath(Configuration conf) {
    if (conf == null) {
        throw new IllegalArgumentException("Must provide a configuration object.");
    }
    Set<String> paths = new HashSet<String>(conf.getStringCollection("tmpjars"));
    if (paths.size() == 0) {
        throw new IllegalArgumentException("Configuration contains no tmpjars.");
    }
    StringBuilder sb = new StringBuilder();
    for (String s : paths) {
        // entries can take the form 'file:/path/to/file.jar'.
        int idx = s.indexOf(":");
        if (idx != -1)
            s = s.substring(idx + 1);
        if (sb.length() > 0)
            sb.append(File.pathSeparator);
        sb.append(s);
    }
    return sb.toString();
}
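
A sketch of the round trip, with illustrative jar paths:

Configuration conf = new Configuration(false);
conf.set("tmpjars", "file:/tmp/a.jar,file:/tmp/b.jar");
String classpath = CustomizedUtil.buildDependencyClasspath(conf);
// On Unix: "/tmp/a.jar:/tmp/b.jar" (entry order is not guaranteed,
// because the values pass through a HashSet).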

From source file:common.DataNode.java

License:Apache License

static Collection<URI> getStorageDirs(Configuration conf) {
    Collection<String> dirNames = conf.getStringCollection(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
    return Util.stringCollectionAsURIs(dirNames);
}
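
A sketch of the input this expects; the directory paths are illustrative:

Configuration conf = new Configuration(false);
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, "/data/1/dfs/dn,/data/2/dfs/dn");
Collection<URI> dirs = DataNode.getStorageDirs(conf);
// Util.stringCollectionAsURIs converts each comma-delimited entry to a
// URI; bare local paths typically resolve to file: URIs.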

From source file:org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.java

License:Apache License

/**
 * Gets the ranges to scan over from a job.
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop configuration object to configure
 * @return the ranges
 * @throws IOException
 *           if the ranges have been encoded improperly
 * @since 1.6.0
 * @see #setRanges(Class, Configuration, Collection)
 */
public static List<Range> getRanges(Class<?> implementingClass, Configuration conf) throws IOException {

    Collection<String> encodedRanges = conf
            .getStringCollection(enumToConfKey(implementingClass, ScanOpts.RANGES));
    List<Range> ranges = new ArrayList<>();
    for (String rangeString : encodedRanges) {
        ByteArrayInputStream bais = new ByteArrayInputStream(Base64.getDecoder().decode(rangeString));
        Range range = new Range();
        range.readFields(new DataInputStream(bais));
        ranges.add(range);
    }
    return ranges;
}
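
The encoding side is not shown here; a sketch of the inverse of the decoding loop above, mirroring its Base64/DataInputStream pairing (Range implements Writable, so it provides write(DataOutput)):

ByteArrayOutputStream baos = new ByteArrayOutputStream();
range.write(new DataOutputStream(baos));
String encodedRange = Base64.getEncoder().encodeToString(baos.toByteArray());
// Base64 output contains no commas, so encodedRange is safe to store in
// a comma-delimited property read back by getStringCollection().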

From source file:org.apache.avro.hadoop.io.AvroSerialization.java

License:Apache License

/**
 * Adds the AvroSerialization scheme to the configuration, so SerializationFactory
 * instances constructed from the given configuration will be aware of it.
 *
 * @param conf The configuration to add AvroSerialization to.
 */
public static void addToConfiguration(Configuration conf) {
    Collection<String> serializations = conf.getStringCollection("io.serializations");
    if (!serializations.contains(AvroSerialization.class.getName())) {
        serializations.add(AvroSerialization.class.getName());
        conf.setStrings("io.serializations", serializations.toArray(new String[serializations.size()]));
    }
}
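
A typical call site. Note that getStringCollection() returns a copy rather than a live view, which is why the method writes the list back with setStrings():

Job job = Job.getInstance(new Configuration());
AvroSerialization.addToConfiguration(job.getConfiguration());
// "io.serializations" now includes AvroSerialization exactly once;
// repeat calls are no-ops thanks to the contains() check above.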

From source file:org.apache.blur.mapreduce.lib.BlurMapReduceUtil.java

License:Apache License

/**
 * Adds all the jars in the same path as the blur jar files.
 *
 * @param conf
 * @throws IOException
 */
public static void addAllJarsInBlurLib(Configuration conf) throws IOException {
    FileSystem localFs = FileSystem.getLocal(conf);
    Set<String> jars = new HashSet<String>();
    jars.addAll(conf.getStringCollection("tmpjars"));

    String property = System.getProperty("java.class.path");
    // NOTE: assumes a Unix-style classpath with ':' as the separator.
    String[] files = property.split(":");

    String blurLibPath = getPath("blur-", files);
    if (blurLibPath == null) {
        return;
    }
    List<String> pathes = getPathes(blurLibPath, files);
    for (String pathStr : pathes) {
        Path path = new Path(pathStr);
        if (!localFs.exists(path)) {
            LOG.warn("Could not validate jar file " + path);
            continue;
        }
        jars.add(path.makeQualified(localFs.getUri(), localFs.getWorkingDirectory()).toString());
    }
    if (jars.isEmpty()) {
        return;
    }
    conf.set("tmpjars", StringUtils.arrayToString(jars.toArray(new String[0])));
}
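
A hypothetical call site, assuming the blur-* jars are on the java.class.path of the submitting JVM:

Configuration conf = new Configuration();
BlurMapReduceUtil.addAllJarsInBlurLib(conf);
// "tmpjars" now includes every jar that sits alongside the blur-* jar
// on the local classpath.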

From source file:org.apache.blur.mapreduce.lib.BlurMapReduceUtil.java

License:Apache License

/**
 * Add the jars containing the given classes to the job's configuration such
 * that JobClient will ship them to the cluster and add them to the
 * DistributedCache.
 */
public static void addDependencyJars(Configuration conf, Class<?>... classes) throws IOException {
    FileSystem localFs = FileSystem.getLocal(conf);
    Set<String> jars = new HashSet<String>();
    // Add jars that are already in the tmpjars variable
    jars.addAll(conf.getStringCollection("tmpjars"));

    // Add jars containing the specified classes
    for (Class<?> clazz : classes) {
        if (clazz == null) {
            continue;
        }

        String pathStr = findOrCreateJar(clazz);
        if (pathStr == null) {
            LOG.warn("Could not find jar for class " + clazz + " in order to ship it to the cluster.");
            continue;
        }
        Path path = new Path(pathStr);
        if (!localFs.exists(path)) {
            LOG.warn("Could not validate jar file " + path + " for class " + clazz);
            continue;
        }
        jars.add(path.makeQualified(localFs.getUri(), localFs.getWorkingDirectory()).toString());
    }
    if (jars.isEmpty()) {
        return;
    }

    conf.set("tmpjars", StringUtils.arrayToString(jars.toArray(new String[0])));
}

From source file:org.apache.blur.mapreduce.lib.CsvBlurDriverTest.java

License:Apache License

@Test
public void testCsvBlurDriverTest() throws Exception {
    Configuration configurationSetup = new Configuration();
    ControllerPool controllerPool = new CsvBlurDriver.ControllerPool() {
        @Override
        public Iface getClient(String controllerConnectionStr) {
            return getMockIface();
        }
    };
    AtomicReference<Callable<Void>> ref = new AtomicReference<Callable<Void>>();
    Job job = CsvBlurDriver.setupJob(configurationSetup, controllerPool, ref, "-c", "host:40010", "-d",
            "family1", "col1", "col2", "-d", "family2", "col3", "col4", "-t", "table1", "-i", _path1.toString(),
            "-i", _path2.toString());
    assertNotNull(job);
    Configuration configuration = job.getConfiguration();
    TableDescriptor tableDescriptor = BlurOutputFormat.getTableDescriptor(configuration);
    assertEquals("table1", tableDescriptor.getName());
    Collection<String> inputs = configuration.getStringCollection("mapred.input.dir");
    assertEquals(2, inputs.size());
    Map<String, List<String>> familyAndColumnNameMap = CsvBlurMapper.getFamilyAndColumnNameMap(configuration);
    assertEquals(2, familyAndColumnNameMap.size());
}