Example usage for org.apache.hadoop.conf Configuration getStrings

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration#getStrings from open-source projects.

Prototype

public String[] getStrings(String name) 

Document

Get the comma-delimited values of the name property as an array of Strings. This single-argument variant has no default value: it returns null when the property is not set.
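
A minimal, self-contained sketch of this behavior (the property name example.hosts and its values are illustrative, not taken from any project below):

import org.apache.hadoop.conf.Configuration;

public class GetStringsExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // setStrings joins its arguments with commas before storing them.
        conf.setStrings("example.hosts", "alpha", "beta", "gamma");

        String[] hosts = conf.getStrings("example.hosts"); // ["alpha", "beta", "gamma"]
        String[] missing = conf.getStrings("no.such.key"); // null: this variant has no default

        System.out.println(hosts.length);    // 3
        System.out.println(missing == null); // true
    }
}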

Usage

From source file:com.bah.culvert.hive.CulvertHiveUtils.java

License:Apache License

/**
 * Get the hive column types from the conf. Corresponds 1-1 to the mappings.
 * @param conf The configuration to get the hive column types from.
 * @return The column types.
 */
public static String[] getHiveColumnTypesInConf(Configuration conf) {
    return conf.getStrings(CULVERT_HIVE_COLUMN_TYPES_CONF_KEY);
}

From source file:com.bol.crazypigs.HBaseStorage15.java

License:Apache License

@Override
public List<String> getShipFiles() {
    // Depend on HBase to do the right thing when available, as of HBASE-9165
    try {
        Method addHBaseDependencyJars = TableMapReduceUtil.class.getMethod("addHBaseDependencyJars",
                Configuration.class);
        if (addHBaseDependencyJars != null) {
            Configuration conf = new Configuration();
            addHBaseDependencyJars.invoke(null, conf);
            if (conf.get("tmpjars") != null) {
                String[] tmpjars = conf.getStrings("tmpjars");
                List<String> shipFiles = new ArrayList<String>(tmpjars.length);
                for (String tmpjar : tmpjars) {
                    shipFiles.add(new URL(tmpjar).getPath());
                }
                return shipFiles;
            }
        }
    } catch (NoSuchMethodException e) {
        LOG.debug("TableMapReduceUtil#addHBaseDependencyJars not available."
                + " Falling back to previous logic.", e);
    } catch (IllegalAccessException e) {
        LOG.debug("TableMapReduceUtil#addHBaseDependencyJars invocation"
                + " not permitted. Falling back to previous logic.", e);
    } catch (InvocationTargetException e) {
        LOG.debug("TableMapReduceUtil#addHBaseDependencyJars invocation"
                + " failed. Falling back to previous logic.", e);
    } catch (MalformedURLException e) {
        LOG.debug("TableMapReduceUtil#addHBaseDependencyJars tmpjars"
                + " had malformed URL. Falling back to previous logic.", e);
    }

    List<Class> classList = new ArrayList<Class>();
    classList.add(org.apache.hadoop.hbase.client.HTable.class); // main hbase jar or hbase-client
    classList.add(org.apache.hadoop.hbase.mapreduce.TableSplit.class); // main hbase jar or hbase-server
    if (!HadoopShims.isHadoopYARN()) { //Avoid shipping duplicate. Hadoop 0.23/2 itself has guava
        classList.add(com.google.common.collect.Lists.class); // guava
    }
    classList.add(org.apache.zookeeper.ZooKeeper.class); // zookeeper
    // Additional jars that are specific to v0.95.0+
    addClassToList("org.cloudera.htrace.Trace", classList); // htrace
    addClassToList("org.apache.hadoop.hbase.protobuf.generated.HBaseProtos", classList); // hbase-protocol
    addClassToList("org.apache.hadoop.hbase.TableName", classList); // hbase-common
    addClassToList("org.apache.hadoop.hbase.CompatibilityFactory", classList); // hbase-hadoop-compar
    addClassToList("org.jboss.netty.channel.ChannelFactory", classList); // netty
    return FuncUtils.getShipFiles(classList);
}

From source file:com.cloudera.recordservice.mr.PlanUtil.java

License:Apache License

/**
 * Generates a request from the configs set in jobConf.
 */
public static Request getRequest(Configuration jobConf) throws IOException {
    LOG.debug("Generating input splits.");

    String tblName = jobConf.get(ConfVars.TBL_NAME_CONF.name);
    String inputDir = jobConf.get(FileInputFormat.INPUT_DIR);
    String sqlQuery = jobConf.get(ConfVars.QUERY_NAME_CONF.name);

    int numSet = 0;
    if (tblName != null)
        ++numSet;
    if (inputDir != null)
        ++numSet;
    if (sqlQuery != null)
        ++numSet;

    if (numSet == 0) {
        throw new IllegalArgumentException("No input specified. Specify either '" + ConfVars.TBL_NAME_CONF.name
                + "', '" + ConfVars.QUERY_NAME_CONF.name + "' or '" + FileInputFormat.INPUT_DIR + "'");
    }
    if (numSet > 1) {
        throw new IllegalArgumentException("More than one input specified. Can " + "only specify one of '"
                + ConfVars.TBL_NAME_CONF.name + "'=" + tblName + ", '" + FileInputFormat.INPUT_DIR + "'="
                + inputDir + ", '" + ConfVars.QUERY_NAME_CONF.name + "'=" + sqlQuery);
    }

    String[] colNames = jobConf.getStrings(ConfVars.COL_NAMES_CONF.name);
    if (colNames == null)
        colNames = new String[0];

    if (tblName == null && colNames.length > 0) {
        // TODO: support this.
        throw new IllegalArgumentException("Column projections can only be specified with table inputs.");
    }

    Request request = null;
    if (tblName != null) {
        if (colNames.length == 0) {
            // If length of colNames = 0, return all possible columns
            // TODO: this has slightly different meaning than createProjectionRequest()
            // which treats empty columns as an empty projection. i.e. select * vs count(*)
            // Reconcile this.
            request = Request.createTableScanRequest(tblName);
        } else {
            List<String> projection = new ArrayList<String>();
            for (String c : colNames) {
                if (c == null || c.isEmpty()) {
                    throw new IllegalArgumentException(
                            "Cannot specify projection with null or empty column name.");
                }
                projection.add(c);
            }
            request = Request.createProjectionRequest(tblName, projection);
        }
    } else if (inputDir != null) {
        // TODO: inputDir is a comma-separated list of paths. The service needs to
        // handle that.
        if (inputDir.contains(",")) {
            throw new IllegalArgumentException("Only reading a single directory is currently supported.");
        }
        request = Request.createPathRequest(inputDir);
    } else if (sqlQuery != null) {
        request = Request.createSqlRequest(sqlQuery);
    } else {
        Preconditions.checkState(false);
    }
    return request;
}
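
A caller populates exactly one of the three inputs before requesting a plan. A hypothetical setup using the ConfVars keys referenced above (the table and column names are made up for illustration):

Configuration jobConf = new Configuration();
// Select a table input and project two of its columns.
jobConf.set(ConfVars.TBL_NAME_CONF.name, "sample_db.sample_tbl");
jobConf.setStrings(ConfVars.COL_NAMES_CONF.name, "id", "name");
Request request = PlanUtil.getRequest(jobConf);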

From source file:com.codefollower.lealone.hbase.server.HBaseTcpServer.java

License:Apache License

private void init(Configuration conf) {
    ArrayList<String> args = new ArrayList<String>();
    for (String arg : conf.getStrings(Constants.PROJECT_NAME_PREFIX + "args")) {
        int pos = arg.indexOf('=');
        if (pos == -1) {
            args.add(arg.trim());
        } else {
            args.add(arg.substring(0, pos).trim());
            args.add(arg.substring(pos + 1).trim());
        }
    }
    args.add("-tcpPort");
    args.add("" + tcpPort);
    args.add("-tcpDaemon");
    super.init(args.toArray(new String[0]));
}
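
Note that the loop above iterates the result of getStrings(String name) directly, so it throws a NullPointerException if the args property was never set. The vararg overload getStrings(String name, String... defaultValue) can supply an empty fallback; a hypothetical hardened form of the loop:

for (String arg : conf.getStrings(Constants.PROJECT_NAME_PREFIX + "args", new String[0])) {
    // ... same argument parsing as in the original method ...
}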

From source file:com.conversantmedia.mapreduce.io.avro.MultiSchemaAvroSerialization.java

License:Apache License

protected static Schema getSchemaAt(Configuration conf, int b) {
    String schemaName = conf.getStrings(CONF_KEY_MULTI_SCHEMAS)[b];
    if (schemaName == null) {
        throw new IllegalStateException("No avro schema registered for data.");
    }
    Schema schema = null;
    try {
        schema = (Schema) Class.forName(schemaName).getField("SCHEMA$").get(null);
    } catch (IllegalArgumentException | IllegalAccessException | NoSuchFieldException | SecurityException
            | ClassNotFoundException e) {
        logger().error(e.getMessage());
        throw new IllegalStateException(
                "Configured class [" + schemaName + "] does not contain an accessible static SCHEMA$ member.");
    }
    return schema;
}

From source file:com.conversantmedia.mapreduce.io.avro.MultiSchemaAvroSerialization.java

License:Apache License

/**
 * Looks up the index that was assigned to the given record class when its schema was registered.
 * @param conf   Hadoop configuration
 * @param c      the avro record type class
 * @return      the index of this schema assigned when it was registered
 * @see       #registerSchemas
 */
protected static int getIndexForSchema(Configuration conf, Class<?> c) {
    int idx = 0;
    for (String name : conf.getStrings(CONF_KEY_MULTI_SCHEMAS)) {
        if (c.getName().equals(name)) {
            return idx;
        }
        idx++;
    }
    throw new IllegalStateException("Schema for class [" + c.getName() + "] was not registered.");
}

From source file:com.conversantmedia.mapreduce.tool.DistributedResourceManager.java

License:Apache License

/**
 * Locates the resources in the configuration and distributed cache, etc., 
 * and sets them on the provided mapper instance.
 * @param bean            the object to inspect for resource annotations
 * @param config         the job configuration
 * @throws ToolException   if there are errors with reflection or the cache
 */
@SuppressWarnings("unchecked")
public static void initializeResources(Object bean, Configuration config) throws ToolException {
    try {
        List<Field> fields = MaraAnnotationUtil.INSTANCE.findAnnotatedFields(bean.getClass(), Resource.class);
        Path[] files = org.apache.hadoop.util.StringUtils
                .stringToPath(config.getStrings(MRJobConfig.CACHE_LOCALFILES));
        for (Field field : fields) {
            Resource resAnnotation = field.getAnnotation(Resource.class);
            String key = StringUtils.isEmpty(resAnnotation.name()) ? field.getName() : resAnnotation.name();
            String resourceId = config.get(CONFIGKEYBASE_RESOURCE + key);
            if (resourceId != null) {
                String[] parts = StringUtils.split(resourceId, VALUE_SEP);
                String className = parts[0];
                String valueString = parts[1];

                // Retrieve the value
                Object value = getResourceValue(field, valueString, className, files);

                setFieldValue(field, bean, value);
            }
        }
    } catch (IllegalArgumentException | IOException | ClassNotFoundException | IllegalAccessException e) {
        throw new ToolException(e);
    }
}

From source file:com.datasalt.pangool.tuplemr.TupleMRConfig.java

License:Apache License

static void deserializeComparators(Configuration conf, TupleMRConfig mrConfig) throws TupleMRException {
    String[] comparatorRefs = conf.getStrings(CONF_COMPARATOR_REFERENCES);
    String[] comparatorInstanceFiles = conf.getStrings(CONF_COMPARATOR_INSTANCES);

    if (comparatorRefs == null) {
        return;
    }
    try {
        for (int i = 0; i < comparatorRefs.length; i++) {
            String[] ref = comparatorRefs[i].split("\\|");
            String instanceFile = comparatorInstanceFiles[i];

            // Here we use "false" as last parameter because otherwise it could be
            // an infinite loop. We will call setConf() later.
            RawComparator<?> comparator = InstancesDistributor.loadInstance(conf, RawComparator.class,
                    instanceFile, false);

            if (ref[0].equals(COMMON)) {
                setComparator(mrConfig.getCommonCriteria(), ref[1], comparator);
            } else {
                setComparator(mrConfig.getSpecificOrderBys().get(Integer.valueOf(ref[0])), ref[1], comparator);
            }
        }
    } catch (IOException e) {
        throw new TupleMRException(e);
    }
}

From source file:com.datasalt.utils.commons.BaseConfigurationFactory.java

License:Apache License

private void configureSnappyCompression(Configuration conf) {

    String[] codecs = conf.getStrings("io.compression.codecs");
    boolean found = false;
    for (String codec : codecs) {
        if (codec.equals("org.apache.hadoop.io.compress.SnappyCodec")) {
            log.info("Snappy compression support detected. Enabling it, so all outputs are compressed...");

            // Compress Map output
            conf.set("mapred.compress.map.output", "true");
            conf.set("mapred.map.output.compression.codec", "org.apache.hadoop.io.compress.SnappyCodec");

            // Compress MapReduce output
            conf.set("mapred.output.compress", "true");
            conf.set("mapred.output.compression.codec", "org.apache.hadoop.io.compress.SnappyCodec");

            found = true;
            break;
        }
    }
    if (!found) {
        log.info("Snappy compression not enabled because it was not found. Continue without it");
    }
}

From source file:com.datasalt.utils.mapred.joiner.MultiJoinReducer.java

License:Apache License

@SuppressWarnings("unchecked")
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);
    try {
        Configuration conf = context.getConfiguration();
        this.conf = conf;

        /*
         * Creating a cached instance of each class for each channel
         */
        String[] classes = conf.getStrings(MultiJoiner.MULTIJOINER_CLASSES);
        String[] channels = conf.getStrings(MultiJoiner.MULTIJOINER_CHANNELS);
        for (int i = 0; i < classes.length; i++) {
            Class c = Class.forName(classes[i]);
            Object instance = ReflectionUtils.newInstance(c, conf);
            instances.put(Integer.valueOf(channels[i]), instance);
            this.classes.put(Integer.valueOf(channels[i]), c);
        }

    } catch (Exception e) {
        throw new IOException(e);
    }
}
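
MULTIJOINER_CLASSES and MULTIJOINER_CHANNELS are parallel comma-delimited lists, paired index by index in the loop above. A hypothetical job setup (the record classes and channel numbers are illustrative):

Configuration conf = job.getConfiguration();
// Channel "0" is handled by UserRecord, channel "1" by ClickRecord.
conf.setStrings(MultiJoiner.MULTIJOINER_CLASSES,
        "com.example.UserRecord", "com.example.ClickRecord");
conf.setStrings(MultiJoiner.MULTIJOINER_CHANNELS, "0", "1");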