Example usage for org.apache.hadoop.conf Configuration get

List of usage examples for org.apache.hadoop.conf Configuration get

Introduction

On this page you can find example usage for org.apache.hadoop.conf Configuration get.

Prototype

public String get(String name) 

Document

Get the value of the name property, or null if no such property exists.
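
Before the source excerpts, here is a minimal sketch of the call itself, assuming only hadoop-common on the classpath; the property names are made up for illustration. It shows the value-or-null contract that the usage examples below guard against.

import org.apache.hadoop.conf.Configuration;

public class ConfigurationGetExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Hypothetical property, set here only so get() has something to return.
        conf.set("example.component.name", "webserver");

        // get(String) returns the stored value, or null if the property is unset.
        String component = conf.get("example.component.name"); // "webserver"
        String missing = conf.get("example.filter.file"); // null

        // Callers typically check for null before using the value or fall back to a default.
        if (missing == null) {
            System.out.println("example.filter.file is not set; using defaults");
        }
        System.out.println("component = " + component);
    }
}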

Usage

From source file:com.blackberry.logdriver.mapreduce.boom.BoomFilterMapper.java

License:Apache License

@Override
protected void setup(Context context) throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();

    // We'll need to know the component name to know which rules to use
    String componentName = conf.get("logdriver.component.name");

    // Load in the yaml file that defines the rules.
    String confFileName = conf.get("logdriver.filter.file");

    try {
        filters = loadFilters(componentName, new FileInputStream(confFileName));
    } catch (FileNotFoundException e) {
        LOG.error("Error loading config files.  No filters will be used.", e);
    }
    LOG.info("Initial filter set: {}", filters);
}

From source file:com.blackberry.logdriver.mapreduce.boom.BoomInputFormat.java

License:Apache License

@SuppressWarnings("deprecation")
public List<InputSplit> getSplits(JobContext context) throws IOException {
    Configuration conf = context.getConfiguration();
    // Ensure we have sensible defaults for how we build blocks.
    if (conf.get("mapreduce.job.max.split.locations") == null) {
        conf.setLong("mapreduce.job.max.split.locations", MAX_SPLIT_LOCATIONS);
    }
    if (conf.get("mapred.max.split.size") == null) {
        // Try to set the split size to the default block size. In case of
        // failure, we'll use this 128MB default.
        long blockSize = 128 * 1024 * 1024; // 128MB
        try {
            blockSize = FileSystem.get(conf).getDefaultBlockSize();
        } catch (IOException e) {
            LOG.error("Error getting filesystem to get get default block size (this does not bode well).");
        }
        conf.setLong("mapred.max.split.size", blockSize);
    }
    for (String key : new String[] { "mapreduce.job.max.split.locations", "mapred.max.split.size" }) {
        LOG.info("{} = {}", key, context.getConfiguration().get(key));
    }

    return super.getSplits(context);
}

From source file:com.blm.orc.OrcInputFormat.java

License:Apache License

@Override
public boolean shouldSkipCombine(Path path, Configuration conf) throws IOException {
    return (conf.get(AcidUtils.CONF_ACID_KEY) != null) || AcidUtils.isAcid(path, conf);
}

From source file:com.blm.orc.OrcInputFormat.java

License:Apache License

static void setSearchArgument(Reader.Options options, List<OrcProto.Type> types, Configuration conf,
        boolean isOriginal) {
    int rootColumn = getRootColumn(isOriginal);
    String serializedPushdown = conf.get(TableScanDesc.FILTER_EXPR_CONF_STR);
    String sargPushdown = conf.get(SARG_PUSHDOWN);
    String columnNamesString = conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR);
    if ((sargPushdown == null && serializedPushdown == null) || columnNamesString == null) {
        LOG.debug("No ORC pushdown predicate");
        options.searchArgument(null, null);
    } else {
        SearchArgument sarg;
        if (serializedPushdown != null) {
            sarg = SearchArgumentFactory.create(Utilities.deserializeExpression(serializedPushdown));
        } else {
            sarg = SearchArgumentFactory.create(sargPushdown);
        }
        LOG.info("ORC pushdown predicate: " + sarg);
        String[] neededColumnNames = columnNamesString.split(",");
        String[] columnNames = new String[types.size() - rootColumn];
        boolean[] includedColumns = options.getInclude();
        int i = 0;
        for (int columnId : types.get(rootColumn).getSubtypesList()) {
            if (includedColumns == null || includedColumns[columnId - rootColumn]) {
                // this is guaranteed to be positive because types only have children
                // ids greater than their own id.
                columnNames[columnId - rootColumn] = neededColumnNames[i++];
            }
        }
        options.searchArgument(sarg, columnNames);
    }
}

From source file:com.blm.orc.OrcInputFormat.java

License:Apache License

/**
 * Get the list of input {@link Path}s for the map-reduce job.
 *
 * @param conf The configuration of the job
 * @return the list of input {@link Path}s for the map-reduce job.
 */
static Path[] getInputPaths(Configuration conf) throws IOException {
    String dirs = conf.get("mapred.input.dir");
    if (dirs == null) {
        throw new IOException("Configuration mapred.input.dir is not defined.");
    }
    String[] list = StringUtils.split(dirs);
    Path[] result = new Path[list.length];
    for (int i = 0; i < list.length; i++) {
        result[i] = new Path(StringUtils.unEscapeString(list[i]));
    }
    return result;
}

From source file:com.bol.crazypigs.HBaseStorage15.java

License:Apache License

@Override
public List<String> getShipFiles() {
    // Depend on HBase to do the right thing when available, as of HBASE-9165
    try {
        Method addHBaseDependencyJars = TableMapReduceUtil.class.getMethod("addHBaseDependencyJars",
                Configuration.class);
        if (addHBaseDependencyJars != null) {
            Configuration conf = new Configuration();
            addHBaseDependencyJars.invoke(null, conf);
            if (conf.get("tmpjars") != null) {
                String[] tmpjars = conf.getStrings("tmpjars");
                List<String> shipFiles = new ArrayList<String>(tmpjars.length);
                for (String tmpjar : tmpjars) {
                    shipFiles.add(new URL(tmpjar).getPath());
                }
                return shipFiles;
            }
        }
    } catch (NoSuchMethodException e) {
        LOG.debug("TableMapReduceUtils#addHBaseDependencyJars not available."
                + " Falling back to previous logic.", e);
    } catch (IllegalAccessException e) {
        LOG.debug("TableMapReduceUtils#addHBaseDependencyJars invocation"
                + " not permitted. Falling back to previous logic.", e);
    } catch (InvocationTargetException e) {
        LOG.debug("TableMapReduceUtils#addHBaseDependencyJars invocation"
                + " failed. Falling back to previous logic.", e);
    } catch (MalformedURLException e) {
        LOG.debug("TableMapReduceUtils#addHBaseDependencyJars tmpjars"
                + " had malformed url. Falling back to previous logic.", e);
    }

    List<Class> classList = new ArrayList<Class>();
    classList.add(org.apache.hadoop.hbase.client.HTable.class); // main hbase jar or hbase-client
    classList.add(org.apache.hadoop.hbase.mapreduce.TableSplit.class); // main hbase jar or hbase-server
    if (!HadoopShims.isHadoopYARN()) { //Avoid shipping duplicate. Hadoop 0.23/2 itself has guava
        classList.add(com.google.common.collect.Lists.class); // guava
    }
    classList.add(org.apache.zookeeper.ZooKeeper.class); // zookeeper
    // Additional jars that are specific to v0.95.0+
    addClassToList("org.cloudera.htrace.Trace", classList); // htrace
    addClassToList("org.apache.hadoop.hbase.protobuf.generated.HBaseProtos", classList); // hbase-protocol
    addClassToList("org.apache.hadoop.hbase.TableName", classList); // hbase-common
    addClassToList("org.apache.hadoop.hbase.CompatibilityFactory", classList); // hbase-hadoop-compar
    addClassToList("org.jboss.netty.channel.ChannelFactory", classList); // netty
    return FuncUtils.getShipFiles(classList);
}

From source file:com.bol.crazypigs.HBaseStorage15.java

License:Apache License

private JobConf initializeLocalJobConfig(Job job) {
    Properties udfProps = getUDFProperties();
    Configuration jobConf = job.getConfiguration();
    JobConf localConf = new JobConf(jobConf);
    if (udfProps.containsKey(HBASE_CONFIG_SET)) {
        for (Entry<Object, Object> entry : udfProps.entrySet()) {
            localConf.set((String) entry.getKey(), (String) entry.getValue());
        }
    } else {
        Configuration hbaseConf = HBaseConfiguration.create();
        for (Entry<String, String> entry : hbaseConf) {
            // JobConf may have some conf overriding ones in hbase-site.xml
            // So only copy hbase config not in job config to UDFContext
            // Also avoids copying core-default.xml and core-site.xml
            // props in hbaseConf to UDFContext which would be redundant.
            if (jobConf.get(entry.getKey()) == null) {
                udfProps.setProperty(entry.getKey(), entry.getValue());
                localConf.set(entry.getKey(), entry.getValue());
            }
        }
        udfProps.setProperty(HBASE_CONFIG_SET, "true");
    }
    return localConf;
}

From source file:com.bol.crazypigs.HBaseStorage15.java

License:Apache License

/**
 * Get delegation token from hbase and add it to the Job
 *
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
private void addHBaseDelegationToken(Configuration hbaseConf, Job job) {

    if (!UDFContext.getUDFContext().isFrontend()) {
        return;
    }

    if ("kerberos".equalsIgnoreCase(hbaseConf.get(HBASE_SECURITY_CONF_KEY))) {
        // Will not be entering this block for 0.20.2 as it has no security.
        try {
            // getCurrentUser method is not public in 0.20.2
            Method m1 = UserGroupInformation.class.getMethod("getCurrentUser");
            UserGroupInformation currentUser = (UserGroupInformation) m1.invoke(null, (Object[]) null);
            // hasKerberosCredentials method not available in 0.20.2
            Method m2 = UserGroupInformation.class.getMethod("hasKerberosCredentials");
            boolean hasKerberosCredentials = (Boolean) m2.invoke(currentUser, (Object[]) null);
            if (hasKerberosCredentials) {
                // Class and method are available only from 0.92 security release
                Class tokenUtilClass = Class.forName("org.apache.hadoop.hbase.security.token.TokenUtil");
                Method m3 = tokenUtilClass.getMethod("obtainTokenForJob",
                        new Class[] { Configuration.class, UserGroupInformation.class, Job.class });
                m3.invoke(null, new Object[] { hbaseConf, currentUser, job });
            } else {
                LOG.info("Not fetching hbase delegation token as no Kerberos TGT is available");
            }
        } catch (ClassNotFoundException cnfe) {
            throw new RuntimeException("Failure loading TokenUtil class, " + "is secure RPC available?", cnfe);
        } catch (RuntimeException re) {
            throw re;
        } catch (Exception e) {
            throw new UndeclaredThrowableException(e, "Unexpected error calling TokenUtil.obtainTokenForJob()");
        }
    }
}

From source file:com.bonc.mr_roamRecognition_hjpt.comm.NewFileOutputFormat.java

License:Apache License

/**
 * Get the {@link CompressionCodec} for compressing the job outputs.
 *
 * @param job
 *            the {@link Job} to look in
 * @param defaultValue
 *            the {@link CompressionCodec} to return if not set
 * @return the {@link CompressionCodec} to be used to compress the job
 *         outputs
 * @throws IllegalArgumentException
 *             if the class was specified, but not found
 */
public static Class<? extends CompressionCodec> getOutputCompressorClass(JobContext job,
        Class<? extends CompressionCodec> defaultValue) {
    Class<? extends CompressionCodec> codecClass = defaultValue;
    Configuration conf = job.getConfiguration();
    String name = conf.get(FileOutputFormat.COMPRESS_CODEC);
    if (name != null) {
        try {
            codecClass = conf.getClassByName(name).asSubclass(CompressionCodec.class);
        } catch (ClassNotFoundException e) {
            throw new IllegalArgumentException("Compression codec " + name + " was not found.", e);
        }
    }
    return codecClass;
}

From source file:com.ceph.rados.fs.hdfs.RadosFileSystem.java

License:Apache License

@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    super.initialize(uri, conf);
    if (store == null) {
        String conf_file = conf.get("ceph_conf");
        String id = conf.get("ceph_id");
        String pool = conf.get("ceph_pool");

        store = createDefaultStore();
        store.initialize(conf_file, id, pool);
    }

    setConf(conf);
    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDir = new Path("/user", System.getProperty("user.name")).makeQualified(this);
}