List of usage examples for org.apache.hadoop.conf.Configuration.setClass
public void setClass(String name, Class<?> theClass, Class<?> xface)
Sets the value of the name property to the name of theClass, which must implement the given interface xface.
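Before the examples, a minimal self-contained sketch of the setClass/getClass round trip; the property name "my.handler.class" and the Handler types below are made up for illustration.

import org.apache.hadoop.conf.Configuration;

public class SetClassExample {

    // Hypothetical interface and implementations, used only to illustrate the xface check.
    interface Handler {}
    static class DefaultHandler implements Handler {}
    static class CustomHandler implements Handler {}

    public static void main(String[] args) {
        Configuration conf = new Configuration(false);

        // Stores the class name of CustomHandler under the property;
        // setClass throws a RuntimeException if the class did not implement Handler.
        conf.setClass("my.handler.class", CustomHandler.class, Handler.class);

        // Later, resolve the stored class, falling back to DefaultHandler if the property is unset.
        Class<? extends Handler> handlerClass =
                conf.getClass("my.handler.class", DefaultHandler.class, Handler.class);

        System.out.println(handlerClass.getName()); // prints the CustomHandler class name
    }
}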
From source file:com.mongodb.hadoop.util.MapredMongoConfigUtil.java
License:Apache License
public static void setOutputValue(final Configuration conf, final Class<?> val) {
    conf.setClass(JOB_OUTPUT_VALUE, val, Object.class);
}
From source file:com.mongodb.hadoop.util.MapredMongoConfigUtil.java
License:Apache License
public static void setInputFormat(final Configuration conf, final Class<? extends InputFormat> val) {
    conf.setClass(JOB_INPUT_FORMAT, val, InputFormat.class);
}
From source file:com.mongodb.hadoop.util.MapredMongoConfigUtil.java
License:Apache License
public static void setBSONPathFilter(final Configuration conf, final Class<? extends PathFilter> val) {
    conf.setClass(BSON_PATHFILTER, val, PathFilter.class);
}
From source file:com.mongodb.hadoop.util.MapredMongoConfigUtil.java
License:Apache License
public static void setSplitterClass(final Configuration conf, final Class<? extends MongoSplitter> val) {
    conf.setClass(MONGO_SPLITTER_CLASS, val, MongoSplitter.class);
}
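The setters above only store the class name in the Configuration; the following standalone sketch shows how such a value is typically read back and instantiated. The property key is an assumption standing in for MapredMongoConfigUtil's JOB_INPUT_FORMAT, and the getter flow is illustrative rather than the library's actual API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.util.ReflectionUtils;

public class InputFormatRoundTrip {
    // Assumed property key, standing in for MapredMongoConfigUtil.JOB_INPUT_FORMAT.
    private static final String JOB_INPUT_FORMAT = "example.job.input.format";

    public static void main(String[] args) {
        Configuration conf = new Configuration(false);

        // Store the InputFormat implementation; setClass verifies it implements InputFormat.
        conf.setClass(JOB_INPUT_FORMAT, TextInputFormat.class, InputFormat.class);

        // Later (e.g. in the job runner), resolve the stored class and instantiate it.
        Class<? extends InputFormat> cls = conf.getClass(JOB_INPUT_FORMAT, null, InputFormat.class);
        InputFormat format = ReflectionUtils.newInstance(cls, conf);
        System.out.println(format.getClass().getName()); // org.apache.hadoop.mapred.TextInputFormat
    }
}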
From source file:com.moz.fiji.mapreduce.bulkimport.FijiBulkImportJobBuilder.java
License:Apache License
/** {@inheritDoc} */
@Override
protected void configureJob(Job job) throws IOException {
    final Configuration conf = job.getConfiguration();

    // Store the name of the importer to use in the job configuration so the mapper can
    // create instances of it.
    // Construct the bulk importer instance.
    if (null == mBulkImporterClass) {
        throw new JobConfigurationException("Must specify a bulk importer.");
    }
    conf.setClass(FijiConfKeys.FIJI_BULK_IMPORTER_CLASS, mBulkImporterClass, FijiBulkImporter.class);

    mJobOutput.configure(job);

    // Configure the mapper and reducer. This part depends on whether we're going to write
    // to HFiles or directly to the table.
    configureJobForHFileOutput(job);

    job.setJobName("Fiji bulk import: " + mBulkImporterClass.getSimpleName());

    mBulkImporter = ReflectionUtils.newInstance(mBulkImporterClass, conf);

    // Configure the MapReduce job (requires mBulkImporter to be set properly):
    super.configureJob(job);
}
From source file:com.moz.fiji.mapreduce.gather.FijiGatherJobBuilder.java
License:Apache License
/** {@inheritDoc} */
@Override
protected void configureJob(Job job) throws IOException {
    // Construct the gatherer instance.
    if (null == mGathererClass) {
        throw new JobConfigurationException("Must specify a gatherer.");
    }

    final Configuration conf = job.getConfiguration();

    // Serialize the gatherer class name into the job configuration.
    conf.setClass(FijiConfKeys.FIJI_GATHERER_CLASS, mGathererClass, FijiGatherer.class);

    if ((getJobOutput() instanceof HFileMapReduceJobOutput) && (null == mReducerClass)) {
        mReducerClass = IdentityReducer.class;
    }

    final StringBuilder name = new StringBuilder("Fiji gather: " + mGathererClass.getSimpleName());
    if (null != mReducerClass) {
        name.append(" / " + mReducerClass.getSimpleName());
    }
    job.setJobName(name.toString());

    mGatherer = ReflectionUtils.newInstance(mGathererClass, conf);
    mMapper.setConf(conf);
    mDataRequest = mGatherer.getDataRequest();

    // Construct the combiner instance (if specified).
    if (null != mCombinerClass) {
        mCombiner = ReflectionUtils.newInstance(mCombinerClass, conf);
    }

    // Construct the reducer instance (if specified).
    if (null != mReducerClass) {
        mReducer = ReflectionUtils.newInstance(mReducerClass, conf);
    }

    // Configure the table input job (requires mGatherer, mMapper and mReducer to be set):
    super.configureJob(job);

    // Some validation:
    if (getJobOutput() instanceof HFileMapReduceJobOutput) {
        if (mReducer instanceof IdentityReducer) {
            Preconditions.checkState(mGatherer.getOutputKeyClass() == HFileKeyValue.class,
                String.format("Gatherer '%s' writing HFiles must output HFileKeyValue keys, but got '%s'",
                    mGathererClass.getName(), mGatherer.getOutputKeyClass().getName()));
            Preconditions.checkState(mGatherer.getOutputValueClass() == NullWritable.class,
                String.format("Gatherer '%s' writing HFiles must output NullWritable values, but got '%s'",
                    mGathererClass.getName(), mGatherer.getOutputValueClass().getName()));
        }
        Preconditions.checkState(mReducer.getOutputKeyClass() == HFileKeyValue.class,
            String.format("Reducer '%s' writing HFiles must output HFileKeyValue keys, but got '%s'",
                mReducerClass.getName(), mReducer.getOutputKeyClass().getName()));
        Preconditions.checkState(mReducer.getOutputValueClass() == NullWritable.class,
            String.format("Reducer '%s' writing HFiles must output NullWritable values, but got '%s'",
                mReducerClass.getName(), mReducer.getOutputValueClass().getName()));
    }
}
From source file:com.moz.fiji.mapreduce.kvstore.TestKeyValueStoreConfiguration.java
License:Apache License
@Test
public void testCopyFromConfiguration() {
    Configuration conf = new Configuration(false);
    conf.set("foo", "foo-value");
    conf.setInt("bar", 123);
    conf.setClass("qaz", String.class, Object.class);

    KeyValueStoreConfiguration kvStoreConf = KeyValueStoreConfiguration.fromConf(conf);
    assertEquals("foo-value", kvStoreConf.get("foo"));
    assertEquals(123, kvStoreConf.getInt("bar", 0));
    assertEquals(String.class, kvStoreConf.getClass("qaz", null));
}
From source file:com.moz.fiji.mapreduce.output.DirectFijiTableMapReduceJobOutput.java
License:Apache License
/** {@inheritDoc} */
@Override
public void configure(Job job) throws IOException {
    // sets Hadoop output format, Fiji output table and # of reducers:
    super.configure(job);

    final Configuration conf = job.getConfiguration();

    // Fiji table context:
    conf.setClass(FijiConfKeys.FIJI_TABLE_CONTEXT_CLASS,
        DirectFijiTableWriterContext.class,
        FijiTableContext.class);

    // Since there's no "commit" operation for an entire map task writing to a
    // Fiji table, do not use speculative execution when writing directly to a Fiji table.
    conf.setBoolean("mapred.map.tasks.speculative.execution", false);
}
From source file:com.moz.fiji.mapreduce.output.framework.HFileReducerMapReduceJobOutput.java
License:Apache License
/** {@inheritDoc} */
@Override
public void configure(Job job) throws IOException {
    super.configure(job); // sets the Hadoop output format

    final Configuration conf = job.getConfiguration();

    conf.set(FijiConfKeys.FIJI_OUTPUT_TABLE_URI, mJobOutput.getOutputTableURI().toString());

    // Fiji table context:
    conf.setClass(FijiConfKeys.FIJI_TABLE_CONTEXT_CLASS,
        HFileWriterContext.class,
        FijiTableContext.class);

    // Set the output path.
    FileOutputFormat.setOutputPath(job, mJobOutput.getPath());

    job.setNumReduceTasks(mJobOutput.getNumReduceTasks());
}
From source file:com.moz.fiji.mapreduce.output.HFileMapReduceJobOutput.java
License:Apache License
/** {@inheritDoc} */
@Override
public void configure(Job job) throws IOException {
    // sets Hadoop output format, Fiji output table and # of reducers:
    super.configure(job);

    final Configuration conf = job.getConfiguration();

    // Fiji table context:
    conf.setClass(FijiConfKeys.FIJI_TABLE_CONTEXT_CLASS,
        HFileWriterContext.class,
        FijiTableContext.class);

    // Set the output path.
    FileOutputFormat.setOutputPath(job, mPath);

    // Configure the total order partitioner so generated HFile shards are contiguous and sorted.
    configurePartitioner(job, makeTableKeySplit(getOutputTableURI(), getNumReduceTasks(), conf));

    // Note: the HFile job output requires the reducer of the MapReduce job to be IdentityReducer.
    // This is enforced externally.
}