List of usage examples for org.apache.hadoop.conf.Configuration.unset
public synchronized void unset(String name)

Unset a previously set property.
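Before the examples, a minimal sketch of the method's semantics. It assumes no default resources are loaded (the `false` constructor argument), so nothing shadows the removed key; per the Tez example further down, unset also clears any deprecated variants of the key.

import org.apache.hadoop.conf.Configuration;

public class UnsetDemo {
    public static void main(String[] args) {
        // false: skip loading core-default.xml/core-site.xml, so no
        // resource-defined value can shadow the key we remove
        Configuration conf = new Configuration(false);
        conf.set("example.key", "value");
        System.out.println(conf.get("example.key")); // prints "value"
        conf.unset("example.key");
        System.out.println(conf.get("example.key")); // prints "null"
    }
}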
From source file:org.apache.tez.dag.library.vertexmanager.TestShuffleVertexManagerUtils.java
License:Apache License
static FairShuffleVertexManager createFairShuffleVertexManager(Configuration conf,
        VertexManagerPluginContext context, FairRoutingType fairRoutingType,
        Long desiredTaskInputSize, Float min, Float max) {
    FairShuffleVertexManagerConfigBuilder builder = FairShuffleVertexManager.createConfigBuilder(conf);
    if (min != null) {
        builder.setSlowStartMinSrcCompletionFraction(min);
    } else if (conf != null) {
        conf.unset(FairShuffleVertexManager.TEZ_FAIR_SHUFFLE_VERTEX_MANAGER_MIN_SRC_FRACTION);
    }
    if (max != null) {
        builder.setSlowStartMaxSrcCompletionFraction(max);
    } else if (conf != null) {
        conf.unset(FairShuffleVertexManager.TEZ_FAIR_SHUFFLE_VERTEX_MANAGER_MAX_SRC_FRACTION);
    }
    if (fairRoutingType != null) {
        builder.setAutoParallelism(fairRoutingType);
    }
    if (desiredTaskInputSize != null) {
        builder.setDesiredTaskInputSize(desiredTaskInputSize);
    }
    UserPayload payload = builder.build().getUserPayload();
    when(context.getUserPayload()).thenReturn(payload);
    FairShuffleVertexManager manager = new FairShuffleVertexManager(context);
    manager.initialize();
    return manager;
}
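A hypothetical call to this helper, assuming Mockito's mock(...) is statically imported (the when(...) stub in the helper already implies Mockito is on the classpath). Passing null for min and max exercises both conf.unset(...) branches:

// Hypothetical usage sketch, not taken from the source file.
Configuration conf = new Configuration(false);
VertexManagerPluginContext context = mock(VertexManagerPluginContext.class);

// null min/max -> both fraction keys are unset on conf rather than configured,
// so the manager falls back to its built-in defaults for them.
FairShuffleVertexManager manager =
        createFairShuffleVertexManager(conf, context, null, null, null, null);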
From source file:org.apache.tez.dag.utils.TestTaskSpecificLaunchCmdOption.java
License:Apache License
@Test(timeout = 5000)
public void testTaskSpecificLogOptions() {
    Configuration conf = new Configuration(false);
    conf.set(TezConfiguration.TEZ_TASK_SPECIFIC_LAUNCH_CMD_OPTS_LIST, "v1[0,2,5]");
    TaskSpecificLaunchCmdOption options;

    conf.set(TezConfiguration.TEZ_TASK_SPECIFIC_LOG_LEVEL, "DEBUG;org.apache.tez=INFO");
    options = new TaskSpecificLaunchCmdOption(conf);
    assertTrue(options.hasModifiedLogProperties());
    assertFalse(options.hasModifiedTaskLaunchOpts());
    assertEquals(2, options.getTaskSpecificLogParams().length);

    conf.unset(TezConfiguration.TEZ_TASK_SPECIFIC_LOG_LEVEL);
    options = new TaskSpecificLaunchCmdOption(conf);
    assertFalse(options.hasModifiedLogProperties());
    assertFalse(options.hasModifiedTaskLaunchOpts());

    conf.set(TezConfiguration.TEZ_TASK_SPECIFIC_LOG_LEVEL, "DEBUG");
    options = new TaskSpecificLaunchCmdOption(conf);
    assertTrue(options.hasModifiedLogProperties());
    assertFalse(options.hasModifiedTaskLaunchOpts());
    assertEquals(1, options.getTaskSpecificLogParams().length);
}
From source file:org.apache.tez.engine.common.security.TokenCache.java
License:Apache License
/**
 * Remove jobtoken referrals which don't make sense in the context
 * of the task execution.
 *
 * @param conf
 */
public static void cleanUpTokenReferral(Configuration conf) {
    conf.unset(TezJobConfig.DAG_CREDENTIALS_BINARY);
}
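A minimal sketch of the observable effect; the file path below is illustrative only, and the sketch assumes nothing beyond DAG_CREDENTIALS_BINARY being an ordinary string key:

// Hypothetical demonstration, not taken from the source file.
Configuration conf = new Configuration(false);
conf.set(TezJobConfig.DAG_CREDENTIALS_BINARY, "/tmp/credentials.bin");

TokenCache.cleanUpTokenReferral(conf);

// The referral key is gone, so a task reading this config cannot
// accidentally chase a client-side credentials file.
assert conf.get(TezJobConfig.DAG_CREDENTIALS_BINARY) == null;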
From source file:org.apache.tez.mapreduce.hadoop.MRHelpers.java
License:Apache License
private static void processDirectConversion(Configuration conf) {
    for (Map.Entry<String, String> dep : DeprecatedKeys.getMRToTezRuntimeParamMap().entrySet()) {
        if (conf.get(dep.getKey()) != null) {
            // TODO Deprecation reason does not seem to reflect in the config ?
            // The ordering is important in case of keys which are also deprecated.
            // Unset will unset the deprecated keys and all its variants.
            final String mrValue = conf.get(dep.getKey());
            final String tezValue = conf.get(dep.getValue());
            conf.unset(dep.getKey());
            if (tezValue == null) {
                conf.set(dep.getValue(), mrValue, "TRANSLATED_TO_TEZ");
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("Config: mr(unset):" + dep.getKey() + ", mr initial value=" + mrValue
                        + ", tez:" + dep.getValue() + "=" + conf.get(dep.getValue()));
            }
        }
    }
}
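The translate-then-unset pattern above reduces to a short sketch. The key names are hypothetical; Configuration.set(name, value, source) is the standard three-argument overload that records where a value came from:

// Hypothetical old/new key pair, illustrating the migration pattern.
String oldKey = "example.mr.key";
String newKey = "example.tez.key";

Configuration conf = new Configuration(false);
conf.set(oldKey, "42");

String oldValue = conf.get(oldKey);
if (oldValue != null) {
    conf.unset(oldKey);                 // also clears deprecated variants of oldKey
    if (conf.get(newKey) == null) {     // don't clobber an explicit new-style value
        conf.set(newKey, oldValue, "TRANSLATED_TO_TEZ");
    }
}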
From source file:org.apache.tinkerpop.gremlin.hadoop.process.computer.util.MapReduceHelper.java
License:Apache License
public static void executeMapReduceJob(final MapReduce mapReduce, final Memory.Admin memory,
        final Configuration configuration) throws IOException, ClassNotFoundException, InterruptedException {
    final Configuration newConfiguration = new Configuration(configuration);
    final boolean vertexProgramExists = newConfiguration.get(VertexProgram.VERTEX_PROGRAM, null) != null;
    if (vertexProgramExists) {
        newConfiguration.set(Constants.GREMLIN_HADOOP_GRAPH_READER,
                InputOutputHelper.getInputFormat((Class) newConfiguration
                        .getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, OutputFormat.class))
                        .getCanonicalName());
        newConfiguration.unset(Constants.GREMLIN_HADOOP_GRAPH_FILTER);
    }
    final BaseConfiguration apacheConfiguration = new BaseConfiguration();
    apacheConfiguration.setDelimiterParsingDisabled(true);
    mapReduce.storeState(apacheConfiguration);
    ConfUtil.mergeApacheIntoHadoopConfiguration(apacheConfiguration, newConfiguration);
    final Optional<Comparator<?>> mapSort = mapReduce.getMapKeySort();
    final Optional<Comparator<?>> reduceSort = mapReduce.getReduceKeySort();
    newConfiguration.setClass(Constants.GREMLIN_HADOOP_MAP_REDUCE_CLASS, mapReduce.getClass(), MapReduce.class);
    final Job job = Job.getInstance(newConfiguration, mapReduce.toString());
    HadoopGraph.LOGGER.info(Constants.GREMLIN_HADOOP_JOB_PREFIX + mapReduce.toString());
    job.setJarByClass(HadoopGraph.class);
    if (mapSort.isPresent())
        job.setSortComparatorClass(ObjectWritableComparator.ObjectWritableMapComparator.class);
    job.setMapperClass(HadoopMap.class);
    if (mapReduce.doStage(MapReduce.Stage.REDUCE)) {
        if (mapReduce.doStage(MapReduce.Stage.COMBINE))
            job.setCombinerClass(HadoopCombine.class);
        job.setReducerClass(HadoopReduce.class);
    } else {
        if (mapSort.isPresent()) {
            job.setReducerClass(Reducer.class);
            job.setNumReduceTasks(1); // todo: is this necessary to ensure sorted order?
        } else {
            job.setNumReduceTasks(0);
        }
    }
    job.setMapOutputKeyClass(ObjectWritable.class);
    job.setMapOutputValueClass(ObjectWritable.class);
    job.setOutputKeyClass(ObjectWritable.class);
    job.setOutputValueClass(ObjectWritable.class);
    job.setInputFormatClass(GraphFilterInputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    // if there is no vertex program, then grab the graph from the input location
    final Path graphPath;
    if (vertexProgramExists) {
        graphPath = new Path(
                Constants.getGraphLocation(newConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION)));
    } else {
        graphPath = new Path(newConfiguration.get(Constants.GREMLIN_HADOOP_INPUT_LOCATION));
    }
    Path memoryPath = new Path(
            Constants.getMemoryLocation(newConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION),
                    (reduceSort.isPresent() ? mapReduce.getMemoryKey() + "-temp" : mapReduce.getMemoryKey())));
    if (FileSystem.get(newConfiguration).exists(memoryPath)) {
        FileSystem.get(newConfiguration).delete(memoryPath, true);
    }
    FileInputFormat.setInputPaths(job, graphPath);
    FileOutputFormat.setOutputPath(job, memoryPath);
    job.waitForCompletion(true);
    // if there is a reduce sort, we need to run another identity MapReduce job
    if (reduceSort.isPresent()) {
        final Job reduceSortJob = Job.getInstance(newConfiguration, "ReduceKeySort");
        reduceSortJob.setSortComparatorClass(ObjectWritableComparator.ObjectWritableReduceComparator.class);
        reduceSortJob.setMapperClass(Mapper.class);
        reduceSortJob.setReducerClass(Reducer.class);
        reduceSortJob.setMapOutputKeyClass(ObjectWritable.class);
        reduceSortJob.setMapOutputValueClass(ObjectWritable.class);
        reduceSortJob.setOutputKeyClass(ObjectWritable.class);
        reduceSortJob.setOutputValueClass(ObjectWritable.class);
        reduceSortJob.setInputFormatClass(SequenceFileInputFormat.class);
        reduceSortJob.setOutputFormatClass(SequenceFileOutputFormat.class);
        reduceSortJob.setNumReduceTasks(1); // todo: is this necessary to ensure sorted order?
        FileInputFormat.setInputPaths(reduceSortJob, memoryPath);
        final Path sortedMemoryPath = new Path(Constants.getMemoryLocation(
                newConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION), mapReduce.getMemoryKey()));
        FileOutputFormat.setOutputPath(reduceSortJob, sortedMemoryPath);
        reduceSortJob.waitForCompletion(true);
        FileSystem.get(newConfiguration).delete(memoryPath, true); // delete the temporary memory path
        memoryPath = sortedMemoryPath;
    }
    mapReduce.addResultToMemory(memory, new ObjectWritableIterator(newConfiguration, memoryPath));
}
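The opening lines show the common defensive pattern around unset: copy the incoming Configuration before mutating it, so the caller's object is untouched. A minimal sketch (the key name is hypothetical):

// Hypothetical key; demonstrates copy-then-unset leaving the original intact.
Configuration original = new Configuration(false);
original.set("example.filter", "enabled");

Configuration copy = new Configuration(original); // copy constructor clones the properties
copy.unset("example.filter");

assert original.get("example.filter") != null; // caller's config untouched
assert copy.get("example.filter") == null;     // only the copy was mutated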
From source file:org.apache.zeppelin.integration.MiniHadoopCluster.java
License:Apache License
protected void saveConfig(Configuration conf, String dest) throws IOException {
    Configuration redacted = new Configuration(conf);
    // This setting references a test class that is not available when using a real Spark
    // installation, so remove it from client configs.
    redacted.unset("net.topology.node.switch.mapping.impl");
    FileOutputStream out = new FileOutputStream(dest);
    try {
        redacted.writeXml(out);
    } finally {
        out.close();
    }
    LOGGER.info("Save configuration to " + dest);
}
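The same redact-then-serialize idea generalizes beyond this one test key. A hedged sketch (the helper and its key list are hypothetical, not part of the source file) that strips sensitive keys before writing a client config:

// Hypothetical redaction helper in the same spirit: copy, unset, writeXml.
static void writeRedacted(Configuration conf, File dest, String... secretKeys) throws IOException {
    Configuration redacted = new Configuration(conf);
    for (String key : secretKeys) {
        redacted.unset(key); // drop keys that must not leak into client configs
    }
    try (FileOutputStream out = new FileOutputStream(dest)) {
        redacted.writeXml(out);
    }
}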
From source file:org.kitesdk.apps.spi.AppDeployer.java
License:Apache License
private Configuration filterConfig(Configuration conf) {
    Configuration appConfig = new Configuration(conf);

    // Ugly way of including Hive settings to be visible in
    // the application configuration. We should find a better way.
    appConfig.addResource("hive-site.xml");

    // Remove properties disallowed by Oozie.
    // TODO: better way to do this? Are these defined somewhere?
    if (appConfig.get("mapred.job.tracker") != null)
        appConfig.unset("mapred.job.tracker");

    if (appConfig.get("fs.default.name") != null)
        appConfig.unset("fs.default.name");

    return appConfig;
}
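A side note on the null guards above: unset removes the key from the backing properties, and removing an absent key is a no-op, so the get(...) checks are likely redundant (a judgment call based on the implementation, not documented behavior). A minimal sketch with a hypothetical key name:

// unset on a key that was never set does not throw; it is a no-op.
Configuration conf = new Configuration(false);
conf.unset("example.never.set.key"); // safe: nothing to remove
assert conf.get("example.never.set.key") == null;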
From source file:org.mrgeo.data.DataProviderFactoryTest.java
License:Apache License
private void setupPreferred(Configuration conf, String confVal, String mrgeoVal, String defMrgeoVal) {
    if (conf != null) {
        oldConfValues = new HashMap<>();
        oldConfValues.put(DataProviderFactory.PREFERRED_ADHOC_PROVIDER_NAME,
                conf.get(DataProviderFactory.PREFERRED_ADHOC_PROVIDER_NAME, null));
        oldConfValues.put(DataProviderFactory.PREFERRED_MRSIMAGE_PROVIDER_NAME,
                conf.get(DataProviderFactory.PREFERRED_MRSIMAGE_PROVIDER_NAME, null));
        oldConfValues.put(DataProviderFactory.PREFERRED_VECTOR_PROVIDER_NAME,
                conf.get(DataProviderFactory.PREFERRED_VECTOR_PROVIDER_NAME, null));
        if (confVal == null) {
            conf.unset(DataProviderFactory.PREFERRED_ADHOC_PROVIDER_NAME);
            conf.unset(DataProviderFactory.PREFERRED_MRSIMAGE_PROVIDER_NAME);
            conf.unset(DataProviderFactory.PREFERRED_VECTOR_PROVIDER_NAME);
        } else {
            conf.set(DataProviderFactory.PREFERRED_ADHOC_PROVIDER_NAME, confVal);
            conf.set(DataProviderFactory.PREFERRED_MRSIMAGE_PROVIDER_NAME, confVal);
            conf.set(DataProviderFactory.PREFERRED_VECTOR_PROVIDER_NAME, confVal);
        }
    }
    Properties mp = MrGeoProperties.getInstance();
    oldMrGeoValues = new HashMap<>();
    oldMrGeoValues.put(DataProviderFactory.PREFERRED_ADHOC_PROPERTYNAME,
            mp.getProperty(DataProviderFactory.PREFERRED_ADHOC_PROPERTYNAME, null));
    oldMrGeoValues.put(DataProviderFactory.PREFERRED_MRSIMAGE_PROPERTYNAME,
            mp.getProperty(DataProviderFactory.PREFERRED_MRSIMAGE_PROPERTYNAME, null));
    oldMrGeoValues.put(DataProviderFactory.PREFERRED_VECTOR_PROPERTYNAME,
            mp.getProperty(DataProviderFactory.PREFERRED_VECTOR_PROPERTYNAME, null));
    oldMrGeoValues.put(DataProviderFactory.PREFERRED_PROPERTYNAME,
            mp.getProperty(DataProviderFactory.PREFERRED_PROPERTYNAME, null));
    if (mrgeoVal == null) {
        mp.remove(DataProviderFactory.PREFERRED_ADHOC_PROPERTYNAME);
        mp.remove(DataProviderFactory.PREFERRED_MRSIMAGE_PROPERTYNAME);
        mp.remove(DataProviderFactory.PREFERRED_VECTOR_PROPERTYNAME);
    } else {
        mp.setProperty(DataProviderFactory.PREFERRED_ADHOC_PROPERTYNAME, mrgeoVal);
        mp.setProperty(DataProviderFactory.PREFERRED_MRSIMAGE_PROPERTYNAME, mrgeoVal);
        mp.setProperty(DataProviderFactory.PREFERRED_VECTOR_PROPERTYNAME, mrgeoVal);
    }
    if (defMrgeoVal == null) {
        mp.remove(DataProviderFactory.PREFERRED_PROPERTYNAME);
    } else {
        mp.setProperty(DataProviderFactory.PREFERRED_PROPERTYNAME, defMrgeoVal);
    }
}
From source file:org.mrgeo.data.DataProviderFactoryTest.java
License:Apache License
private void teardownPreferred(Configuration conf) {
    if (conf != null && oldConfValues != null) {
        for (Map.Entry<String, String> val : oldConfValues.entrySet()) {
            if (val.getValue() == null) {
                conf.unset(val.getKey());
            } else {
                conf.set(val.getKey(), val.getValue());
            }
        }
    }
    if (oldMrGeoValues != null) {
        Properties mp = MrGeoProperties.getInstance();
        for (Map.Entry<String, String> val : oldMrGeoValues.entrySet()) {
            if (val.getValue() == null) {
                mp.remove(val.getKey());
            } else {
                mp.setProperty(val.getKey(), val.getValue());
            }
        }
    }
}
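Together, these two methods form a save/mutate/restore cycle for test isolation, with unset as the "restore to absent" case. A minimal standalone sketch of the same pattern (names hypothetical):

// Hypothetical condensed version of the save/restore pattern above.
Map<String, String> saved = new HashMap<>();
String key = "example.preferred.provider";

Configuration conf = new Configuration(false);
saved.put(key, conf.get(key));   // remember the original value (possibly null)

conf.set(key, "test-value");     // ... run the test against the override ...

String old = saved.get(key);     // restore: unset if there was no original value
if (old == null) {
    conf.unset(key);
} else {
    conf.set(key, old);
}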
From source file:org.schedoscope.export.BaseExportJob.java
License:Apache License
protected Configuration configureHiveMetaStore(Configuration conf) {
    if (metaStoreUris.startsWith("thrift://")) {
        conf.set("hive.metastore.local", "false");
        conf.set(HiveConf.ConfVars.METASTOREURIS.varname, metaStoreUris);
    } else {
        conf.set("hive.metastore.local", "true");
        conf.unset(HiveConf.ConfVars.METASTOREURIS.varname);
        conf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, metaStoreUris);
    }
    return conf;
}
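The unset here guards against a stale remote URI: when switching to a local metastore, any hive.metastore.uris value inherited from *-site.xml resources must be cleared or it would shadow the embedded connection. A self-contained sketch of the branch logic, using the standard Hive property names as plain strings; the URI values are hypothetical:

// Standalone sketch; metaStoreUris would normally come from job arguments.
static Configuration configureMetaStore(Configuration conf, String metaStoreUris) {
    if (metaStoreUris.startsWith("thrift://")) {
        conf.set("hive.metastore.local", "false");
        conf.set("hive.metastore.uris", metaStoreUris);
    } else {
        conf.set("hive.metastore.local", "true");
        conf.unset("hive.metastore.uris"); // clear any inherited remote URI
        conf.set("javax.jdo.option.ConnectionURL", metaStoreUris);
    }
    return conf;
}

// e.g. configureMetaStore(conf, "thrift://metastore-host:9083");
// or   configureMetaStore(conf, "jdbc:derby:;databaseName=metastore_db;create=true");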