List of usage examples for org.apache.hadoop.conf.Configuration setClass
public void setClass(String name, Class<?> theClass, Class<?> xface)
Sets the value of the name property to the name of theClass, which must implement the given interface xface (a RuntimeException is thrown if it does not)
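Before the collected examples, a minimal, self-contained sketch of the call itself. The property key my.compression.codec is an illustrative placeholder, not taken from the examples below; the sketch simply shows setClass paired with Configuration.getClass, which resolves the stored class name against the same interface.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.SnappyCodec;

public class SetClassSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration(false);

        // Store SnappyCodec under a hypothetical key, type-checked against CompressionCodec.
        // setClass throws a RuntimeException if the class does not implement the interface.
        conf.setClass("my.compression.codec", SnappyCodec.class, CompressionCodec.class);

        // Later, resolve the configured class with a default and the same interface.
        Class<? extends CompressionCodec> codecClass =
                conf.getClass("my.compression.codec", SnappyCodec.class, CompressionCodec.class);
        System.out.println("Configured codec: " + codecClass.getName());
    }
}

Most of the examples that follow use the same pattern to plug an implementation class (a test RM connector, a codec, a scheduler, an auxiliary service) into a framework through a configuration key.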
From source file:com.cloudera.llama.am.impl.TestMultiQueueLlamaAM.java
License:Apache License
@Test
public void testReleaseReservationUnknown() throws Exception {
    Configuration conf = new Configuration(false);
    conf.setClass(LlamaAM.RM_CONNECTOR_CLASS_KEY, MyRMConnector.class, RMConnector.class);
    LlamaAM am = LlamaAM.create(conf);
    am.start();
    am.releaseReservation(UUID.randomUUID(), UUID.randomUUID(), false);
}
From source file:com.cloudera.llama.am.impl.TestMultiQueueLlamaAM.java
License:Apache License
@SuppressWarnings("unchecked") @Test/*from w w w . ja v a2 s .co m*/ public void testMultiQueueListener() throws Exception { Configuration conf = new Configuration(false); conf.setClass(LlamaAM.RM_CONNECTOR_CLASS_KEY, MyRMConnector.class, RMConnector.class); LlamaAM am = LlamaAM.create(conf); try { am.start(); LlamaAMListener listener = new LlamaAMListener() { @Override public void onEvent(LlamaAMEvent event) { listenerCalled = true; } }; UUID handle = UUID.randomUUID(); PlacedReservation rr = am.getReservation(am.reserve(TestUtils.createReservation(handle, "q", 1, true))); UUID id = rr.getReservationId(); am.getNodes(); am.addListener(listener); am.getReservation(id); Assert.assertFalse(listenerCalled); List<RMResource> resources = (List<RMResource>) rmConnector.args.get(3); rmConnector.callback.onEvent(Arrays.asList(RMEvent .createStatusChangeEvent(resources.get(0).getResourceId(), PlacedResource.Status.REJECTED))); Assert.assertTrue(listenerCalled); am.releaseReservation(handle, id, false); am.releaseReservationsForHandle(UUID.randomUUID(), false); am.removeListener(listener); listenerCalled = false; Assert.assertFalse(listenerCalled); am.stop(); } finally { am.stop(); } }
From source file:com.cloudera.llama.am.impl.TestMultiQueueLlamaAM.java
License:Apache License
@Test
public void testQueueExpiry() throws Exception {
    ManualClock clock = new ManualClock();
    Clock.setClock(clock);
    Configuration conf = new Configuration(false);
    conf.setClass(LlamaAM.RM_CONNECTOR_CLASS_KEY, MyRMConnector.class, RMConnector.class);
    conf.set(LlamaAM.CORE_QUEUES_KEY, "root.corequeue");
    MultiQueueLlamaAM am = new MultiQueueLlamaAM(conf);
    am.amCheckExpiryIntervalMs = 20;
    am.start();

    // Core queue should exist
    Assert.assertEquals(1, am.ams.keySet().size());
    UUID handle = UUID.randomUUID();
    UUID resId = am.reserve(TestUtils.createReservation(handle, "root.someotherqueue", 1, true));
    Assert.assertEquals(2, am.ams.keySet().size());
    am.releaseReservation(handle, resId, true);
    clock.increment(LlamaAM.QUEUE_AM_EXPIRE_DEFAULT * 2);
    Thread.sleep(300); // am expiry check should run in this time

    // Other queue should get cleaned up
    Assert.assertEquals(1, am.ams.keySet().size());
    handle = UUID.randomUUID();
    resId = am.reserve(TestUtils.createReservation(handle, "root.corequeue", 1, true));
    am.releaseReservation(handle, resId, true);
    clock.increment(LlamaAM.QUEUE_AM_EXPIRE_DEFAULT * 2);
    Thread.sleep(300); // am expiry check should run in this time

    // Core queue should still exist
    Assert.assertEquals(1, am.ams.keySet().size());
    Assert.assertFalse(am.ams.containsKey("root.someotherqueue"));
    handle = UUID.randomUUID();
    am.reserve(TestUtils.createReservation(handle, "root.someotherqueue", 1, true));
    Assert.assertTrue(am.ams.containsKey("root.someotherqueue"));
}
From source file:com.cloudera.llama.am.impl.TestMultiQueueLlamaAM.java
License:Apache License
@Test
public void testReleaseReservationsForQueue() throws Exception {
    Configuration conf = new Configuration(false);
    conf.setClass(LlamaAM.RM_CONNECTOR_CLASS_KEY, MyRMConnector.class, RMConnector.class);
    MultiQueueLlamaAM am = new MultiQueueLlamaAM(conf);
    am.start();
    UUID handle = UUID.randomUUID();
    am.reserve(TestUtils.createReservation(handle, "root.q1", 1, true));
    Assert.assertTrue(am.ams.containsKey("root.q1"));
    // Release the queue without caching.
    am.releaseReservationsForQueue("root.q1", true);
    Assert.assertFalse(am.ams.containsKey("root.q1"));
    // Try to release it again, the queue does not exist. without cache.
    am.releaseReservationsForQueue("root.q1", true);
    // Try to release it again, the queue does not exist. with cache.
    am.releaseReservationsForQueue("root.q1", false);
    Assert.assertFalse(am.ams.containsKey("root.q1"));
    // Now create another reservation and try to release the queue by using cache,
    am.reserve(TestUtils.createReservation(handle, "root.q2", 1, true));
    Assert.assertTrue(am.ams.containsKey("root.q2"));
    // Release the queue without caching.
    am.releaseReservationsForQueue("root.q2", false);
    Assert.assertTrue(am.ams.containsKey("root.q2"));
    // Try to release it again, the queue does not exist. with cache.
    am.releaseReservationsForQueue("root.q2", false);
    // Try to release it again, the queue does not exist. without cache.
    am.releaseReservationsForQueue("root.q2", true);
    Assert.assertFalse(am.ams.containsKey("root.q2"));
}
From source file:com.cloudera.llama.am.impl.TestSingleQueueLlamaAM.java
License:Apache License
public static SingleQueueLlamaAM createLlamaAM() {
    Configuration conf = new Configuration(false);
    conf.setClass(LlamaAM.RM_CONNECTOR_CLASS_KEY, MyRMConnector.class, RMConnector.class);
    conf.setBoolean(LlamaAM.NORMALIZING_ENABLED_KEY, false);
    conf.setBoolean(LlamaAM.CACHING_ENABLED_KEY, false);
    SingleQueueLlamaAM am = new SingleQueueLlamaAM(conf, "queue",
        Executors.newScheduledThreadPool(4));
    return am;
}
From source file:com.cloudera.llama.am.TestLlamaAMThriftServer.java
License:Apache License
protected Configuration createLlamaConfiguration() throws Exception {
    Configuration conf = new Configuration(false);
    conf.set(ServerConfiguration.CONFIG_DIR_KEY, TestAbstractMain.createTestDir());
    conf.setClass(LlamaAM.RM_CONNECTOR_CLASS_KEY, MockRMConnector.class, RMConnector.class);
    conf.set(LlamaAM.CORE_QUEUES_KEY, "root.q1,root.q2");
    conf.set(MockRMConnector.QUEUES_KEY, "root.q1,root.q2");
    String nodesKey = "" + MockLlamaAMFlags.ALLOCATE + "n1";
    conf.set(MockRMConnector.NODES_KEY, nodesKey);
    conf.setInt(MockRMConnector.EVENTS_MIN_WAIT_KEY, 5);
    conf.setInt(MockRMConnector.EVENTS_MAX_WAIT_KEY, 10);
    conf.set(sConf.getPropertyName(ServerConfiguration.SERVER_ADDRESS_KEY), "localhost:0");
    conf.set(sConf.getPropertyName(ServerConfiguration.SERVER_ADMIN_ADDRESS_KEY), "localhost:0");
    conf.set(sConf.getPropertyName(ServerConfiguration.HTTP_ADDRESS_KEY), "localhost:0");
    return conf;
}
From source file:com.cloudera.llama.am.TestLlamaHAServer.java
License:Apache License
@Before
public void setup() {
    Configuration conf = new Configuration(false);
    conf.set(ServerConfiguration.CONFIG_DIR_KEY, TestAbstractMain.createTestDir());
    conf.setClass(LlamaAM.RM_CONNECTOR_CLASS_KEY, MockRMConnector.class, RMConnector.class);
    conf.set(LlamaAM.CORE_QUEUES_KEY, "root.q1,root.q2");
    conf.set(MockRMConnector.QUEUES_KEY, "root.q1,root.q2");
    String nodesKey = "" + MockLlamaAMFlags.ALLOCATE + "n1";
    conf.set(MockRMConnector.NODES_KEY, nodesKey);
    conf.setInt(MockRMConnector.EVENTS_MIN_WAIT_KEY, 5);
    conf.setInt(MockRMConnector.EVENTS_MAX_WAIT_KEY, 10);
    ServerConfiguration sConf = new AMServerConfiguration(conf);
    conf.set(sConf.getPropertyName(ServerConfiguration.SERVER_ADDRESS_KEY), "localhost:0");
    conf.set(sConf.getPropertyName(ServerConfiguration.SERVER_ADMIN_ADDRESS_KEY), "localhost:0");
    conf.set(sConf.getPropertyName(ServerConfiguration.HTTP_ADDRESS_KEY), "localhost:0");
    conf.setBoolean(HAServerConfiguration.HA_ENABLED, true);
    server = new LlamaHAServer();
    server.setConf(conf);
}
From source file:com.cloudera.llama.am.yarn.TestLlamaAMWithYarn.java
License:Apache License
private Configuration createMiniYarnConfig(boolean usePortInName) throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.set("yarn.scheduler.fair.allocation.file", "test-fair-scheduler.xml");
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 0);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class, FairScheduler.class);

    // proxy user config
    String llamaProxyUser = System.getProperty("user.name");
    conf.set("hadoop.security.authentication", "simple");
    conf.set("hadoop.proxyuser." + llamaProxyUser + ".hosts", "*");
    conf.set("hadoop.proxyuser." + llamaProxyUser + ".groups", "*");
    String[] userGroups = new String[] { "g" };
    UserGroupInformation.createUserForTesting(llamaProxyUser, userGroups);
    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, usePortInName);
    return conf;
}
From source file:com.cloudera.llama.nm.TestLlamaNMAuxiliaryService.java
License:Apache License
private Configuration createMiniYarnConfig() throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.set("yarn.nodemanager.aux-services", "llama_nm_plugin");
    conf.setClass("yarn.nodemanager.aux-services.llama_nm_plugin.class",
        MyLlamaNMAuxiliaryService.class, AuxiliaryService.class);
    injectLlamaNMConfiguration(conf);
    return conf;
}
From source file:com.cloudera.oryx.computation.common.JobStep.java
License:Open Source License
/**
 * Creates a new {@link MRPipeline} instance that contains common configuration
 * settings.
 *
 * @return a new {@link MRPipeline} instance, suitably configured
 */
protected final MRPipeline createBasicPipeline(Class<?> jarClass) throws IOException {
    Configuration conf = OryxConfiguration.get(getConf());

    conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
    conf.setClass(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC, SnappyCodec.class, CompressionCodec.class);

    conf.setBoolean("mapred.output.compress", true);
    conf.set("mapred.output.compression.type", "BLOCK");
    conf.setClass("mapred.output.compression.codec", SnappyCodec.class, CompressionCodec.class);
    // Set old-style equivalents for Avro/Crunch's benefit
    conf.set("avro.output.codec", "snappy");

    conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, true);
    conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, true);
    conf.setBoolean(TTConfig.TT_OUTOFBAND_HEARBEAT, true);
    conf.setInt(MRJobConfig.JVM_NUMTASKS_TORUN, -1);

    //conf.setBoolean("crunch.disable.deep.copy", true);
    // Giving one mapper a lot of data can cause issues in some stages, so default to disable this
    conf.setBoolean("crunch.disable.combine.file", true);

    Config appConfig = ConfigUtils.getDefaultConfig();

    conf.set("crunch.tmp.dir", appConfig.getString("computation-layer.tmp-dir"));

    int mapMemoryMB = appConfig.getInt("computation-layer.mapper-memory-mb");
    log.info("Mapper memory: {}", mapMemoryMB);
    int mapHeapMB = (int) (mapMemoryMB / 1.3); // Matches Hadoop's default
    log.info("Mappers have {}MB heap and can access {}MB RAM", mapHeapMB, mapMemoryMB);
    if (conf.get(MRJobConfig.MAP_JAVA_OPTS) != null) {
        log.info("Overriding previous setting of {}, which was '{}'", MRJobConfig.MAP_JAVA_OPTS,
            conf.get(MRJobConfig.MAP_JAVA_OPTS));
    }
    conf.set(MRJobConfig.MAP_JAVA_OPTS,
        "-Xmx" + mapHeapMB + "m -XX:+UseCompressedOops -XX:+UseParallelGC -XX:+UseParallelOldGC");
    log.info("Set {} to '{}'", MRJobConfig.MAP_JAVA_OPTS, conf.get(MRJobConfig.MAP_JAVA_OPTS));
    // See comment below on CM
    conf.setInt("mapreduce.map.java.opts.max.heap", mapHeapMB);

    int reduceMemoryMB = appConfig.getInt("computation-layer.reducer-memory-mb");
    log.info("Reducer memory: {}", reduceMemoryMB);
    if (isHighMemoryStep()) {
        reduceMemoryMB *= appConfig.getInt("computation-layer.worker-high-memory-factor");
        log.info("Increasing {} to {} for high-memory step", MRJobConfig.REDUCE_MEMORY_MB, reduceMemoryMB);
    }
    conf.setInt(MRJobConfig.REDUCE_MEMORY_MB, reduceMemoryMB);

    int reduceHeapMB = (int) (reduceMemoryMB / 1.3); // Matches Hadoop's default
    log.info("Reducers have {}MB heap and can access {}MB RAM", reduceHeapMB, reduceMemoryMB);
    if (conf.get(MRJobConfig.REDUCE_JAVA_OPTS) != null) {
        log.info("Overriding previous setting of {}, which was '{}'", MRJobConfig.REDUCE_JAVA_OPTS,
            conf.get(MRJobConfig.REDUCE_JAVA_OPTS));
    }
    conf.set(MRJobConfig.REDUCE_JAVA_OPTS,
        "-Xmx" + reduceHeapMB + "m -XX:+UseCompressedOops -XX:+UseParallelGC -XX:+UseParallelOldGC");
    log.info("Set {} to '{}'", MRJobConfig.REDUCE_JAVA_OPTS, conf.get(MRJobConfig.REDUCE_JAVA_OPTS));
    // I see this in CM but not in Hadoop docs; probably won't hurt as it's supposed to result in
    // -Xmx appended to opts above, which is at worst redundant
    conf.setInt("mapreduce.reduce.java.opts.max.heap", reduceHeapMB);

    conf.setInt("yarn.scheduler.capacity.minimum-allocation-mb", 128);
    conf.setInt("yarn.app.mapreduce.am.resource.mb", 384);

    // Pass total config state
    conf.set(CONFIG_SERIALIZATION_KEY, ConfigUtils.getDefaultConfig().root().render());

    // Make sure to set any args to conf above this line!
    setConf(conf);

    Job job = Job.getInstance(conf);

    // Basic File IO settings
    FileInputFormat.setMaxInputSplitSize(job, 1L << 28); // ~268MB
    SequenceFileOutputFormat.setOutputCompressionType(job, SequenceFile.CompressionType.BLOCK);
    FileOutputFormat.setCompressOutput(job, true);
    FileOutputFormat.setOutputCompressorClass(job, SnappyCodec.class);

    log.info("Created pipeline configuration {}", job.getConfiguration());

    return new MRPipeline(jarClass, getCustomJobName(), job.getConfiguration());
}