Usage examples for org.apache.hadoop.yarn.conf.YarnConfiguration#set
Signature: public void set(String name, String value)
Sets the configuration property identified by name to the given value.
From source file: co.cask.cdap.common.guice.TwillModule.java
License:Apache License
/** * Provider method for instantiating {@link YarnTwillRunnerService}. *///from w w w. j ava2 s. co m @Singleton @Provides private YarnTwillRunnerService provideYarnTwillRunnerService(CConfiguration configuration, YarnConfiguration yarnConfiguration, LocationFactory locationFactory) { String zkConnectStr = configuration.get(Constants.Zookeeper.QUORUM) + configuration.get(Constants.CFG_TWILL_ZK_NAMESPACE); // Copy the yarn config and set the max heap ratio. YarnConfiguration yarnConfig = new YarnConfiguration(yarnConfiguration); yarnConfig.set(Constants.CFG_TWILL_RESERVED_MEMORY_MB, configuration.get(Constants.CFG_TWILL_RESERVED_MEMORY_MB)); YarnTwillRunnerService runner = new YarnTwillRunnerService(yarnConfig, zkConnectStr, LocationFactories.namespace(locationFactory, "twill")); // Set JVM options based on configuration String jvmOpts = configuration.get(Constants.AppFabric.PROGRAM_JVM_OPTS); runner.setJVMOptions(jvmOpts); return runner; }
From source file:co.cask.cdap.common.security.YarnTokenUtils.java
License:Apache License
/**
 * Gets a YARN RM delegation token for the current user and stores it in the given
 * {@link Credentials}. A no-op when Hadoop security is disabled.
 *
 * @param configuration the YARN configuration used to locate the Resource Manager(s)
 * @param credentials the credentials instance to add the token to
 * @return the same {@link Credentials} instance as the one given in parameter
 * @throws RuntimeException (propagated) if the token cannot be obtained
 */
public static Credentials obtainToken(YarnConfiguration configuration, Credentials credentials) {
    if (!UserGroupInformation.isSecurityEnabled()) {
        return credentials;
    }
    YarnClient yarnClient = YarnClient.createYarnClient();
    try {
        yarnClient.init(configuration);
        yarnClient.start();
        Text renewer = new Text(UserGroupInformation.getCurrentUser().getShortUserName());
        org.apache.hadoop.yarn.api.records.Token rmDelegationToken =
                yarnClient.getRMDelegationToken(renewer);
        // TODO: The following logic should be replaced with call to
        // ClientRMProxy.getRMDelegationTokenService after CDAP-4825 is resolved
        List<String> services = new ArrayList<>();
        if (HAUtil.isHAEnabled(configuration)) {
            // If HA is enabled, we need to enumerate all RM hosts and add the corresponding
            // service name to the token service.
            // Copy the yarn conf since we need to modify it to get the RM addresses.
            YarnConfiguration yarnConf = new YarnConfiguration(configuration);
            for (String rmId : HAUtil.getRMHAIds(configuration)) {
                yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
                InetSocketAddress address = yarnConf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
                        YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);
                services.add(SecurityUtil.buildTokenService(address).toString());
            }
        } else {
            services.add(SecurityUtil.buildTokenService(YarnUtils.getRMAddress(configuration)).toString());
        }
        Token<TokenIdentifier> token =
                ConverterUtils.convertFromYarn(rmDelegationToken, (InetSocketAddress) null);
        token.setService(new Text(Joiner.on(',').join(services)));
        credentials.addToken(new Text(token.getService()), token);
        // OK to log, it won't log the credential, only information about the token.
        LOG.info("Added RM delegation token: {}", token);
        return credentials;
    } catch (Exception e) {
        LOG.error("Failed to get secure token for Yarn.", e);
        throw Throwables.propagate(e);
    } finally {
        // Fix: stop the client even when init()/start() fails. Previously stop() was only
        // reached from an inner finally, so a failure during startup leaked the client.
        // Stopping a not-yet-started Hadoop Service is safe (lifecycle no-op).
        yarnClient.stop();
    }
}
From source file:co.cask.cdap.operations.yarn.YarnInfo.java
License:Apache License
/**
 * Determines the web URL of the currently active Resource Manager.
 * Should only be called when RM HA is enabled.
 *
 * @return the webapp URL of the active RM
 * @throws IOException if the active RM address cannot be converted to a webapp address
 * @throws IllegalStateException if HA is not configured or no active RM can be found
 */
private URL getHAWebURL() throws IOException {
    Collection<String> rmIds = HAUtil.getRMHAIds(conf);
    if (rmIds.isEmpty()) {
        throw new IllegalStateException("Resource Manager HA web URL requested in non-HA mode.");
    }
    for (String rmId : rmIds) {
        // Copy the config so each iteration can target a single RM instance.
        YarnConfiguration yarnConf = new YarnConfiguration(conf);
        yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
        yarnConf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
                conf.get(YarnConfiguration.RM_PRINCIPAL, ""));
        RMHAServiceTarget rmhaServiceTarget = new RMHAServiceTarget(yarnConf);
        HAServiceStatus serviceStatus;
        try {
            HAServiceProtocol proxy = rmhaServiceTarget.getProxy(yarnConf, 10000);
            serviceStatus = proxy.getServiceStatus();
        } catch (IOException e) {
            // Fix: an unreachable RM is expected during failover; previously this aborted
            // the whole lookup instead of letting the remaining RMs be probed.
            continue;
        }
        if (HAServiceProtocol.HAServiceState.ACTIVE == serviceStatus.getState()) {
            // Fix: return on the first active RM instead of probing the rest and keeping
            // only the last one found.
            return adminToWebappAddress(rmhaServiceTarget.getAddress());
        }
    }
    throw new IllegalStateException("Could not find an active resource manager");
}
From source file:co.cask.tigon.guice.TwillModule.java
License:Apache License
/** * Provider method for instantiating {@link org.apache.twill.yarn.YarnTwillRunnerService}. *//*from w ww. jav a2 s . com*/ @Singleton @Provides private YarnTwillRunnerService provideYarnTwillRunnerService(CConfiguration configuration, YarnConfiguration yarnConfiguration, LocationFactory locationFactory) { String zkConnectStr = configuration.get(Constants.Zookeeper.QUORUM) + configuration.get(Constants.CFG_TWILL_ZK_NAMESPACE); // Copy the yarn config and set the max heap ratio. YarnConfiguration yarnConfig = new YarnConfiguration(yarnConfiguration); yarnConfig.set(Constants.CFG_TWILL_RESERVED_MEMORY_MB, configuration.get(Constants.CFG_TWILL_RESERVED_MEMORY_MB)); YarnTwillRunnerService runner = new YarnTwillRunnerService(yarnConfig, zkConnectStr, LocationFactories.namespace(locationFactory, "twill")); // Set JVM options based on configuration runner.setJVMOptions(configuration.get(Constants.Container.PROGRAM_JVM_OPTS)); return runner; }
From source file:com.continuuity.weave.yarn.ClusterTestBase.java
License:Open Source License
protected final void doInit() throws IOException { // Starts Zookeeper zkServer = InMemoryZKServer.builder().build(); zkServer.startAndWait();// w ww . java 2 s. c o m // Start YARN mini cluster YarnConfiguration config = new YarnConfiguration(new Configuration()); // TODO: Hack config.set("yarn.resourcemanager.scheduler.class", "org.apache.hadoop.yarn.server.resourcemanager.scheduler" + ".fifo.FifoScheduler"); config.set("yarn.minicluster.fixed.ports", "true"); cluster = new MiniYARNCluster("test-cluster", 1, 1, 1); cluster.init(config); cluster.start(); runnerService = new YarnWeaveRunnerService(config, zkServer.getConnectionStr() + "/weave", new LocalLocationFactory(Files.createTempDir())); runnerService.startAndWait(); }
From source file:com.github.sakserv.minicluster.simpleyarnapp.Client.java
License:Apache License
public void run(String[] args) throws Exception { final String command = args[0]; final int n = Integer.valueOf(args[1]); final Path jarPath = new Path(args[2]); final String resourceManagerAddress = args[3]; final String resourceManagerHostname = args[4]; final String resourceManagerSchedulerAddress = args[5]; final String resourceManagerResourceTrackerAddress = args[6]; // Create yarnClient YarnConfiguration conf = new YarnConfiguration(); conf.set("yarn.resourcemanager.address", resourceManagerAddress); conf.set("yarn.resourcemanager.hostname", resourceManagerHostname); conf.set("yarn.resourcemanager.scheduler.address", resourceManagerSchedulerAddress); conf.set("yarn.resourcemanager.resource-tracker.address", resourceManagerResourceTrackerAddress); YarnClient yarnClient = YarnClient.createYarnClient(); yarnClient.init(conf);/*from w ww . j a va 2s .c om*/ yarnClient.start(); // Create application via yarnClient YarnClientApplication app = yarnClient.createApplication(); // Set up the container launch context for the application master ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class); amContainer.setCommands(Collections.singletonList("$JAVA_HOME/bin/java" + " -Xmx256M" + " com.hortonworks.simpleyarnapp.ApplicationMaster" + " " + command + " " + String.valueOf(n) + " " + resourceManagerAddress + " " + resourceManagerHostname + " " + resourceManagerSchedulerAddress + " " + resourceManagerResourceTrackerAddress + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr")); // Setup jar for ApplicationMaster LocalResource appMasterJar = Records.newRecord(LocalResource.class); setupAppMasterJar(jarPath, appMasterJar); amContainer.setLocalResources(Collections.singletonMap("simple-yarn-app-1.1.0.jar", appMasterJar)); // Setup CLASSPATH for ApplicationMaster Map<String, String> appMasterEnv = new HashMap<String, String>(); setupAppMasterEnv(appMasterEnv); 
amContainer.setEnvironment(appMasterEnv); // Set up resource type requirements for ApplicationMaster Resource capability = Records.newRecord(Resource.class); capability.setMemory(256); capability.setVirtualCores(1); // Finally, set-up ApplicationSubmissionContext for the application ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext(); appContext.setApplicationName("simple-yarn-app"); // application name appContext.setAMContainerSpec(amContainer); appContext.setResource(capability); appContext.setQueue("default"); // queue // Submit application ApplicationId appId = appContext.getApplicationId(); System.out.println("Submitting application " + appId); yarnClient.submitApplication(appContext); ApplicationReport appReport = yarnClient.getApplicationReport(appId); YarnApplicationState appState = appReport.getYarnApplicationState(); while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED && appState != YarnApplicationState.FAILED) { Thread.sleep(100); appReport = yarnClient.getApplicationReport(appId); appState = appReport.getYarnApplicationState(); } System.out.println("Application " + appId + " finished with" + " state " + appState + " at " + appReport.getFinishTime()); }
From source file:com.intropro.prairie.unit.yarn.YarnUnit.java
License:Apache License
/**
 * Assembles the YARN unit's configuration on top of the parent unit's configs: HDFS as the
 * default FS, per-user proxy permissions, a FIFO scheduler, and the Prairie site overrides.
 *
 * @return the fully populated YARN configuration for this unit
 */
@Override
protected YarnConfiguration gatherConfigs() {
    YarnConfiguration configs = new YarnConfiguration(super.gatherConfigs());
    String currentUser = System.getProperty("user.name");

    configs.set("fs.defaultFS", hdfsUnit.getNamenode());
    configs.set("mapreduce.task.tmp.dir", getTmpDir().toString());
    // Allow the current user to proxy from any host/group.
    configs.set("hadoop.proxyuser." + currentUser + ".hosts", "*");
    configs.set("hadoop.proxyuser." + currentUser + ".groups", "*");
    // Make the command unit's binaries visible to node manager processes.
    configs.set("yarn.nodemanager.admin-env", "PATH=$PATH:" + cmdUnit.getPath());
    configs.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    configs.addResource("mapred-site.prairie.xml");
    configs.addResource("yarn-site.prairie.xml");
    return configs;
}
From source file:com.microsoft.canberra.tf.util.DoubleMatrixTextIO.java
License:Open Source License
/**
 * Creates the matrix I/O helper, binding explicit FileSystem implementations for the
 * "hdfs" and "file" schemes and opening a fresh FileSystem instance.
 *
 * @throws IOException if the file system cannot be created
 */
@Inject
public DoubleMatrixTextIO() throws IOException {
    final YarnConfiguration conf = new YarnConfiguration();
    conf.set("fs.hdfs.impl", DistributedFileSystem.class.getName());
    conf.set("fs.file.impl", LocalFileSystem.class.getName());
    this.fileSystem = FileSystem.newInstance(conf);
}
From source file:com.msd.gin.halyard.common.HBaseServerTestInstance.java
License:Apache License
/**
 * Lazily creates and caches the shared test configuration, starting (on first call) an
 * embedded ZooKeeper server, a MiniMRYarnCluster, and a local HBase cluster.
 *
 * NOTE(review): startup order here is deliberate and fragile — ZK first, then YARN
 * (config tweaks between init() and start()), then HBase pointed at both.
 *
 * @return the HBase configuration wired to the started mini clusters
 * @throws Exception if any of the embedded servers fails to start
 */
public static synchronized Configuration getInstanceConfig() throws Exception {
    if (conf == null) {
        // createTempFile + delete reserves a unique path to use as the ZK data directory.
        File zooRoot = File.createTempFile("hbase-zookeeper", "");
        zooRoot.delete();
        ZooKeeperServer zookeper = new ZooKeeperServer(zooRoot, zooRoot, 2000);
        // Port 0: let the OS pick a free port; the chosen port is read back below.
        ServerCnxnFactory factory = ServerCnxnFactory.createFactory(new InetSocketAddress("localhost", 0), 5000);
        factory.startup(zookeper);
        YarnConfiguration yconf = new YarnConfiguration();
        String argLine = System.getProperty("argLine");
        if (argLine != null) {
            // Forward surefire JVM args to MR app masters, redirecting JaCoCo coverage
            // output to a separate file so it does not clash with the test JVM's file.
            yconf.set("yarn.app.mapreduce.am.command-opts", argLine.replace("jacoco.exec", "jacocoMR.exec"));
        }
        yconf.setBoolean(MRConfig.MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING, false);
        yconf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
        MiniMRYarnCluster miniCluster = new MiniMRYarnCluster("testCluster");
        miniCluster.init(yconf);
        // NOTE(review): the uber-task flag is set after init() but before start() —
        // presumably intentional so it lands in the cluster's effective config; confirm
        // before reordering.
        yconf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true);
        miniCluster.start();
        // Same reserve-a-unique-path trick for the HBase root directory.
        File hbaseRoot = File.createTempFile("hbase-root", "");
        hbaseRoot.delete();
        conf = HBaseConfiguration.create(miniCluster.getConfig());
        conf.set(HConstants.HBASE_DIR, hbaseRoot.toURI().toURL().toString());
        // Point HBase at the embedded ZK started above (dynamically assigned port).
        conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, factory.getLocalPort());
        conf.set("hbase.master.hostname", "localhost");
        conf.set("hbase.regionserver.hostname", "localhost");
        // -1 disables the master info web UI.
        conf.setInt("hbase.master.info.port", -1);
        conf.set("hbase.fs.tmp.dir", new File(System.getProperty("java.io.tmpdir")).toURI().toURL().toString());
        LocalHBaseCluster cluster = new LocalHBaseCluster(conf);
        cluster.startup();
    }
    return conf;
}
From source file:com.streamsets.datacollector.hdfs.cluster.KafkaToHDFSIT.java
License:Apache License
/**
 * One-time test fixture: starts Kafka (with ZooKeeper), produces the test records, brings
 * up a MiniDFSCluster, and launches the cluster pipeline that reads from Kafka into HDFS.
 *
 * @throws Exception if any of the clusters or the pipeline fails to start
 */
@BeforeClass
public static void beforeClass() throws Exception {
    //setup kafka to read from
    KafkaTestUtil.startZookeeper();
    KafkaTestUtil.startKafkaBrokers(1);
    KafkaTestUtil.createTopic(TOPIC, 1, 1);
    producer = KafkaTestUtil.createProducer(KafkaTestUtil.getMetadataBrokerURI(), true);
    produceRecords(RECORDS_PRODUCED);
    // setting some dummy kerberos settings to be able to test a mis-setting
    System.setProperty("java.security.krb5.realm", "foo");
    System.setProperty("java.security.krb5.kdc", "localhost:0");
    File minidfsDir = new File("target/minidfs").getAbsoluteFile();
    if (!minidfsDir.exists()) {
        Assert.assertTrue(minidfsDir.mkdirs());
    }
    // MiniDFSCluster places its data under this system property's path.
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    // Allow the current user to proxy from any host/group when talking to HDFS.
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    UserGroupInformation.createUserForTesting("foo", new String[] { "all", "supergroup" });
    // Skip fsync on edit logs to speed up the test cluster.
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(conf).build();
    //setup Cluster and start pipeline
    YarnConfiguration entries = new YarnConfiguration();
    //TODO: Investigate why this is required for test to pass. Is yarn messing with the miniDFS cluster configuration?
    entries.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    entries.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    ClusterUtil.setupCluster(TEST_NAME, getPipelineJson(), entries);
    serverURI = ClusterUtil.getServerURI();
    miniSDC = ClusterUtil.getMiniSDC();
}