List of usage examples for the org.apache.hadoop.yarn.conf.YarnConfiguration copy constructor:
public YarnConfiguration(Configuration conf)
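The examples below share a common pattern: wrap an existing Configuration in a YarnConfiguration so that the YARN resources (yarn-default.xml and yarn-site.xml) are layered on top of the copied properties, without mutating the original object. A minimal, self-contained sketch of that pattern (the RM address value is only illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnConfigurationExample {
    public static void main(String[] args) {
        // Base configuration, e.g. one that already carries core-site.xml / hdfs-site.xml settings
        Configuration base = new Configuration();

        // Copy constructor: copies all properties from 'base' and adds the
        // YARN default and site resources on top of them
        YarnConfiguration yarnConf = new YarnConfiguration(base);

        // Changes made on the copy do not affect the original Configuration
        yarnConf.set(YarnConfiguration.RM_ADDRESS, "localhost:8032");
        System.out.println("RM address: " + yarnConf.get(YarnConfiguration.RM_ADDRESS));
    }
}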
From source file:org.apache.twill.internal.appmaster.ApplicationMasterMain.java
License:Apache License
/**
 * Starts the application master.
 */
public static void main(String[] args) throws Exception {
    String zkConnect = System.getenv(EnvKeys.TWILL_ZK_CONNECT);
    File twillSpec = new File(Constants.Files.TWILL_SPEC);
    RunId runId = RunIds.fromString(System.getenv(EnvKeys.TWILL_RUN_ID));

    ZKClientService zkClientService = createZKClient(zkConnect, System.getenv(EnvKeys.TWILL_APP_NAME));
    Configuration conf = new YarnConfiguration(new HdfsConfiguration(new Configuration()));
    setRMSchedulerAddress(conf);

    final YarnAMClient amClient = new VersionDetectYarnAMClientFactory(conf).create();
    ApplicationMasterService service = new ApplicationMasterService(runId, zkClientService, twillSpec,
            amClient, createAppLocation(conf));
    TrackerService trackerService = new TrackerService(service);

    List<Service> prerequisites = Lists.newArrayList(
            new YarnAMClientService(amClient, trackerService),
            zkClientService,
            new AppMasterTwillZKPathService(zkClientService, runId));

    // TODO: Temp fix for Kafka issue in MapR. Will be removed when fixing TWILL-147
    if (Boolean.parseBoolean(System.getProperty("twill.disable.kafka"))) {
        LOG.info("Log collection through kafka disabled");
    } else {
        prerequisites.add(new ApplicationKafkaService(zkClientService, runId));
    }

    new ApplicationMasterMain(String.format("%s/%s/kafka", zkConnect, runId.getId()))
            .doMain(service, prerequisites.toArray(new Service[prerequisites.size()]));
}
From source file:org.apache.twill.internal.container.TwillContainerMain.java
License:Apache License
/**
 * Main method for launching a {@link TwillContainerService} which runs
 * a {@link org.apache.twill.api.TwillRunnable}.
 */
public static void main(final String[] args) throws Exception {
    // Try to load the secure store from the localized file, which the AM requested the RM
    // to localize for this container.
    loadSecureStore();

    String zkConnectStr = System.getenv(EnvKeys.TWILL_ZK_CONNECT);
    File twillSpecFile = new File(Constants.Files.TWILL_SPEC);
    RunId appRunId = RunIds.fromString(System.getenv(EnvKeys.TWILL_APP_RUN_ID));
    RunId runId = RunIds.fromString(System.getenv(EnvKeys.TWILL_RUN_ID));
    String runnableName = System.getenv(EnvKeys.TWILL_RUNNABLE_NAME);
    int instanceId = Integer.parseInt(System.getenv(EnvKeys.TWILL_INSTANCE_ID));
    int instanceCount = Integer.parseInt(System.getenv(EnvKeys.TWILL_INSTANCE_COUNT));

    ZKClientService zkClientService = createZKClient(zkConnectStr, System.getenv(EnvKeys.TWILL_APP_NAME));
    ZKDiscoveryService discoveryService = new ZKDiscoveryService(zkClientService);
    ZKClient appRunZkClient = getAppRunZKClient(zkClientService, appRunId);

    TwillSpecification twillSpec = loadTwillSpec(twillSpecFile);
    TwillRunnableSpecification runnableSpec =
            twillSpec.getRunnables().get(runnableName).getRunnableSpecification();
    ContainerInfo containerInfo = new EnvContainerInfo();
    Arguments arguments = decodeArgs();

    BasicTwillContext context = new BasicTwillContext(runId, appRunId, containerInfo.getHost(),
            arguments.getRunnableArguments().get(runnableName).toArray(new String[0]),
            arguments.getArguments().toArray(new String[0]), runnableSpec, instanceId, discoveryService,
            discoveryService, appRunZkClient, instanceCount, containerInfo.getMemoryMB(),
            containerInfo.getVirtualCores());

    ZKClient containerZKClient = getContainerZKClient(zkClientService, appRunId, runnableName);
    Configuration conf = new YarnConfiguration(new HdfsConfiguration(new Configuration()));
    Service service = new TwillContainerService(context, containerInfo, containerZKClient, runId,
            runnableSpec, getClassLoader(), createAppLocation(conf));
    new TwillContainerMain().doMain(service, new LogFlushService(), zkClientService,
            new TwillZKPathService(containerZKClient, runId));
}
From source file:org.apache.twill.internal.yarn.Hadoop23YarnAppClient.java
License:Apache License
/**
 * Overrides the parent method to add the RM delegation token to the given context. If YARN is running
 * with an HA RM, delegation tokens for each RM service will be added.
 */
protected void addRMToken(ContainerLaunchContext context, YarnClient yarnClient, ApplicationId appId) {
    if (!UserGroupInformation.isSecurityEnabled()) {
        return;
    }

    try {
        Text renewer = new Text(UserGroupInformation.getCurrentUser().getShortUserName());
        org.apache.hadoop.yarn.api.records.Token rmDelegationToken = yarnClient.getRMDelegationToken(renewer);

        // The following logic is copied from ClientRMProxy.getRMDelegationTokenService, which is not
        // available in YARN older than 2.4
        List<String> services = new ArrayList<>();
        if (HAUtil.isHAEnabled(configuration)) {
            // If HA is enabled, we need to enumerate all RM hosts
            // and add the corresponding service name to the token service
            // Copy the yarn conf since we need to modify it to get the RM addresses
            YarnConfiguration yarnConf = new YarnConfiguration(configuration);
            for (String rmId : HAUtil.getRMHAIds(configuration)) {
                yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
                InetSocketAddress address = yarnConf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
                        YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);
                services.add(SecurityUtil.buildTokenService(address).toString());
            }
        } else {
            services.add(SecurityUtil.buildTokenService(YarnUtils.getRMAddress(configuration)).toString());
        }

        Credentials credentials = YarnUtils.decodeCredentials(context.getTokens());

        // casting needed for later Hadoop version
        @SuppressWarnings("RedundantCast")
        Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(rmDelegationToken, (InetSocketAddress) null);
        token.setService(new Text(Joiner.on(',').join(services)));
        credentials.addToken(new Text(token.getService()), token);

        LOG.debug("Added RM delegation token {} for application {}", token, appId);

        context.setTokens(YarnUtils.encodeCredentials(credentials));
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
From source file:org.apache.twill.yarn.TwillTester.java
License:Apache License
@Override
protected void before() throws Throwable {
    tmpFolder.create();

    // Starts Zookeeper
    zkServer = InMemoryZKServer.builder().setDataDir(tmpFolder.newFolder()).build();
    zkServer.startAndWait();

    // Start YARN mini cluster
    File miniDFSDir = tmpFolder.newFolder();
    LOG.info("Starting Mini DFS on path {}", miniDFSDir);
    Configuration fsConf = new HdfsConfiguration(new Configuration());
    fsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, miniDFSDir.getAbsolutePath());
    dfsCluster = new MiniDFSCluster.Builder(fsConf).numDataNodes(1).build();

    Configuration conf = new YarnConfiguration(dfsCluster.getFileSystem().getConf());

    if (YarnUtils.getHadoopVersion().equals(YarnUtils.HadoopVersions.HADOOP_20)) {
        conf.set("yarn.resourcemanager.scheduler.class",
                "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler");
    } else {
        conf.set("yarn.resourcemanager.scheduler.class",
                "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler");
        conf.set("yarn.scheduler.capacity.resource-calculator",
                "org.apache.hadoop.yarn.util.resource.DominantResourceCalculator");
        conf.setBoolean("yarn.scheduler.include-port-in-node-name", true);
    }
    conf.set("yarn.nodemanager.vmem-pmem-ratio", "20.1");
    conf.set("yarn.nodemanager.vmem-check-enabled", "false");
    conf.set("yarn.scheduler.minimum-allocation-mb", "128");
    conf.set("yarn.nodemanager.delete.debug-delay-sec", "3600");

    cluster = new MiniYARNCluster("test-cluster", 3, 1, 1);
    cluster.init(conf);
    cluster.start();

    this.config = new YarnConfiguration(cluster.getConfig());

    twillRunner = createTwillRunnerService();
    twillRunner.start();

    yarnAppClient = new VersionDetectYarnAppClientFactory().create(conf);
    yarnAppClient.startAndWait();
}
From source file:org.apache.twill.yarn.YarnTestUtils.java
License:Apache License
private static final void init(File folder) throws IOException {
    // Starts Zookeeper
    zkServer = InMemoryZKServer.builder().build();
    zkServer.startAndWait();

    // Start YARN mini cluster
    LOG.info("Starting Mini DFS on path {}", folder);
    Configuration fsConf = new HdfsConfiguration(new Configuration());
    fsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, folder.getAbsolutePath());
    dfsCluster = new MiniDFSCluster.Builder(fsConf).numDataNodes(1).build();

    Configuration conf = new YarnConfiguration(dfsCluster.getFileSystem().getConf());

    if (YarnUtils.getHadoopVersion().equals(YarnUtils.HadoopVersions.HADOOP_20)) {
        conf.set("yarn.resourcemanager.scheduler.class",
                "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler");
    } else {
        conf.set("yarn.resourcemanager.scheduler.class",
                "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler");
        conf.set("yarn.scheduler.capacity.resource-calculator",
                "org.apache.hadoop.yarn.util.resource.DominantResourceCalculator");
        conf.setBoolean("yarn.scheduler.include-port-in-node-name", true);
    }
    conf.set("yarn.nodemanager.vmem-pmem-ratio", "20.1");
    conf.set("yarn.nodemanager.vmem-check-enabled", "false");
    conf.set("yarn.scheduler.minimum-allocation-mb", "128");
    conf.set("yarn.nodemanager.delete.debug-delay-sec", "3600");

    cluster = new MiniYARNCluster("test-cluster", 3, 1, 1);
    cluster.init(conf);
    cluster.start();

    config = new YarnConfiguration(cluster.getConfig());

    runnerService = createTwillRunnerService();
    runnerService.startAndWait();

    yarnAppClient = new VersionDetectYarnAppClientFactory().create(conf);
    yarnAppClient.start();
}
From source file:org.apache.zeppelin.integration.MiniHadoopCluster.java
License:Apache License
@BeforeClass
public void start() throws IOException {
    LOGGER.info("Starting MiniHadoopCluster ...");
    this.hadoopConf = new Configuration();
    new File(configPath).mkdirs();

    // start MiniDFSCluster
    this.dfsCluster = new MiniDFSCluster.Builder(hadoopConf).numDataNodes(2).format(true).waitSafeMode(true)
            .build();
    this.dfsCluster.waitActive();
    saveConfig(hadoopConf, configPath + "/core-site.xml");

    // start MiniYarnCluster
    YarnConfiguration baseConfig = new YarnConfiguration(hadoopConf);
    baseConfig.set("yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage", "95");
    this.yarnCluster = new MiniYARNCluster(getClass().getName(), 2, 1, 1);
    yarnCluster.init(baseConfig);

    // Install a shutdown hook to stop the service and kill all running applications.
    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            yarnCluster.stop();
        }
    });

    yarnCluster.start();

    // Workaround for YARN-2642: wait until the RM has bound to a real port.
    Configuration yarnConfig = yarnCluster.getConfig();
    long start = System.currentTimeMillis();
    while (System.currentTimeMillis() - start < 30 * 1000) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            throw new IOException(e);
        }
        if (!yarnConfig.get(YarnConfiguration.RM_ADDRESS).split(":")[1].equals("0")) {
            break;
        }
    }
    if (yarnConfig.get(YarnConfiguration.RM_ADDRESS).split(":")[1].equals("0")) {
        throw new IOException("RM not up yet");
    }

    LOGGER.info("RM address in configuration is " + yarnConfig.get(YarnConfiguration.RM_ADDRESS));
    saveConfig(yarnConfig, configPath + "/yarn-site.xml");
}
From source file:org.deeplearning4j.iterativereduce.runtime.yarn.ContainerManagerHandler.java
License:Apache License
public ContainerManager getContainerManager() {
    if (containerManager != null)
        return containerManager;

    YarnConfiguration yarnConf = new YarnConfiguration(conf);
    YarnRPC rpc = YarnRPC.create(yarnConf);
    InetSocketAddress cmAddr = NetUtils.createSocketAddr(container.getNodeId().getHost(),
            container.getNodeId().getPort());

    LOG.info("Connecting to container manager at " + cmAddr);
    containerManager = ((ContainerManager) rpc.getProxy(ContainerManager.class, cmAddr, conf));

    return containerManager;
}
From source file:org.deeplearning4j.iterativereduce.runtime.yarn.ResourceManagerHandler.java
License:Apache License
public AMRMProtocol getAMResourceManager() {
    if (amResourceManager != null)
        return amResourceManager;

    LOG.debug("Using configuration: " + conf);

    YarnConfiguration yarnConf = new YarnConfiguration(conf);
    YarnRPC rpc = YarnRPC.create(yarnConf);
    InetSocketAddress rmAddress = NetUtils.createSocketAddr(yarnConf.get(
            YarnConfiguration.RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS));

    LOG.info("Connecting to the resource manager (scheduling) at " + rmAddress);
    amResourceManager = (AMRMProtocol) rpc.getProxy(AMRMProtocol.class, rmAddress, conf);

    return amResourceManager;
}
From source file:org.deeplearning4j.iterativereduce.runtime.yarn.ResourceManagerHandler.java
License:Apache License
public ClientRMProtocol getClientResourceManager() {
    if (clientResourceManager != null)
        return clientResourceManager;

    YarnConfiguration yarnConf = new YarnConfiguration(conf);
    YarnRPC rpc = YarnRPC.create(yarnConf);
    InetSocketAddress rmAddress = NetUtils.createSocketAddr(
            yarnConf.get(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS));

    LOG.info("Connecting to the resource manager (client) at " + rmAddress);
    clientResourceManager = (ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class, rmAddress, conf);

    return clientResourceManager;
}
From source file:org.elasticsearch.hadoop.yarn.am.AppMasterRpc.java
License:Apache License
public AppMasterRpc(Configuration cfg, NMTokenCache nmTokenCache) {
    this.cfg = new YarnConfiguration(cfg);
    this.nmTokenCache = nmTokenCache;
}