List of usage examples for the org.apache.hadoop.yarn.conf.YarnConfiguration constructor
public YarnConfiguration(Configuration conf)
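Before the per-project examples below, a minimal self-contained sketch of what this constructor does: it wraps an existing Hadoop Configuration and loads yarn-default.xml and yarn-site.xml from the classpath on top of it. The class name and property value used here are illustrative, not taken from the sources listed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnConfigurationExample {
    public static void main(String[] args) {
        // Start from a plain Hadoop Configuration (the value below is hypothetical).
        Configuration base = new Configuration();
        base.set("fs.defaultFS", "hdfs://namenode:8020");

        // Wrapping it in a YarnConfiguration keeps the existing properties and
        // additionally loads yarn-default.xml / yarn-site.xml from the classpath.
        YarnConfiguration yarnConf = new YarnConfiguration(base);
        System.out.println("ResourceManager address: "
                + yarnConf.get(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS));
    }
}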
From source file:oz.hadoop.yarn.api.core.ApplicationContainerLauncherImpl.java
License:Apache License
/**
 * @param applicationSpecification
 * @param containerSpecification
 */
public ApplicationContainerLauncherImpl(PrimitiveImmutableTypeMap applicationSpecification,
        PrimitiveImmutableTypeMap containerSpecification) {
    super(applicationSpecification, containerSpecification);
    this.resourceManagerClient = AMRMClientAsync.createAMRMClientAsync(100,
            this.callbackSupport.buildResourceManagerCallbackHandler(this));
    this.nodeManagerCallbaclHandler = this.callbackSupport.buildNodeManagerCallbackHandler(this);
    this.nodeManagerClient = new NMClientAsyncImpl(this.nodeManagerCallbaclHandler);
    this.yarnConfig = new YarnConfiguration(new Configuration());
}
From source file:oz.hadoop.yarn.test.cluster.MiniYarnCluster.java
License:Apache License
@Override
public void serviceInit(Configuration conf) throws Exception {
    conf.setBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, true);
    this.addService(new ResourceManagerWrapper(0));
    for (int index = 0; index < this.nodeManagers.length; index++) {
        this.nodeManagers[index] = new ShortCircuitedNodeManager();
        this.addService(new NodeManagerWrapper(index));
    }
    super.serviceInit(conf instanceof YarnConfiguration ? conf : new YarnConfiguration(conf));
}
From source file:proxyyarn.ProxyYarn.java
License:Apache License
public boolean run() throws Exception {
    Configuration conf = new YarnConfiguration(new Configuration());
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    log.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    log.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        log.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo("default");
    log.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            log.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    FileSystem fs = FileSystem.get(conf);
    if (!fs.getClass().equals(DistributedFileSystem.class)) {
        log.error("Expected DistributedFileSystem, but was {}", fs.getClass().getSimpleName());
        System.exit(1);
    }

    // ApplicationClientProtocol applicationsManager;
    // InetSocketAddress rmAddress = NetUtils.createSocketAddr(yarnConf.get(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS));
    // log.info("Connecting to ResourceManager at {}", rmAddress);
    // Configuration appManagerServerConf = new Configuration(conf);
    // YarnRPC rpc = YarnRPC.create(appManagerServerConf);
    // ApplicationClientProtocol applicationManager = (ApplicationClientProtocol) rpc.getProxy(ApplicationClientProtocol.class, rmAddress, appManagerServerConf);

    String appName = "AccumuloProxyYarn";
    YarnClientApplication app = yarnClient.createApplication();

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName(appName);

    // GetNewApplicationRequest request = Records.newRecord(GetNewApplicationRequest.class);
    // GetNewApplicationResponse response = applicationManager.getNewApplication(request);
    // log.info("Got new ApplicationId=" + response.getApplicationId());
    // ApplicationId appId = response.getApplicationId();

    // Create a new ApplicationSubmissionContext
    // ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);
    // set the ApplicationId
    // appContext.setApplicationId(appId);
    // set the application name
    // appContext.setApplicationName(appName);

    // Create a new container launch context for the AM's container
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // Define the local resources required
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    // Lets assume the jar we need for our ApplicationMaster is available in
    // HDFS at a certain known path to us and we want to make it available to
    // the ApplicationMaster in the launched container
    Path localJarPath = new Path(
            "file:///Users/jelser/projects/accumulo-proxy-yarn/target/accumulo-proxy-yarn-0.0.1-SNAPSHOT.jar");
    Path jarPath = new Path("hdfs:///accumulo-proxy-yarn-0.0.1-SNAPSHOT.jar");
    fs.copyFromLocalFile(false, true, localJarPath, jarPath);
    FileStatus jarStatus = fs.getFileStatus(jarPath);
    LocalResource amJarRsrc = Records.newRecord(LocalResource.class);

    // Set the type of resource - file or archive
    // archives are untarred at the destination by the framework
    amJarRsrc.setType(LocalResourceType.FILE);
    // Set visibility of the resource
    // Setting to most private option i.e. this file will only
    // be visible to this instance of the running application
    amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    // Set the location of resource to be copied over into the
    // working directory
    amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(jarPath));
    // Set timestamp and length of file so that the framework
    // can do basic sanity checks for the local resource
    // after it has been copied over to ensure it is the same
    // resource the client intended to use with the application
    amJarRsrc.setTimestamp(jarStatus.getModificationTime());
    amJarRsrc.setSize(jarStatus.getLen());
    // The framework will create a symlink called AppMaster.jar in the
    // working directory that will be linked back to the actual file.
    // The ApplicationMaster, if needs to reference the jar file, would
    // need to use the symlink filename.
    localResources.put("AppMaster.jar", amJarRsrc);
    // Set the local resources into the launch context
    amContainer.setLocalResources(localResources);

    // Set up the environment needed for the launch context
    Map<String, String> env = new HashMap<String, String>();
    // For example, we could setup the classpath needed.
    // Assuming our classes or jars are available as local resources in the
    // working directory from which the command will be run, we need to append
    // "." to the path.
    // By default, all the hadoop specific classpaths will already be available
    // in $CLASSPATH, so we should be careful not to overwrite it.
    String classPathEnv = "$CLASSPATH:./*:/Users/jelser/projects/accumulo-proxy-yarn/target/lib/*";
    env.put("CLASSPATH", classPathEnv);
    amContainer.setEnvironment(env);

    // Construct the command to be executed on the launched container
    String command = "${JAVA_HOME}" + "/bin/java" + " proxyyarn.ProxyYarnAppMaster 1>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" + " 2>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";

    List<String> commands = new ArrayList<String>();
    commands.add(command);
    // add additional commands if needed

    // Set the command array into the container spec
    amContainer.setCommands(commands);

    // Define the resource requirements for the container
    // For now, YARN only supports memory so we set the memory
    // requirements.
    // If the process takes more than its allocated memory, it will
    // be killed by the framework.
    // Memory being requested for should be less than max capability
    // of the cluster and all asks should be a multiple of the min capability.
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(256);
    appContext.setResource(capability);

    // Create the request to send to the ApplicationsManager
    // SubmitApplicationRequest appRequest = Records.newRecord(SubmitApplicationRequest.class);
    // appRequest.setApplicationSubmissionContext(appContext);

    // Submit the application to the ApplicationsManager
    // Ignore the response as either a valid response object is returned on
    // success or an exception thrown to denote the failure
    // applicationManager.submitApplication(appRequest);

    // Set the container launch content into the ApplicationSubmissionContext
    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(0);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue("default");

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success
    // or an exception thrown to denote some form of a failure
    log.info("Submitting application to ASM");
    yarnClient.submitApplication(appContext);

    return monitorApplication(yarnClient, appId);

    /*
    Thread.sleep(200);
    boolean running = false;
    while (true) {
        GetApplicationReportRequest reportRequest = Records.newRecord(GetApplicationReportRequest.class);
        reportRequest.setApplicationId(appId);
        GetApplicationReportResponse reportResponse = applicationManager.getApplicationReport(reportRequest);
        ApplicationReport report = reportResponse.getApplicationReport();
        log.info(report.toString());
        YarnApplicationState state = report.getYarnApplicationState();
        switch (state) {
        case NEW:
        case NEW_SAVING:
        case SUBMITTED:
        case ACCEPTED:
            log.info("State: {}", state);
            break;
        case RUNNING:
            log.info("Running application");
            running = true;
            break;
        case FINISHED:
        case FAILED:
        case KILLED:
            log.info("State: {}", state);
            return;
        default:
            log.info("Unknown state: {}", state);
            return;
        }
        if (!running) {
            Thread.sleep(1000);
        }
    }
    */
}
From source file:uk.ac.gla.terrier.probos.controller.ControllerServer.java
License:Open Source License
public ControllerServer(Configuration _hconf) throws IOException {
    this.yConf = new YarnConfiguration(_hconf);
    yConf.addResource("yarn-site.xml");
    UserGroupInformation.setConfiguration(yConf);
    this.pConf = new PConfiguration(_hconf);

    // do the Kerberos authentication
    if (UserGroupInformation.isSecurityEnabled()) {
        final String principal = pConf.get(PConfiguration.KEY_CONTROLLER_PRINCIPAL);
        String keytab = pConf.get(PConfiguration.KEY_CONTROLLER_KEYTAB);
        File fKeytab = new File(keytab);
        if (!fKeytab.exists()) {
            if (!fKeytab.isAbsolute()) {
                keytab = System.getProperty("probos.conf") + '/' + keytab;
                fKeytab = new File(keytab);
                pConf.set(PConfiguration.KEY_CONTROLLER_KEYTAB, keytab);
            }
            if (!fKeytab.exists())
                throw new FileNotFoundException("Could not find keytab file " + keytab);
        }
        LOG.debug("Starting login for " + principal + " using keytab " + keytab);
        SecurityUtil.login(pConf, PConfiguration.KEY_CONTROLLER_KEYTAB, PConfiguration.KEY_CONTROLLER_PRINCIPAL,
                Utils.getHostname());
        LOG.info("Switched principal to " + UserGroupInformation.getCurrentUser().getUserName());
    }

    this.mClient = MailClient.getMailClient(this.pConf);
    final String bindAddress = pConf.get(PConfiguration.KEY_CONTROLLER_BIND_ADDRESS);
    if (bindAddress == null)
        throw new IllegalArgumentException(PConfiguration.KEY_CONTROLLER_BIND_ADDRESS + " cannot be null");

    secretManager = new ControllerAPISecretManager(
            // delegationKeyUpdateInterval -- renewal interval for delegation token
            7 * 24 * 3600 * 1000, // Yarn default is 7 days
            // delegationTokenMaxLifetime -- maximum lifetime for which a delegation token is valid,
            // i.e. how long can we keep renewing the token for?
            14 * 24 * 3600 * 1000, // Yarn default is 14 days
            // delegationTokenRenewInterval -- how long should a token last?
            7 * 24 * 3600 * 1000, // Yarn default is 7 days
            // delegationTokenRemoverScanInterval -- how often are expired keys removed?
            3600 * 1000); // Yarn default is 1 hour

    // build the client rpc server: 8027
    int port = pConf.getInt(PConfiguration.KEY_CONTROLLER_PORT, 8027);
    LOG.info("Starting RPC server for " + PBSClient.class.getSimpleName() + " on port " + port);
    clientRpcserver = new RPC.Builder(yConf).setInstance(this).setBindAddress(bindAddress)
            .setProtocol(PBSClient.class).setPort(port).setSecretManager(secretManager)
            //.setVerbose(true)
            .build();
    System.setProperty("hadoop.policy.file", Constants.PRODUCT_NAME + "-policy.xml");
    clientRpcserver.refreshServiceAclWithLoadedConfiguration(yConf, new ControllerPolicyProvider());

    // build the master rpc server: 8028
    port = Constants.CONTROLLER_MASTER_PORT_OFFSET + pConf.getInt(PConfiguration.KEY_CONTROLLER_PORT, 8027);
    LOG.info("Starting RPC server for " + PBSMasterClient.class.getSimpleName() + " on port " + port);
    masterRpcserver = new RPC.Builder(yConf).setInstance(new ApplicationMasterAPI()).setBindAddress(bindAddress)
            .setProtocol(PBSMasterClient.class).setPort(port).setSecretManager(secretManager)
            //.setVerbose(true)
            .build();
    masterRpcserver.refreshServiceAclWithLoadedConfiguration(yConf, new ControllerPolicyProvider());

    // build the interactive rpc server: 8026
    port = Constants.CONTROLLER_INTERACTIVE_PORT_OFFSET + pConf.getInt(PConfiguration.KEY_CONTROLLER_PORT, 8027);
    LOG.info("Starting RPC server for " + PBSInteractiveClient.class.getSimpleName() + " on port " + port);
    interactiveRpcserver = new RPC.Builder(yConf).setInstance(new InteractiveTaskAPI())
            .setBindAddress(bindAddress).setProtocol(PBSInteractiveClient.class).setPort(port)
            .setSecretManager(secretManager)
            //.setVerbose(true)
            .build();
    interactiveRpcserver.refreshServiceAclWithLoadedConfiguration(yConf, new ControllerPolicyProvider());

    // build the webapp UI server
    final List<Entry<String, HttpServlet>> controllerServlets = new ArrayList<>();
    controllerServlets
            .add(new MapEntry<String, HttpServlet>("/", new QstatServlet("/", controllerServlets, this)));
    controllerServlets.add(
            new MapEntry<String, HttpServlet>("/pbsnodes", new PbsnodesServlet("/", controllerServlets, this)));
    // metrics is the Servlet from metrics.dropwizard for accessing metrics
    controllerServlets.add(new MapEntry<String, HttpServlet>("/metrics", new MetricsServlet(metrics)));
    // this is the hadoop servlet for accessing anything defined in JMX
    controllerServlets.add(new MapEntry<String, HttpServlet>("/jmx", new JMXJsonServlet()));
    final int httpport = pConf.getInt(PConfiguration.KEY_CONTROLLER_HTTP_PORT,
            Constants.DEFAULT_CONTROLLER_PORT + Constants.CONTROLLER_HTTP_PORT_OFFSET);
    LOG.info("Starting Jetty ProbosControllerHttp on port " + httpport);
    webServer = new WebServer("ProbosControllerHttp", controllerServlets, httpport);
    webServer.init(pConf);

    // this thread detects yarn jobs that have ended
    watcherThread = new Thread(new ControllerWatcher());
    watcherThread.setName(ControllerWatcher.class.getSimpleName());

    // ensure we have the directory
    Path _probosFolder = new Path(pConf.get(PConfiguration.KEY_CONTROLLER_JOBDIR));
    FileSystem controllerFS = FileSystem.get(yConf);
    if (!_probosFolder.isUriPathAbsolute()) {
        _probosFolder = _probosFolder.makeQualified(controllerFS.getUri(), controllerFS.getWorkingDirectory());
        assert _probosFolder.isUriPathAbsolute();
    }
    probosFolder = _probosFolder;
    if (!controllerFS.exists(probosFolder)) {
        throw new IllegalArgumentException(probosFolder.toString() + " does not exist");
    }

    // now initialise the metrics
    // jobs.queued.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "jobs", "queued.size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            int sum = 0;
            for (int i : user2QueuedCount.values())
                sum += i;
            return sum;
        }
    });
    // jobs.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "jobs", "size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            return jobArray.size();
        }
    });
    // jobs.held.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "jobs", "held.size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            return jobHolds.size();
        }
    });
    // nodes.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "nodes", "size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            try {
                return getNodesStatus().length;
            } catch (Exception e) {
                return 0;
            }
        }
    });
    // nodes.free.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "nodes", "free.size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            try {
                PBSNodeStatus[] nodes = getNodesStatus();
                int count = 0;
                for (PBSNodeStatus n : nodes)
                    if ("free".equals(n.getState()))
                        count++;
                return count;
            } catch (Exception e) {
                return 0;
            }
        }
    });
    runningJobs = metrics.counter(MetricRegistry.name(ControllerServer.class, "jobs", "running.counter"));
    rejectedJobs = metrics.counter(MetricRegistry.name(ControllerServer.class, "jobs", "rejected.counter"));
    killedJobs = metrics.counter(MetricRegistry.name(ControllerServer.class, "jobs", "killed.counter"));
    mailEvents = metrics.counter(MetricRegistry.name(ControllerServer.class, "mails", "counter"));
    mailFailures = metrics.counter(MetricRegistry.name(ControllerServer.class, "mails", "failure.counter"));
}
From source file:yarn.montecarlo.MonteCarloSimulationDemo.java
License:Apache License
/**
 * @param args
 */
public static void main(String[] args) {
    int containerCount = prepare(args);
    YarnApplication<DataProcessor> yarnApplication = YarnAssembly
            .forApplicationContainer(MonteCarloSimulationContainer.class).containerCount(containerCount)
            .memory(128).withApplicationMaster(new YarnConfiguration(new Configuration())).memory(128)
            .build("MonteCarloSimulation");
    yarnApplication.registerReplyListener(new ResultsPrinter());
    DataProcessor processor = yarnApplication.launch();
    System.out.println("\n=== STARTING SIMULATION ===\n");

    long start = System.currentTimeMillis();
    for (int sigma = 5; sigma < 20; sigma++) {
        for (int avReturn = 8; avReturn < 16; avReturn++) {
            for (int anualInv = 5000; anualInv < 6000; anualInv += 100) {
                ByteBuffer inputBuffer = ByteBuffer.allocate(6 * 4);
                inputBuffer.putInt(sigma);
                inputBuffer.putInt(avReturn);
                inputBuffer.putInt(anualInv);
                inputBuffer.putInt(30); // cycle
                inputBuffer.putInt(100000); // initial investment
                inputBuffer.putInt(100000); // simulations
                inputBuffer.flip();
                processor.process(inputBuffer);
            }
        }
    }
    long stop = System.currentTimeMillis();
    yarnApplication.shutDown();
    System.out.println("Completed in " + (stop - start) + " milliseconds");
    cleanup();
}
From source file:yarnkit.appmaster.ApplicationMasterService.java
License:Apache License
public ApplicationMasterService(@CheckForNull ApplicationMasterParameters parameters) {
    super();
    this.parameters = Preconditions.checkNotNull(parameters);
    this.yarnConf = new YarnConfiguration(parameters.getConfiguration());
}
From source file:yarnkit.client.YarnClientService.java
License:Apache License
public YarnClientService(@Nonnull YarnClientParameters parameters, @Nonnull Configuration conf,
        @Nonnull Stopwatch stopwatch) {
    this.parameters = Preconditions.checkNotNull(parameters);
    this.conf = new YarnConfiguration(conf);
    this.stopwatch = stopwatch;
}