List of usage examples for the org.apache.hadoop.yarn.conf.YarnConfiguration constructor
public YarnConfiguration(Configuration conf)
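All of the examples below follow the same pattern: an existing Configuration is wrapped in a YarnConfiguration so that the YARN resources (yarn-default.xml, yarn-site.xml) are layered on top of whatever properties the caller has already set, and the result is handed to a YARN client or service. A minimal sketch of that pattern is shown here; the fs.defaultFS value and the printed property are illustrative only and are not taken from any of the examples.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnConfigurationExample {
    public static void main(String[] args) throws Exception {
        // Start from a plain Configuration; callers typically set a few job-specific keys here.
        Configuration base = new Configuration();
        base.set("fs.defaultFS", "hdfs://localhost:8020"); // illustrative value, not from the examples

        // Wrapping the Configuration copies its properties and ensures the YARN
        // configuration resources are loaded, so YARN-specific keys are available.
        YarnConfiguration yarnConf = new YarnConfiguration(base);

        // Typical use: initialize a YARN client service with the wrapped configuration.
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(yarnConf);
        yarnClient.start();
        try {
            System.out.println("RM address: " + yarnConf.get(YarnConfiguration.RM_ADDRESS));
        } finally {
            yarnClient.stop();
        }
    }
}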
From source file:com.datatorrent.stram.InlineAM.java
License:Apache License
public InlineAM(Configuration conf) throws Exception {
    appName = "UnmanagedAM";
    amPriority = 0;
    amQueue = "default";
    YarnConfiguration yarnConf = new YarnConfiguration(conf);
    rmClient = new YarnClientImpl();
    rmClient.init(yarnConf);
}
From source file:com.hortonworks.minicluster.MiniHadoopCluster.java
License:Apache License
/**
 * @param clusterName
 * @param numNodeManagers
 */
public MiniHadoopCluster(String clusterName, int numNodeManagers) {
    super(clusterName);
    this.testWorkDir = new File("target/MINI_YARN_CLUSTER");
    this.resourceManager = new UnsecureResourceManager();
    this.numLocalDirs = 1;
    this.numLogDirs = 1;
    this.nodeManagers = new NodeManager[numNodeManagers];
    this.configuration = new YarnConfiguration(new Configuration());
    this.configuration.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    this.configuration.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "target/MINI_DFS_CLUSTER/data");
    try {
        FileUtils.deleteDirectory(MiniHadoopCluster.this.testWorkDir);
    } catch (Exception e) {
        logger.warn("Failed to remove 'target' directory", e);
    }
}
From source file:com.hortonworks.minicluster.MiniHadoopCluster.java
License:Apache License
@Override
public void serviceInit(Configuration conf) throws Exception {
    conf.setBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, true);
    conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
            new String[] { ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID });
    conf.setClass(
            String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID),
            ShuffleHandler.class, Service.class);
    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
    this.addService(new ResourceManagerWrapper());
    for (int index = 0; index < this.nodeManagers.length; index++) {
        this.nodeManagers[index] = new ShortCircuitedNodeManager();
        this.addService(new NodeManagerWrapper(index));
    }
    super.serviceInit(conf instanceof YarnConfiguration ? conf : new YarnConfiguration(conf));
}
From source file:com.intropro.prairie.unit.yarn.YarnUnit.java
License:Apache License
@Override
protected YarnConfiguration gatherConfigs() {
    YarnConfiguration yarnConfigs = new YarnConfiguration(super.gatherConfigs());
    yarnConfigs.set("fs.defaultFS", hdfsUnit.getNamenode());
    yarnConfigs.set("mapreduce.task.tmp.dir", getTmpDir().toString());
    String user = System.getProperty("user.name");
    yarnConfigs.set("hadoop.proxyuser." + user + ".hosts", "*");
    yarnConfigs.set("hadoop.proxyuser." + user + ".groups", "*");
    yarnConfigs.set("yarn.nodemanager.admin-env", "PATH=$PATH:" + cmdUnit.getPath());
    yarnConfigs.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    yarnConfigs.addResource("mapred-site.prairie.xml");
    yarnConfigs.addResource("yarn-site.prairie.xml");
    return yarnConfigs;
}
From source file:disAMS.AMRMClient.Impl.AMRMClientImpl.java
License:Apache License
@Override
protected void serviceStart() throws Exception {
    final YarnConfiguration conf = new YarnConfiguration(getConfig());
    try {
        rmClient = ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class);
    } catch (IOException e) {
        throw new YarnRuntimeException(e);
    }
    super.serviceStart();
}
From source file:edu.uci.ics.hyracks.yarn.am.HyracksYarnApplicationMaster.java
License:Apache License
private void run() throws Exception {
    Configuration conf = new Configuration();
    config = new YarnConfiguration(conf);
    amrmc = new AMRMConnection(config);
    performRegistration();
    setupHeartbeats();
    parseManifest();
    setupAsks();
    while (true) {
        Thread.sleep(1000);
    }
}
From source file:edu.uci.ics.hyracks.yarn.client.KillHyracksApplication.java
License:Apache License
private void run() throws Exception {
    Configuration conf = new Configuration();
    YarnConfiguration yconf = new YarnConfiguration(conf);
    YarnClientRMConnection crmc = new YarnClientRMConnection(yconf);
    crmc.killApplication(options.appId);
}
From source file:edu.uci.ics.hyracks.yarn.client.LaunchHyracksApplication.java
License:Apache License
private void run() throws Exception {
    Configuration conf = new Configuration();
    YarnConfiguration yconf = new YarnConfiguration(conf);
    YarnClientRMConnection crmc = new YarnClientRMConnection(yconf);
    YarnApplication app = crmc.createApplication(options.appName);
    ContainerLaunchContext clCtx = app.getContainerLaunchContext();

    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    File amZipFile = new File(System.getProperty("basedir") + "/hyracks-yarn-am/hyracks-yarn-am.zip");
    localResources.put("archive", LocalResourceHelper.createArchiveResource(conf, amZipFile));
    clCtx.setLocalResources(localResources);

    String command = "./archive/bin/hyracks-yarn-am 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
            + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";
    List<String> commands = new ArrayList<String>();
    commands.add(command);
    clCtx.setCommands(commands);
    clCtx.setResource(ResourceHelper.createMemoryCapability(options.amMemory));

    app.submit();
}
From source file:org.apache.ambari.view.slider.SliderAppsViewControllerImpl.java
License:Apache License
/**
 * Dynamically determines the Slider client configuration. If it cannot be
 * determined, <code>null</code> is returned.
 *
 * @return the Slider client configuration, or <code>null</code>
 */
private Configuration getSliderClientConfiguration() {
    HdfsConfiguration hdfsConfig = new HdfsConfiguration();
    YarnConfiguration yarnConfig = new YarnConfiguration(hdfsConfig);

    Map<String, String> hadoopConfigs = getHadoopConfigs();
    for (Entry<String, String> entry : hadoopConfigs.entrySet()) {
        String entryValue = entry.getValue();
        if (entryValue == null) {
            entryValue = "";
        }
        yarnConfig.set(entry.getKey(), entryValue);
    }
    yarnConfig.set(PROPERTY_SLIDER_SECURITY_ENABLED, hadoopConfigs.get("security_enabled"));
    if (hadoopConfigs.containsKey(PROPERTY_SLIDER_ZK_QUORUM)) {
        yarnConfig.set(PROPERTY_SLIDER_ZK_QUORUM, hadoopConfigs.get(PROPERTY_SLIDER_ZK_QUORUM));
    }
    return yarnConfig;
}
From source file:org.apache.giraph.yarn.GiraphApplicationMaster.java
License:Apache License
/**
 * Construct the GiraphAppMaster, populating fields using env vars
 * set up by the YARN framework in this execution container.
 * @param cId the ContainerId
 * @param aId the ApplicationAttemptId
 */
protected GiraphApplicationMaster(ContainerId cId, ApplicationAttemptId aId) throws IOException {
    containerId = cId; // future good stuff will need me to operate.
    appAttemptId = aId;
    lastResponseId = new AtomicInteger(0);
    giraphConf = new ImmutableClassesGiraphConfiguration(new GiraphConfiguration());
    yarnConf = new YarnConfiguration(giraphConf);
    completedCount = new AtomicInteger(0);
    failedCount = new AtomicInteger(0);
    allocatedCount = new AtomicInteger(0);
    successfulCount = new AtomicInteger(0);
    containersToLaunch = giraphConf.getMaxWorkers() + 1;
    executor = Executors.newFixedThreadPool(containersToLaunch);
    heapPerContainer = giraphConf.getYarnTaskHeapMb();
    LOG.info("GiraphAM for ContainerId " + cId + " ApplicationAttemptId " + aId);
}