List of usage examples for org.apache.hadoop.fs.FileSystem.close()
@Override public void close() throws IOException
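Before the individual examples, here is a minimal sketch (not taken from any of the source files below; the class name and path are illustrative) of the usual pattern: obtain a FileSystem, use it, and close it in a finally block. Note that FileSystem.get() may return a cached instance shared with other callers; each doAs block in the tests below obtains and closes its own FileSystem for the acting user.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // FileSystem.get() may hand back a cached, shared instance;
        // close() releases it and removes it from the cache.
        FileSystem fs = FileSystem.get(conf);
        try {
            // ... any FileSystem operations ...
            System.out.println("/tmp exists: " + fs.exists(new Path("/tmp")));
        } finally {
            fs.close();
        }
    }
}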
From source file:org.apache.coheigea.bigdata.hdfs.HDFSTest.java
License:Apache License
@org.junit.Test
public void testDirectoryPermissions() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file
    final Path file = new Path("/tmp/tmpdir/data-file4");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Try to read the directory as "bob" - this should be allowed
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("bob");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
            Assert.assertTrue(iter.hasNext());

            fs.close();
            return null;
        }
    });

    // Change permissions so that the directory can't be read by "other"
    fileSystem.setPermission(file.getParent(), new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE));

    // Try to read the base directory as the file owner
    RemoteIterator<LocatedFileStatus> iter = fileSystem.listFiles(file.getParent(), false);
    Assert.assertTrue(iter.hasNext());

    // Now try to read the directory as "bob" again - this should fail
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            try {
                RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
                Assert.assertTrue(iter.hasNext());
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }

            fs.close();
            return null;
        }
    });
}
From source file:org.apache.coheigea.bigdata.hdfs.ranger.HDFSRangerTest.java
License:Apache License
@org.junit.Test
public void readTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Change permissions to read-only
    fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));

    // Now try to read the file as "bob" - this should be allowed (by the policy - user)
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("bob", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));

            fs.close();
            return null;
        }
    });

    // Now try to read the file as "alice" - this should be allowed (by the policy - group)
    ugi = UserGroupInformation.createUserForTesting("alice", new String[] { "IT" });
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));

            fs.close();
            return null;
        }
    });

    // Now try to read the file as unknown user "eve" - this should not be allowed
    ugi = UserGroupInformation.createUserForTesting("eve", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Read the file
            try {
                fs.open(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });

    // Now try to read the file as known user "dave" - this should not be allowed, as he doesn't have the correct permissions
    ugi = UserGroupInformation.createUserForTesting("dave", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Read the file
            try {
                fs.open(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });
}
From source file:org.apache.coheigea.bigdata.hdfs.ranger.HDFSRangerTest.java
License:Apache License
@org.junit.Test
public void writeTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir2/data-file3");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Now try to write to the file as "bob" - this should be allowed (by the policy - user)
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("bob", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            fs.append(file);

            fs.close();
            return null;
        }
    });

    // Now try to write to the file as "alice" - this should be allowed (by the policy - group)
    ugi = UserGroupInformation.createUserForTesting("alice", new String[] { "IT" });
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            fs.append(file);

            fs.close();
            return null;
        }
    });

    // Now try to write to the file as unknown user "eve" - this should not be allowed
    ugi = UserGroupInformation.createUserForTesting("eve", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            try {
                fs.append(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });

    // Now try to write to the file as known user "dave" - this should not be allowed, as he doesn't have the correct permissions
    ugi = UserGroupInformation.createUserForTesting("dave", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            try {
                fs.append(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });
}
From source file:org.apache.coheigea.bigdata.hdfs.ranger.HDFSRangerTest.java
License:Apache License
@org.junit.Test
public void executeTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir3/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Change permissions to read-only
    fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));

    // Change the parent directory permissions to be execute only for the owner
    Path parentDir = new Path("/tmp/tmpdir3");
    fileSystem.setPermission(parentDir, new FsPermission(FsAction.EXECUTE, FsAction.NONE, FsAction.NONE));

    // Try to read the directory as "bob" - this should be allowed (by the policy - user)
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("bob", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
            Assert.assertTrue(iter.hasNext());

            fs.close();
            return null;
        }
    });

    // Try to read the directory as "alice" - this should be allowed (by the policy - group)
    ugi = UserGroupInformation.createUserForTesting("alice", new String[] { "IT" });
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
            Assert.assertTrue(iter.hasNext());

            fs.close();
            return null;
        }
    });

    // Now try to read the directory as unknown user "eve" - this should not be allowed
    ugi = UserGroupInformation.createUserForTesting("eve", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // List the files in the directory
            try {
                RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
                Assert.assertTrue(iter.hasNext());
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });

    // Now try to read the directory as known user "dave" - this should not be allowed, as he doesn't have the correct permissions
    ugi = UserGroupInformation.createUserForTesting("dave", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // List the files in the directory
            try {
                RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
                Assert.assertTrue(iter.hasNext());
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });
}
From source file:org.apache.coheigea.bigdata.hdfs.ranger.HDFSRangerTest.java
License:Apache License
@org.junit.Test
public void readTestUsingTagPolicy() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir6/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Change permissions to read-only
    fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));

    // Now try to read the file as "bob" - this should be allowed (by the policy - user)
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("bob", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));

            fs.close();
            return null;
        }
    });

    // Now try to read the file as "alice" - this should be allowed (by the policy - group)
    ugi = UserGroupInformation.createUserForTesting("alice", new String[] { "IT" });
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));

            fs.close();
            return null;
        }
    });

    // Now try to read the file as unknown user "eve" - this should not be allowed
    ugi = UserGroupInformation.createUserForTesting("eve", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Read the file
            try {
                fs.open(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });

    // Now try to read the file as known user "dave" - this should not be allowed, as he doesn't have the correct permissions
    ugi = UserGroupInformation.createUserForTesting("dave", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Read the file
            try {
                fs.open(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });
}
From source file:org.apache.eagle.service.security.hdfs.HDFSFileSystem.java
License:Apache License
/**
 * Browses the files for the specific path.
 *
 * @param filePath
 * @return listOfFiles
 * @throws Exception
 */
public List<FileStatus> browse(String filePath) throws Exception {
    FileSystem hdfsFileSystem = null;
    FileStatus[] listStatus;
    try {
        hdfsFileSystem = getFileSystem();
        Path path = new Path(filePath);
        listStatus = hdfsFileSystem.listStatus(path);
    } catch (Exception ex) {
        LOG.error(" Exception when browsing files for the path " + filePath, ex.getMessage());
        throw new Exception(" Exception When browsing Files in HDFS .. Message : " + ex.getMessage());
    } finally {
        // Close the file system
        if (hdfsFileSystem != null) {
            hdfsFileSystem.close();
        }
    }
    return Arrays.asList(listStatus);
}
From source file:org.apache.falcon.entity.DatasourceHelper.java
License:Apache License
/**
 * Fetch the password from file.
 *
 * @param passwordFilePath
 * @return
 * @throws FalconException
 */
private static String fetchPasswordInfoFromFile(String passwordFilePath) throws FalconException {
    try {
        Path path = new Path(passwordFilePath);
        FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(path.toUri());
        if (!fs.exists(path)) {
            throw new IOException("The password file does not exist! ");
        }
        if (!fs.isFile(path)) {
            throw new IOException("The password file cannot be a directory! ");
        }
        InputStream is = fs.open(path);
        StringWriter writer = new StringWriter();
        try {
            IOUtils.copy(is, writer);
            return writer.toString();
        } finally {
            IOUtils.closeQuietly(is);
            IOUtils.closeQuietly(writer);
            fs.close();
        }
    } catch (IOException ioe) {
        LOG.error("Error reading password file from HDFS : " + ioe);
        throw new FalconException(ioe);
    }
}
From source file:org.apache.flink.yarn.AbstractYarnClusterDescriptor.java
License:Apache License
public ApplicationReport startAppMaster(JobGraph jobGraph, YarnClient yarnClient, YarnClientApplication yarnApplication) throws Exception { // ------------------ Set default file system scheme ------------------------- try {/*ww w.java2 s .com*/ org.apache.flink.core.fs.FileSystem.setDefaultScheme(flinkConfiguration); } catch (IOException e) { throw new IOException("Error while setting the default " + "filesystem scheme from configuration.", e); } // initialize file system // Copy the application master jar to the filesystem // Create a local resource to point to the destination jar path final FileSystem fs = FileSystem.get(conf); // hard coded check for the GoogleHDFS client because its not overriding the getScheme() method. if (!fs.getClass().getSimpleName().equals("GoogleHadoopFileSystem") && fs.getScheme().startsWith("file")) { LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the " + "specified Hadoop configuration path is wrong and the system is using the default Hadoop configuration values." + "The Flink YARN client needs to store its files in a distributed file system"); } ApplicationSubmissionContext appContext = yarnApplication.getApplicationSubmissionContext(); Set<File> effectiveShipFiles = new HashSet<>(shipFiles.size()); for (File file : shipFiles) { effectiveShipFiles.add(file.getAbsoluteFile()); } //check if there is a logback or log4j file File logbackFile = new File(configurationDirectory + File.separator + CONFIG_FILE_LOGBACK_NAME); final boolean hasLogback = logbackFile.exists(); if (hasLogback) { effectiveShipFiles.add(logbackFile); } File log4jFile = new File(configurationDirectory + File.separator + CONFIG_FILE_LOG4J_NAME); final boolean hasLog4j = log4jFile.exists(); if (hasLog4j) { effectiveShipFiles.add(log4jFile); if (hasLogback) { // this means there is already a logback configuration file --> fail LOG.warn("The configuration directory ('" + configurationDirectory + "') contains both LOG4J and " + "Logback configuration files. Please delete or rename one of them."); } } addLibFolderToShipFiles(effectiveShipFiles); // add the user jar to the classpath of the to-be-created cluster if (userJarFiles != null) { effectiveShipFiles.addAll(userJarFiles); } // Set-up ApplicationSubmissionContext for the application final ApplicationId appId = appContext.getApplicationId(); // ------------------ Add Zookeeper namespace to local flinkConfiguraton ------ String zkNamespace = getZookeeperNamespace(); // no user specified cli argument for namespace? if (zkNamespace == null || zkNamespace.isEmpty()) { // namespace defined in config? else use applicationId as default. 
zkNamespace = flinkConfiguration.getString(HighAvailabilityOptions.HA_CLUSTER_ID, String.valueOf(appId)); setZookeeperNamespace(zkNamespace); } flinkConfiguration.setString(HighAvailabilityOptions.HA_CLUSTER_ID, zkNamespace); if (HighAvailabilityMode.isHighAvailabilityModeActivated(flinkConfiguration)) { // activate re-execution of failed applications appContext.setMaxAppAttempts(flinkConfiguration.getInteger(ConfigConstants.YARN_APPLICATION_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS)); activateHighAvailabilitySupport(appContext); } else { // set number of application retries to 1 in the default case appContext .setMaxAppAttempts(flinkConfiguration.getInteger(ConfigConstants.YARN_APPLICATION_ATTEMPTS, 1)); } // local resource map for Yarn final Map<String, LocalResource> localResources = new HashMap<>(2 + effectiveShipFiles.size()); // list of remote paths (after upload) final List<Path> paths = new ArrayList<>(2 + effectiveShipFiles.size()); // classpath assembler final StringBuilder classPathBuilder = new StringBuilder(); // ship list that enables reuse of resources for task manager containers StringBuilder envShipFileList = new StringBuilder(); // upload and register ship files for (File shipFile : effectiveShipFiles) { LocalResource shipResources = Records.newRecord(LocalResource.class); Path shipLocalPath = new Path("file://" + shipFile.getAbsolutePath()); Path remotePath = Utils.setupLocalResource(fs, appId.toString(), shipLocalPath, shipResources, fs.getHomeDirectory()); paths.add(remotePath); localResources.put(shipFile.getName(), shipResources); if (shipFile.isDirectory()) { // add directories to the classpath java.nio.file.Path shipPath = shipFile.toPath(); final java.nio.file.Path parentPath = shipPath.getParent(); Files.walkFileTree(shipPath, new SimpleFileVisitor<java.nio.file.Path>() { @Override public FileVisitResult preVisitDirectory(java.nio.file.Path dir, BasicFileAttributes attrs) throws IOException { super.preVisitDirectory(dir, attrs); java.nio.file.Path relativePath = parentPath.relativize(dir); classPathBuilder.append(relativePath).append(File.separator).append("*") .append(File.pathSeparator); return FileVisitResult.CONTINUE; } }); } else { // add files to the classpath classPathBuilder.append(shipFile.getName()).append(File.pathSeparator); } envShipFileList.append(remotePath).append(","); } // Setup jar for ApplicationMaster LocalResource appMasterJar = Records.newRecord(LocalResource.class); LocalResource flinkConf = Records.newRecord(LocalResource.class); Path remotePathJar = Utils.setupLocalResource(fs, appId.toString(), flinkJarPath, appMasterJar, fs.getHomeDirectory()); Path remotePathConf = Utils.setupLocalResource(fs, appId.toString(), flinkConfigurationPath, flinkConf, fs.getHomeDirectory()); localResources.put("flink.jar", appMasterJar); localResources.put("flink-conf.yaml", flinkConf); paths.add(remotePathJar); classPathBuilder.append("flink.jar").append(File.pathSeparator); paths.add(remotePathConf); classPathBuilder.append("flink-conf.yaml").append(File.pathSeparator); // write job graph to tmp file and add it to local resource // TODO: server use user main method to generate job graph if (jobGraph != null) { try { File fp = File.createTempFile(appId.toString(), null); fp.deleteOnExit(); try (FileOutputStream output = new FileOutputStream(fp); ObjectOutputStream obOutput = new ObjectOutputStream(output);) { obOutput.writeObject(jobGraph); } LocalResource jobgraph = Records.newRecord(LocalResource.class); Path remoteJobGraph = 
Utils.setupLocalResource(fs, appId.toString(), new Path(fp.toURI()), jobgraph, fs.getHomeDirectory()); localResources.put("job.graph", jobgraph); paths.add(remoteJobGraph); classPathBuilder.append("job.graph").append(File.pathSeparator); } catch (Exception e) { LOG.warn("Add job graph to local resource fail"); throw e; } } sessionFilesDir = new Path(fs.getHomeDirectory(), ".flink/" + appId.toString() + "/"); FsPermission permission = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE); fs.setPermission(sessionFilesDir, permission); // set permission for path. //To support Yarn Secure Integration Test Scenario //In Integration test setup, the Yarn containers created by YarnMiniCluster does not have the Yarn site XML //and KRB5 configuration files. We are adding these files as container local resources for the container //applications (JM/TMs) to have proper secure cluster setup Path remoteKrb5Path = null; Path remoteYarnSiteXmlPath = null; boolean hasKrb5 = false; if (System.getenv("IN_TESTS") != null) { String krb5Config = System.getProperty("java.security.krb5.conf"); if (krb5Config != null && krb5Config.length() != 0) { File krb5 = new File(krb5Config); LOG.info("Adding KRB5 configuration {} to the AM container local resource bucket", krb5.getAbsolutePath()); LocalResource krb5ConfResource = Records.newRecord(LocalResource.class); Path krb5ConfPath = new Path(krb5.getAbsolutePath()); remoteKrb5Path = Utils.setupLocalResource(fs, appId.toString(), krb5ConfPath, krb5ConfResource, fs.getHomeDirectory()); localResources.put(Utils.KRB5_FILE_NAME, krb5ConfResource); File f = new File(System.getenv("YARN_CONF_DIR"), Utils.YARN_SITE_FILE_NAME); LOG.info("Adding Yarn configuration {} to the AM container local resource bucket", f.getAbsolutePath()); LocalResource yarnConfResource = Records.newRecord(LocalResource.class); Path yarnSitePath = new Path(f.getAbsolutePath()); remoteYarnSiteXmlPath = Utils.setupLocalResource(fs, appId.toString(), yarnSitePath, yarnConfResource, fs.getHomeDirectory()); localResources.put(Utils.YARN_SITE_FILE_NAME, yarnConfResource); hasKrb5 = true; } } // setup security tokens LocalResource keytabResource = null; Path remotePathKeytab = null; String keytab = flinkConfiguration.getString(SecurityOptions.KERBEROS_LOGIN_KEYTAB); if (keytab != null) { LOG.info("Adding keytab {} to the AM container local resource bucket", keytab); keytabResource = Records.newRecord(LocalResource.class); Path keytabPath = new Path(keytab); remotePathKeytab = Utils.setupLocalResource(fs, appId.toString(), keytabPath, keytabResource, fs.getHomeDirectory()); localResources.put(Utils.KEYTAB_FILE_NAME, keytabResource); } final ContainerLaunchContext amContainer = setupApplicationMasterContainer(hasLogback, hasLog4j, hasKrb5); if (UserGroupInformation.isSecurityEnabled() && keytab == null) { //set tokens only when keytab is not provided LOG.info("Adding delegation token to the AM container.."); Utils.setTokensFor(amContainer, paths, conf); } amContainer.setLocalResources(localResources); fs.close(); // Setup CLASSPATH and environment variables for ApplicationMaster final Map<String, String> appMasterEnv = new HashMap<>(); // set user specified app master environment variables appMasterEnv.putAll(Utils.getEnvironmentVariables(ConfigConstants.YARN_APPLICATION_MASTER_ENV_PREFIX, flinkConfiguration)); // set Flink app class path appMasterEnv.put(YarnConfigKeys.ENV_FLINK_CLASSPATH, classPathBuilder.toString()); // set Flink on YARN internal configuration values 
appMasterEnv.put(YarnConfigKeys.ENV_TM_COUNT, String.valueOf(taskManagerCount)); appMasterEnv.put(YarnConfigKeys.ENV_TM_MEMORY, String.valueOf(taskManagerMemoryMb)); appMasterEnv.put(YarnConfigKeys.FLINK_JAR_PATH, remotePathJar.toString()); appMasterEnv.put(YarnConfigKeys.ENV_APP_ID, appId.toString()); appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_HOME_DIR, fs.getHomeDirectory().toString()); appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_SHIP_FILES, envShipFileList.toString()); appMasterEnv.put(YarnConfigKeys.ENV_SLOTS, String.valueOf(slots)); appMasterEnv.put(YarnConfigKeys.ENV_DETACHED, String.valueOf(detached)); appMasterEnv.put(YarnConfigKeys.ENV_ZOOKEEPER_NAMESPACE, getZookeeperNamespace()); // https://github.com/apache/hadoop/blob/trunk/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnApplicationSecurity.md#identity-on-an-insecure-cluster-hadoop_user_name appMasterEnv.put(YarnConfigKeys.ENV_HADOOP_USER_NAME, UserGroupInformation.getCurrentUser().getUserName()); if (keytabResource != null) { appMasterEnv.put(YarnConfigKeys.KEYTAB_PATH, remotePathKeytab.toString()); String principal = flinkConfiguration.getString(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL); appMasterEnv.put(YarnConfigKeys.KEYTAB_PRINCIPAL, principal); } //To support Yarn Secure Integration Test Scenario if (remoteYarnSiteXmlPath != null && remoteKrb5Path != null) { appMasterEnv.put(YarnConfigKeys.ENV_YARN_SITE_XML_PATH, remoteYarnSiteXmlPath.toString()); appMasterEnv.put(YarnConfigKeys.ENV_KRB5_PATH, remoteKrb5Path.toString()); } if (dynamicPropertiesEncoded != null) { appMasterEnv.put(YarnConfigKeys.ENV_DYNAMIC_PROPERTIES, dynamicPropertiesEncoded); } // set classpath from YARN configuration Utils.setupYarnClassPath(conf, appMasterEnv); amContainer.setEnvironment(appMasterEnv); // Set up resource type requirements for ApplicationMaster Resource capability = Records.newRecord(Resource.class); capability.setMemory(jobManagerMemoryMb); capability.setVirtualCores(1); String name; if (customName == null) { name = "Flink session with " + taskManagerCount + " TaskManagers"; if (detached) { name += " (detached)"; } } else { name = customName; } appContext.setApplicationName(name); appContext.setApplicationType("Apache Flink"); appContext.setAMContainerSpec(amContainer); appContext.setResource(capability); if (yarnQueue != null) { appContext.setQueue(yarnQueue); } setApplicationTags(appContext); // add a hook to clean up in case deployment fails Thread deploymentFailureHook = new DeploymentFailureHook(yarnClient, yarnApplication); Runtime.getRuntime().addShutdownHook(deploymentFailureHook); LOG.info("Submitting application master " + appId); yarnClient.submitApplication(appContext); LOG.info("Waiting for the cluster to be allocated"); final long startTime = System.currentTimeMillis(); ApplicationReport report; YarnApplicationState lastAppState = YarnApplicationState.NEW; loop: while (true) { try { report = yarnClient.getApplicationReport(appId); } catch (IOException e) { throw new YarnDeploymentException("Failed to deploy the cluster.", e); } YarnApplicationState appState = report.getYarnApplicationState(); LOG.debug("Application State: {}", appState); switch (appState) { case FAILED: case FINISHED: //TODO: the finished state may be valid in flip-6 case KILLED: throw new YarnDeploymentException("The YARN application unexpectedly switched to state " + appState + " during deployment. 
\n" + "Diagnostics from YARN: " + report.getDiagnostics() + "\n" + "If log aggregation is enabled on your cluster, use this command to further investigate the issue:\n" + "yarn logs -applicationId " + appId); //break .. case RUNNING: LOG.info("YARN application has been deployed successfully."); break loop; default: if (appState != lastAppState) { LOG.info("Deploying cluster, current state " + appState); } if (System.currentTimeMillis() - startTime > 60000) { LOG.info( "Deployment took more than 60 seconds. Please check if the requested resources are available in the YARN cluster"); } } lastAppState = appState; Thread.sleep(250); } // print the application id for user to cancel themselves. if (isDetachedMode()) { LOG.info("The Flink YARN client has been started in detached mode. In order to stop " + "Flink on YARN, use the following command or a YARN web interface to stop " + "it:\nyarn application -kill " + appId + "\nPlease also note that the " + "temporary files of the YARN session in the home directoy will not be removed."); } // since deployment was successful, remove the hook try { Runtime.getRuntime().removeShutdownHook(deploymentFailureHook); } catch (IllegalStateException e) { // we're already in the shut down hook. } return report; }
From source file:org.apache.flink.yarn.Client.java
License:Apache License
public void run(String[] args) throws Exception { if (UserGroupInformation.isSecurityEnabled()) { throw new RuntimeException("Flink YARN client does not have security support right now." + "File a bug, we will fix it asap"); }/*from w w w .j a v a 2 s .c o m*/ //Utils.logFilesInCurrentDirectory(LOG); // // Command Line Options // Options options = new Options(); options.addOption(VERBOSE); options.addOption(FLINK_CONF_DIR); options.addOption(FLINK_JAR); options.addOption(JM_MEMORY); options.addOption(TM_MEMORY); options.addOption(TM_CORES); options.addOption(CONTAINER); options.addOption(GEN_CONF); options.addOption(QUEUE); options.addOption(QUERY); options.addOption(SHIP_PATH); CommandLineParser parser = new PosixParser(); CommandLine cmd = null; try { cmd = parser.parse(options, args); } catch (MissingOptionException moe) { System.out.println(moe.getMessage()); printUsage(); System.exit(1); } if (System.getProperty("log4j.configuration") == null) { Logger root = Logger.getRootLogger(); root.removeAllAppenders(); PatternLayout layout = new PatternLayout("%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n"); ConsoleAppender appender = new ConsoleAppender(layout, "System.err"); root.addAppender(appender); if (cmd.hasOption(VERBOSE.getOpt())) { root.setLevel(Level.DEBUG); LOG.debug("CLASSPATH: " + System.getProperty("java.class.path")); } else { root.setLevel(Level.INFO); } } // Jar Path Path localJarPath; if (cmd.hasOption(FLINK_JAR.getOpt())) { String userPath = cmd.getOptionValue(FLINK_JAR.getOpt()); if (!userPath.startsWith("file://")) { userPath = "file://" + userPath; } localJarPath = new Path(userPath); } else { localJarPath = new Path( "file://" + Client.class.getProtectionDomain().getCodeSource().getLocation().getPath()); } if (cmd.hasOption(GEN_CONF.getOpt())) { LOG.info("Placing default configuration in current directory"); File outFile = generateDefaultConf(localJarPath); LOG.info("File written to " + outFile.getAbsolutePath()); System.exit(0); } // Conf Path Path confPath = null; String confDirPath = ""; if (cmd.hasOption(FLINK_CONF_DIR.getOpt())) { confDirPath = cmd.getOptionValue(FLINK_CONF_DIR.getOpt()) + "/"; File confFile = new File(confDirPath + CONFIG_FILE_NAME); if (!confFile.exists()) { LOG.fatal("Unable to locate configuration file in " + confFile); System.exit(1); } confPath = new Path(confFile.getAbsolutePath()); } else { System.out.println("No configuration file has been specified"); // no configuration path given. 
// -> see if there is one in the current directory File currDir = new File("."); File[] candidates = currDir.listFiles(new FilenameFilter() { @Override public boolean accept(final File dir, final String name) { return name != null && name.endsWith(".yaml"); } }); if (candidates == null || candidates.length == 0) { System.out.println( "No configuration file has been found in current directory.\n" + "Copying default."); File outFile = generateDefaultConf(localJarPath); confPath = new Path(outFile.toURI()); } else { if (candidates.length > 1) { System.out.println("Multiple .yaml configuration files were found in the current directory\n" + "Please specify one explicitly"); System.exit(1); } else if (candidates.length == 1) { confPath = new Path(candidates[0].toURI()); } } } List<File> shipFiles = new ArrayList<File>(); // path to directory to ship if (cmd.hasOption(SHIP_PATH.getOpt())) { String shipPath = cmd.getOptionValue(SHIP_PATH.getOpt()); File shipDir = new File(shipPath); if (shipDir.isDirectory()) { shipFiles = new ArrayList<File>(Arrays.asList(shipDir.listFiles(new FilenameFilter() { @Override public boolean accept(File dir, String name) { return !(name.equals(".") || name.equals("..")); } }))); } else { LOG.warn("Ship directory is not a directory!"); } } boolean hasLog4j = false; //check if there is a log4j file if (confDirPath.length() > 0) { File l4j = new File(confDirPath + "/log4j.properties"); if (l4j.exists()) { shipFiles.add(l4j); hasLog4j = true; } } // queue String queue = "default"; if (cmd.hasOption(QUEUE.getOpt())) { queue = cmd.getOptionValue(QUEUE.getOpt()); } // JobManager Memory int jmMemory = 512; if (cmd.hasOption(JM_MEMORY.getOpt())) { jmMemory = Integer.valueOf(cmd.getOptionValue(JM_MEMORY.getOpt())); } // Task Managers memory int tmMemory = 1024; if (cmd.hasOption(TM_MEMORY.getOpt())) { tmMemory = Integer.valueOf(cmd.getOptionValue(TM_MEMORY.getOpt())); } // Task Managers vcores int tmCores = 1; if (cmd.hasOption(TM_CORES.getOpt())) { tmCores = Integer.valueOf(cmd.getOptionValue(TM_CORES.getOpt())); } Utils.getFlinkConfiguration(confPath.toUri().getPath()); int jmPort = GlobalConfiguration.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 0); if (jmPort == 0) { LOG.warn("Unable to find job manager port in configuration!"); jmPort = ConfigConstants.DEFAULT_JOB_MANAGER_IPC_PORT; } conf = Utils.initializeYarnConfiguration(); // intialize HDFS LOG.info("Copy App Master jar from local filesystem and add to local environment"); // Copy the application master jar to the filesystem // Create a local resource to point to the destination jar path final FileSystem fs = FileSystem.get(conf); if (fs.getScheme().startsWith("file")) { LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the " + "specified Hadoop configuration path is wrong and the sytem is using the default Hadoop configuration values." 
+ "The Flink YARN client needs to store its files in a distributed file system"); } // Create yarnClient final YarnClient yarnClient = YarnClient.createYarnClient(); yarnClient.init(conf); yarnClient.start(); // Query cluster for metrics if (cmd.hasOption(QUERY.getOpt())) { showClusterMetrics(yarnClient); } if (!cmd.hasOption(CONTAINER.getOpt())) { LOG.fatal("Missing required argument " + CONTAINER.getOpt()); printUsage(); yarnClient.stop(); System.exit(1); } // TM Count final int taskManagerCount = Integer.valueOf(cmd.getOptionValue(CONTAINER.getOpt())); System.out.println("Using values:"); System.out.println("\tContainer Count = " + taskManagerCount); System.out.println("\tJar Path = " + localJarPath.toUri().getPath()); System.out.println("\tConfiguration file = " + confPath.toUri().getPath()); System.out.println("\tJobManager memory = " + jmMemory); System.out.println("\tTaskManager memory = " + tmMemory); System.out.println("\tTaskManager cores = " + tmCores); // Create application via yarnClient YarnClientApplication app = yarnClient.createApplication(); GetNewApplicationResponse appResponse = app.getNewApplicationResponse(); Resource maxRes = appResponse.getMaximumResourceCapability(); if (tmMemory > maxRes.getMemory() || tmCores > maxRes.getVirtualCores()) { LOG.fatal("The cluster does not have the requested resources for the TaskManagers available!\n" + "Maximum Memory: " + maxRes.getMemory() + ", Maximum Cores: " + tmCores); yarnClient.stop(); System.exit(1); } if (jmMemory > maxRes.getMemory()) { LOG.fatal("The cluster does not have the requested resources for the JobManager available!\n" + "Maximum Memory: " + maxRes.getMemory()); yarnClient.stop(); System.exit(1); } int totalMemoryRequired = jmMemory + tmMemory * taskManagerCount; ClusterResourceDescription freeClusterMem = getCurrentFreeClusterResources(yarnClient); if (freeClusterMem.totalFreeMemory < totalMemoryRequired) { LOG.fatal("This YARN session requires " + totalMemoryRequired + "MB of memory in the cluster. 
" + "There are currently only " + freeClusterMem.totalFreeMemory + "MB available."); yarnClient.stop(); System.exit(1); } if (tmMemory > freeClusterMem.containerLimit) { LOG.fatal("The requested amount of memory for the TaskManagers (" + tmMemory + "MB) is more than " + "the largest possible YARN container: " + freeClusterMem.containerLimit); yarnClient.stop(); System.exit(1); } if (jmMemory > freeClusterMem.containerLimit) { LOG.fatal("The requested amount of memory for the JobManager (" + jmMemory + "MB) is more than " + "the largest possible YARN container: " + freeClusterMem.containerLimit); yarnClient.stop(); System.exit(1); } // respect custom JVM options in the YAML file final String javaOpts = GlobalConfiguration.getString(ConfigConstants.FLINK_JVM_OPTIONS, ""); // Set up the container launch context for the application master ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class); String amCommand = "$JAVA_HOME/bin/java" + " -Xmx" + Utils.calculateHeapSize(jmMemory) + "M " + javaOpts; if (hasLog4j) { amCommand += " -Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-log4j.log\" -Dlog4j.configuration=file:log4j.properties"; } amCommand += " org.apache.flink.yarn.ApplicationMaster" + " " + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stdout.log" + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stderr.log"; amContainer.setCommands(Collections.singletonList(amCommand)); System.err.println("amCommand=" + amCommand); // Set-up ApplicationSubmissionContext for the application ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext(); final ApplicationId appId = appContext.getApplicationId(); // Setup jar for ApplicationMaster LocalResource appMasterJar = Records.newRecord(LocalResource.class); LocalResource flinkConf = Records.newRecord(LocalResource.class); Path remotePathJar = Utils.setupLocalResource(conf, fs, appId.toString(), localJarPath, appMasterJar, fs.getHomeDirectory()); Path remotePathConf = Utils.setupLocalResource(conf, fs, appId.toString(), confPath, flinkConf, fs.getHomeDirectory()); Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(2); localResources.put("flink.jar", appMasterJar); localResources.put("flink-conf.yaml", flinkConf); // setup security tokens (code from apache storm) final Path[] paths = new Path[3 + shipFiles.size()]; StringBuffer envShipFileList = new StringBuffer(); // upload ship files for (int i = 0; i < shipFiles.size(); i++) { File shipFile = shipFiles.get(i); LocalResource shipResources = Records.newRecord(LocalResource.class); Path shipLocalPath = new Path("file://" + shipFile.getAbsolutePath()); paths[3 + i] = Utils.setupLocalResource(conf, fs, appId.toString(), shipLocalPath, shipResources, fs.getHomeDirectory()); localResources.put(shipFile.getName(), shipResources); envShipFileList.append(paths[3 + i]); if (i + 1 < shipFiles.size()) { envShipFileList.append(','); } } paths[0] = remotePathJar; paths[1] = remotePathConf; paths[2] = new Path(fs.getHomeDirectory(), ".flink/" + appId.toString() + "/"); FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL); fs.setPermission(paths[2], permission); // set permission for path. 
Utils.setTokensFor(amContainer, paths, this.conf); amContainer.setLocalResources(localResources); fs.close(); // Setup CLASSPATH for ApplicationMaster Map<String, String> appMasterEnv = new HashMap<String, String>(); Utils.setupEnv(conf, appMasterEnv); // set configuration values appMasterEnv.put(Client.ENV_TM_COUNT, String.valueOf(taskManagerCount)); appMasterEnv.put(Client.ENV_TM_CORES, String.valueOf(tmCores)); appMasterEnv.put(Client.ENV_TM_MEMORY, String.valueOf(tmMemory)); appMasterEnv.put(Client.FLINK_JAR_PATH, remotePathJar.toString()); appMasterEnv.put(Client.ENV_APP_ID, appId.toString()); appMasterEnv.put(Client.ENV_CLIENT_HOME_DIR, fs.getHomeDirectory().toString()); appMasterEnv.put(Client.ENV_CLIENT_SHIP_FILES, envShipFileList.toString()); appMasterEnv.put(Client.ENV_CLIENT_USERNAME, UserGroupInformation.getCurrentUser().getShortUserName()); amContainer.setEnvironment(appMasterEnv); // Set up resource type requirements for ApplicationMaster Resource capability = Records.newRecord(Resource.class); capability.setMemory(jmMemory); capability.setVirtualCores(1); appContext.setApplicationName("Flink"); // application name appContext.setAMContainerSpec(amContainer); appContext.setResource(capability); appContext.setQueue(queue); // file that we write into the conf/ dir containing the jobManager address. final File addrFile = new File(confDirPath + CliFrontend.JOBMANAGER_ADDRESS_FILE); Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { try { LOG.info("Killing the Flink-YARN application."); yarnClient.killApplication(appId); LOG.info("Deleting files in " + paths[2]); FileSystem shutFS = FileSystem.get(conf); shutFS.delete(paths[2], true); // delete conf and jar file. shutFS.close(); } catch (Exception e) { LOG.warn("Exception while killing the YARN application", e); } try { addrFile.delete(); } catch (Exception e) { LOG.warn("Exception while deleting the jobmanager address file", e); } LOG.info("YARN Client is shutting down"); yarnClient.stop(); } }); LOG.info("Submitting application master " + appId); yarnClient.submitApplication(appContext); ApplicationReport appReport = yarnClient.getApplicationReport(appId); YarnApplicationState appState = appReport.getYarnApplicationState(); boolean told = false; char[] el = { '/', '|', '\\', '-' }; int i = 0; while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED && appState != YarnApplicationState.FAILED) { if (!told && appState == YarnApplicationState.RUNNING) { System.err.println("Flink JobManager is now running on " + appReport.getHost() + ":" + jmPort); System.err.println("JobManager Web Interface: " + appReport.getTrackingUrl()); // write jobmanager connect information PrintWriter out = new PrintWriter(addrFile); out.println(appReport.getHost() + ":" + jmPort); out.close(); addrFile.setReadable(true, false); // readable for all. told = true; } if (!told) { System.err.print(el[i++] + "\r"); if (i == el.length) { i = 0; } Thread.sleep(500); // wait for the application to switch to RUNNING } else { Thread.sleep(5000); } appReport = yarnClient.getApplicationReport(appId); appState = appReport.getYarnApplicationState(); } LOG.info("Application " + appId + " finished with" + " state " + appState + " at " + appReport.getFinishTime()); if (appState == YarnApplicationState.FAILED || appState == YarnApplicationState.KILLED) { LOG.warn("Application failed. Diagnostics " + appReport.getDiagnostics()); } }
From source file:org.apache.flink.yarn.FlinkYarnClient.java
License:Apache License
/** * This method will block until the ApplicationMaster/JobManager have been * deployed on YARN./* w w w. j a v a 2 s . c o m*/ */ protected AbstractFlinkYarnCluster deployInternal() throws Exception { isReadyForDepoyment(); LOG.info("Using values:"); LOG.info("\tTaskManager count = {}", taskManagerCount); LOG.info("\tJobManager memory = {}", jobManagerMemoryMb); LOG.info("\tTaskManager memory = {}", taskManagerMemoryMb); // Create application via yarnClient yarnApplication = yarnClient.createApplication(); GetNewApplicationResponse appResponse = yarnApplication.getNewApplicationResponse(); // ------------------ Add dynamic properties to local flinkConfiguraton ------ List<Tuple2<String, String>> dynProperties = CliFrontend.getDynamicProperties(dynamicPropertiesEncoded); for (Tuple2<String, String> dynProperty : dynProperties) { flinkConfiguration.setString(dynProperty.f0, dynProperty.f1); } // ------------------ Check if the specified queue exists -------------- try { List<QueueInfo> queues = yarnClient.getAllQueues(); if (queues.size() > 0 && this.yarnQueue != null) { // check only if there are queues configured in yarn and for this session. boolean queueFound = false; for (QueueInfo queue : queues) { if (queue.getQueueName().equals(this.yarnQueue)) { queueFound = true; break; } } if (!queueFound) { String queueNames = ""; for (QueueInfo queue : queues) { queueNames += queue.getQueueName() + ", "; } LOG.warn("The specified queue '" + this.yarnQueue + "' does not exist. " + "Available queues: " + queueNames); } } else { LOG.debug("The YARN cluster does not have any queues configured"); } } catch (Throwable e) { LOG.warn("Error while getting queue information from YARN: " + e.getMessage()); if (LOG.isDebugEnabled()) { LOG.debug("Error details", e); } } // ------------------ Check if the YARN Cluster has the requested resources -------------- // the yarnMinAllocationMB specifies the smallest possible container allocation size. // all allocations below this value are automatically set to this value. final int yarnMinAllocationMB = conf.getInt("yarn.scheduler.minimum-allocation-mb", 0); if (jobManagerMemoryMb < yarnMinAllocationMB || taskManagerMemoryMb < yarnMinAllocationMB) { LOG.warn("The JobManager or TaskManager memory is below the smallest possible YARN Container size. " + "The value of 'yarn.scheduler.minimum-allocation-mb' is '" + yarnMinAllocationMB + "'. Please increase the memory size." + "YARN will allocate the smaller containers but the scheduler will account for the minimum-allocation-mb, maybe not all instances " + "you requested will start."); } // set the memory to minAllocationMB to do the next checks correctly if (jobManagerMemoryMb < yarnMinAllocationMB) { jobManagerMemoryMb = yarnMinAllocationMB; } if (taskManagerMemoryMb < yarnMinAllocationMB) { taskManagerMemoryMb = yarnMinAllocationMB; } Resource maxRes = appResponse.getMaximumResourceCapability(); final String NOTE = "Please check the 'yarn.scheduler.maximum-allocation-mb' and the 'yarn.nodemanager.resource.memory-mb' configuration values\n"; if (jobManagerMemoryMb > maxRes.getMemory()) { failSessionDuringDeployment(); throw new YarnDeploymentException( "The cluster does not have the requested resources for the JobManager available!\n" + "Maximum Memory: " + maxRes.getMemory() + "MB Requested: " + jobManagerMemoryMb + "MB. 
" + NOTE); } if (taskManagerMemoryMb > maxRes.getMemory()) { failSessionDuringDeployment(); throw new YarnDeploymentException( "The cluster does not have the requested resources for the TaskManagers available!\n" + "Maximum Memory: " + maxRes.getMemory() + " Requested: " + taskManagerMemoryMb + "MB. " + NOTE); } final String NOTE_RSC = "\nThe Flink YARN client will try to allocate the YARN session, but maybe not all TaskManagers are " + "connecting from the beginning because the resources are currently not available in the cluster. " + "The allocation might take more time than usual because the Flink YARN client needs to wait until " + "the resources become available."; int totalMemoryRequired = jobManagerMemoryMb + taskManagerMemoryMb * taskManagerCount; ClusterResourceDescription freeClusterMem = getCurrentFreeClusterResources(yarnClient); if (freeClusterMem.totalFreeMemory < totalMemoryRequired) { LOG.warn("This YARN session requires " + totalMemoryRequired + "MB of memory in the cluster. " + "There are currently only " + freeClusterMem.totalFreeMemory + "MB available." + NOTE_RSC); } if (taskManagerMemoryMb > freeClusterMem.containerLimit) { LOG.warn("The requested amount of memory for the TaskManagers (" + taskManagerMemoryMb + "MB) is more than " + "the largest possible YARN container: " + freeClusterMem.containerLimit + NOTE_RSC); } if (jobManagerMemoryMb > freeClusterMem.containerLimit) { LOG.warn( "The requested amount of memory for the JobManager (" + jobManagerMemoryMb + "MB) is more than " + "the largest possible YARN container: " + freeClusterMem.containerLimit + NOTE_RSC); } // ----------------- check if the requested containers fit into the cluster. int[] nmFree = Arrays.copyOf(freeClusterMem.nodeManagersFree, freeClusterMem.nodeManagersFree.length); // first, allocate the jobManager somewhere. if (!allocateResource(nmFree, jobManagerMemoryMb)) { LOG.warn("Unable to find a NodeManager that can fit the JobManager/Application master. " + "The JobManager requires " + jobManagerMemoryMb + "MB. NodeManagers available: " + Arrays.toString(freeClusterMem.nodeManagersFree) + NOTE_RSC); } // allocate TaskManagers for (int i = 0; i < taskManagerCount; i++) { if (!allocateResource(nmFree, taskManagerMemoryMb)) { LOG.warn("There is not enough memory available in the YARN cluster. " + "The TaskManager(s) require " + taskManagerMemoryMb + "MB each. 
" + "NodeManagers available: " + Arrays.toString(freeClusterMem.nodeManagersFree) + "\n" + "After allocating the JobManager (" + jobManagerMemoryMb + "MB) and (" + i + "/" + taskManagerCount + ") TaskManagers, " + "the following NodeManagers are available: " + Arrays.toString(nmFree) + NOTE_RSC); } } // ------------------ Prepare Application Master Container ------------------------------ // respect custom JVM options in the YAML file final String javaOpts = flinkConfiguration.getString(ConfigConstants.FLINK_JVM_OPTIONS, ""); String logbackFile = configurationDirectory + File.separator + FlinkYarnSessionCli.CONFIG_FILE_LOGBACK_NAME; boolean hasLogback = new File(logbackFile).exists(); String log4jFile = configurationDirectory + File.separator + FlinkYarnSessionCli.CONFIG_FILE_LOG4J_NAME; boolean hasLog4j = new File(log4jFile).exists(); if (hasLogback) { shipFiles.add(new File(logbackFile)); } if (hasLog4j) { shipFiles.add(new File(log4jFile)); } // Set up the container launch context for the application master ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class); String amCommand = "$JAVA_HOME/bin/java" + " -Xmx" + Utils.calculateHeapSize(jobManagerMemoryMb, flinkConfiguration) + "M " + javaOpts; if (hasLogback || hasLog4j) { amCommand += " -Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-main.log\""; } if (hasLogback) { amCommand += " -Dlogback.configurationFile=file:" + FlinkYarnSessionCli.CONFIG_FILE_LOGBACK_NAME; } if (hasLog4j) { amCommand += " -Dlog4j.configuration=file:" + FlinkYarnSessionCli.CONFIG_FILE_LOG4J_NAME; } amCommand += " " + ApplicationMaster.class.getName() + " " + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stdout.log" + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stderr.log"; amContainer.setCommands(Collections.singletonList(amCommand)); LOG.debug("Application Master start command: " + amCommand); // intialize HDFS // Copy the application master jar to the filesystem // Create a local resource to point to the destination jar path final FileSystem fs = FileSystem.get(conf); // hard coded check for the GoogleHDFS client because its not overriding the getScheme() method. if (!fs.getClass().getSimpleName().equals("GoogleHadoopFileSystem") && fs.getScheme().startsWith("file")) { LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the " + "specified Hadoop configuration path is wrong and the sytem is using the default Hadoop configuration values." 
+ "The Flink YARN client needs to store its files in a distributed file system"); } // Set-up ApplicationSubmissionContext for the application ApplicationSubmissionContext appContext = yarnApplication.getApplicationSubmissionContext(); appContext.setMaxAppAttempts(flinkConfiguration.getInteger(ConfigConstants.YARN_APPLICATION_ATTEMPTS, 1)); final ApplicationId appId = appContext.getApplicationId(); // Setup jar for ApplicationMaster LocalResource appMasterJar = Records.newRecord(LocalResource.class); LocalResource flinkConf = Records.newRecord(LocalResource.class); Path remotePathJar = Utils.setupLocalResource(conf, fs, appId.toString(), flinkJarPath, appMasterJar, fs.getHomeDirectory()); Path remotePathConf = Utils.setupLocalResource(conf, fs, appId.toString(), flinkConfigurationPath, flinkConf, fs.getHomeDirectory()); Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(2); localResources.put("flink.jar", appMasterJar); localResources.put("flink-conf.yaml", flinkConf); // setup security tokens (code from apache storm) final Path[] paths = new Path[2 + shipFiles.size()]; StringBuilder envShipFileList = new StringBuilder(); // upload ship files for (int i = 0; i < shipFiles.size(); i++) { File shipFile = shipFiles.get(i); LocalResource shipResources = Records.newRecord(LocalResource.class); Path shipLocalPath = new Path("file://" + shipFile.getAbsolutePath()); paths[2 + i] = Utils.setupLocalResource(conf, fs, appId.toString(), shipLocalPath, shipResources, fs.getHomeDirectory()); localResources.put(shipFile.getName(), shipResources); envShipFileList.append(paths[2 + i]); if (i + 1 < shipFiles.size()) { envShipFileList.append(','); } } paths[0] = remotePathJar; paths[1] = remotePathConf; sessionFilesDir = new Path(fs.getHomeDirectory(), ".flink/" + appId.toString() + "/"); FsPermission permission = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE); fs.setPermission(sessionFilesDir, permission); // set permission for path. 
Utils.setTokensFor(amContainer, paths, conf); amContainer.setLocalResources(localResources); fs.close(); // Setup CLASSPATH for ApplicationMaster Map<String, String> appMasterEnv = new HashMap<String, String>(); Utils.setupEnv(conf, appMasterEnv); // set configuration values appMasterEnv.put(FlinkYarnClient.ENV_TM_COUNT, String.valueOf(taskManagerCount)); appMasterEnv.put(FlinkYarnClient.ENV_TM_MEMORY, String.valueOf(taskManagerMemoryMb)); appMasterEnv.put(FlinkYarnClient.FLINK_JAR_PATH, remotePathJar.toString()); appMasterEnv.put(FlinkYarnClient.ENV_APP_ID, appId.toString()); appMasterEnv.put(FlinkYarnClient.ENV_CLIENT_HOME_DIR, fs.getHomeDirectory().toString()); appMasterEnv.put(FlinkYarnClient.ENV_CLIENT_SHIP_FILES, envShipFileList.toString()); appMasterEnv.put(FlinkYarnClient.ENV_CLIENT_USERNAME, UserGroupInformation.getCurrentUser().getShortUserName()); appMasterEnv.put(FlinkYarnClient.ENV_SLOTS, String.valueOf(slots)); appMasterEnv.put(FlinkYarnClient.ENV_DETACHED, String.valueOf(detached)); appMasterEnv.put(FlinkYarnClient.ENV_STREAMING_MODE, String.valueOf(streamingMode)); if (dynamicPropertiesEncoded != null) { appMasterEnv.put(FlinkYarnClient.ENV_DYNAMIC_PROPERTIES, dynamicPropertiesEncoded); } amContainer.setEnvironment(appMasterEnv); // Set up resource type requirements for ApplicationMaster Resource capability = Records.newRecord(Resource.class); capability.setMemory(jobManagerMemoryMb); capability.setVirtualCores(1); String name; if (customName == null) { name = "Flink session with " + taskManagerCount + " TaskManagers"; if (detached) { name += " (detached)"; } } else { name = customName; } appContext.setApplicationName(name); // application name appContext.setApplicationType("Apache Flink"); appContext.setAMContainerSpec(amContainer); appContext.setResource(capability); if (yarnQueue != null) { appContext.setQueue(yarnQueue); } LOG.info("Submitting application master " + appId); yarnClient.submitApplication(appContext); LOG.info("Waiting for the cluster to be allocated"); int waittime = 0; loop: while (true) { ApplicationReport report = yarnClient.getApplicationReport(appId); YarnApplicationState appState = report.getYarnApplicationState(); switch (appState) { case FAILED: case FINISHED: case KILLED: throw new YarnDeploymentException("The YARN application unexpectedly switched to state " + appState + " during deployment. \n" + "Diagnostics from YARN: " + report.getDiagnostics() + "\n" + "If log aggregation is enabled on your cluster, use this command to further invesitage the issue:\n" + "yarn logs -applicationId " + appId); //break .. case RUNNING: LOG.info("YARN application has been deployed successfully."); break loop; default: LOG.info("Deploying cluster, current state " + appState); if (waittime > 60000) { LOG.info( "Deployment took more than 60 seconds. Please check if the requested resources are available in the YARN cluster"); } } waittime += 1000; Thread.sleep(1000); } // the Flink cluster is deployed in YARN. Represent cluster return new FlinkYarnCluster(yarnClient, appId, conf, flinkConfiguration, sessionFilesDir, detached); }