Example usage for org.apache.hadoop.fs FileSystem close

Introduction

This page collects example usages of org.apache.hadoop.fs.FileSystem.close() from open-source projects.

Prototype

@Override
public void close() throws IOException 

Document

Close this FileSystem instance.
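FileSystem implements java.io.Closeable, so close() typically appears in a finally block (as in the usage examples below) or in a try-with-resources statement. Here is a minimal sketch of both patterns; the URI and path are placeholders, not values from any example below:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        URI uri = URI.create("hdfs://namenode:8020/"); // placeholder cluster address

        // Pattern 1: explicit close() in a finally block, as in most examples below.
        FileSystem fs = FileSystem.newInstance(uri, conf);
        try {
            fs.exists(new Path("/tmp/example"));
        } finally {
            fs.close();
        }

        // Pattern 2: try-with-resources (Java 7+) calls close() automatically.
        try (FileSystem fs2 = FileSystem.newInstance(uri, conf)) {
            fs2.exists(new Path("/tmp/example"));
        }
    }
}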

Usage

From source file:com.datatorrent.stram.cli.DTCli.java

License:Apache License

@SuppressWarnings("unused")
private StramAppLauncher getStramAppLauncher(String jarfileUri, Configuration config, boolean ignorePom)
        throws Exception {
    URI uri = new URI(jarfileUri);
    String scheme = uri.getScheme();
    StramAppLauncher appLauncher = null;
    if (scheme == null || scheme.equals("file")) {
        File jf = new File(uri.getPath());
        appLauncher = new StramAppLauncher(jf, config);
    } else {
        FileSystem tmpFs = FileSystem.newInstance(uri, conf);
        try {
            Path path = new Path(uri.getPath());
            appLauncher = new StramAppLauncher(tmpFs, path, config);
        } finally {
            tmpFs.close();
        }
    }
    if (appLauncher != null) {
        if (verboseLevel > 0) {
            System.err.print(appLauncher.getMvnBuildClasspathOutput());
        }
        return appLauncher;
    } else {
        throw new CliException("Scheme " + scheme + " not supported.");
    }
}
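Note that this example, like most below, pairs close() with FileSystem.newInstance(uri, conf) rather than FileSystem.get(uri, conf): get() may return a cached instance shared across the JVM (subject to the fs.&lt;scheme&gt;.impl.disable.cache setting), so closing it can break unrelated code holding the same reference, while newInstance() always returns a fresh instance that is safe to close. A minimal sketch of the distinction, assuming the same imports as the sketch in the Document section above and a placeholder URI:

private static void cachingSketch(Configuration conf) throws IOException {
    URI uri = URI.create("hdfs://namenode:8020/"); // placeholder

    // FileSystem.get() may return a cached instance shared across the JVM;
    // closing it would close it for every other holder of the reference.
    FileSystem shared = FileSystem.get(uri, conf);

    // FileSystem.newInstance() always returns a fresh, privately owned instance,
    // which is why the finally block above can close it safely.
    FileSystem owned = FileSystem.newInstance(uri, conf);
    owned.close();
}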

From source file:com.datatorrent.stram.cli.DTCli.java

License:Apache License

private File copyToLocal(String[] files) throws IOException {
    File tmpDir = new File("/tmp/datatorrent/" + ManagementFactory.getRuntimeMXBean().getName());
    tmpDir.mkdirs();
    for (int i = 0; i < files.length; i++) {
        try {
            URI uri = new URI(files[i]);
            String scheme = uri.getScheme();
            if (scheme == null || scheme.equals("file")) {
                files[i] = uri.getPath();
            } else {
                FileSystem tmpFs = FileSystem.newInstance(uri, conf);
                try {
                    Path srcPath = new Path(uri.getPath());
                    Path dstPath = new Path(tmpDir.getAbsolutePath(), String.valueOf(i) + srcPath.getName());
                    tmpFs.copyToLocalFile(srcPath, dstPath);
                    files[i] = dstPath.toUri().getPath();
                } finally {
                    tmpFs.close();
                }
            }
        } catch (URISyntaxException ex) {
            throw new RuntimeException(ex);
        }
    }

    return tmpDir;
}

From source file:com.datatorrent.stram.client.StramAppLauncher.java

License:Apache License

/**
 * Submit application to the cluster and return the app id.
 * Sets the context class loader for application dependencies.
 *
 * @param appConfig
 * @return ApplicationId
 * @throws Exception
 */
public ApplicationId launchApp(AppFactory appConfig) throws Exception {
    loadDependencies();
    Configuration conf = propertiesBuilder.conf;
    conf.setEnum(StreamingApplication.ENVIRONMENT, StreamingApplication.Environment.CLUSTER);
    LogicalPlan dag = appConfig.createApp(propertiesBuilder);
    String hdfsTokenMaxLifeTime = conf.get(StramClientUtils.HDFS_TOKEN_MAX_LIFE_TIME);
    if (hdfsTokenMaxLifeTime != null && hdfsTokenMaxLifeTime.trim().length() > 0) {
        dag.setAttribute(LogicalPlan.HDFS_TOKEN_LIFE_TIME, Long.parseLong(hdfsTokenMaxLifeTime));
    }
    String rmTokenMaxLifeTime = conf.get(StramClientUtils.RM_TOKEN_MAX_LIFE_TIME);
    if (rmTokenMaxLifeTime != null && rmTokenMaxLifeTime.trim().length() > 0) {
        dag.setAttribute(LogicalPlan.RM_TOKEN_LIFE_TIME, Long.parseLong(rmTokenMaxLifeTime));
    }
    if (conf.get(StramClientUtils.KEY_TAB_FILE) != null) {
        dag.setAttribute(LogicalPlan.KEY_TAB_FILE, conf.get(StramClientUtils.KEY_TAB_FILE));
    } else if (conf.get(StramUserLogin.DT_AUTH_KEYTAB) != null) {
        Path localKeyTabPath = new Path(conf.get(StramUserLogin.DT_AUTH_KEYTAB));
        FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
        try {
            Path destPath = new Path(StramClientUtils.getDTDFSRootDir(fs, conf), localKeyTabPath.getName());
            if (!fs.exists(destPath)) {
                fs.copyFromLocalFile(false, false, localKeyTabPath, destPath);
            }
            dag.setAttribute(LogicalPlan.KEY_TAB_FILE, destPath.toString());
        } finally {
            fs.close();
        }
    }
    String tokenRefreshFactor = conf.get(StramClientUtils.TOKEN_ANTICIPATORY_REFRESH_FACTOR);
    if (tokenRefreshFactor != null && tokenRefreshFactor.trim().length() > 0) {
        dag.setAttribute(LogicalPlan.TOKEN_REFRESH_ANTICIPATORY_FACTOR, Double.parseDouble(tokenRefreshFactor));
    }
    StramClient client = new StramClient(conf, dag);
    try {
        client.start();
        LinkedHashSet<String> libjars = Sets.newLinkedHashSet();
        String libjarsCsv = conf.get(LIBJARS_CONF_KEY_NAME);
        if (libjarsCsv != null) {
            String[] jars = StringUtils.splitByWholeSeparator(libjarsCsv, StramClient.LIB_JARS_SEP);
            libjars.addAll(Arrays.asList(jars));
        }
        if (deployJars != null) {
            for (File deployJar : deployJars) {
                libjars.add(deployJar.getAbsolutePath());
            }
        }

        client.setResources(libjars);
        client.setFiles(conf.get(FILES_CONF_KEY_NAME));
        client.setArchives(conf.get(ARCHIVES_CONF_KEY_NAME));
        client.setOriginalAppId(conf.get(ORIGINAL_APP_ID));
        client.setQueueName(conf.get(QUEUE_NAME));
        client.startApplication();
        return client.getApplicationReport().getApplicationId();
    } finally {
        client.stop();
    }
}

From source file:com.datatorrent.stram.debug.TupleRecorderTest.java

License:Apache License

@Test
public void testRecorder() throws IOException {
    FileSystem fs = new LocalFileSystem();
    try {
        TupleRecorder recorder = new TupleRecorder(null, "application_test_id_1");
        recorder.getStorage().setBytesPerPartFile(4096);
        recorder.getStorage().setLocalMode(true);
        recorder.getStorage().setBasePath("file://" + testWorkDir.getAbsolutePath() + "/recordings");

        recorder.addInputPortInfo("ip1", "str1");
        recorder.addInputPortInfo("ip2", "str2");
        recorder.addInputPortInfo("ip3", "str3");
        recorder.addOutputPortInfo("op1", "str4");
        recorder.setup(null, null);

        recorder.beginWindow(1000);
        recorder.beginWindow(1000);
        recorder.beginWindow(1000);

        Tuple t1 = new Tuple();
        t1.key = "speed";
        t1.value = "5m/h";
        recorder.writeTuple(t1, "ip1");
        recorder.endWindow();
        Tuple t2 = new Tuple();
        t2.key = "speed";
        t2.value = "4m/h";
        recorder.writeTuple(t2, "ip3");
        recorder.endWindow();
        Tuple t3 = new Tuple();
        t3.key = "speed";
        t3.value = "6m/h";
        recorder.writeTuple(t3, "ip2");
        recorder.endWindow();

        recorder.beginWindow(1000);
        Tuple t4 = new Tuple();
        t4.key = "speed";
        t4.value = "2m/h";
        recorder.writeTuple(t4, "op1");
        recorder.endWindow();
        recorder.teardown();

        fs.initialize((new Path(recorder.getStorage().getBasePath()).toUri()), new Configuration());
        Path path;
        FSDataInputStream is;
        String line;
        BufferedReader br;

        path = new Path(recorder.getStorage().getBasePath(), FSPartFileCollection.INDEX_FILE);
        is = fs.open(path);
        br = new BufferedReader(new InputStreamReader(is));

        line = br.readLine();
        //    Assert.assertEquals("check index", "B:1000:T:0:part0.txt", line);
        Assert.assertTrue("check index", line.matches(
                "F:part0.txt:\\d+-\\d+:4:T:1000-1000:33:\\{\"3\":\"1\",\"1\":\"1\",\"0\":\"1\",\"2\":\"1\"\\}"));

        path = new Path(recorder.getStorage().getBasePath(), FSPartFileCollection.META_FILE);
        is = fs.open(path);
        br = new BufferedReader(new InputStreamReader(is));

        ObjectMapper mapper = new ObjectMapper();
        line = br.readLine();
        Assert.assertEquals("check version", "1.2", line);
        br.readLine(); // RecordInfo
        //RecordInfo ri = mapper.readValue(line, RecordInfo.class);
        line = br.readLine();
        PortInfo pi = mapper.readValue(line, PortInfo.class);
        Assert.assertEquals("port1", recorder.getPortInfoMap().get(pi.name).id, pi.id);
        Assert.assertEquals("port1", recorder.getPortInfoMap().get(pi.name).type, pi.type);
        line = br.readLine();
        pi = mapper.readValue(line, PortInfo.class);
        Assert.assertEquals("port2", recorder.getPortInfoMap().get(pi.name).id, pi.id);
        Assert.assertEquals("port2", recorder.getPortInfoMap().get(pi.name).type, pi.type);
        line = br.readLine();
        pi = mapper.readValue(line, PortInfo.class);
        Assert.assertEquals("port3", recorder.getPortInfoMap().get(pi.name).id, pi.id);
        Assert.assertEquals("port3", recorder.getPortInfoMap().get(pi.name).type, pi.type);
        line = br.readLine();
        pi = mapper.readValue(line, PortInfo.class);
        Assert.assertEquals("port4", recorder.getPortInfoMap().get(pi.name).id, pi.id);
        Assert.assertEquals("port4", recorder.getPortInfoMap().get(pi.name).type, pi.type);
        Assert.assertEquals("port size", 4, recorder.getPortInfoMap().size());
        //line = br.readLine();

        path = new Path(recorder.getStorage().getBasePath(), "part0.txt");
        is = fs.open(path);
        br = new BufferedReader(new InputStreamReader(is));

        line = br.readLine();
        Assert.assertTrue("check part0", line.startsWith("B:"));
        Assert.assertTrue("check part0", line.endsWith(":1000"));

        line = br.readLine();
        Assert.assertTrue("check part0 1", line.startsWith("T:"));
        Assert.assertTrue("check part0 1", line.endsWith(":0:30:{\"key\":\"speed\",\"value\":\"5m/h\"}"));

        line = br.readLine();
        Assert.assertTrue("check part0 2", line.startsWith("T:"));
        Assert.assertTrue("check part0 2", line.endsWith(":2:30:{\"key\":\"speed\",\"value\":\"4m/h\"}"));

        line = br.readLine();
        Assert.assertTrue("check part0 3", line.startsWith("T:"));
        Assert.assertTrue("check part0 3", line.endsWith(":1:30:{\"key\":\"speed\",\"value\":\"6m/h\"}"));

        line = br.readLine();
        Assert.assertTrue("check part0 4", line.startsWith("T:"));
        Assert.assertTrue("check part0 4", line.endsWith(":3:30:{\"key\":\"speed\",\"value\":\"2m/h\"}"));

        line = br.readLine();
        Assert.assertTrue("check part0 5", line.startsWith("E:"));
        Assert.assertTrue("check part0 5", line.endsWith(":1000"));
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    } finally {
        fs.close();
    }
}

From source file:com.datatorrent.stram.LaunchContainerRunnable.java

License:Apache License

/**
 * Connects to CM, sets up container launch context and eventually dispatches the container start request to the CM.
 */
@Override
public void run() {
    LOG.info("Setting up container launch context for containerid={}", container.getId());
    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);

    setClasspath(containerEnv);
    try {
        // propagate to replace node managers user name (effective in non-secure mode)
        containerEnv.put("HADOOP_USER_NAME", UserGroupInformation.getLoginUser().getUserName());
    } catch (Exception e) {
        LOG.error("Failed to retrieve principal name", e);
    }
    // Set the environment
    ctx.setEnvironment(containerEnv);
    ctx.setTokens(tokens);

    // Set the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    // add resources for child VM
    try {
        // child VM dependencies
        FileSystem fs = StramClientUtils.newFileSystemInstance(nmClient.getConfig());
        try {
            addFilesToLocalResources(LocalResourceType.FILE, dag.getAttributes().get(LogicalPlan.LIBRARY_JARS),
                    localResources, fs);
            String archives = dag.getAttributes().get(LogicalPlan.ARCHIVES);
            if (archives != null) {
                addFilesToLocalResources(LocalResourceType.ARCHIVE, archives, localResources, fs);
            }
            ctx.setLocalResources(localResources);
        } finally {
            fs.close();
        }
    } catch (IOException e) {
        LOG.error("Failed to prepare local resources.", e);
        return;
    }

    // Set the necessary command to execute on the allocated container
    List<CharSequence> vargs = getChildVMCommand(container.getId().toString());

    // Get final command
    StringBuilder command = new StringBuilder(1024);
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }
    LOG.info("Launching on node: {} command: {}", container.getNodeId(), command);

    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    ctx.setCommands(commands);

    nmClient.startContainerAsync(container, ctx);
}

From source file:com.datatorrent.stram.security.StramUserLogin.java

License:Apache License

public static long refreshTokens(long tokenLifeTime, String destinationDir, String destinationFile,
        final Configuration conf, String hdfsKeyTabFile, final Credentials credentials,
        final InetSocketAddress rmAddress, final boolean renewRMToken) throws IOException {
    long expiryTime = System.currentTimeMillis() + tokenLifeTime;
    //renew tokens
    final String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
    if (tokenRenewer == null || tokenRenewer.length() == 0) {
        throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
    }
    FileSystem fs = FileSystem.newInstance(conf);
    File keyTabFile;
    try {
        keyTabFile = FSUtil.copyToLocalFileSystem(fs, destinationDir, destinationFile, hdfsKeyTabFile, conf);
    } finally {
        fs.close();
    }
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
            UserGroupInformation.getCurrentUser().getUserName(), keyTabFile.getAbsolutePath());
    try {
        ugi.doAs(new PrivilegedExceptionAction<Object>() {
            @Override
            public Object run() throws Exception {
                FileSystem fs1 = FileSystem.newInstance(conf);
                YarnClient yarnClient = null;
                if (renewRMToken) {
                    yarnClient = YarnClient.createYarnClient();
                    yarnClient.init(conf);
                    yarnClient.start();
                }
                Credentials creds = new Credentials();
                try {
                    fs1.addDelegationTokens(tokenRenewer, creds);
                    if (renewRMToken) {
                        org.apache.hadoop.yarn.api.records.Token rmDelToken = yarnClient
                                .getRMDelegationToken(new Text(tokenRenewer));
                        Token<RMDelegationTokenIdentifier> rmToken = ConverterUtils.convertFromYarn(rmDelToken,
                                rmAddress);
                        creds.addToken(rmToken.getService(), rmToken);
                    }
                } finally {
                    fs1.close();
                    if (renewRMToken) {
                        yarnClient.stop();
                    }
                }
                credentials.addAll(creds);
                return null;
            }
        });
        UserGroupInformation.getCurrentUser().addCredentials(credentials);
    } catch (InterruptedException e) {
        LOG.error("Error while renewing tokens ", e);
        expiryTime = System.currentTimeMillis();
    } catch (IOException e) {
        LOG.error("Error while renewing tokens ", e);
        expiryTime = System.currentTimeMillis();
    }
    LOG.debug("number of tokens: {}", credentials.getAllTokens().size());
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.debug("updated token: {}", token);
    }
    keyTabFile.delete();
    return expiryTime;
}

From source file:com.datatorrent.stram.StramClient.java

License:Apache License

/**
 * Launch application for the dag represented by this client.
 *
 * @throws YarnException
 * @throws IOException
 */
public void startApplication() throws YarnException, IOException {
    Class<?>[] defaultClasses;

    if (applicationType.equals(YARN_APPLICATION_TYPE)) {
        //TODO restrict the security check to only check if security is enabled for webservices.
        if (UserGroupInformation.isSecurityEnabled()) {
            defaultClasses = DATATORRENT_SECURITY_CLASSES;
        } else {
            defaultClasses = DATATORRENT_CLASSES;
        }
    } else {
        throw new IllegalStateException(applicationType + " is not a valid application type.");
    }

    LinkedHashSet<String> localJarFiles = findJars(dag, defaultClasses);

    if (resources != null) {
        localJarFiles.addAll(resources);
    }

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    //GetClusterNodesRequest clusterNodesReq = Records.newRecord(GetClusterNodesRequest.class);
    //GetClusterNodesResponse clusterNodesResp = rmClient.clientRM.getClusterNodes(clusterNodesReq);
    //LOG.info("Got Cluster node info from ASM");
    //for (NodeReport node : clusterNodesResp.getNodeReports()) {
    //  LOG.info("Got node report from ASM for"
    //           + ", nodeId=" + node.getNodeId()
    //           + ", nodeAddress" + node.getHttpAddress()
    //           + ", nodeRackName" + node.getRackName()
    //           + ", nodeNumContainers" + node.getNumContainers()
    //           + ", nodeHealthStatus" + node.getHealthReport());
    //}
    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication newApp = yarnClient.createApplication();
    appId = newApp.getNewApplicationResponse().getApplicationId();

    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = newApp.getNewApplicationResponse().getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);
    int amMemory = dag.getMasterMemoryMB();
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    if (dag.getAttributes().get(LogicalPlan.APPLICATION_ID) == null) {
        dag.setAttribute(LogicalPlan.APPLICATION_ID, appId.toString());
    }

    // Create launch context for app master
    LOG.info("Setting up application submission context for ASM");
    ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);

    // set the application id
    appContext.setApplicationId(appId);
    // set the application name
    appContext.setApplicationName(dag.getValue(LogicalPlan.APPLICATION_NAME));
    appContext.setApplicationType(this.applicationType);
    if (YARN_APPLICATION_TYPE.equals(this.applicationType)) {
        //appContext.setMaxAppAttempts(1); // no retries until Stram is HA
    }

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // Setup security tokens
    // If security is enabled get ResourceManager and NameNode delegation tokens.
    // Set these tokens on the container so that they are sent as part of application submission.
    // This also sets them up for renewal by ResourceManager. The NameNode delegation rmToken
    // is also used by ResourceManager to fetch the jars from HDFS and set them up for the
    // application master launch.
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
        try {
            final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
            if (tokens != null) {
                for (Token<?> token : tokens) {
                    LOG.info("Got dt for " + fs.getUri() + "; " + token);
                }
            }
        } finally {
            fs.close();
        }

        addRMDelegationToken(tokenRenewer, credentials);

        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    // copy required jar files to dfs, to be localized for containers
    FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
    try {
        Path appsBasePath = new Path(StramClientUtils.getDTDFSRootDir(fs, conf), StramClientUtils.SUBDIR_APPS);
        Path appPath = new Path(appsBasePath, appId.toString());

        String libJarsCsv = copyFromLocal(fs, appPath, localJarFiles.toArray(new String[] {}));

        LOG.info("libjars: {}", libJarsCsv);
        dag.getAttributes().put(LogicalPlan.LIBRARY_JARS, libJarsCsv);
        LaunchContainerRunnable.addFilesToLocalResources(LocalResourceType.FILE, libJarsCsv, localResources,
                fs);

        if (archives != null) {
            String[] localFiles = archives.split(",");
            String archivesCsv = copyFromLocal(fs, appPath, localFiles);
            LOG.info("archives: {}", archivesCsv);
            dag.getAttributes().put(LogicalPlan.ARCHIVES, archivesCsv);
            LaunchContainerRunnable.addFilesToLocalResources(LocalResourceType.ARCHIVE, archivesCsv,
                    localResources, fs);
        }

        if (files != null) {
            String[] localFiles = files.split(",");
            String filesCsv = copyFromLocal(fs, appPath, localFiles);
            LOG.info("files: {}", filesCsv);
            dag.getAttributes().put(LogicalPlan.FILES, filesCsv);
            LaunchContainerRunnable.addFilesToLocalResources(LocalResourceType.FILE, filesCsv, localResources,
                    fs);
        }

        dag.getAttributes().put(LogicalPlan.APPLICATION_PATH, appPath.toString());
        if (dag.getAttributes()
                .get(OperatorContext.STORAGE_AGENT) == null) { /* which would be the most likely case */
            Path checkpointPath = new Path(appPath, LogicalPlan.SUBDIR_CHECKPOINTS);
            // use conf client side to pickup any proxy settings from dt-site.xml
            dag.setAttribute(OperatorContext.STORAGE_AGENT,
                    new FSStorageAgent(checkpointPath.toString(), conf));
        }
        if (dag.getAttributes().get(LogicalPlan.CONTAINER_OPTS_CONFIGURATOR) == null) {
            dag.setAttribute(LogicalPlan.CONTAINER_OPTS_CONFIGURATOR, new BasicContainerOptConfigurator());
        }

        // Set the log4j properties if needed
        if (!log4jPropFile.isEmpty()) {
            Path log4jSrc = new Path(log4jPropFile);
            Path log4jDst = new Path(appPath, "log4j.props");
            fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
            FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
            LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
            log4jRsrc.setType(LocalResourceType.FILE);
            log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
            log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
            log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
            log4jRsrc.setSize(log4jFileStatus.getLen());
            localResources.put("log4j.properties", log4jRsrc);
        }

        if (originalAppId != null) {
            Path origAppPath = new Path(appsBasePath, this.originalAppId);
            LOG.info("Restart from {}", origAppPath);
            copyInitialState(origAppPath);
        }

        // push logical plan to DFS location
        Path cfgDst = new Path(appPath, LogicalPlan.SER_FILE_NAME);
        FSDataOutputStream outStream = fs.create(cfgDst, true);
        LogicalPlan.write(this.dag, outStream);
        outStream.close();

        Path launchConfigDst = new Path(appPath, LogicalPlan.LAUNCH_CONFIG_FILE_NAME);
        outStream = fs.create(launchConfigDst, true);
        conf.writeXml(outStream);
        outStream.close();

        FileStatus topologyFileStatus = fs.getFileStatus(cfgDst);
        LocalResource topologyRsrc = Records.newRecord(LocalResource.class);
        topologyRsrc.setType(LocalResourceType.FILE);
        topologyRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        topologyRsrc.setResource(ConverterUtils.getYarnUrlFromURI(cfgDst.toUri()));
        topologyRsrc.setTimestamp(topologyFileStatus.getModificationTime());
        topologyRsrc.setSize(topologyFileStatus.getLen());
        localResources.put(LogicalPlan.SER_FILE_NAME, topologyRsrc);

        // Set local resource info into app master container launch context
        amContainer.setLocalResources(localResources);

        // Set the necessary security tokens as needed
        //amContainer.setContainerTokens(containerToken);
        // Set the env variables to be setup in the env where the application master will be run
        LOG.info("Set the environment for the application master");
        Map<String, String> env = new HashMap<String, String>();

        // Add application jar(s) location to classpath
        // At some point we should not be required to add
        // the hadoop specific classpaths to the env.
        // It should be provided out of the box.
        // For now setting all required classpaths including
        // the classpath to "." for the application jar(s)
        // including ${CLASSPATH} will duplicate the class path in app master, removing it for now
        //StringBuilder classPathEnv = new StringBuilder("${CLASSPATH}:./*");
        StringBuilder classPathEnv = new StringBuilder("./*");
        String classpath = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH);
        for (String c : StringUtils.isBlank(classpath) ? YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH
                : classpath.split(",")) {
            if (c.equals("$HADOOP_CLIENT_CONF_DIR")) {
                // SPOI-2501
                continue;
            }
            classPathEnv.append(':');
            classPathEnv.append(c.trim());
        }
        env.put("CLASSPATH", classPathEnv.toString());
        // propagate to replace node managers user name (effective in non-secure mode)
        env.put("HADOOP_USER_NAME", UserGroupInformation.getLoginUser().getUserName());

        amContainer.setEnvironment(env);

        // Set the necessary command to execute the application master
        ArrayList<CharSequence> vargs = new ArrayList<CharSequence>(30);

        // Set java executable command
        LOG.info("Setting up app master command");
        vargs.add(javaCmd);
        if (dag.isDebug()) {
            vargs.add("-agentlib:jdwp=transport=dt_socket,server=y,suspend=n");
        }
        // Set Xmx based on am memory size
        // default heap size 75% of total memory
        if (dag.getMasterJVMOptions() != null) {
            vargs.add(dag.getMasterJVMOptions());
        }
        vargs.add("-Xmx" + (amMemory * 3 / 4) + "m");
        vargs.add("-XX:+HeapDumpOnOutOfMemoryError");
        vargs.add("-XX:HeapDumpPath=/tmp/dt-heap-" + appId.getId() + ".bin");
        vargs.add("-Dhadoop.root.logger=" + (dag.isDebug() ? "DEBUG" : "INFO") + ",RFA");
        vargs.add("-Dhadoop.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
        vargs.add(String.format("-D%s=%s", StreamingContainer.PROP_APP_PATH, dag.assertAppPath()));
        if (dag.isDebug()) {
            vargs.add("-Dlog4j.debug=true");
        }

        String loggersLevel = conf.get(DTLoggerFactory.DT_LOGGERS_LEVEL);
        if (loggersLevel != null) {
            vargs.add(String.format("-D%s=%s", DTLoggerFactory.DT_LOGGERS_LEVEL, loggersLevel));
        }
        vargs.add(StreamingAppMaster.class.getName());
        vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
        vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

        // Get final command
        StringBuilder command = new StringBuilder(9 * vargs.size());
        for (CharSequence str : vargs) {
            command.append(str).append(" ");
        }

        LOG.info("Completed setting up app master command " + command.toString());
        List<String> commands = new ArrayList<String>();
        commands.add(command.toString());
        amContainer.setCommands(commands);

        // Set up resource type requirements
        // For now, only memory is supported so we set memory requirements
        Resource capability = Records.newRecord(Resource.class);
        capability.setMemory(amMemory);
        appContext.setResource(capability);

        // Service data is a binary blob that can be passed to the application
        // Not needed in this scenario
        // amContainer.setServiceData(serviceData);
        appContext.setAMContainerSpec(amContainer);

        // Set the priority for the application master
        Priority pri = Records.newRecord(Priority.class);
        pri.setPriority(amPriority);
        appContext.setPriority(pri);
        // Set the queue to which this application is to be submitted in the RM
        appContext.setQueue(queueName);

        // Submit the application to the applications manager
        // SubmitApplicationResponse submitResp = rmClient.submitApplication(appRequest);
        // Ignore the response as either a valid response object is returned on success
        // or an exception thrown to denote some form of a failure
        String specStr = Objects.toStringHelper("Submitting application: ")
                .add("name", appContext.getApplicationName()).add("queue", appContext.getQueue())
                .add("user", UserGroupInformation.getLoginUser()).add("resource", appContext.getResource())
                .toString();
        LOG.info(specStr);
        if (dag.isDebug()) {
            //LOG.info("Full submission context: " + appContext);
        }
        yarnClient.submitApplication(appContext);
    } finally {
        fs.close();
    }
}

From source file:com.datatorrent.stram.StreamingContainerManager.java

License:Apache License

/**
 * This method is for saving meta information about this application in HDFS -- the meta information that generally
 * does not change across multiple attempts
 */
private void saveMetaInfo() throws IOException {
    Path path = new Path(this.vars.appPath, APP_META_FILENAME + "." + System.nanoTime());
    FileSystem fs = FileSystem.newInstance(path.toUri(), new Configuration());
    try {
        FSDataOutputStream os = fs.create(path);
        try {
            JSONObject top = new JSONObject();
            JSONObject attributes = new JSONObject();
            for (Map.Entry<Attribute<?>, Object> entry : this.plan.getLogicalPlan().getAttributes()
                    .entrySet()) {
                attributes.put(entry.getKey().getSimpleName(), entry.getValue());
            }
            JSONObject customMetrics = new JSONObject();
            for (Map.Entry<String, Map<String, Object>> entry : latestLogicalMetrics.entrySet()) {
                customMetrics.put(entry.getKey(), new JSONArray(entry.getValue().keySet()));
            }
            top.put(APP_META_KEY_ATTRIBUTES, attributes);
            top.put(APP_META_KEY_CUSTOM_METRICS, customMetrics);
            os.write(top.toString().getBytes());
        } catch (JSONException ex) {
            throw new RuntimeException(ex);
        } finally {
            os.close();
        }
        Path origPath = new Path(this.vars.appPath, APP_META_FILENAME);
        fs.rename(path, origPath);
    } finally {
        fs.close();
    }
}

From source file:com.davidgildeh.hadoop.utils.FileUtils.java

License:Apache License

/**
 * Loads a Properties object with key/value properties from file on HDFS
 *
 * @param path          The path to the properties file
 * @return              The initialised properties loaded from file
 * @throws IOException 
 */
public static Properties loadPropertiesFile(String path) throws IOException {

    // Properties Object to load properties file
    Properties propFile = new Properties();

    Path fsPath = new Path(path);
    FileSystem fileSystem = getFileSystem(fsPath);
    checkFileExists(fileSystem, fsPath);
    FSDataInputStream file = fileSystem.open(fsPath);
    propFile.load(file);
    file.close();
    fileSystem.close();
    return propFile;
}

From source file:com.davidgildeh.hadoop.utils.FileUtils.java

License:Apache License

/**
 * Delete a file on HDFS
 * 
 * @param path          The path to the file on HDFS
 * @throws IOException 
 */
public static void deleteFile(String path) throws IOException {

    Path fsPath = new Path(path);
    FileSystem fileSystem = getFileSystem(fsPath);
    checkFileExists(fileSystem, fsPath);

    // Delete file
    fileSystem.delete(fsPath, true);
    fileSystem.close();
}