Example usage for org.apache.hadoop.fs FileSystem close

List of usage examples for org.apache.hadoop.fs FileSystem close

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem#close.

Prototype

@Override
public void close() throws IOException 


Document

Close this FileSystem instance.
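A minimal sketch of the typical acquire/use/close cycle, assuming a default Configuration (the path below is illustrative). FileSystem implements java.io.Closeable, so try-with-resources invokes close() automatically. Note that FileSystem.get returns a cached instance shared process-wide by default, so closing it closes it for every other holder unless caching is disabled via fs.<scheme>.impl.disable.cache.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public static void createAndClose() throws IOException {
        Configuration conf = new Configuration();
        // FileSystem implements Closeable, so close() runs automatically here.
        try (FileSystem fs = FileSystem.get(conf)) {
            fs.mkdirs(new Path("/tmp/example")); // illustrative path
        }
        // Caution: the instance returned by FileSystem.get(conf) is cached and
        // shared by default; closing it affects all other holders of the handle.
    }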

Usage

From source file:eu.stratosphere.yarn.Client.java

License:Apache License

public void run(String[] args) throws Exception {

    if (UserGroupInformation.isSecurityEnabled()) {
        throw new RuntimeException("Stratosphere YARN client does not have security support right now. "
                + "File a bug, we will fix it asap");
    }
    //Utils.logFilesInCurrentDirectory(LOG);
    //
    //   Command Line Options
    //
    Options options = new Options();
    options.addOption(VERBOSE);
    options.addOption(STRATOSPHERE_CONF_DIR);
    options.addOption(STRATOSPHERE_JAR);
    options.addOption(JM_MEMORY);
    options.addOption(TM_MEMORY);
    options.addOption(TM_CORES);
    options.addOption(CONTAINER);
    options.addOption(GEN_CONF);
    options.addOption(QUEUE);
    options.addOption(QUERY);
    options.addOption(SHIP_PATH);

    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;
    try {
        cmd = parser.parse(options, args);
    } catch (MissingOptionException moe) {
        System.out.println(moe.getMessage());
        printUsage();
        System.exit(1);
    }

    if (System.getProperty("log4j.configuration") == null) {
        Logger root = Logger.getRootLogger();
        root.removeAllAppenders();
        PatternLayout layout = new PatternLayout("%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n");
        ConsoleAppender appender = new ConsoleAppender(layout, "System.err");
        root.addAppender(appender);
        if (cmd.hasOption(VERBOSE.getOpt())) {
            root.setLevel(Level.DEBUG);
            LOG.debug("CLASSPATH: " + System.getProperty("java.class.path"));
        } else {
            root.setLevel(Level.INFO);
        }
    }

    // Jar Path
    Path localJarPath;
    if (cmd.hasOption(STRATOSPHERE_JAR.getOpt())) {
        String userPath = cmd.getOptionValue(STRATOSPHERE_JAR.getOpt());
        if (!userPath.startsWith("file://")) {
            userPath = "file://" + userPath;
        }
        localJarPath = new Path(userPath);
    } else {
        localJarPath = new Path(
                "file://" + Client.class.getProtectionDomain().getCodeSource().getLocation().getPath());
    }

    if (cmd.hasOption(GEN_CONF.getOpt())) {
        LOG.info("Placing default configuration in current directory");
        File outFile = generateDefaultConf(localJarPath);
        LOG.info("File written to " + outFile.getAbsolutePath());
        System.exit(0);
    }

    // Conf Path 
    Path confPath = null;
    String confDirPath = "";
    if (cmd.hasOption(STRATOSPHERE_CONF_DIR.getOpt())) {
        confDirPath = cmd.getOptionValue(STRATOSPHERE_CONF_DIR.getOpt()) + "/";
        File confFile = new File(confDirPath + CONFIG_FILE_NAME);
        if (!confFile.exists()) {
            LOG.fatal("Unable to locate configuration file in " + confFile);
            System.exit(1);
        }
        confPath = new Path(confFile.getAbsolutePath());
    } else {
        System.out.println("No configuration file has been specified");

        // no configuration path given.
        // -> see if there is one in the current directory
        File currDir = new File(".");
        File[] candidates = currDir.listFiles(new FilenameFilter() {
            @Override
            public boolean accept(final File dir, final String name) {
                return name != null && name.endsWith(".yaml");
            }
        });
        if (candidates == null || candidates.length == 0) {
            System.out.println(
                    "No configuration file has been found in the current directory.\n" + "Copying default.");
            File outFile = generateDefaultConf(localJarPath);
            confPath = new Path(outFile.toURI());
        } else {
            if (candidates.length > 1) {
                System.out.println("Multiple .yaml configuration files were found in the current directory\n"
                        + "Please specify one explicitly");
                System.exit(1);
            } else if (candidates.length == 1) {
                confPath = new Path(candidates[0].toURI());
            }
        }
    }
    List<File> shipFiles = new ArrayList<File>();
    // path to directory to ship
    if (cmd.hasOption(SHIP_PATH.getOpt())) {
        String shipPath = cmd.getOptionValue(SHIP_PATH.getOpt());
        File shipDir = new File(shipPath);
        if (shipDir.isDirectory()) {
            shipFiles = new ArrayList<File>(Arrays.asList(shipDir.listFiles(new FilenameFilter() {
                @Override
                public boolean accept(File dir, String name) {
                    return !(name.equals(".") || name.equals(".."));
                }
            })));
        } else {
            LOG.warn("Ship directory is not a directory!");
        }
    }
    boolean hasLog4j = false;
    //check if there is a log4j file
    if (confDirPath.length() > 0) {
        File l4j = new File(confDirPath + "/log4j.properties");
        if (l4j.exists()) {
            shipFiles.add(l4j);
            hasLog4j = true;
        }
    }

    // queue
    String queue = "default";
    if (cmd.hasOption(QUEUE.getOpt())) {
        queue = cmd.getOptionValue(QUEUE.getOpt());
    }

    // JobManager Memory
    int jmMemory = 512;
    if (cmd.hasOption(JM_MEMORY.getOpt())) {
        jmMemory = Integer.valueOf(cmd.getOptionValue(JM_MEMORY.getOpt()));
    }

    // Task Managers memory
    int tmMemory = 1024;
    if (cmd.hasOption(TM_MEMORY.getOpt())) {
        tmMemory = Integer.valueOf(cmd.getOptionValue(TM_MEMORY.getOpt()));
    }

    // Task Managers vcores
    int tmCores = 1;
    if (cmd.hasOption(TM_CORES.getOpt())) {
        tmCores = Integer.valueOf(cmd.getOptionValue(TM_CORES.getOpt()));
    }
    Utils.getStratosphereConfiguration(confPath.toUri().getPath());
    int jmPort = GlobalConfiguration.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 0);
    if (jmPort == 0) {
        LOG.warn("Unable to find job manager port in configuration!");
        jmPort = ConfigConstants.DEFAULT_JOB_MANAGER_IPC_PORT;
    }
    conf = Utils.initializeYarnConfiguration();

    // initialize HDFS
    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem 
    // Create a local resource to point to the destination jar path 
    final FileSystem fs = FileSystem.get(conf);

    if (fs.getScheme().startsWith("file")) {
        LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the "
                + "specified Hadoop configuration path is wrong and the system is using the default Hadoop configuration values. "
                + "The Stratosphere YARN client needs to store its files in a distributed file system");
    }

    // Create yarnClient
    final YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Query cluster for metrics
    if (cmd.hasOption(QUERY.getOpt())) {
        showClusterMetrics(yarnClient);
    }
    if (!cmd.hasOption(CONTAINER.getOpt())) {
        LOG.fatal("Missing required argument " + CONTAINER.getOpt());
        printUsage();
        yarnClient.stop();
        System.exit(1);
    }

    // TM Count
    final int taskManagerCount = Integer.valueOf(cmd.getOptionValue(CONTAINER.getOpt()));

    System.out.println("Using values:");
    System.out.println("\tContainer Count = " + taskManagerCount);
    System.out.println("\tJar Path = " + localJarPath.toUri().getPath());
    System.out.println("\tConfiguration file = " + confPath.toUri().getPath());
    System.out.println("\tJobManager memory = " + jmMemory);
    System.out.println("\tTaskManager memory = " + tmMemory);
    System.out.println("\tTaskManager cores = " + tmCores);

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    Resource maxRes = appResponse.getMaximumResourceCapability();
    if (tmMemory > maxRes.getMemory() || tmCores > maxRes.getVirtualCores()) {
        LOG.fatal("The cluster does not have the requested resources for the TaskManagers available!\n"
                + "Maximum Memory: " + maxRes.getMemory() + ", Maximum Cores: " + tmCores);
        yarnClient.stop();
        System.exit(1);
    }
    if (jmMemory > maxRes.getMemory()) {
        LOG.fatal("The cluster does not have the requested resources for the JobManager available!\n"
                + "Maximum Memory: " + maxRes.getMemory());
        yarnClient.stop();
        System.exit(1);
    }
    int totalMemoryRequired = jmMemory + tmMemory * taskManagerCount;
    ClusterResourceDescription freeClusterMem = getCurrentFreeClusterResources(yarnClient);
    if (freeClusterMem.totalFreeMemory < totalMemoryRequired) {
        LOG.fatal("This YARN session requires " + totalMemoryRequired + "MB of memory in the cluster. "
                + "There are currently only " + freeClusterMem.totalFreeMemory + "MB available.");
        yarnClient.stop();
        System.exit(1);
    }
    if (tmMemory > freeClusterMem.containerLimit) {
        LOG.fatal("The requested amount of memory for the TaskManagers (" + tmMemory + "MB) is more than "
                + "the largest possible YARN container: " + freeClusterMem.containerLimit);
        yarnClient.stop();
        System.exit(1);
    }
    if (jmMemory > freeClusterMem.containerLimit) {
        LOG.fatal("The requested amount of memory for the JobManager (" + jmMemory + "MB) is more than "
                + "the largest possible YARN container: " + freeClusterMem.containerLimit);
        yarnClient.stop();
        System.exit(1);
    }

    // respect custom JVM options in the YAML file
    final String javaOpts = GlobalConfiguration.getString(ConfigConstants.STRATOSPHERE_JVM_OPTIONS, "");

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    String amCommand = "$JAVA_HOME/bin/java" + " -Xmx" + Utils.calculateHeapSize(jmMemory) + "M " + javaOpts;
    if (hasLog4j) {
        amCommand += " -Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
                + "/jobmanager-log4j.log\" -Dlog4j.configuration=file:log4j.properties";
    }
    amCommand += " eu.stratosphere.yarn.ApplicationMaster" + " " + " 1>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stdout.log" + " 2>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stderr.log";
    amContainer.setCommands(Collections.singletonList(amCommand));

    System.err.println("amCommand=" + amCommand);

    // Set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    final ApplicationId appId = appContext.getApplicationId();

    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    LocalResource stratosphereConf = Records.newRecord(LocalResource.class);
    Path remotePathJar = Utils.setupLocalResource(conf, fs, appId.toString(), localJarPath, appMasterJar,
            fs.getHomeDirectory());
    Path remotePathConf = Utils.setupLocalResource(conf, fs, appId.toString(), confPath, stratosphereConf,
            fs.getHomeDirectory());
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(2);
    localResources.put("stratosphere.jar", appMasterJar);
    localResources.put("stratosphere-conf.yaml", stratosphereConf);

    // setup security tokens (code from apache storm)
    final Path[] paths = new Path[3 + shipFiles.size()];
    StringBuffer envShipFileList = new StringBuffer();
    // upload ship files
    for (int i = 0; i < shipFiles.size(); i++) {
        File shipFile = shipFiles.get(i);
        LocalResource shipResources = Records.newRecord(LocalResource.class);
        Path shipLocalPath = new Path("file://" + shipFile.getAbsolutePath());
        paths[3 + i] = Utils.setupLocalResource(conf, fs, appId.toString(), shipLocalPath, shipResources,
                fs.getHomeDirectory());
        localResources.put(shipFile.getName(), shipResources);

        envShipFileList.append(paths[3 + i]);
        if (i + 1 < shipFiles.size()) {
            envShipFileList.append(',');
        }
    }

    paths[0] = remotePathJar;
    paths[1] = remotePathConf;
    paths[2] = new Path(fs.getHomeDirectory(), ".stratosphere/" + appId.toString() + "/");
    FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
    fs.setPermission(paths[2], permission); // set permission for path.
    Utils.setTokensFor(amContainer, paths, this.conf);

    amContainer.setLocalResources(localResources);
    fs.close();

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    Utils.setupEnv(conf, appMasterEnv);
    // set configuration values
    appMasterEnv.put(Client.ENV_TM_COUNT, String.valueOf(taskManagerCount));
    appMasterEnv.put(Client.ENV_TM_CORES, String.valueOf(tmCores));
    appMasterEnv.put(Client.ENV_TM_MEMORY, String.valueOf(tmMemory));
    appMasterEnv.put(Client.STRATOSPHERE_JAR_PATH, remotePathJar.toString());
    appMasterEnv.put(Client.ENV_APP_ID, appId.toString());
    appMasterEnv.put(Client.ENV_CLIENT_HOME_DIR, fs.getHomeDirectory().toString());
    appMasterEnv.put(Client.ENV_CLIENT_SHIP_FILES, envShipFileList.toString());
    appMasterEnv.put(Client.ENV_CLIENT_USERNAME, UserGroupInformation.getCurrentUser().getShortUserName());

    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(jmMemory);
    capability.setVirtualCores(1);

    appContext.setApplicationName("Stratosphere"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue(queue);

    // file that we write into the conf/ dir containing the jobManager address.
    final File addrFile = new File(confDirPath + CliFrontend.JOBMANAGER_ADDRESS_FILE);

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                LOG.info("Killing the Stratosphere-YARN application.");
                yarnClient.killApplication(appId);
                LOG.info("Deleting files in " + paths[2]);
                FileSystem shutFS = FileSystem.get(conf);
                shutFS.delete(paths[2], true); // delete conf and jar file.
                shutFS.close();
            } catch (Exception e) {
                LOG.warn("Exception while killing the YARN application", e);
            }
            try {
                addrFile.delete();
            } catch (Exception e) {
                LOG.warn("Exception while deleting the jobmanager address file", e);
            }
            LOG.info("YARN Client is shutting down");
            yarnClient.stop();
        }
    });

    LOG.info("Submitting application master " + appId);
    yarnClient.submitApplication(appContext);
    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    boolean told = false;
    char[] el = { '/', '|', '\\', '-' };
    int i = 0;
    while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED
            && appState != YarnApplicationState.FAILED) {
        if (!told && appState == YarnApplicationState.RUNNING) {
            System.err
                    .println("Stratosphere JobManager is now running on " + appReport.getHost() + ":" + jmPort);
            System.err.println("JobManager Web Interface: " + appReport.getTrackingUrl());
            // write jobmanager connect information

            PrintWriter out = new PrintWriter(addrFile);
            out.println(appReport.getHost() + ":" + jmPort);
            out.close();
            addrFile.setReadable(true, false); // readable for all.
            told = true;
        }
        if (!told) {
            System.err.print(el[i++] + "\r");
            if (i == el.length) {
                i = 0;
            }
            Thread.sleep(500); // wait for the application to switch to RUNNING
        } else {
            Thread.sleep(5000);
        }

        appReport = yarnClient.getApplicationReport(appId);
        appState = appReport.getYarnApplicationState();
    }

    LOG.info("Application " + appId + " finished with" + " state " + appState + " at "
            + appReport.getFinishTime());
    if (appState == YarnApplicationState.FAILED || appState == YarnApplicationState.KILLED) {
        LOG.warn("Application failed. Diagnostics " + appReport.getDiagnostics());
    }

}
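The example above closes the shared handle (fs.close()) after staging the resources, then re-acquires a filesystem inside the shutdown hook with FileSystem.get(conf). This works because close() also evicts the instance from FileSystem's internal cache, so the later get() returns a fresh, usable instance. A condensed sketch of that pattern (the path and cleanup call are illustrative):

    FileSystem fs = FileSystem.get(conf);
    // ... upload jar, configuration and ship files ...
    fs.close(); // also removes the instance from the FileSystem cache

    // later, e.g. in a shutdown hook: get() now returns a new instance
    FileSystem shutFS = FileSystem.get(conf);
    shutFS.delete(new Path(".stratosphere/example"), true); // illustrative path
    shutFS.close();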

From source file:fm.last.hadoop.tools.Tools.java

License:Apache License

public void close() throws IOException {
    FileSystem fs = FileSystem.get(getConf());
    if (fs != null) {
        fs.close();
        fs = null;
    }
}
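Note that FileSystem.get(getConf()) returns the cached, shared instance, so a close() helper like this closes the filesystem for every other user in the process. Where a privately owned handle is wanted instead, FileSystem.newInstance returns an uncached instance; a sketch, assuming that intent:

    // newInstance() bypasses the FileSystem cache, so closing this handle
    // cannot disturb code elsewhere that obtained its FileSystem via get().
    FileSystem fs = FileSystem.newInstance(getConf());
    try {
        // ... use fs ...
    } finally {
        fs.close(); // only this private instance is closed
    }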

From source file:format.OverlapInputFormat.java

License:BSD License

/******
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {
    Configuration conf = HadoopUtils.getConfiguration(job);

    List<InputSplit> defaultSplits = super.getSplits(job);
    List<InputSplit> result = new ArrayList<InputSplit>();

    Path prevFile = null;
    FourMcBlockIndex prevIndex = null;

    for (InputSplit genericSplit : defaultSplits) {
        // Load the index.
        FileSplit fileSplit = (FileSplit) genericSplit;
        Path file = fileSplit.getPath();
        FileSystem fs = file.getFileSystem(conf);

        FourMcBlockIndex index;
        if (file.equals(prevFile)) {
            index = prevIndex;
        } else {
            index = FourMcBlockIndex.readIndex(fs, file);
            prevFile = file;
            prevIndex = index;
        }

        if (index == null) {
            throw new IOException("BlockIndex unreadable for " + file);
        }

        if (index.isEmpty()) { // leave the default split for an empty block index
            result.add(fileSplit);
            continue;
        }

        long start = fileSplit.getStart();
        long end = start + fileSplit.getLength();

        long fourMcStart = index.alignSliceStartToIndex(start, end);
        long fourMcEnd = index.alignSliceEndToIndex(end, fs.getFileStatus(file).getLen());

        if (fourMcStart != FourMcBlockIndex.NOT_FOUND && fourMcEnd != FourMcBlockIndex.NOT_FOUND) {
            result.add(new FileSplit(file, fourMcStart, fourMcEnd - fourMcStart, fileSplit.getLocations()));
            LOG.debug("Added 4mc split for " + file + "[start=" + fourMcStart + ", length="
                    + (fourMcEnd - fourMcStart) + "]");
        }
    }

    return result;
}
******/

@Override
public List<InputSplit> getSplits(JobContext context) {
    List<InputSplit> splits = new ArrayList<InputSplit>();
    FileSystem fs = null;
    Path file = OverlapInputFormat.getInputPaths(context)[0];
    Configuration conf = context.getConfiguration();
    long blocksize = Long.parseLong(conf.get("dfs.blocksize"));
    //        long overlap = Long.parseLong(conf.get("pcap.defaultsize"));
    long overlap = 16;
    FSDataInputStream in = null;
    try {
        fs = FileSystem.get(context.getConfiguration());
        in = fs.open(file);
        long pos = 0;
        while (in.available() > 0) {
            FileSplit split = new FileSplit(file, pos, blocksize + overlap, new String[] {});
            splits.add(split);
            pos += blocksize;
            in.skip(blocksize + overlap);
        }
    } catch (IOException e) {
        LOG.error(e.getLocalizedMessage());
    } finally {
        // best-effort cleanup: close failures are deliberately ignored
        if (in != null) {
            try {
                in.close();
            } catch (Exception e) {
                // ignore
            }
        }
        if (fs != null) {
            try {
                fs.close();
            } catch (Exception e) {
                // ignore
            }
        }
    }
    return splits;
}
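The finally block above deliberately ignores close failures. Hadoop ships a small helper for exactly this quiet-close pattern, org.apache.hadoop.io.IOUtils.closeStream(Closeable), which is null-safe and swallows IOExceptions; since FileSystem implements Closeable it can be passed directly. A sketch of the equivalent cleanup:

    import org.apache.hadoop.io.IOUtils;

    // ...
    } finally {
        IOUtils.closeStream(in); // null-safe, ignores close failures
        IOUtils.closeStream(fs);
    }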

From source file:fr.jetoile.hadoopunit.component.HdfsBootstrapTest.java

License:Apache License

@Test
public void hdfsShouldStart() throws Exception {

    Assertions.assertThat(Utils.available("127.0.0.1", 20112)).isFalse();

    // Write a file to HDFS containing the test string
    FileSystem hdfsFsHandle = ((HdfsBootstrap) HadoopBootstrap.INSTANCE.getService(Component.HDFS))
            .getHdfsFileSystemHandle();
    FSDataOutputStream writer = hdfsFsHandle
            .create(new Path(configuration.getString(Config.HDFS_TEST_FILE_KEY)));
    writer.writeUTF(configuration.getString(Config.HDFS_TEST_STRING_KEY));
    writer.close();

    // Read the file and compare to test string
    FSDataInputStream reader = hdfsFsHandle.open(new Path(configuration.getString(Config.HDFS_TEST_FILE_KEY)));
    assertEquals(reader.readUTF(), configuration.getString(Config.HDFS_TEST_STRING_KEY));
    reader.close();
    hdfsFsHandle.close();

    URL url = new URL(String.format("http://localhost:%s/webhdfs/v1?op=GETHOMEDIRECTORY&user.name=guest",
            configuration.getInt(Config.HDFS_NAMENODE_HTTP_PORT_KEY)));
    URLConnection connection = url.openConnection();
    connection.setRequestProperty("Accept-Charset", "UTF-8");
    BufferedReader response = new BufferedReader(new InputStreamReader(connection.getInputStream()));
    String line = response.readLine();
    response.close();
    assertThat("{\"Path\":\"/user/guest\"}").isEqualTo(line);

}

From source file:fr.jetoile.hadoopunit.component.OozieBootstrapTest.java

License:Apache License

@Test
public void oozieShouldStart() throws Exception {

    LOGGER.info("OOZIE: Test Submit Workflow Start");

    FileSystem hdfsFs = ((HdfsBootstrap) HadoopBootstrap.INSTANCE.getService(Component.HDFS))
            .getHdfsFileSystemHandle();
    OozieClient oozieClient = ((OozieBootstrap) HadoopBootstrap.INSTANCE.getService(Component.OOZIE))
            .getOozieClient();

    Path appPath = new Path(hdfsFs.getHomeDirectory(), "testApp");
    hdfsFs.mkdirs(new Path(appPath, "lib"));
    Path workflow = new Path(appPath, "workflow.xml");

    //write workflow.xml
    String wfApp = "<workflow-app xmlns='uri:oozie:workflow:0.1' name='test-wf'>" + "    <start to='end'/>"
            + "    <end name='end'/>" + "</workflow-app>";

    Writer writer = new OutputStreamWriter(hdfsFs.create(workflow));
    writer.write(wfApp);
    writer.close();

    //write job.properties
    Properties conf = oozieClient.createConfiguration();
    conf.setProperty(OozieClient.APP_PATH, workflow.toString());
    conf.setProperty(OozieClient.USER_NAME, UserGroupInformation.getCurrentUser().getUserName());

    //submit and check
    final String jobId = oozieClient.submit(conf);
    WorkflowJob wf = oozieClient.getJobInfo(jobId);
    assertNotNull(wf);
    assertEquals(WorkflowJob.Status.PREP, wf.getStatus());

    LOGGER.info("OOZIE: Workflow: {}", wf.toString());
    hdfsFs.close();
    assertThat("true").isEqualTo("true");

}

From source file:fr.jetoile.hadoopunit.integrationtest.IntegrationBootstrapTest.java

License:Apache License

@Test
public void hdfsShouldStart() throws Exception {

    assertThat(Utils.available("127.0.0.1", configuration.getInt(Config.HDFS_NAMENODE_HTTP_PORT_KEY)))
            .isFalse();

    //        org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
    //        conf.set("fs.default.name", "hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));
    //
    //        URI uri = URI.create ("hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));
    //
    //        FileSystem hdfsFsHandle = FileSystem.get (uri, conf);
    FileSystem hdfsFsHandle = HdfsUtils.INSTANCE.getFileSystem();

    FSDataOutputStream writer = hdfsFsHandle
            .create(new Path(configuration.getString(Config.HDFS_TEST_FILE_KEY)));
    writer.writeUTF(configuration.getString(Config.HDFS_TEST_STRING_KEY));
    writer.close();

    // Read the file and compare to test string
    FSDataInputStream reader = hdfsFsHandle.open(new Path(configuration.getString(Config.HDFS_TEST_FILE_KEY)));
    assertEquals(reader.readUTF(), configuration.getString(Config.HDFS_TEST_STRING_KEY));
    reader.close();
    hdfsFsHandle.close();

    URL url = new URL(String.format("http://localhost:%s/webhdfs/v1?op=GETHOMEDIRECTORY&user.name=guest",
            configuration.getInt(Config.HDFS_NAMENODE_HTTP_PORT_KEY)));
    URLConnection connection = url.openConnection();
    connection.setRequestProperty("Accept-Charset", "UTF-8");
    BufferedReader response = new BufferedReader(new InputStreamReader(connection.getInputStream()));
    String line = response.readLine();
    response.close();
    assertThat("{\"Path\":\"/user/guest\"}").isEqualTo(line);

}

From source file:fr.jetoile.hadoopunit.integrationtest.IntegrationBootstrapTest.java

License:Apache License

@Test
public void oozieShouldStart() throws Exception {
    LOGGER.info("OOZIE: Test Submit Workflow Start");

    org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
    conf.set("fs.default.name", "hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));

    URI uri = URI.create("hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));

    FileSystem hdfsFs = FileSystem.get(uri, conf);

    OozieClient oozieClient = new OozieClient("http://" + configuration.getString(OozieBootstrap.OOZIE_HOST)
            + ":" + configuration.getInt(OozieBootstrap.OOZIE_PORT) + "/oozie");

    Path appPath = new Path(hdfsFs.getHomeDirectory(), "testApp");
    hdfsFs.mkdirs(new Path(appPath, "lib"));
    Path workflow = new Path(appPath, "workflow.xml");

    //write workflow.xml
    String wfApp = "<workflow-app xmlns='uri:oozie:workflow:0.1' name='test-wf'>" + "    <start to='end'/>"
            + "    <end name='end'/>" + "</workflow-app>";

    Writer writer = new OutputStreamWriter(hdfsFs.create(workflow));
    writer.write(wfApp);
    writer.close();

    //write job.properties
    Properties oozieConf = oozieClient.createConfiguration();
    oozieConf.setProperty(OozieClient.APP_PATH, workflow.toString());
    oozieConf.setProperty(OozieClient.USER_NAME, UserGroupInformation.getCurrentUser().getUserName());

    //submit and check
    final String jobId = oozieClient.submit(oozieConf);
    WorkflowJob wf = oozieClient.getJobInfo(jobId);
    Assert.assertNotNull(wf);
    assertEquals(WorkflowJob.Status.PREP, wf.getStatus());

    LOGGER.info("OOZIE: Workflow: {}", wf.toString());
    hdfsFs.close();

}

From source file:fr.jetoile.hadoopunit.integrationtest.ManualIntegrationBootstrapTest.java

License:Apache License

@Test
public void hdfsShouldStart() throws Exception {

    //        assertThat(Utils.available("127.0.0.1", configuration.getInt(Config.HDFS_NAMENODE_HTTP_PORT_KEY))).isFalse();
    //
    //        org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
    //        conf.set("fs.default.name", "hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));
    //
    //        URI uri = URI.create ("hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));
    //
    //        FileSystem hdfsFsHandle = FileSystem.get (uri, conf);
    FileSystem hdfsFsHandle = HdfsUtils.INSTANCE.getFileSystem();

    FSDataOutputStream writer = hdfsFsHandle
            .create(new Path(configuration.getString(Config.HDFS_TEST_FILE_KEY)));
    writer.writeUTF(configuration.getString(Config.HDFS_TEST_STRING_KEY));
    writer.close();

    // Read the file and compare to test string
    FSDataInputStream reader = hdfsFsHandle.open(new Path(configuration.getString(Config.HDFS_TEST_FILE_KEY)));
    assertEquals(reader.readUTF(), configuration.getString(Config.HDFS_TEST_STRING_KEY));
    reader.close();
    hdfsFsHandle.close();

    URL url = new URL(String.format("http://localhost:%s/webhdfs/v1?op=GETHOMEDIRECTORY&user.name=guest",
            configuration.getInt(Config.HDFS_NAMENODE_HTTP_PORT_KEY)));
    URLConnection connection = url.openConnection();
    connection.setRequestProperty("Accept-Charset", "UTF-8");
    BufferedReader response = new BufferedReader(new InputStreamReader(connection.getInputStream()));
    String line = response.readLine();
    response.close();
    assertThat("{\"Path\":\"/user/guest\"}").isEqualTo(line);

}

From source file:fr.jetoile.hadoopunit.integrationtest.ManualIntegrationBootstrapTest.java

License:Apache License

@Test
public void oozieShouldStart() throws Exception {

    LOGGER.info("OOZIE: Test Submit Workflow Start");

    org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
    conf.set("fs.default.name", "hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));

    URI uri = URI.create("hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));

    FileSystem hdfsFs = FileSystem.get(uri, conf);

    OozieClient oozieClient = new OozieClient("http://" + configuration.getString(OozieBootstrap.OOZIE_HOST)
            + ":" + configuration.getInt(OozieBootstrap.OOZIE_PORT) + "/oozie");

    Path appPath = new Path(hdfsFs.getHomeDirectory(), "testApp");
    hdfsFs.mkdirs(new Path(appPath, "lib"));
    Path workflow = new Path(appPath, "workflow.xml");

    //write workflow.xml
    String wfApp = "<workflow-app xmlns='uri:oozie:workflow:0.1' name='test-wf'>" + "    <start to='end'/>"
            + "    <end name='end'/>" + "</workflow-app>";

    Writer writer = new OutputStreamWriter(hdfsFs.create(workflow));
    writer.write(wfApp);
    writer.close();

    //write job.properties
    Properties oozieConf = oozieClient.createConfiguration();
    oozieConf.setProperty(OozieClient.APP_PATH, workflow.toString());
    oozieConf.setProperty(OozieClient.USER_NAME, UserGroupInformation.getCurrentUser().getUserName());

    //submit and check
    final String jobId = oozieClient.submit(oozieConf);
    WorkflowJob wf = oozieClient.getJobInfo(jobId);
    Assert.assertNotNull(wf);
    assertEquals(WorkflowJob.Status.PREP, wf.getStatus());

    LOGGER.info("OOZIE: Workflow: {}", wf.toString());
    hdfsFs.close();

}

From source file:Hdfs_Operations.HdfsCreateDirectory.java

@Override
public int run(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(getConf());
    Path path = new Path(args[0]);
    fs.mkdirs(path);
    fs.close();

    return 0;
}
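A tool like the one above is typically launched through ToolRunner, which wires the configuration into run(); closing the cached FileSystem at the end is harmless here because the process exits immediately afterwards. A sketch of a main method, assuming the class above implements Tool:

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new Configuration(), new HdfsCreateDirectory(), args);
        System.exit(exitCode);
    }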