Example usage for org.apache.hadoop.fs FileSystem close

Introduction

On this page you can find usage examples for the org.apache.hadoop.fs.FileSystem close() method.

Prototype

@Override
public void close() throws IOException 

Document

Close this FileSystem instance.
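
Before the examples below, a minimal sketch (not taken from any of the sources listed here) of the idiomatic close pattern: FileSystem implements java.io.Closeable, so try-with-resources guarantees that close() runs. FileSystem.newInstance() is used instead of FileSystem.get() because it returns an uncached instance that the caller can safely close.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // newInstance() bypasses the FileSystem cache, so closing this
        // instance cannot break other code sharing the same URI.
        try (FileSystem fs = FileSystem.newInstance(conf)) {
            System.out.println(fs.exists(new Path("/tmp")));
        } // fs.close() runs automatically here
    }
}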

Usage

From source file:com.ml.ira.algos.LogisticModelParameters.java

License:Apache License

/**
 * Saves a model to an output stream.
 */
public void saveTo(Path path) throws IOException {
    Closeables.close(lr, false);
    targetCategories = getCsvRecordFactory().getTargetCategories();
    FileSystem ofs = path.getFileSystem(new Configuration());
    FSDataOutputStream out = ofs.create(path, true);
    write(out);
    out.flush();
    out.close(); // close the stream before closing the FileSystem
    ofs.close();
}

From source file:com.ml.ira.algos.LogisticModelParameters.java

License:Apache License

public static LogisticModelParameters loadFrom(Path path) throws IOException {
    FileSystem ofs = path.getFileSystem(new Configuration());
    if (!ofs.exists(path)) {
        throw new IOException(path.toString() + " does not exist.");
    }
    LogisticModelParameters result = new LogisticModelParameters();
    FSDataInputStream in = ofs.open(path);
    result.readFields(in);
    in.close(); // close the stream before closing the FileSystem
    ofs.close();
    return result;
}
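
A caveat for the two examples above: path.getFileSystem(conf) and FileSystem.get(...) normally return a process-wide cached instance, so ofs.close() also closes that shared instance for any other code using the same URI and user. As a hedged alternative (a hypothetical helper, assuming the path uses the hdfs scheme), the cache can be disabled so the caller owns the instance it closes:

static void withPrivateFileSystem(Path path) throws IOException {
    Configuration conf = new Configuration();
    // With the cache disabled for hdfs://, getFileSystem() returns a
    // fresh instance that this caller owns and may safely close.
    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
    try (FileSystem fs = path.getFileSystem(conf)) {
        System.out.println(fs.exists(path));
    }
}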

From source file:com.mozilla.grouperfish.text.Dictionary.java

License:Apache License

public static Set<String> loadDictionary(Path dictionaryPath) throws IOException {
    Set<String> dictionary = null;
    FileSystem fs = null;
    try {
        fs = FileSystem.get(dictionaryPath.toUri(), new Configuration());
        dictionary = loadDictionary(fs, dictionaryPath);
    } finally {
        if (fs != null) {
            fs.close();
        }
    }

    return dictionary;
}

From source file:com.mozilla.grouperfish.text.Dictionary.java

License:Apache License

public static Map<String, Integer> loadFeatureIndex(Path dictionaryPath) throws IOException {
    Map<String, Integer> featureIndex = null;
    FileSystem fs = null;
    try {
        fs = FileSystem.get(dictionaryPath.toUri(), new Configuration());
        featureIndex = loadFeatureIndex(fs, dictionaryPath);
    } finally {
        if (fs != null) {
            fs.close();
        }
    }

    return featureIndex;
}

From source file:com.mozilla.grouperfish.text.Dictionary.java

License:Apache License

public static Map<Integer, String> loadInvertedFeatureIndex(Path dictionaryPath) throws IOException {
    Map<Integer, String> featureIndex = null;
    FileSystem fs = null;
    try {
        fs = FileSystem.get(dictionaryPath.toUri(), new Configuration());
        featureIndex = loadInvertedFeatureIndex(fs, dictionaryPath);
    } finally {
        if (fs != null) {
            fs.close();
        }
    }

    return featureIndex;
}

From source file:com.mozilla.grouperfish.transforms.coclustering.text.Dictionary.java

License:Apache License

public static Map<Integer, String> loadInvertedIndexWithKeys(Path dictionaryPath) throws IOException {
    Map<Integer, String> index = null;
    FileSystem fs = null;
    try {
        fs = FileSystem.get(dictionaryPath.toUri(), new Configuration());
        index = loadInvertedIndexWithKeys(fs, dictionaryPath);
    } finally {
        if (fs != null) {
            fs.close();
        }
    }
    return index;
}

From source file:com.mozilla.hadoop.UnknownPathFinder.java

License:Apache License

/**
 * Get all paths in HDFS under the HBase root directory up to region level depth
 * @param conf Hadoop configuration used to obtain the FileSystem
 * @param hbaseRootDir HBase root directory in HDFS
 * @return set of path strings found up to region-level depth
 * @throws IOException
 */
public static Set<String> getFilesystemPaths(Configuration conf, Path hbaseRootDir) throws IOException {
    Set<String> fsPaths = null;

    FileSystem hdfs = null;
    try {
        hdfs = FileSystem.get(conf);
        fsPaths = getAllPaths(hdfs, hbaseRootDir, 0, 1);
    } finally {
        if (hdfs != null) {
            hdfs.close();
        }
    }

    LOG.info("# of Directories in filesystem: " + fsPaths.size());

    return fsPaths;
}

From source file:com.mozilla.hadoop.UnknownPathFinder.java

License:Apache License

/**
 * Deletes all of the specified paths.
 * @param conf Hadoop configuration used to obtain the FileSystem
 * @param paths paths to delete recursively
 * @return true if every path was deleted successfully
 * @throws IOException
 */
public static boolean deleteFilesystemPaths(Configuration conf, Collection<String> paths) throws IOException {
    boolean success = true;

    FileSystem hdfs = null;
    try {
        hdfs = FileSystem.get(conf);
        for (String s : paths) {
            Path p = new Path(s);
            if (!hdfs.delete(p, true)) {
                LOG.info("Failed to delete: " + s);
                success = false;
                break;
            } else {
                LOG.info("Successfully deleted: " + s);
            }
        }
    } finally {
        if (hdfs != null) {
            hdfs.close();
        }
    }

    return success;
}

From source file:com.mozilla.socorro.hadoop.RawDumpSize.java

License:LGPL

public int run(String[] args) throws Exception {
    if (args.length != 1) {
        return printUsage();
    }

    int rc = -1;
    Job job = initJob(args);
    job.waitForCompletion(true);
    if (job.isSuccessful()) {
        rc = 0;
        FileSystem hdfs = null;
        DescriptiveStatistics rawStats = new DescriptiveStatistics();
        long rawTotal = 0L;
        DescriptiveStatistics processedStats = new DescriptiveStatistics();
        long processedTotal = 0L;
        try {
            hdfs = FileSystem.get(job.getConfiguration());
            Pattern tabPattern = Pattern.compile("\t");
            for (FileStatus status : hdfs.listStatus(FileOutputFormat.getOutputPath(job))) {
                if (!status.isDirectory()) {
                    BufferedReader reader = null;
                    try {
                        reader = new BufferedReader(new InputStreamReader(hdfs.open(status.getPath())));
                        String line = null;
                        while ((line = reader.readLine()) != null) {
                            String[] splits = tabPattern.split(line);
                            int byteSize = Integer.parseInt(splits[2]);
                            if ("raw".equals(splits[1])) {
                                rawStats.addValue(byteSize);
                                rawTotal += byteSize;
                            } else if ("processed".equals(splits[1])) {
                                processedStats.addValue(byteSize);
                                processedTotal += byteSize;
                            }
                        }
                    } finally {
                        if (reader != null) {
                            reader.close();
                        }
                    }
                }
            }
        } finally {
            if (hdfs != null) {
                hdfs.close();
            }
        }

        System.out.println("===== " + job.getConfiguration().get(START_DATE) + " raw_data:dump =====");
        System.out.println(String.format("Min: %.02f Max: %.02f Mean: %.02f", rawStats.getMin(),
                rawStats.getMax(), rawStats.getMean()));
        System.out.println(String.format("1st Quartile: %.02f 2nd Quartile: %.02f 3rd Quartile: %.02f",
                rawStats.getPercentile(25.0d), rawStats.getPercentile(50.0d), rawStats.getPercentile(75.0d)));
        System.out.println("Total Bytes: " + rawTotal);
        System.out.println("===== " + job.getConfiguration().get(START_DATE) + " processed_data:json =====");
        System.out.println(String.format("Min: %.02f Max: %.02f Mean: %.02f", processedStats.getMin(),
                processedStats.getMax(), processedStats.getMean()));
        System.out.println(String.format("1st Quartile: %.02f 2nd Quartile: %.02f 3rd Quartile: %.02f",
                processedStats.getPercentile(25.0d), processedStats.getPercentile(50.0d),
                processedStats.getPercentile(75.0d)));
        System.out.println("Total Bytes: " + processedTotal);
    }

    return rc;
}

From source file:com.mycompany.app.TestStagingDirectoryPermissions.java

License:Apache License

@Test
public void perms() throws IOException, InterruptedException {
    MiniDFSCluster minidfs = null;
    FileSystem fs = null;
    MiniMRClientCluster minimr = null;
    try {
        Configuration conf = new Configuration(true);
        conf.set("fs.permission.umask-mode", "0077");
        minidfs = new MiniDFSCluster.Builder(conf).build();
        minidfs.waitActive();

        fs = minidfs.getFileSystem();
        conf.set(FileSystem.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
        Path p = path("/in");
        fs.mkdirs(p);

        FSDataOutputStream os = fs.create(new Path(p, "input.txt"));
        os.write("hello!".getBytes("UTF-8"));
        os.close();

        String user = UserGroupInformation.getCurrentUser().getUserName();
        Path home = new Path("/User/" + user);
        fs.mkdirs(home);
        minimr = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);
        JobConf job = new JobConf(minimr.getConfig());

        job.setJobName("PermsTest");
        JobClient client = new JobClient(job);
        FileInputFormat.addInputPath(job, p);
        FileOutputFormat.setOutputPath(job, path("/out"));
        job.setInputFormat(TextInputFormat.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        job.setMapperClass(MySleepMapper.class);

        job.setNumReduceTasks(1);
        RunningJob submittedJob = client.submitJob(job);

        // Sleep for a bit to let localization finish
        System.out.println("Sleeping...");
        Thread.sleep(3 * 1000L);
        System.out.println("Done sleeping...");
        assertFalse(UserGroupInformation.isSecurityEnabled());

        Path stagingRoot = path("/tmp/hadoop-yarn/staging/" + user + "/.staging/");
        assertTrue(fs.exists(stagingRoot));
        assertEquals(1, fs.listStatus(stagingRoot).length);
        Path staging = fs.listStatus(stagingRoot)[0].getPath();
        Path jobXml = path(staging + "/job.xml");

        assertTrue(fs.exists(jobXml));

        FileStatus fileStatus = fs.getFileStatus(jobXml);
        System.out.println("job.xml permission = " + fileStatus.getPermission());
        assertTrue(fileStatus.getPermission().getOtherAction().implies(FsAction.READ));
        assertTrue(fileStatus.getPermission().getGroupAction().implies(FsAction.READ));

        submittedJob.waitForCompletion();
    } finally {
        if (minimr != null) {
            minimr.stop();
        }
        if (fs != null) {
            fs.close();
        }
        if (minidfs != null) {
            minidfs.shutdown(true);
        }
    }
}