Example usage for org.apache.hadoop.fs FileSystem close

Introduction

This page lists usage examples for org.apache.hadoop.fs.FileSystem#close().

Prototype

@Override
public void close() throws IOException 

Document

Close this FileSystem instance.
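
Note that FileSystem.get() returns a cached instance shared across the JVM, so calling close() on it also closes the handle for any other code using the same cached instance. Below is a minimal sketch of closing a FileSystem safely; the class name and the /tmp listing are illustrative only, not taken from the examples that follow. It uses FileSystem.newInstance(), which bypasses the cache, and try-with-resources, which works because FileSystem implements Closeable.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // newInstance() bypasses the JVM-wide cache used by FileSystem.get(),
        // so closing this handle cannot break other code sharing a cached instance.
        try (FileSystem fs = FileSystem.newInstance(conf)) {
            for (FileStatus status : fs.listStatus(new Path("/tmp"))) {
                System.out.println(status.getPath());
            }
        } // close() is invoked automatically here
    }
}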

Usage

From source file: tv.icntv.recommend.common.HadoopUtils.java

License: Apache License

public static long count(Path path, PathFilter filter) throws Exception {
    FileSystem fileSystem = null;

    try {
        fileSystem = FileSystem.get(configuration);
        FileStatus[] fs = fileSystem.listStatus(path, filter);
        long count = 0;
        for (FileStatus f : fs) {
            SequenceFile.Reader frequentPatternsReader = new SequenceFile.Reader(fileSystem, f.getPath(),
                    configuration);
            Text key = new Text();
            while (frequentPatternsReader.next(key)) {
                count++;
            }
            frequentPatternsReader.close();
        }
        return count;
    } finally {
        if (null != fileSystem) {
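            // FileSystem.get() returns a cached, JVM-wide instance; closing it
            // here also closes it for any other code sharing that cached handle.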
            fileSystem.close();
        }
    }
}

From source file: tv.icntv.recommend.common.HadoopUtils.java

License: Apache License

public static void main(String[] args) throws IOException {
    FileSystem fileSystem = null;
    try {
        fileSystem = FileSystem.get(configuration);
        FileStatus[] list = fileSystem.listStatus(new Path("/user/hadoop/chinacache/"));
        for (FileStatus status : list) {
            System.out.println(status.getPath() + "\t");
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        if (null != fileSystem) {
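            // Closing in the finally block ensures the handle is released
            // even if listStatus throws.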
            fileSystem.close();
        }
    }
}

From source file: voldemort.store.readonly.fetcher.HdfsFetcher.java

License: Apache License

public File fetch(String sourceFileUrl, String destinationFile, String hadoopConfigPath) throws IOException {
    if (this.globalThrottleLimit != null) {
        if (this.globalThrottleLimit.getSpeculativeRate() < this.minBytesPerSecond)
            throw new VoldemortException("Too many push jobs.");
        this.globalThrottleLimit.incrementNumJobs();
    }

    ObjectName jmxName = null;
    try {

        final Configuration config = new Configuration();
        FileSystem fs = null;
        config.setInt("io.socket.receive.buffer", bufferSize);
        config.set("hadoop.rpc.socket.factory.class.ClientProtocol", ConfigurableSocketFactory.class.getName());
        config.set("hadoop.security.group.mapping", "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");

        final Path path = new Path(sourceFileUrl);

        boolean isHftpBasedFetch = sourceFileUrl.length() > 4 && sourceFileUrl.substring(0, 4).equals("hftp");
        logger.info("URL : " + sourceFileUrl + " and hftp protocol enabled = " + isHftpBasedFetch);
        logger.info("Hadoop path = " + hadoopConfigPath + " , keytab path = " + HdfsFetcher.keytabPath
                + " , kerberos principal = " + HdfsFetcher.kerberosPrincipal);

        if (hadoopConfigPath.length() > 0 && !isHftpBasedFetch) {

            config.addResource(new Path(hadoopConfigPath + "/core-site.xml"));
            config.addResource(new Path(hadoopConfigPath + "/hdfs-site.xml"));

            String security = config.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION);

            if (security == null || !security.equals("kerberos")) {
                logger.error("Security isn't turned on in the conf: "
                        + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION + " = "
                        + config.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION));
                logger.error("Please make sure that the Hadoop config directory path is valid.");
                return null;
            } else {
                logger.info("Security is turned on in the conf. Trying to authenticate ...");

            }
        }

        try {

            if (HdfsFetcher.keytabPath.length() > 0 && !isHftpBasedFetch) {

                if (!new File(HdfsFetcher.keytabPath).exists()) {
                    logger.error("Invalid keytab file path. Please provide a valid keytab path");
                    return null;
                }

                // First login using the specified principal and keytab file
                UserGroupInformation.setConfiguration(config);
                UserGroupInformation.loginUserFromKeytab(HdfsFetcher.kerberosPrincipal, HdfsFetcher.keytabPath);

                /*
                 * If login is successful, get the filesystem object. NOTE:
                 * Ideally we do not need a doAs block for this. Consider
                 * removing it in the future once the Hadoop jars have the
                 * corresponding patch (tracked in the Hadoop Apache
                 * project: HDFS-3367)
                 */
                try {
                    logger.info("I've logged in and am now Doasing as "
                            + UserGroupInformation.getCurrentUser().getUserName());
                    fs = UserGroupInformation.getCurrentUser()
                            .doAs(new PrivilegedExceptionAction<FileSystem>() {

                                public FileSystem run() throws Exception {
                                    return path.getFileSystem(config);
                                }
                            });
                } catch (InterruptedException e) {
                    logger.error(e.getMessage());
                } catch (Exception e) {
                    logger.error("Got an exception while getting the filesystem object: ");
                    logger.error("Exception class : " + e.getClass());
                    e.printStackTrace();
                    for (StackTraceElement et : e.getStackTrace()) {
                        logger.error(et.toString());
                    }
                }
            } else {
                fs = path.getFileSystem(config);
            }

        } catch (IOException e) {
            e.printStackTrace();
            logger.error("Error in authenticating or getting the Filesystem object !!!");
            return null;
        }

        CopyStats stats = new CopyStats(sourceFileUrl, sizeOfPath(fs, path));
        jmxName = JmxUtils.registerMbean("hdfs-copy-" + copyCount.getAndIncrement(), stats);
        File destination = new File(destinationFile);

        if (destination.exists()) {
            throw new VoldemortException(
                    "Version directory " + destination.getAbsolutePath() + " already exists");
        }

        logger.info("Starting fetch for : " + sourceFileUrl);
        boolean result = fetch(fs, path, destination, stats);
        logger.info("Completed fetch : " + sourceFileUrl);

        // Close the filesystem on the success path; note that the error
        // paths above return without closing it.
        fs.close();

        if (result) {
            return destination;
        } else {
            return null;
        }
    } catch (IOException e) {
        logger.error("Error while getting Hadoop filesystem : " + e);
        return null;
    } finally {
        if (this.globalThrottleLimit != null) {
            this.globalThrottleLimit.decrementNumJobs();
        }
        if (jmxName != null)
            JmxUtils.unregisterMbean(jmxName);
    }
}

From source file: voldemort.store.readonly.fetcher.HdfsFetcher.java

License: Apache License

public static void main(String[] args) throws Exception {
    if (args.length < 1)
        Utils.croak("USAGE: java " + HdfsFetcher.class.getName()
                + " url [keytab location] [kerberos username] [hadoop-config-path]");
    String url = args[0];

    String keytabLocation = "";
    String kerberosUser = "";
    String hadoopPath = "";
    if (args.length == 4) {
        keytabLocation = args[1];
        kerberosUser = args[2];
        hadoopPath = args[3];
    }

    long maxBytesPerSec = 1024 * 1024 * 1024;
    Path p = new Path(url);

    final Configuration config = new Configuration();
    config.setInt("io.file.buffer.size", VoldemortConfig.DEFAULT_BUFFER_SIZE);
    config.set("hadoop.rpc.socket.factory.class.ClientProtocol", ConfigurableSocketFactory.class.getName());
    config.setInt("io.socket.receive.buffer", 1 * 1024 * 1024 - 10000);

    FileSystem fs = null;
    HdfsFetcher.keytabPath = keytabLocation;
    HdfsFetcher.kerberosPrincipal = kerberosUser;

    boolean isHftpBasedFetch = url.length() > 4 && url.substring(0, 4).equals("hftp");
    logger.info("URL : " + url + " and hftp protocol enabled = " + isHftpBasedFetch);

    if (hadoopPath.length() > 0 && !isHftpBasedFetch) {
        config.set("hadoop.security.group.mapping", "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");

        config.addResource(new Path(hadoopPath + "/core-site.xml"));
        config.addResource(new Path(hadoopPath + "/hdfs-site.xml"));

        String security = config.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION);

        if (security == null || !security.equals("kerberos")) {
            logger.info("Security isn't turned on in the conf: "
                    + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION + " = "
                    + config.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION));
            logger.info("Fix that.  Exiting.");
            return;
        } else {
            logger.info("Security is turned on in the conf. Trying to authenticate ...");
        }
    }

    try {

        // Get the filesystem object
        if (keytabLocation.length() > 0 && !isHftpBasedFetch) {
            UserGroupInformation.setConfiguration(config);
            UserGroupInformation.loginUserFromKeytab(kerberosUser, keytabLocation);

            final Path path = p;
            try {
                logger.debug("I've logged in and am now Doasing as "
                        + UserGroupInformation.getCurrentUser().getUserName());
                fs = UserGroupInformation.getCurrentUser().doAs(new PrivilegedExceptionAction<FileSystem>() {

                    public FileSystem run() throws Exception {
                        return path.getFileSystem(config);
                    }
                });
            } catch (InterruptedException e) {
                logger.error(e.getMessage());
            } catch (Exception e) {
                logger.error("Got an exception while getting the filesystem object: ");
                logger.error("Exception class : " + e.getClass());
                e.printStackTrace();
                for (StackTraceElement et : e.getStackTrace()) {
                    logger.error(et.toString());
                }
            }
        } else {
            fs = p.getFileSystem(config);
        }

    } catch (IOException e) {
        e.printStackTrace();
        System.err.println("Error in getting Hadoop filesystem object !!! Exiting !!!");
        System.exit(-1);
    }

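    // Note: fs may still be null here if the doAs block above failed
    // (those catches only log the exception), in which case this line
    // throws a NullPointerException.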
    FileStatus status = fs.listStatus(p)[0];
    long size = status.getLen();
    HdfsFetcher fetcher = new HdfsFetcher(null, maxBytesPerSec, VoldemortConfig.REPORTING_INTERVAL_BYTES,
            VoldemortConfig.DEFAULT_BUFFER_SIZE, 0, keytabLocation, kerberosUser);
    long start = System.currentTimeMillis();

    File location = fetcher.fetch(url, System.getProperty("java.io.tmpdir") + File.separator + start,
            hadoopPath);

    double rate = size * Time.MS_PER_SECOND / (double) (System.currentTimeMillis() - start);
    NumberFormat nf = NumberFormat.getInstance();
    nf.setMaximumFractionDigits(2);
    System.out.println(
            "Fetch to " + location + " completed: " + nf.format(rate / (1024.0 * 1024.0)) + " MB/sec.");
    // Path.getFileSystem() also returns the cached shared instance, so this
    // close() affects any other caller holding the same handle.
    fs.close();
}