Example usage for org.apache.hadoop.fs FileSystem close

Introduction

On this page you can find usage examples for org.apache.hadoop.fs.FileSystem.close().

Prototype

@Override
public void close() throws IOException 

Document

Close this FileSystem instance.
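
Because close() also removes a cached instance from the FileSystem cache, a common pattern is try-with-resources on an instance you own. Below is a minimal sketch (not taken from the examples on this page); the URI hdfs://localhost:9000 is a placeholder, and newInstance() is used instead of get() so that closing cannot affect a shared, cached instance.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // newInstance() returns a private, uncached instance, so close() here
        // cannot invalidate a FileSystem shared with other callers.
        try (FileSystem fs = FileSystem.newInstance(URI.create("hdfs://localhost:9000"), conf)) {
            System.out.println(fs.exists(new Path("/tmp")));
        } // close() runs automatically, even on exceptions
    }
}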

Usage

From source file: org.apache.apex.malhar.lib.parser.XmlParser.java

License: Apache License

@Override
public void setup(com.datatorrent.api.Context.OperatorContext context) {
    try {
        if (schemaXSDFile != null) {
            Path filePath = new Path(schemaXSDFile);
            Configuration configuration = new Configuration();
            FileSystem fs = FileSystem.newInstance(filePath.toUri(), configuration);
            FSDataInputStream inputStream = fs.open(filePath);

            SchemaFactory factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
            schema = factory.newSchema(new StreamSource(inputStream));
            validator = schema.newValidator();
            fs.close();
        }
    } catch (SAXException e) {
        DTThrowable.wrapIfChecked(e);
    } catch (IOException e) {
        DTThrowable.wrapIfChecked(e);
    }
}
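
Note that if schema compilation throws, the example above leaves both the input stream and the FileSystem open, since fs.close() is only reached on the success path. A variant of the same setup using try-with-resources (a sketch, not the original operator code) guarantees both are closed:

if (schemaXSDFile != null) {
    Path filePath = new Path(schemaXSDFile);
    Configuration configuration = new Configuration();
    // resources are closed in reverse order: the stream first, then the
    // FileSystem, whether or not newSchema() throws
    try (FileSystem fs = FileSystem.newInstance(filePath.toUri(), configuration);
            FSDataInputStream inputStream = fs.open(filePath)) {
        SchemaFactory factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
        schema = factory.newSchema(new StreamSource(inputStream));
        validator = schema.newValidator();
    }
}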

From source file: org.apache.asterix.app.external.ExternalIndexingOperations.java

License: Apache License

public static ArrayList<ExternalFile> getSnapshotFromExternalFileSystem(Dataset dataset)
        throws AlgebricksException {
    ArrayList<ExternalFile> files = new ArrayList<ExternalFile>();
    ExternalDatasetDetails datasetDetails = (ExternalDatasetDetails) dataset.getDatasetDetails();
    try {
        // Create the file system object
        FileSystem fs = getFileSystemObject(datasetDetails.getProperties());
        // Get paths of dataset
        String path = datasetDetails.getProperties().get(ExternalDataConstants.KEY_PATH);
        String[] paths = path.split(",");

        // Add fileStatuses to files
        for (String aPath : paths) {
            FileStatus[] fileStatuses = fs.listStatus(new Path(aPath));
            for (int i = 0; i < fileStatuses.length; i++) {
                int nextFileNumber = files.size();
                if (fileStatuses[i].isDirectory()) {
                    listSubFiles(dataset, fs, fileStatuses[i], files);
                } else {
                    files.add(new ExternalFile(dataset.getDataverseName(), dataset.getDatasetName(),
                            nextFileNumber, fileStatuses[i].getPath().toUri().getPath(),
                            new Date(fileStatuses[i].getModificationTime()), fileStatuses[i].getLen(),
                            ExternalFilePendingOp.PENDING_NO_OP));
                }
            }
        }
        // Close file system
        fs.close();
        if (files.size() == 0) {
            throw new AlgebricksException("File Snapshot retrieved from external file system is empty");
        }
        return files;
    } catch (Exception e) {
        e.printStackTrace();
        throw new AlgebricksException("Unable to get list of HDFS files " + e);
    }
}
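
Here, too, fs.close() is skipped if listStatus() throws partway through. A minimal sketch of the same flow with the close moved to a finally block, so it runs on both the success and failure paths:

FileSystem fs = getFileSystemObject(datasetDetails.getProperties());
try {
    // ... list the dataset paths and populate files, as above ...
} finally {
    fs.close(); // guaranteed to run, even when listStatus() fails
}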

From source file: org.apache.asterix.external.util.HDFSUtils.java

License: Apache License

/**
 * Instead of creating the splits using the input format, we do it manually.
 * This function returns file splits (one per HDFS file block) irrespective of the number of partitions,
 * and the produced splits only cover the intersection between the current files in HDFS and the files
 * stored internally in AsterixDB:
 * 1. NoOp means an appended file
 * 2. AddOp means a new file
 * 3. UpdateOp means the delta of a file
 * @return one file split per covered HDFS block
 * @throws IOException
 */
public static InputSplit[] getSplits(JobConf conf, List<ExternalFile> files) throws IOException {
    // Create file system object
    FileSystem fs = FileSystem.get(conf);
    ArrayList<FileSplit> fileSplits = new ArrayList<FileSplit>();
    ArrayList<ExternalFile> orderedExternalFiles = new ArrayList<ExternalFile>();
    // Create files splits
    for (ExternalFile file : files) {
        Path filePath = new Path(file.getFileName());
        FileStatus fileStatus;
        try {
            fileStatus = fs.getFileStatus(filePath);
        } catch (FileNotFoundException e) {
            // file was deleted at some point, skip to next file
            continue;
        }
        if (file.getPendingOp() == ExternalFilePendingOp.PENDING_ADD_OP
                && fileStatus.getModificationTime() == file.getLastModefiedTime().getTime()) {
            // Get its information from HDFS name node
            BlockLocation[] fileBlocks = fs.getFileBlockLocations(fileStatus, 0, file.getSize());
            // Create a split per block
            for (BlockLocation block : fileBlocks) {
                if (block.getOffset() < file.getSize()) {
                    fileSplits.add(new FileSplit(filePath, block.getOffset(),
                            (block.getLength() + block.getOffset()) < file.getSize() ? block.getLength()
                                    : (file.getSize() - block.getOffset()),
                            block.getHosts()));
                    orderedExternalFiles.add(file);
                }
            }
        } else if (file.getPendingOp() == ExternalFilePendingOp.PENDING_NO_OP
                && fileStatus.getModificationTime() == file.getLastModefiedTime().getTime()) {
            long oldSize = 0L;
            long newSize = file.getSize();
            for (int i = 0; i < files.size(); i++) {
                // compare file names with equals(), not reference identity (==)
                if (files.get(i).getFileName().equals(file.getFileName())
                        && files.get(i).getSize() != file.getSize()) {
                    newSize = files.get(i).getSize();
                    oldSize = file.getSize();
                    break;
                }
            }

            // Get its information from HDFS name node
            BlockLocation[] fileBlocks = fs.getFileBlockLocations(fileStatus, 0, newSize);
            // Create a split per block
            for (BlockLocation block : fileBlocks) {
                if (block.getOffset() + block.getLength() > oldSize) {
                    if (block.getOffset() < newSize) {
                        // Block intersects the delta -> create a split
                        long startCut = (block.getOffset() > oldSize) ? 0L : oldSize - block.getOffset();
                        long endCut = (block.getOffset() + block.getLength() < newSize) ? 0L
                                : block.getOffset() + block.getLength() - newSize;
                        long splitLength = block.getLength() - startCut - endCut;
                        fileSplits.add(new FileSplit(filePath, block.getOffset() + startCut, splitLength,
                                block.getHosts()));
                        orderedExternalFiles.add(file);
                    }
                }
            }
        }
    }
    fs.close();
    files.clear();
    files.addAll(orderedExternalFiles);
    return fileSplits.toArray(new FileSplit[fileSplits.size()]);
}
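
To make the delta arithmetic in the PENDING_NO_OP branch concrete, here is a worked example with hypothetical numbers (not from the AsterixDB source): an appended file grew from oldSize = 100 to newSize = 250 bytes, and one block covers bytes 64..192.

long oldSize = 100, newSize = 250;
long offset = 64, length = 128;  // the block spans bytes [64, 192)
// block end (192) > oldSize (100) and offset (64) < newSize (250), so it intersects the delta
long startCut = (offset > oldSize) ? 0L : oldSize - offset;                  // 100 - 64 = 36
long endCut = (offset + length < newSize) ? 0L : offset + length - newSize;  // 192 < 250, so 0
long splitLength = length - startCut - endCut;                               // 128 - 36 - 0 = 92
// the resulting split is [offset + startCut, offset + startCut + splitLength) = [100, 192),
// exactly the portion of the block that falls inside the appended delta [100, 250)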

From source file: org.apache.asterix.metadata.utils.ExternalIndexingOperations.java

License: Apache License

public static List<ExternalFile> getSnapshotFromExternalFileSystem(Dataset dataset) throws AlgebricksException {
    ArrayList<ExternalFile> files = new ArrayList<>();
    ExternalDatasetDetails datasetDetails = (ExternalDatasetDetails) dataset.getDatasetDetails();
    try {
        // Create the file system object
        FileSystem fs = getFileSystemObject(datasetDetails.getProperties());
        // Get paths of dataset
        String path = datasetDetails.getProperties().get(ExternalDataConstants.KEY_PATH);
        String[] paths = path.split(",");

        // Add fileStatuses to files
        for (String aPath : paths) {
            FileStatus[] fileStatuses = fs.listStatus(new Path(aPath));
            for (int i = 0; i < fileStatuses.length; i++) {
                int nextFileNumber = files.size();
                handleFile(dataset, files, fs, fileStatuses[i], nextFileNumber);
            }
        }
        // Close file system
        fs.close();
        if (files.isEmpty()) {
            throw new AlgebricksException("File Snapshot retrieved from external file system is empty");
        }
        return files;
    } catch (Exception e) {
        LOGGER.log(Level.WARNING, "Exception while trying to get snapshot from external system", e);
        throw new AlgebricksException("Unable to get list of HDFS files " + e);
    }
}

From source file: org.apache.blur.HdfsMiniClusterUtil.java

License: Apache License

public static void shutdownDfs(MiniDFSCluster cluster) {
    if (cluster != null) {
        LOG.info("Shutting down Mini DFS ");
        try {
            cluster.shutdown();
        } catch (Exception e) {
            // Can get a java.lang.reflect.UndeclaredThrowableException thrown
            // here because of an InterruptedException. Don't let exceptions in
            // here be the cause of test failure.
        }
        try {
            FileSystem fs = cluster.getFileSystem();
            if (fs != null) {
                LOG.info("Shutting down FileSystem");
                fs.close();
            }
            FileSystem.closeAll();
        } catch (IOException e) {
            LOG.error("error closing file system", e);
        }

        // This has got to be one of the worst hacks I have ever had to do.
        // This is needed to shutdown 2 thread pools that are not shutdown by
        // themselves.
        ThreadGroup threadGroup = Thread.currentThread().getThreadGroup();
        Thread[] threads = new Thread[100];
        int enumerate = threadGroup.enumerate(threads);
        for (int i = 0; i < enumerate; i++) {
            Thread thread = threads[i];
            if (thread.getName().startsWith("pool")) {
                if (thread.isAlive()) {
                    thread.interrupt();
                    LOG.info("Stopping ThreadPoolExecutor [" + thread.getName() + "]");
                    Object target = getField(Thread.class, thread, "target");
                    if (target != null) {
                        ThreadPoolExecutor e = (ThreadPoolExecutor) getField(ThreadPoolExecutor.class, target,
                                "this$0");
                        if (e != null) {
                            e.shutdownNow();
                        }
                    }
                    try {
                        LOG.info("Waiting for thread pool to exit [" + thread.getName() + "]");
                        thread.join();
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        }
    }
}

From source file: org.apache.blur.MiniCluster.java

License: Apache License

public void shutdownDfs() {
    if (cluster != null) {
        LOG.info("Shutting down Mini DFS ");
        try {
            cluster.shutdown();
        } catch (Exception e) {
            // Can get a java.lang.reflect.UndeclaredThrowableException thrown
            // here because of an InterruptedException. Don't let exceptions in
            // here be the cause of test failure.
        }
        try {
            FileSystem fs = cluster.getFileSystem();
            if (fs != null) {
                LOG.info("Shutting down FileSystem");
                fs.close();
            }
            FileSystem.closeAll();
        } catch (IOException e) {
            LOG.error("error closing file system", e);
        }

        // This has got to be one of the worst hacks I have ever had to do.
        // This is needed to shutdown 2 thread pools that are not shutdown by
        // themselves.
        ThreadGroup threadGroup = group;
        Thread[] threads = new Thread[100];
        int enumerate = threadGroup.enumerate(threads);
        for (int i = 0; i < enumerate; i++) {
            Thread thread = threads[i];
            if (thread.getName().startsWith("pool")) {
                if (thread.isAlive()) {
                    thread.interrupt();
                    LOG.info("Stopping ThreadPoolExecutor [" + thread.getName() + "]");
                    Object target = getField(Thread.class, thread, "target");
                    if (target != null) {
                        ThreadPoolExecutor e = (ThreadPoolExecutor) getField(ThreadPoolExecutor.class, target,
                                "this$0");
                        if (e != null) {
                            e.shutdownNow();
                        }
                    }
                    try {
                        LOG.info("Waiting for thread pool to exit [" + thread.getName() + "]");
                        thread.join();
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        }
    }
}

From source file: org.apache.coheigea.bigdata.hdfs.HDFSAccessControlEnforcerTest.java

License: Apache License

@org.junit.Test
public void customPermissionsTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Now try to read the file as "bob" - this should be allowed
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("bob");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));

            fs.close();
            return null;
        }
    });

    // Now try to read the file as "eve" - this should not be allowed
    ugi = UserGroupInformation.createRemoteUser("eve");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Read the file
            try {
                fs.open(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }

            fs.close();
            return null;
        }
    });

    // Write to the file as the owner, this should be allowed
    out = fileSystem.append(file);
    out.write(("new data\n").getBytes("UTF-8"));
    out.flush();
    out.close();

    // Now try to write to the file as "bob" - this should not be allowed
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            try {
                fs.append(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }

            fs.close();
            return null;
        }
    });
}
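
A note on the fs.close() calls inside doAs above: FileSystem.get(conf) returns a cached instance whose cache key includes the calling UserGroupInformation, so "bob" and "eve" each obtain, and close, their own instance. When cache sharing is not wanted at all, an uncached instance can be requested instead (a sketch):

Configuration conf = new Configuration();
conf.set("fs.defaultFS", defaultFs);
// newInstance() bypasses the FileSystem cache entirely, so closing it
// can never invalidate an instance shared with other code
try (FileSystem fs = FileSystem.newInstance(conf)) {
    // ... read or write the file as above ...
}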

From source file: org.apache.coheigea.bigdata.hdfs.HDFSKerberosTest.java

License: Apache License

@org.junit.Test
public void readTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Change permissions to read-only
    fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));

    // Now try to read the file as "bob" - this should be allowed (by the policy - user)
    final Configuration conf = new Configuration();
    conf.set("fs.defaultFS", defaultFs);
    conf.set("hadoop.security.authentication", "kerberos");
    UserGroupInformation.setConfiguration(conf);

    String basedir = System.getProperty("basedir");
    if (basedir == null) {
        basedir = new File(".").getCanonicalPath();
    }

    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            FileSystem fs = FileSystem.get(conf);

            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));

            fs.close();
            return null;
        }
    });
}

From source file: org.apache.coheigea.bigdata.hdfs.HDFSTest.java

License: Apache License

@org.junit.Test
public void defaultPermissionsTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file
    final Path file = new Path("/tmp/tmpdir/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Check status
    // FileStatus status = fileSystem.getFileStatus(file);
    // System.out.println("OWNER: " + status.getOwner());
    // System.out.println("GROUP: " + status.getGroup());
    // System.out.println("PERM: " + status.getPermission().toString());
    // fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));
    // fileSystem.setOwner(file, "bob", null);

    // Now try to read the file as "bob" - this should be allowed
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("bob");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));

            fs.close();
            return null;
        }
    });

    // Write to the file as the owner, this should be allowed
    out = fileSystem.append(file);
    out.write(("new data\n").getBytes("UTF-8"));
    out.flush();
    out.close();

    // Now try to write to the file as "bob" - this should not be allowed
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            try {
                fs.append(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }

            fs.close();
            return null;
        }
    });
}

From source file: org.apache.coheigea.bigdata.hdfs.HDFSTest.java

License: Apache License

@org.junit.Test
public void testChangedPermissionsTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file
    final Path file = new Path("/tmp/tmpdir/data-file3");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Change permissions to read-only
    fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));

    // Now try to read the file as "bob" - this should fail
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("bob");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Read the file
            try {
                FSDataInputStream in = fs.open(file);
                ByteArrayOutputStream output = new ByteArrayOutputStream();
                IOUtils.copy(in, output);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }

            fs.close();
            return null;
        }
    });

}