Example usage for org.apache.hadoop.fs FileSystem getFileStatus

List of usage examples for org.apache.hadoop.fs FileSystem getFileStatus

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileSystem getFileStatus.

Prototype

public abstract FileStatus getFileStatus(Path f) throws IOException;

Document

Return a file status object that represents the path.
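Before the project examples below, here is a minimal, self-contained sketch of a typical call. The configuration and path are hypothetical; the key behavior is that getFileStatus throws FileNotFoundException (an IOException subclass) when the path does not exist, which is why several of the examples below use it as an existence check.

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetFileStatusExample {
    public static void main(String[] args) throws IOException {
        // Resolves against fs.defaultFS from the Configuration; the path is made up.
        FileSystem fs = FileSystem.get(new Configuration());
        Path path = new Path("/tmp/example.txt");
        try {
            FileStatus status = fs.getFileStatus(path);
            System.out.println("length: " + status.getLen());
            System.out.println("directory: " + status.isDirectory());
            System.out.println("modified: " + status.getModificationTime());
            System.out.println("permission: " + status.getPermission());
        } catch (FileNotFoundException e) {
            // A missing path surfaces as FileNotFoundException rather than a null status.
            System.out.println(path + " does not exist");
        }
    }
}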

Usage

From source file:fuse4j.hadoopfs.HdfsClientImpl.java

License:Apache License

@Override
public Object open(int uid, String path, int flags) {
    FileSystem dfs = null;
    try {
        dfs = getDfs(uid);
        //based on fuse_impls_open in C fuse_dfs
        // 0x8000 is always passed in and hadoop doesn't like it, so killing it here
        // bugbug figure out what this flag is and report problem to Hadoop JIRA
        int hdfs_flags = (flags & 0x7FFF);
        System.out.println("HDFS CLIENT OPEN FILE:" + path + " mode:" + Integer.toOctalString(hdfs_flags));

        //TODO: connect to DFS as calling user to enforce perms
        //see doConnectAsUser(dfs->nn_hostname, dfs->nn_port);

        if ((hdfs_flags & NativeIO.O_RDWR) == NativeIO.O_RDWR) {
            hdfs_flags ^= NativeIO.O_RDWR;
            try {
                // The returned status is unused; the call is purely an existence probe
                // and throws FileNotFoundException when the path is missing.
                dfs.getFileStatus(new Path(path));
                if (this.newFiles.containsKey(path)) {
                    // just previously created by "mknod" so open it in write-mode
                    hdfs_flags |= NativeIO.O_WRONLY;
                } else {
                    // File exists; open this as read only.
                    hdfs_flags |= NativeIO.O_RDONLY;
                }
            } catch (IOException e) {
                // File does not exist (maybe?); interpret it as O_WRONLY.
                // If the actual error was something else, we'll get it again when
                // we try to open the file.
                hdfs_flags |= NativeIO.O_WRONLY;
            }
        }

        ///
        Path hPath = new Path(path);
        if ((hdfs_flags & NativeIO.O_WRONLY) == 0) {
            //READ
            System.out.println("HDFS <open> file:" + path);
            return new HdfsFileIoContext(path, dfs.open(hPath));
        } else if ((hdfs_flags & NativeIO.O_APPEND) != 0) {
            //WRITE/APPEND
            System.out.println("HDFS <append> file:" + path);
            return new HdfsFileIoContext(path, dfs.append(hPath));
        } else {
            //WRITE/CREATE
            System.out.println("HDFS <create> file:" + path);
            HdfsFileIoContext fh = this.newFiles.remove(path);
            if (fh == null) {
                fh = new HdfsFileIoContext(path, dfs.create(hPath, true));
                System.out.println("File " + path + " created");
            } else {
                System.out.println("File " + path + " already created by a previous <mknod> call");
            }
            System.out.println("files queued:" + this.newFiles.size());
            return fh;
        }
    } catch (Exception e) {
        // fall through to failure
    }
    return null;
}
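Note that catching IOException from getFileStatus as an existence probe mirrors FileSystem.exists() itself: its default implementation calls getFileStatus and returns false on FileNotFoundException.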

From source file:gaffer.accumulo.utils.IngestUtils.java

License:Apache License

/**
 * Modify the permissions on a directory and its contents to allow Accumulo
 * access.
 * 
 * @param fs  The FileSystem where the directory is
 * @param dirPath  The path to the directory
 * @throws IOException if the directory cannot be listed or permissions cannot be set
 */
public static void setDirectoryPermsForAccumulo(FileSystem fs, Path dirPath) throws IOException {
    if (!fs.getFileStatus(dirPath).isDir()) {
        throw new RuntimeException(dirPath + " is not a directory");
    }
    fs.setPermission(dirPath, ACC_DIR_PERMS);
    for (FileStatus file : fs.listStatus(dirPath)) {
        fs.setPermission(file.getPath(), ACC_FILE_PERMS);
    }
}
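Note: FileStatus.isDir() used above is deprecated in Hadoop 2.x and later; the near-identical variant below uses its replacement, isDirectory().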

From source file:gaffer.accumulostore.utils.IngestUtils.java

License:Apache License

/**
 * Modify the permissions on a directory and its contents to allow Accumulo
 * access.
 * <p>
 *
 * @param fs      - The FileSystem containing the directory
 * @param dirPath - The Path to the directory
 * @throws IOException for any IO issues interacting with the file system.
 */
public static void setDirectoryPermsForAccumulo(final FileSystem fs, final Path dirPath) throws IOException {
    if (!fs.getFileStatus(dirPath).isDirectory()) {
        throw new RuntimeException(dirPath + " is not a directory");
    }
    LOGGER.info("Setting permission {} on directory {} and all files within", ACC_DIR_PERMS, dirPath);
    fs.setPermission(dirPath, ACC_DIR_PERMS);
    for (final FileStatus file : fs.listStatus(dirPath)) {
        fs.setPermission(file.getPath(), ACC_FILE_PERMS);
    }
}

From source file:gobblin.compaction.event.CompactionSlaEventHelper.java

License:Apache License

private static long getPreviousPublishTime(Dataset dataset, FileSystem fs) {
    Path compactionCompletePath = new Path(dataset.outputPath(), MRCompactor.COMPACTION_COMPLETE_FILE_NAME);
    try {
        return fs.getFileStatus(compactionCompletePath).getModificationTime();
    } catch (IOException e) {
        LOG.debug("Failed to get previous publish time.", e);
    }
    return -1L;
}

From source file:gobblin.data.management.copy.CopyContext.java

License:Apache License

/**
 * Get cached {@link FileStatus}.
 */
public Optional<FileStatus> getFileStatus(final FileSystem fs, final Path path) throws IOException {
    try {
        return this.fileStatusCache.get(fs.makeQualified(path), new Callable<Optional<FileStatus>>() {
            @Override
            public Optional<FileStatus> call() throws Exception {
                try {
                    return Optional.of(fs.getFileStatus(path));
                } catch (FileNotFoundException fnfe) {
                    return Optional.absent();
                }
            }
        });
    } catch (ExecutionException ee) {
        throw new IOException(ee.getCause());
    }
}
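The fileStatusCache field is not shown in this snippet. A minimal sketch of how such a field could be declared with Guava's CacheBuilder, assuming an invented size bound (Gobblin's actual CopyContext may configure it differently):

import com.google.common.base.Optional;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

// Hypothetical declaration; the qualified Path is the cache key, matching the get() call above.
private final Cache<Path, Optional<FileStatus>> fileStatusCache =
        CacheBuilder.newBuilder().maximumSize(10000).build();

Cache.get(key, Callable) computes and stores the value on a miss, so repeated lookups for the same qualified path hit the namenode only once.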

From source file:gobblin.data.management.copy.writer.TarArchiveInputStreamDataWriterTest.java

License:Apache License

/**
 * Find the test compressed file <code>filePath</code> on the classpath and read it as a {@link FileAwareInputStream}.
 */
private FileAwareInputStream getCompressedInputStream(final String filePath, final String newFileName)
        throws Exception {
    UnGzipConverter converter = new UnGzipConverter();

    FileSystem fs = FileSystem.getLocal(new Configuration());

    String fullPath = getClass().getClassLoader().getResource(filePath).getFile();
    FileStatus status = fs.getFileStatus(testTempPath);

    OwnerAndPermission ownerAndPermission = new OwnerAndPermission(status.getOwner(), status.getGroup(),
            new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    CopyableFile cf = CopyableFileUtils.getTestCopyableFile(filePath,
            new Path(testTempPath, newFileName).toString(), newFileName, ownerAndPermission);

    FileAwareInputStream fileAwareInputStream = new FileAwareInputStream(cf, fs.open(new Path(fullPath)));

    Iterable<FileAwareInputStream> iterable = converter.convertRecord("outputSchema", fileAwareInputStream,
            new WorkUnitState());

    return Iterables.getFirst(iterable, null);
}

From source file:gobblin.filesystem.InstrumentedHDFSFileSystemTest.java

License:Open Source License

/**
 * This test is disabled because it requires a local hdfs cluster at localhost:8020, which requires installation and setup.
 * Changes to {@link InstrumentedHDFSFileSystem} should be followed by a manual run of this test.
 *
 * TODO: figure out how to fully automate this test.
 * @throws Exception
 */
@Test(enabled = false)
public void test() throws Exception {

    FileSystem fs = FileSystem.get(new URI("instrumented-hdfs://localhost:8020"), new Configuration());

    String name = UUID.randomUUID().toString();
    fs.mkdirs(new Path("/tmp"));

    // Test absolute paths
    Path absolutePath = new Path("/tmp", name);
    Assert.assertFalse(fs.exists(absolutePath));
    fs.createNewFile(absolutePath);
    Assert.assertTrue(fs.exists(absolutePath));
    Assert.assertEquals(fs.getFileStatus(absolutePath).getLen(), 0);
    fs.delete(absolutePath, false);
    Assert.assertFalse(fs.exists(absolutePath));

    // Test fully qualified paths
    Path fqPath = new Path("instrumented-hdfs://localhost:8020/tmp", name);
    Assert.assertFalse(fs.exists(fqPath));
    fs.createNewFile(fqPath);
    Assert.assertTrue(fs.exists(fqPath));
    Assert.assertEquals(fs.getFileStatus(fqPath).getLen(), 0);
    fs.delete(fqPath, false);
    Assert.assertFalse(fs.exists(fqPath));
}

From source file:gobblin.filesystem.MetricsFileSystemInstrumentationTest.java

License:Apache License

/**
 * This test is disabled because it requires a local hdfs cluster at localhost:9000, which requires installation and setup.
 * Changes to {@link MetricsFileSystemInstrumentation} should be followed by a manual run of this test.
 *
 * TODO: figure out how to fully automate this test.
 * @throws Exception
 */
@Test(enabled = false)
public void test() throws Exception {

    String uri = "instrumented-hdfs://localhost:9000";

    FileSystem fs = FileSystem.get(new URI(uri), new Configuration());

    String name = UUID.randomUUID().toString();
    fs.mkdirs(new Path("/tmp"));

    // Test absolute paths
    Path absolutePath = new Path("/tmp", name);
    Assert.assertFalse(fs.exists(absolutePath));
    fs.createNewFile(absolutePath);
    Assert.assertTrue(fs.exists(absolutePath));
    Assert.assertEquals(fs.getFileStatus(absolutePath).getLen(), 0);
    fs.delete(absolutePath, false);
    Assert.assertFalse(fs.exists(absolutePath));

    // Test fully qualified paths
    Path fqPath = new Path(uri + "/tmp", name);
    Assert.assertFalse(fs.exists(fqPath));
    fs.createNewFile(fqPath);
    Assert.assertTrue(fs.exists(fqPath));
    Assert.assertEquals(fs.getFileStatus(fqPath).getLen(), 0);
    fs.delete(fqPath, false);
    Assert.assertFalse(fs.exists(fqPath));
}

From source file:gobblin.password.PasswordManager.java

License:Apache License

public static Optional<String> getMasterPassword(FileSystem fs, Path masterPasswordFile) {
    try (Closer closer = Closer.create()) {
        if (!fs.exists(masterPasswordFile) || fs.getFileStatus(masterPasswordFile).isDirectory()) {
            LOG.warn(masterPasswordFile
                    + " does not exist or is not a file. Cannot decrypt any encrypted password.");
            return Optional.absent();
        }
        InputStream in = closer.register(fs.open(masterPasswordFile));
        return Optional.of(new LineReader(new InputStreamReader(in, Charsets.UTF_8)).readLine());
    } catch (IOException e) {
        throw new RuntimeException("Failed to obtain master password from " + masterPasswordFile, e);
    }
}
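As a design note, the exists() check followed by getFileStatus() above costs two namenode round trips when the file exists; a single getFileStatus() call wrapped in a FileNotFoundException handler, as in the CopyContext example earlier, would answer both questions in one.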

From source file:gobblin.source.extractor.extract.google.GoogleCommon.java

License:Apache License

/**
 * Before returning the private key path, verifies that the key file is readable only by its owner.
 * This is a way to keep the private key private.
 * @param fs the FileSystem holding the key file
 * @param privateKeyPath path to the private key file
 * @return the validated key {@link Path}
 * @throws IOException
 */
private static Path getPrivateKey(FileSystem fs, String privateKeyPath) throws IOException {
    Path keyPath = new Path(privateKeyPath);
    FileStatus fileStatus = fs.getFileStatus(keyPath);
    Preconditions.checkArgument(USER_READ_PERMISSION_ONLY.equals(fileStatus.getPermission()),
            "Private key file should only have read only permission only on user. " + keyPath);
    return keyPath;
}