Example usage for org.apache.hadoop.fs FileSystem getFileStatus

Introduction

This page collects example usages of org.apache.hadoop.fs.FileSystem.getFileStatus.

Prototype

public abstract FileStatus getFileStatus(Path f) throws IOException;

Document

Return a file status object that represents the path.

Usage
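
Before the project-specific examples, here is a minimal, self-contained sketch of the call. The configuration, the path, and the printed fields are illustrative assumptions, not code taken from any of the projects below:

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetFileStatusExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/example.txt"); // illustrative path, not a real fixture
        try {
            FileStatus status = fs.getFileStatus(path);
            System.out.println("len=" + status.getLen()
                    + " replication=" + status.getReplication()
                    + " modificationTime=" + status.getModificationTime());
        } catch (FileNotFoundException e) {
            // getFileStatus throws FileNotFoundException when the path does not exist
            System.err.println("No such path: " + path);
        } finally {
            fs.close();
        }
    }
}

Note that getFileStatus signals a missing path by throwing FileNotFoundException (a subclass of IOException) rather than returning null, which is why the openForWrite example below checks fs.exists(path) first.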

From source file:com.cloudera.hadoop.hdfs.nfs.nfs4.state.HDFSState.java

License:Apache License

/**
 * Open a file for read.
 *
 * @param stateID
 * @param fileHandle
 * @return HDFSInputStream resource allocated
 * @throws NFS4Exception
 * @throws IOException
 */
public synchronized HDFSInputStream openForRead(FileSystem fs, StateID stateID, FileHandle fileHandle)
        throws NFS4Exception, IOException {
    HDFSFile hdfsFile = mOpenFilesMap.get(fileHandle);
    if ((hdfsFile != null) && hdfsFile.isOpenForWrite()) {
        throw new NFS4Exception(NFS4ERR_FILE_OPEN); // TODO lock unavailable
        // should be _LOCK?
    }
    INode inode = mFileHandleINodeMap.getINodeByFileHandle(fileHandle);
    if (inode == null) {
        throw new NFS4Exception(NFS4ERR_STALE);
    }
    Path path = new Path(inode.getPath());
    FileStatus status = fs.getFileStatus(path);
    if (status.isDirectory()) {
        throw new NFS4Exception(NFS4ERR_ISDIR);
    }
    HDFSInputStream in = new HDFSInputStream(fs.open(path));
    mMetrics.incrementMetric(FILES_OPENED_READ, 1);
    if (hdfsFile == null) {
        hdfsFile = new HDFSFile(fileHandle, inode.getPath(), inode.getNumber());
        mOpenFilesMap.put(fileHandle, hdfsFile);
    }
    hdfsFile.putInputStream(stateID, in);
    return in;
}

From source file:com.cloudera.hadoop.hdfs.nfs.nfs4.state.HDFSState.java

License:Apache License

/**
 * Open a file handle for write.
 * @param stateID
 * @param fileHandle
 * @param overwrite
 * @throws NFS4Exception
 * @throws IOException
 */
public synchronized HDFSOutputStream openForWrite(FileSystem fs, StateID stateID, FileHandle fileHandle,
        boolean overwrite) throws NFS4Exception, IOException {
    HDFSFile hdfsFile = mOpenFilesMap.get(fileHandle);
    if (hdfsFile != null) {
        OpenResource<HDFSOutputStream> file = hdfsFile.getHDFSOutputStreamForWrite();
        if (file != null) {
            if (file.isOwnedBy(stateID)) {
                return file.get();
            }
            throw new NFS4Exception(NFS4ERR_FILE_OPEN);
        }
    }
    INode inode = mFileHandleINodeMap.getINodeByFileHandle(fileHandle);
    if (inode == null) {
        throw new NFS4Exception(NFS4ERR_STALE);
    }
    Path path = new Path(inode.getPath());
    boolean exists = fs.exists(path);
    // If overwrite == false and the file already exists, fs.create throws a
    // generic IOException, which is not useful here: always mapping an IOE to
    // EXIST does not make sense. As such, check existence up front, even though
    // there is a known race between the exists() check and create().
    if (!overwrite && exists) {
        // Append to an existing file.
        // We used to return NFS4ERR_EXIST here, but the Linux client behaved
        // rather oddly: it would open the file with overwrite=true and then
        // send the data that was to be appended at offset 0.
        throw new NFS4Exception(NFS4ERR_PERM, "File Exists and overwrite = false");
    }
    if (path.getParent() != null) {
        // TODO: bad permissions will currently fail with a raw IOException. Perhaps
        // we should check that the file can be created before trying to, so that we
        // can return the correct error (permission denied).
        // check(user, groups, status, access);
    }
    if (exists && fs.getFileStatus(path).isDirectory()) {
        throw new NFS4Exception(NFS4ERR_ISDIR);
    }
    HDFSOutputStream out = new HDFSOutputStream(fs.create(path, overwrite), path.toString(), fileHandle);
    mMetrics.incrementMetric(FILES_OPENED_WRITE, 1);
    if (hdfsFile == null) {
        hdfsFile = new HDFSFile(fileHandle, inode.getPath(), inode.getNumber());
        mOpenFilesMap.put(fileHandle, hdfsFile);
    }
    hdfsFile.setHDFSOutputStream(stateID, out);
    return out;
}

From source file:com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License:Open Source License

private void testCreate(Path path, boolean override) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    FileSystem fs = FileSystem.get(getJettyURL().toURI(), conf);
    FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    OutputStream os = fs.create(new Path(path.toUri().getPath()), permission, override, 1024, (short) 2,
            100 * 1024 * 1024, null);
    os.write(1);
    os.close();
    fs.close();

    fs = FileSystem.get(getHadoopConf());
    FileStatus status = fs.getFileStatus(path);
    Assert.assertEquals(status.getReplication(), 2);
    Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
    Assert.assertEquals(status.getPermission(), permission);
    InputStream is = fs.open(path);
    Assert.assertEquals(is.read(), 1);
    is.close();
    fs.close();
}

From source file:com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License:Open Source License

private void testListStatus() throws Exception {
    FileSystem fs = FileSystem.get(getHadoopConf());
    Path path = new Path(getHadoopTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();

    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    FileStatus status2 = fs.getFileStatus(new Path(path.toUri().getPath()));
    fs.close();

    Assert.assertEquals(status2.getPermission(), status1.getPermission());
    Assert.assertEquals(status2.getPath().toUri().getPath(), status1.getPath().toUri().getPath());
    Assert.assertEquals(status2.getReplication(), status1.getReplication());
    Assert.assertEquals(status2.getBlockSize(), status1.getBlockSize());
    Assert.assertEquals(status2.getAccessTime(), status1.getAccessTime());
    Assert.assertEquals(status2.getModificationTime(), status1.getModificationTime());
    Assert.assertEquals(status2.getOwner(), status1.getOwner());
    Assert.assertEquals(status2.getGroup(), status1.getGroup());
    Assert.assertEquals(status2.getLen(), status1.getLen());

    FileStatus[] stati = fs.listStatus(path.getParent());
    Assert.assertEquals(stati.length, 1);
    Assert.assertEquals(stati[0].getPath().getName(), path.getName());
}

From source file:com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License:Open Source License

private void testSetTimes() throws Exception {
    FileSystem fs = FileSystem.get(getHadoopConf());
    Path path = new Path(getHadoopTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    long at = status1.getAccessTime();
    long mt = status1.getModificationTime();

    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    fs.setTimes(path, mt + 10, at + 20);
    fs.close();

    fs = FileSystem.get(getHadoopConf());
    status1 = fs.getFileStatus(path);
    fs.close();
    long atNew = status1.getAccessTime();
    long mtNew = status1.getModificationTime();
    Assert.assertEquals(mtNew, mt + 10);
    Assert.assertEquals(atNew, at + 20);
}

From source file:com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License:Open Source License

private void testSetPermission() throws Exception {
    FileSystem fs = FileSystem.get(getHadoopConf());
    Path path = new Path(getHadoopTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    fs.close();

    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    fs.setPermission(path, permission1);
    fs.close();

    fs = FileSystem.get(getHadoopConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    FsPermission permission2 = status1.getPermission();
    Assert.assertEquals(permission2, permission1);
}

From source file:com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License:Open Source License

private void testSetOwner() throws Exception {
    FileSystem fs = FileSystem.get(getHadoopConf());
    Path path = new Path(getHadoopTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    fs.close();

    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    String user = getHadoopUsers()[1];
    String group = getHadoopUserGroups(user)[0];
    fs.setOwner(path, user, group);
    fs.close();

    fs = FileSystem.get(getHadoopConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    Assert.assertEquals(status1.getOwner(), user);
    Assert.assertEquals(status1.getGroup(), group);
}

From source file:com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License:Open Source License

private void testSetReplication() throws Exception {
    FileSystem fs = FileSystem.get(getHadoopConf());
    Path path = new Path(getHadoopTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    fs.setReplication(path, (short) 2);
    fs.close();

    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    fs.setReplication(path, (short) 1);
    fs.close();

    fs = FileSystem.get(getHadoopConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    Assert.assertEquals(status1.getReplication(), (short) 1);
}

From source file:com.cloudera.hoop.fs.FSFileStatus.java

License:Open Source License

/**
 * Executes the filesystem operation.
 *
 * @param fs filesystem instance to use.
 * @return a Map object (JSON friendly) with the file status.
 * @throws IOException thrown if an IO error occurred.
 */
@Override
public Map execute(FileSystem fs) throws IOException {
    FileStatus status = fs.getFileStatus(path);
    return FSUtils.fileStatusToJSON(status, HoopServer.get().getBaseUrl());
}

From source file:com.cloudera.impala.catalog.HdfsTable.java

License:Apache License

/**
 * Loads the file block metadata for the given collection of FileDescriptors.  The
 * FileDescriptors are passed as a tree, where the first level is indexed by
 * filesystem, the second level is indexed by partition location, and the leaves are
 * the list of files that exist under each directory.
 */
private void loadBlockMd(Map<FsKey, Map<String, List<FileDescriptor>>> perFsFileDescs) throws RuntimeException {
    Preconditions.checkNotNull(perFsFileDescs);
    LOG.debug("load block md for " + name_);

    for (FsKey fsEntry : perFsFileDescs.keySet()) {
        FileSystem fs = fsEntry.filesystem;
        // Store all BlockLocations so they can be reused when loading the disk IDs.
        List<BlockLocation> blockLocations = Lists.newArrayList();
        int numCachedBlocks = 0;
        Map<String, List<FileDescriptor>> partitionToFds = perFsFileDescs.get(fsEntry);
        Preconditions.checkNotNull(partitionToFds);
        // loop over all files and record their block metadata, minus volume ids
        for (String partitionDir : partitionToFds.keySet()) {
            Path partDirPath = new Path(partitionDir);
            for (FileDescriptor fileDescriptor : partitionToFds.get(partitionDir)) {
                Path p = new Path(partDirPath, fileDescriptor.getFileName());
                try {
                    FileStatus fileStatus = fs.getFileStatus(p);
                    // fileDescriptors should not contain directories.
                    Preconditions.checkArgument(!fileStatus.isDirectory());
                    BlockLocation[] locations = fs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
                    Preconditions.checkNotNull(locations);
                    blockLocations.addAll(Arrays.asList(locations));

                    // Loop over all blocks in the file.
                    for (BlockLocation block : locations) {
                        String[] blockHostPorts;
                        try {
                            blockHostPorts = block.getNames();
                        } catch (IOException e) {
                            // this shouldn't happen, getNames() doesn't throw anything
                            String errorMsg = "BlockLocation.getNames() failed:\n" + e.getMessage();
                            LOG.error(errorMsg);
                            throw new IllegalStateException(errorMsg);
                        }
                        // Now enumerate all replicas of the block, adding any unknown hosts to
                        // hostIndex_ and the index for that host to replicaHostIdxs.
                        List<Integer> replicaHostIdxs = new ArrayList<Integer>(blockHostPorts.length);
                        for (int i = 0; i < blockHostPorts.length; ++i) {
                            String[] ip_port = blockHostPorts[i].split(":");
                            Preconditions.checkState(ip_port.length == 2);
                            TNetworkAddress network_address = new TNetworkAddress(ip_port[0],
                                    Integer.parseInt(ip_port[1]));
                            replicaHostIdxs.add(hostIndex_.getIndex(network_address));
                        }
                        fileDescriptor.addFileBlock(
                                new FileBlock(block.getOffset(), block.getLength(), replicaHostIdxs));
                    }
                } catch (IOException e) {
                    throw new RuntimeException(
                            "couldn't determine block locations for path '" + p + "':\n" + e.getMessage(), e);
                }
            }
        }

        if (SUPPORTS_VOLUME_ID && fs instanceof DistributedFileSystem) {
            LOG.trace("loading disk ids for: " + getFullName() + ". nodes: " + getNumNodes() + ". file system: "
                    + fsEntry);
            loadDiskIds((DistributedFileSystem) fs, blockLocations, partitionToFds);
            LOG.trace("completed load of disk ids for: " + getFullName());
        }
    }
}