Example usage for org.apache.hadoop.fs FileSystem isFile

Introduction

This page collects example usages of org.apache.hadoop.fs.FileSystem#isFile from open-source projects.

Prototype

@Deprecated
public boolean isFile(Path f) throws IOException 

Document

Returns true iff the named path is a regular file. The method is deprecated; the replacement recommended by the Hadoop javadoc is getFileStatus(Path) together with FileStatus#isFile().
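
Because the method is deprecated, new code usually asks the FileSystem for a FileStatus and inspects that instead. A minimal sketch of the replacement (the class and method names here are illustrative, not taken from the projects below):

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileCheck {
    public static boolean isRegularFile(Path path) throws IOException {
        FileSystem fs = path.getFileSystem(new Configuration());
        try {
            FileStatus status = fs.getFileStatus(path);
            return status.isFile();
        } catch (FileNotFoundException e) {
            // the deprecated isFile(Path) also returns false for a missing path
            return false;
        }
    }
}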

Usage

From source file: org.apache.falcon.converter.OozieProcessMapper.java

License: Apache License

private void addArchiveForCustomJars(Cluster cluster, Workflow processWorkflow, List<String> archiveList,
        Path libPath) throws FalconException {
    if (libPath == null) {
        return;
    }

    try {
        final FileSystem fs = libPath.getFileSystem(ClusterHelper.getConfiguration(cluster));
        if (fs.isFile(libPath)) { // File, not a Dir
            archiveList.add(libPath.toString());
            return;
        }

        // lib path is a directory, add each file under the lib dir to archive
        final FileStatus[] fileStatuses = fs.listStatus(libPath, new PathFilter() {
            @Override
            public boolean accept(Path path) {
                try {
                    return fs.isFile(path) && path.getName().endsWith(".jar");
                } catch (IOException ignore) {
                    return false;
                }
            }
        });

        for (FileStatus fileStatus : fileStatuses) {
            archiveList.add(fileStatus.getPath().toString());
        }
    } catch (IOException e) {
        throw new FalconException("Error adding archive for custom jars under: " + libPath, e);
    }
}
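
Note that calling fs.isFile(path) inside the PathFilter costs one extra namenode round trip per candidate path. A hedged alternative sketch, reusing the identifiers above: the FileStatus objects that listStatus already returns carry the file/directory flag, so the filter can be dropped.

FileStatus[] statuses = fs.listStatus(libPath);
for (FileStatus status : statuses) {
    // the status already knows whether the path is a regular file
    if (status.isFile() && status.getPath().getName().endsWith(".jar")) {
        archiveList.add(status.getPath().toString());
    }
}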

From source file: org.apache.falcon.entity.DatasourceHelper.java

License: Apache License

/**
 * Fetches the password from the file at the given path.
 *
 * @param passwordFilePath location of the password file on HDFS
 * @return the contents of the password file
 * @throws FalconException if the file is missing, is a directory, or cannot be read
 */

private static String fetchPasswordInfoFromFile(String passwordFilePath) throws FalconException {
    try {
        Path path = new Path(passwordFilePath);
        FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(path.toUri());
        if (!fs.exists(path)) {
            throw new IOException("The password file does not exist! ");
        }

        if (!fs.isFile(path)) {
            throw new IOException("The password file cannot be a directory! ");
        }

        InputStream is = fs.open(path);
        StringWriter writer = new StringWriter();
        try {
            IOUtils.copy(is, writer);
            return writer.toString();
        } finally {
            IOUtils.closeQuietly(is);
            IOUtils.closeQuietly(writer);
            fs.close();
        }
    } catch (IOException ioe) {
        LOG.error("Error reading password file from HDFS : " + ioe);
        throw new FalconException(ioe);
    }
}
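
The stream handling above can be written more compactly with try-with-resources, which closes the stream even if the copy fails. A sketch under the same imports; the explicit UTF-8 charset is an assumption, since the original relies on the platform default:

try (InputStream is = fs.open(path)) {
    // commons-io IOUtils.toString drains the stream into a String
    return IOUtils.toString(is, java.nio.charset.StandardCharsets.UTF_8);
}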

From source file: org.apache.falcon.entity.ProcessHelper.java

License: Apache License

public static Path getUserWorkflowPath(Process process, org.apache.falcon.entity.v0.cluster.Cluster cluster,
        Path buildPath) throws FalconException {
    try {
        FileSystem fs = HadoopClientFactory.get()
                .createProxiedFileSystem(ClusterHelper.getConfiguration(cluster));
        Path wfPath = new Path(process.getWorkflow().getPath());
        if (fs.isFile(wfPath)) {
            return new Path(buildPath.getParent(), EntityUtil.PROCESS_USER_DIR + "/" + wfPath.getName());
        } else {
            return new Path(buildPath.getParent(), EntityUtil.PROCESS_USER_DIR);
        }
    } catch (IOException e) {
        throw new FalconException("Failed to get workflow path", e);
    }
}

From source file: org.apache.falcon.entity.ProcessHelper.java

License: Apache License

public static Path getUserLibPath(Process process, org.apache.falcon.entity.v0.cluster.Cluster cluster,
        Path buildPath) throws FalconException {
    try {
        if (process.getWorkflow().getLib() == null) {
            return null;
        }
        Path libPath = new Path(process.getWorkflow().getLib());

        FileSystem fs = HadoopClientFactory.get()
                .createProxiedFileSystem(ClusterHelper.getConfiguration(cluster));
        if (fs.isFile(libPath)) {
            return new Path(buildPath, EntityUtil.PROCESS_USERLIB_DIR + "/" + libPath.getName());
        } else {
            return new Path(buildPath, EntityUtil.PROCESS_USERLIB_DIR);
        }
    } catch (IOException e) {
        throw new FalconException("Failed to get user lib path", e);
    }
}

From source file: org.apache.falcon.extensions.store.ExtensionStore.java

License: Apache License

public String getResource(final String extensionResourcePath) throws FalconException {
    StringBuilder definition = new StringBuilder();
    Path resourcePath = new Path(extensionResourcePath);
    FileSystem fileSystem = HadoopClientFactory.get().createFalconFileSystem(resourcePath.toUri());
    try {
        if (fileSystem.isFile(resourcePath)) {
            definition.append(getExtensionResource(extensionResourcePath));
        } else {
            RemoteIterator<LocatedFileStatus> fileStatusListIterator = fileSystem.listFiles(resourcePath,
                    false);
            while (fileStatusListIterator.hasNext()) {
                LocatedFileStatus fileStatus = fileStatusListIterator.next();
                Path filePath = fileStatus.getPath();
                definition.append("Contents of file ").append(filePath.getName()).append(":\n");
                definition.append(getExtensionResource(filePath.toString())).append("\n \n");
            }
        }
    } catch (IOException e) {
        LOG.error("Exception while getting file(s) with path : " + extensionResourcePath, e);
        throw new StoreAccessException(e);
    }

    return definition.toString();
}

From source file: org.apache.falcon.oozie.process.ProcessExecutionWorkflowBuilder.java

License: Apache License

protected void addArchiveForCustomJars(Cluster cluster, List<String> archiveList, String lib)
        throws FalconException {
    if (StringUtils.isBlank(lib)) {
        return;
    }

    String[] libPaths = lib.split(EntityUtil.WF_LIB_SEPARATOR);
    for (String path : libPaths) {
        Path libPath = new Path(path);
        try {
            final FileSystem fs = HadoopClientFactory.get()
                    .createProxiedFileSystem(ClusterHelper.getConfiguration(cluster));
            if (fs.isFile(libPath)) { // File, not a Dir
                archiveList.add(libPath.toString());
                return;
            }

            // lib path is a directory, add each file under the lib dir to archive
            final FileStatus[] fileStatuses = fs.listStatus(libPath, new PathFilter() {
                @Override
                public boolean accept(Path path) {
                    try {
                        return fs.isFile(path) && path.getName().endsWith(".jar");
                    } catch (IOException ignore) {
                        return false;
                    }
                }
            });

            for (FileStatus fileStatus : fileStatuses) {
                archiveList.add(fileStatus.getPath().toString());
            }
        } catch (IOException e) {
            throw new FalconException("Error adding archive for custom jars under: " + libPath, e);
        }
    }
}

From source file: org.apache.falcon.util.HdfsClassLoader.java

License: Apache License

private static URL[] copyHdfsJarFilesToTempDir(String databaseName, List<String> jars) throws IOException {
    List<URL> urls = new ArrayList<URL>();

    final Configuration conf = new Configuration();
    Path localPath = createTempDir(databaseName, conf);

    for (String jar : jars) {
        Path jarPath = new Path(jar);
        final FileSystem fs = jarPath.getFileSystem(conf);
        if (fs.isFile(jarPath) && jarPath.getName().endsWith(".jar")) {
            LOG.info("Copying jarFile = " + jarPath);
            fs.copyToLocalFile(jarPath, localPath);
        }
    }
    urls.addAll(getJarsInPath(localPath.toUri().toURL()));

    return urls.toArray(new URL[urls.size()]);
}
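
Given that the enclosing class is HdfsClassLoader, a plausible consumer of the returned URLs is a URLClassLoader. This caller is hypothetical (not shown in the source), and the database name below is an assumed placeholder:

// "reporting_db" and the jars list are assumed inputs
URL[] jarUrls = copyHdfsJarFilesToTempDir("reporting_db", jars);
ClassLoader loader = new java.net.URLClassLoader(jarUrls,
        Thread.currentThread().getContextClassLoader());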

From source file: org.apache.flume.sink.customhdfs.HDFSDataStream.java

License: Apache License

protected void doOpen(Configuration conf, Path dstPath, FileSystem hdfs) throws IOException {
    if (useRawLocalFileSystem) {
        if (hdfs instanceof LocalFileSystem) {
            hdfs = ((LocalFileSystem) hdfs).getRaw();
        } else {
            logger.warn("useRawLocalFileSystem is set to true but file system "
                    + "is not of type LocalFileSystem: " + hdfs.getClass().getName());
        }
    }

    boolean appending = false;
    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
        outStream = hdfs.append(dstPath);
        appending = true;
    } else {
        outStream = hdfs.create(dstPath);
    }

    serializer = EventSerializerFactory.getInstance(serializerType, serializerContext, outStream);
    if (appending && !serializer.supportsReopen()) {
        outStream.close();
        serializer = null;
        throw new IOException("serializer (" + serializerType + ") does not support append");
    }

    // must call superclass to check for replication issues
    registerCurrentStream(outStream, hdfs, dstPath);

    if (appending) {
        serializer.afterReopen();
    } else {
        serializer.afterCreate();
    }
}
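
The append-or-create decision above recurs in the next two examples. Condensed into a sketch for clarity ("hdfs.append.support" is a flag read from the Configuration that Flume passes in, not a stock Hadoop default):

// append only when the sink is configured for it AND the target already exists as a file
boolean canAppend = conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath);
FSDataOutputStream out = canAppend ? hdfs.append(dstPath) : hdfs.create(dstPath);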

From source file: org.apache.flume.sink.customhdfs.HDFSSequenceFile.java

License: Apache License

protected void open(Path dstPath, CompressionCodec codeC, CompressionType compType, Configuration conf,
        FileSystem hdfs) throws IOException {
    if (useRawLocalFileSystem) {
        if (hdfs instanceof LocalFileSystem) {
            hdfs = ((LocalFileSystem) hdfs).getRaw();
        } else {
            logger.warn("useRawLocalFileSystem is set to true but file system "
                    + "is not of type LocalFileSystem: " + hdfs.getClass().getName());
        }
    }
    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
        outStream = hdfs.append(dstPath);
    } else {
        outStream = hdfs.create(dstPath);
    }
    writer = SequenceFile.createWriter(conf, outStream, serializer.getKeyClass(), serializer.getValueClass(),
            compType, codeC);

    registerCurrentStream(outStream, hdfs, dstPath);
}

From source file: org.apache.flume.sink.hdfs.HDFSCompressedDataStream.java

License: Apache License

@Override
public void open(String filePath, CompressionCodec codec, CompressionType cType) throws IOException {
    Configuration conf = new Configuration();
    Path dstPath = new Path(filePath);
    FileSystem hdfs = dstPath.getFileSystem(conf);
    if (useRawLocalFileSystem) {
        if (hdfs instanceof LocalFileSystem) {
            hdfs = ((LocalFileSystem) hdfs).getRaw();
        } else {
            logger.warn("useRawLocalFileSystem is set to true but file system "
                    + "is not of type LocalFileSystem: " + hdfs.getClass().getName());
        }
    }

    boolean appending = false;
    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
        fsOut = hdfs.append(dstPath);
        appending = true;
    } else {
        fsOut = hdfs.create(dstPath);
    }
    cmpOut = codec.createOutputStream(fsOut);
    serializer = EventSerializerFactory.getInstance(serializerType, serializerContext, cmpOut);
    if (appending && !serializer.supportsReopen()) {
        cmpOut.close();
        serializer = null;
        throw new IOException("serializer (" + serializerType + ") does not support append");
    }

    registerCurrentStream(fsOut, hdfs, dstPath);

    if (appending) {
        serializer.afterReopen();
    } else {
        serializer.afterCreate();
    }
    isFinished = false;
}