Example usage for org.apache.hadoop.fs FileSystem isDirectory

List of usage examples for org.apache.hadoop.fs FileSystem isDirectory

Introduction

On this page you can find usage examples for org.apache.hadoop.fs FileSystem isDirectory.

Prototype

@Deprecated
public boolean isDirectory(Path f) throws IOException 

Document

True iff the named path is a directory.
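
For quick orientation, here is a minimal sketch of calling this method; the path is hypothetical. Because the method is marked @Deprecated, the sketch also shows the equivalent check through FileStatus.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsDirectoryExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/user/example/data"); // hypothetical path

        // Deprecated convenience call shown in the prototype above.
        boolean viaDeprecatedCall = fs.isDirectory(path);

        // Equivalent check without the deprecated method: look up the FileStatus
        // (guarding against a missing path) and ask it directly.
        boolean viaFileStatus = fs.exists(path) && fs.getFileStatus(path).isDirectory();

        System.out.println(path + " is a directory: " + viaDeprecatedCall + " / " + viaFileStatus);
    }
}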

Usage

From source file: gobblin.util.HadoopUtils.java

License: Apache License

/**
 * Renames a src {@link Path} on fs {@link FileSystem} to a dst {@link Path}. If fs is a {@link LocalFileSystem} and
 * src is a directory then {@link File#renameTo} is called directly to avoid a directory rename race condition where
 * {@link org.apache.hadoop.fs.RawLocalFileSystem#rename} copies the conflicting src directory into dst resulting in
 * an extra nested level, such as /root/a/b/c/e/e where e is repeated.
 *
 * @param fs the {@link FileSystem} where the src {@link Path} exists
 * @param src the source {@link Path} which will be renamed
 * @param dst the {@link Path} to rename to
 * @return true if rename succeeded, false if rename failed.
 * @throws IOException if rename failed for reasons other than target exists.
 */
public static boolean renamePathHandleLocalFSRace(FileSystem fs, Path src, Path dst) throws IOException {
    if (DecoratorUtils.resolveUnderlyingObject(fs) instanceof LocalFileSystem && fs.isDirectory(src)) {
        LocalFileSystem localFs = (LocalFileSystem) DecoratorUtils.resolveUnderlyingObject(fs);
        File srcFile = localFs.pathToFile(src);
        File dstFile = localFs.pathToFile(dst);

        return srcFile.renameTo(dstFile);
    } else {
        return fs.rename(src, dst);
    }
}
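
A minimal usage sketch for the helper above (paths are hypothetical), showing how a caller might react to the boolean result:

// Minimal sketch, assuming gobblin.util.HadoopUtils (shown above) is on the classpath;
// the paths below are hypothetical.
FileSystem fs = FileSystem.get(new Configuration());
Path src = new Path("/tmp/job-staging/output"); // hypothetical
Path dst = new Path("/data/output");            // hypothetical

if (!HadoopUtils.renamePathHandleLocalFSRace(fs, src, dst)) {
    // false means the rename did not happen (for example, dst already exists);
    // other failures surface as an IOException from the call itself.
    throw new IOException("Failed to rename " + src + " to " + dst);
}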

From source file: gobblin.util.HadoopUtils.java

License: Apache License

/**
 * Try to set owner and permissions for the path. Will not throw exception.
 */
public static void setPermissions(Path location, Optional<String> owner, Optional<String> group, FileSystem fs,
        FsPermission permission) {
    try {
        if (!owner.isPresent()) {
            return;
        }
        if (!group.isPresent()) {
            return;
        }
        fs.setOwner(location, owner.get(), group.get());
        fs.setPermission(location, permission);
        if (!fs.isDirectory(location)) {
            return;
        }
        for (FileStatus fileStatus : fs.listStatus(location)) {
            setPermissions(fileStatus.getPath(), owner, group, fs, permission);
        }
    } catch (IOException e) {
        log.warn("Exception occurred while trying to change permissions : " + e.getMessage());
    }
}

From source file: gobblin.util.HadoopUtils.java

License: Apache License

public static boolean hasContent(FileSystem fs, Path path) throws IOException {
    if (!fs.isDirectory(path)) {
        return true;
    }
    boolean content = false;
    for (FileStatus fileStatus : fs.listStatus(path)) {
        content = content || hasContent(fs, fileStatus.getPath());
        if (content) {
            break;
        }
    }
    return content;
}
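
A minimal sketch illustrating the semantics of hasContent (paths are hypothetical): a directory has content only if some file exists somewhere beneath it, while any non-directory path counts as content.

// Minimal sketch, assuming gobblin.util.HadoopUtils (shown above); paths are hypothetical.
FileSystem fs = FileSystem.get(new Configuration());

Path emptyDir = new Path("/tmp/empty-dir"); // hypothetical
fs.mkdirs(emptyDir);
boolean before = HadoopUtils.hasContent(fs, emptyDir);  // false: no files anywhere beneath the directory

fs.create(new Path(emptyDir, "part-0")).close();        // add one (empty) file
boolean after = HadoopUtils.hasContent(fs, emptyDir);   // true: any non-directory path counts as content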

From source file: gobblin.util.io.StreamUtils.java

License: Apache License

/**
 * Similar to {@link #tar(FileSystem, Path, Path)} except the source and destination {@link FileSystem} can be different.
 *
 * @see #tar(FileSystem, Path, Path)
 */
public static void tar(FileSystem sourceFs, FileSystem destFs, Path sourcePath, Path destPath)
        throws IOException {
    try (FSDataOutputStream fsDataOutputStream = destFs.create(destPath);
            TarArchiveOutputStream tarArchiveOutputStream = new TarArchiveOutputStream(
                    new GzipCompressorOutputStream(fsDataOutputStream),
                    ConfigurationKeys.DEFAULT_CHARSET_ENCODING.name())) {

        FileStatus fileStatus = sourceFs.getFileStatus(sourcePath);

        if (sourceFs.isDirectory(sourcePath)) {
            dirToTarArchiveOutputStreamRecursive(fileStatus, sourceFs, Optional.<Path>absent(),
                    tarArchiveOutputStream);
        } else {
            try (FSDataInputStream fsDataInputStream = sourceFs.open(sourcePath)) {
                fileToTarArchiveOutputStream(fileStatus, fsDataInputStream, new Path(sourcePath.getName()),
                        tarArchiveOutputStream);
            }
        }
    }
}

From source file: gobblin.util.io.StreamUtils.java

License: Apache License

/**
 * Helper method for {@link #tar(FileSystem, FileSystem, Path, Path)} that recursively adds a directory to a given
 * {@link TarArchiveOutputStream}.
 */
private static void dirToTarArchiveOutputStreamRecursive(FileStatus dirFileStatus, FileSystem fs,
        Optional<Path> destDir, TarArchiveOutputStream tarArchiveOutputStream) throws IOException {

    Preconditions.checkState(fs.isDirectory(dirFileStatus.getPath()));

    Path dir = destDir.isPresent() ? new Path(destDir.get(), dirFileStatus.getPath().getName())
            : new Path(dirFileStatus.getPath().getName());
    dirToTarArchiveOutputStream(dir, tarArchiveOutputStream);

    for (FileStatus childFileStatus : fs.listStatus(dirFileStatus.getPath())) {
        Path childFile = new Path(dir, childFileStatus.getPath().getName());

        if (fs.isDirectory(childFileStatus.getPath())) {
            dirToTarArchiveOutputStreamRecursive(childFileStatus, fs, Optional.of(childFile),
                    tarArchiveOutputStream);
        } else {
            try (FSDataInputStream fsDataInputStream = fs.open(childFileStatus.getPath())) {
                fileToTarArchiveOutputStream(childFileStatus, fsDataInputStream, childFile,
                        tarArchiveOutputStream);
            }
        }
    }
}

From source file: gov.llnl.ontology.text.hbase.GzipTarInputFormat.java

License: Open Source License

/**
 * Returns a {@link List} of {@link FileSplit}s.  Each {@link FileSplit}
 * will be a gzipped tarball of xml documents.  Each tarred file should
 * contain a single document.
 */
public List<InputSplit> getSplits(JobContext context) throws IOException {
    List<InputSplit> splits = new ArrayList<InputSplit>();

    // Get the list of zipped files to be processed and add each zipped file
    // as an InputSplit.
    FileSystem fs = FileSystem.get(context.getConfiguration());
    for (Path file : getInputPaths(context)) {
        // Check that the list of files exists.  Throw an exception if it
        // does not.
        if (fs.isDirectory(file) || !fs.exists(file))
            throw new IOException("File does not exist: " + file);

        // Read the contents of the file list and add each line as a
        // FileSplit.
        BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(file)));
        for (String line = null; (line = br.readLine()) != null;)
            splits.add(new FileSplit(new Path(line), 0, Integer.MAX_VALUE, null));
    }
    return splits;
}

From source file: gr.ntua.h2rdf.LoadTriples.DistinctIds.java

License: Open Source License

public Job createSubmittableJob(String[] args) throws IOException, ClassNotFoundException {
    //io.compression.codecs
    Job job = new Job();

    job.setInputFormatClass(TextInputFormat.class);
    Configuration conf = new Configuration();
    Path blockProjection = new Path("blockIds/");
    Path translations = new Path("translations/");
    Path sample = new Path("sample/");
    Path temp = new Path("temp/");
    Path uniqueIds = new Path("uniqueIds/");
    FileSystem fs;
    try {
        fs = FileSystem.get(conf);
        if (fs.exists(uniqueIds)) {
            fs.delete(uniqueIds, true);
        }
        if (fs.exists(translations)) {
            fs.delete(translations, true);
        }
        if (fs.exists(blockProjection)) {
            fs.delete(blockProjection, true);
        }
        if (fs.exists(sample)) {
            fs.delete(sample, true);
        }
        if (fs.exists(temp)) {
            fs.delete(temp, true);
        }

        FileOutputFormat.setOutputPath(job, uniqueIds);
        Path inp = new Path(args[0]);
        FileInputFormat.setInputPaths(job, inp);

        double type = 1;
        double datasetSize = 0;
        if (fs.isFile(inp)) {
            datasetSize = fs.getFileStatus(inp).getLen();
        } else if (fs.isDirectory(inp)) {
            FileStatus[] s = fs.listStatus(inp);
            for (int i = 0; i < s.length; i++) {
                if (s[i].getPath().getName().toString().endsWith(".gz"))
                    type = 27;
                if (s[i].getPath().getName().toString().endsWith(".snappy"))
                    type = 10;
                datasetSize += s[i].getLen();
            }
        } else {
            FileStatus[] s = fs.globStatus(inp);
            for (int i = 0; i < s.length; i++) {
                if (s[i].getPath().getName().toString().endsWith(".gz"))
                    type = 27;
                if (s[i].getPath().getName().toString().endsWith(".snappy"))
                    type = 10;
                datasetSize += s[i].getLen();
            }
        }
        datasetSize = datasetSize * type;
        System.out.println("type: " + type);
        System.out.println("datasetSize: " + datasetSize);
        samplingRate = (double) sampleChunk / (double) datasetSize;
        if (samplingRate >= 0.1) {
            samplingRate = 0.1;
        }
        if (samplingRate <= 0.001) {
            samplingRate = 0.001;
        }
        numReducers = (int) (datasetSize / ReducerChunk);
        if (numReducers == 0)
            numReducers = 1;
        numReducers++;
    } catch (IOException e) {
        e.printStackTrace();
    }

    HBaseAdmin hadmin = new HBaseAdmin(conf);
    HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);

    HColumnDescriptor family = new HColumnDescriptor("counter");
    desc.addFamily(family);
    if (!hadmin.tableExists(TABLE_NAME)) {
        hadmin.createTable(desc);
    }

    job.setNumReduceTasks(numReducers);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(IntWritable.class);
    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(ImmutableBytesWritable.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    job.setJarByClass(DistinctIds.class);
    job.setMapperClass(Map.class);
    job.setReducerClass(Reduce.class);

    job.setPartitionerClass(SamplingPartitioner.class);

    FileOutputFormat.setCompressOutput(job, true);
    FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
    job.getConfiguration().set("mapred.compress.map.output", "true");
    job.getConfiguration().set("mapred.map.output.compression.codec",
            "org.apache.hadoop.io.compress.SnappyCodec");

    //job.setCombinerClass(Combiner.class);
    job.setJobName("Distinct Id Wordcount");
    job.getConfiguration().setBoolean("mapred.map.tasks.speculative.execution", false);
    job.getConfiguration().setBoolean("mapred.reduce.tasks.speculative.execution", false);
    job.getConfiguration().setInt("io.sort.mb", 100);
    job.getConfiguration().setInt("io.file.buffer.size", 131072);
    job.getConfiguration().setInt("mapred.job.reuse.jvm.num.tasks", -1);

    return job;

}

From source file: hr.fer.tel.rovkp.homework01.task03.Program.java

public static String work(String hdfsURI, String hdfsPath, String localPath)
        throws URISyntaxException, IOException {

    Configuration config = new Configuration();
    FileSystem hdfs = FileSystem.get(new URI(hdfsURI), config);
    LocalFileSystem localFileSystem = LocalFileSystem.getLocal(config);

    Path pathLocal = new Path(localPath);
    Path pathHdfs = new Path(hdfsPath);

    boolean isLocalFile = localFileSystem.isFile(pathLocal) || localFileSystem.isDirectory(pathLocal);
    boolean isHdfsFile = hdfs.isFile(pathHdfs) || hdfs.isDirectory(pathHdfs);

    return new StringBuilder().append(localPath).append(isLocalFile ? " is" : " is not")
            .append(" a valid local path.\n").append(hdfsPath).append(isHdfsFile ? " is" : " is not")
            .append(" a valid hdfs path.").toString();
}

From source file: hydrograph.engine.spark.datasource.utils.AWSS3Util.java

License: Apache License

public void upload(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util upload");
    int retryAttempt = 0;
    int i;

    java.nio.file.Path inputfile = new File(runFileTransferEntity.getLocalPath()).toPath();
    String keyName = inputfile.getFileName().toString();
    log.info("keyName is: " + keyName);
    log.info("bucket name is:" + runFileTransferEntity.getBucketName());
    log.info("Folder Name is" + runFileTransferEntity.getFolder_name_in_bucket());

    String amazonFileUploadLocationOriginal = null;
    FileInputStream stream = null;
    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.isFile() || filecheck.isDirectory())
                && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
            Log.error("Invalid local path.Please provide valid path");
            throw new AWSUtilException("Invalid local path");
        }

    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();

    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {
            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {
                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());
                PropertiesCredentials creds = new PropertiesCredentials(securityFile);

                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }

            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            log.debug("file path name" + filepath);
            s3folderName = filepath;

            if (s3folderName != null && !s3folderName.trim().equals("")) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }

            File f = new File(runFileTransferEntity.getLocalPath());

            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                log.debug("Provided HDFS local path ");
                String inputPath = runFileTransferEntity.getLocalPath();
                String s1 = inputPath.substring(7, inputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File file = new File("/tmp");
                if (!file.exists())
                    file.mkdir();
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                Path local = new Path("/tmp");
                String s = inputPath.substring(7, inputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());
                Path hdfs = new Path(hdfspath);
                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());
                File dir = new File(hdfspath);
                if (hdfsFileSystem.isDirectory(new Path(hdfspath))) {
                    InputStream is = null;
                    OutputStream os = null;
                    String localDirectory = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                    FileStatus[] fileStatus = hdfsFileSystem
                            .listStatus(new Path(runFileTransferEntity.getLocalPath()));
                    Path[] paths = FileUtil.stat2Paths(fileStatus);
                    File dirs = null;

                    try {
                        String folderName = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);

                        DateFormat df = new SimpleDateFormat("dd-MM-yyyy");
                        String dateWithoutTime = df.format(new Date()).toString();
                        Random ran = new Random();
                        String tempFolder = "ftp_sftp_" + System.nanoTime() + "_" + ran.nextInt(1000);
                        dirs = new File("/tmp/" + tempFolder);

                        boolean success = dirs.mkdirs();
                        for (Path files : paths) {
                            is = hdfsFileSystem.open(files);
                            os = new BufferedOutputStream(new FileOutputStream(dirs + "/" + files.getName()));
                            org.apache.hadoop.io.IOUtils.copyBytes(is, os, conf);
                        }

                        for (File files : dirs.listFiles()) {

                            if (files.isFile()) {
                                s3Client.putObject(new PutObjectRequest(
                                        amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                        files));
                            }

                        }
                    }

                    catch (IOException e) {
                        Log.error("IOException occured while transfering the file", e);
                    } finally {
                        org.apache.hadoop.io.IOUtils.closeStream(is);
                        org.apache.hadoop.io.IOUtils.closeStream(os);
                        if (dirs != null) {

                            FileUtils.deleteDirectory(dirs);
                        }

                    }

                } else {
                    hdfsFileSystem.copyToLocalFile(false, hdfs, local);
                    stream = new FileInputStream("/tmp/" + f.getName());
                    File S3file = new File("/tmp/" + f.getName());

                    // Upload the file just copied out of HDFS, not the /tmp directory handle.
                    PutObjectRequest putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal,
                            keyName, S3file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            } else {

                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());

                if (Files.isDirectory(inputfile)) {

                    File fileloc = new File(inputfile.toAbsolutePath().toString());
                    String folderName = new File(runFileTransferEntity.getLocalPath()).getName();
                    for (File files : fileloc.listFiles()) {

                        if (files.isFile()) {
                            PutObjectRequest putObjectRequest = new PutObjectRequest(
                                    amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                    files);

                            PutObjectResult result = s3Client.putObject(putObjectRequest);
                        }

                    }

                } else {
                    PutObjectRequest putObjectRequest = null;
                    File file = new File(runFileTransferEntity.getLocalPath());
                    stream = new FileInputStream(runFileTransferEntity.getLocalPath());
                    putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal, keyName, file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            }

        }

        catch (AmazonServiceException e) {
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                if (runFileTransferEntity.getFailOnError())
                    Log.error("Incorrect details provided.Please provide valid details", e);
                throw new AWSUtilException("Incorrect details provided");

            }

            {
                try {
                    Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
                } catch (Exception e1) {
                    Log.error("Exception occured while sleeping the thread");
                }
                continue;
            }

        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occured while sleeping the thread");
            }
            continue;
        } catch (Error err) {
            Log.error("Error occured while uploading the file", err);
            throw new AWSUtilException(err);
        }
        done = true;
        break;
    }
    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError())
            throw new AWSUtilException("File transfer failed");
    }
    log.debug("Finished AWSS3Util upload");
}

From source file: hydrograph.engine.spark.datasource.utils.FTPUtil.java

License: Apache License

public void upload(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start FTPUtil upload");

    FTPClient ftpClient = new FTPClient();
    ftpClient.enterLocalPassiveMode();
    ftpClient.setBufferSize(1024000);

    int retryAttempt = runFileTransferEntity.getRetryAttempt();
    int attemptCount = 1;
    int i = 0;

    InputStream inputStream = null;
    boolean login = false;
    File filecheck = new File(runFileTransferEntity.getInputFilePath());
    log.info("input file name" + filecheck.getName());
    if (runFileTransferEntity.getFailOnError()) {
        if (!(filecheck.isFile() || filecheck.isDirectory())
                && !(runFileTransferEntity.getInputFilePath().contains("hdfs://"))) {
            log.error("Invalid input file path. Please provide valid input file path.");
            throw new FTPUtilException("Invalid input file path");
        }
    }

    boolean done = false;
    for (i = 0; i < retryAttempt; i++) {
        try {
            log.info("Connection attempt: " + (i + 1));
            if (runFileTransferEntity.getTimeOut() != 0)
                if (runFileTransferEntity.getEncoding() != null)
                    ftpClient.setControlEncoding(runFileTransferEntity.getEncoding());
            ftpClient.setConnectTimeout(runFileTransferEntity.getTimeOut());
            log.debug("connection details: " + "/n" + "Username: " + runFileTransferEntity.getUserName() + "/n"
                    + "HostName " + runFileTransferEntity.getHostName() + "/n" + "Portno"
                    + runFileTransferEntity.getPortNo());
            ftpClient.connect(runFileTransferEntity.getHostName(), runFileTransferEntity.getPortNo());
            login = ftpClient.login(runFileTransferEntity.getUserName(), runFileTransferEntity.getPassword());
            if (!login) {
                log.error("Invalid FTP details provided. Please provide correct FTP details.");
                throw new FTPUtilException("Invalid FTP details");
            }
            ftpClient.enterLocalPassiveMode();
            ftpClient.setFileType(FTP.BINARY_FILE_TYPE);
            if (runFileTransferEntity.getInputFilePath().contains("hdfs://")) {
                log.debug("Processing for HDFS input file path");
                String inputPath = runFileTransferEntity.getInputFilePath();

                String s1 = inputPath.substring(7, inputPath.length());

                String s2 = s1.substring(0, s1.indexOf("/"));

                int index = runFileTransferEntity.getInputFilePath()
                        .replaceAll(Matcher.quoteReplacement("\\"), "/").lastIndexOf('/');

                String file_name = runFileTransferEntity.getInputFilePath().substring(index + 1);

                File f = new File("/tmp");
                if (!f.exists())
                    f.mkdir();
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                Path local = new Path("/tmp");
                String s = inputPath.substring(7, inputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());
                File dir = new File(hdfspath);
                Random ran = new Random();
                String tempFolder = "ftp_sftp_" + System.nanoTime() + "_" + ran.nextInt(1000);
                File dirs = new File("/tmp/" + tempFolder);
                boolean success = dirs.mkdirs();
                if (hdfsFileSystem.isDirectory(new Path(hdfspath))) {
                    log.debug("Provided HDFS input path is for directory.");
                    InputStream is = null;
                    OutputStream os = null;
                    String localDirectory = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                    FileStatus[] fileStatus = hdfsFileSystem
                            .listStatus(new Path(runFileTransferEntity.getInputFilePath()));
                    Path[] paths = FileUtil.stat2Paths(fileStatus);
                    try {
                        String folderName = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                        Path hdfs = new Path(hdfspath);
                        for (Path file : paths) {
                            is = hdfsFileSystem.open(file);
                            os = new BufferedOutputStream(
                                    new FileOutputStream(dirs + "" + File.separatorChar + file.getName()));
                            IOUtils.copyBytes(is, os, conf);
                        }
                        ftpClient.changeWorkingDirectory(runFileTransferEntity.getOutFilePath()
                                .replaceAll(Matcher.quoteReplacement("\\"), "/"));
                        ftpClient.removeDirectory(folderName);
                        ftpClient.makeDirectory(folderName);
                        ftpClient.changeWorkingDirectory(runFileTransferEntity.getOutFilePath().replaceAll(
                                Matcher.quoteReplacement("\\"), "/") + File.separatorChar + folderName);
                        for (File files : dirs.listFiles()) {

                            if (files.isFile())
                                ftpClient.storeFile(files.getName().toString(),
                                        new BufferedInputStream(new FileInputStream(files)));

                        }
                    } catch (IOException e) {
                        log.error("Failed while doing FTP file", e);
                        //throw e;
                    } finally {
                        IOUtils.closeStream(is);
                        IOUtils.closeStream(os);
                        if (dirs != null) {
                            FileUtils.deleteDirectory(dirs);
                        }
                    }
                } else {
                    try {
                        Path hdfs = new Path(hdfspath);
                        hdfsFileSystem.copyToLocalFile(false, hdfs, local);
                        inputStream = new FileInputStream(dirs + file_name);
                        ftpClient.storeFile(file_name, new BufferedInputStream(inputStream));
                    } catch (Exception e) {
                        log.error("Failed while doing FTP file", e);
                        throw new FTPUtilException("Failed while doing FTP file", e);
                    } finally {
                        FileUtils.deleteDirectory(dirs);
                    }
                }
            } else {
                java.nio.file.Path file = new File(runFileTransferEntity.getInputFilePath()).toPath();
                if (Files.isDirectory(file)) {
                    log.debug("Provided input file path is for directory");
                    File dir = new File(runFileTransferEntity.getInputFilePath());
                    String folderName = new File(runFileTransferEntity.getInputFilePath()).getName();
                    ftpClient.changeWorkingDirectory(runFileTransferEntity.getOutFilePath()
                            .replaceAll(Matcher.quoteReplacement("\\"), "/"));
                    try {
                        ftpClient.removeDirectory(folderName);
                    } catch (IOException e) {
                        log.error("Failed while doing FTP file", e);
                        throw new FTPUtilException("Failed while doing FTP file", e);
                    }
                    ftpClient.makeDirectory(folderName);

                    ftpClient.changeWorkingDirectory(runFileTransferEntity.getOutFilePath()
                            .replaceAll(Matcher.quoteReplacement("\\"), "/") + "/" + folderName);
                    for (File files : dir.listFiles()) {

                        if (files.isFile())
                            ftpClient.storeFile(files.getName().toString(),
                                    new BufferedInputStream(new FileInputStream(files)));
                    }
                } else {

                    inputStream = new FileInputStream(runFileTransferEntity.getInputFilePath());
                    ftpClient.changeWorkingDirectory(runFileTransferEntity.getOutFilePath()
                            .replaceAll(Matcher.quoteReplacement("\\"), "/"));
                    int index = runFileTransferEntity.getInputFilePath()
                            .replaceAll(Matcher.quoteReplacement("\\"), "/").lastIndexOf('/');
                    String file_name = runFileTransferEntity.getInputFilePath().substring(index + 1);
                    ftpClient.storeFile(file_name, new BufferedInputStream(inputStream));
                }

            }
        } catch (Exception e) {
            log.error("Failed while doing FTP file", e);
            if (!login && runFileTransferEntity.getFailOnError()) {
                throw new FTPUtilException("Invalid FTP details");
            }
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                log.error("Failed while sleeping for retry duration", e1);
            }
            continue;
        } finally {
            try {
                if (inputStream != null)
                    inputStream.close();
            } catch (IOException ioe) {

            }
        }
        done = true;
        break;
    }

    try {
        if (ftpClient != null) {
            ftpClient.logout();
            ftpClient.disconnect();

        }
    } catch (Exception e) {
        log.error("Failed while clossing the connection", e);
    } catch (Error e) {
        log.error("Failed while clossing the connection", e);
        //throw new RuntimeException(e);
    }

    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new FTPUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    log.debug("Finished FTPUtil upload");
}