Example usage for org.apache.commons.vfs2 FileObject getChildren

Introduction

This page lists example usages of org.apache.commons.vfs2 FileObject.getChildren().

Prototype

FileObject[] getChildren() throws FileSystemException;

Document

Lists the children of this file.
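
Before the real-world usages below, here is a minimal sketch of the typical pattern they all follow: resolve a folder through a FileSystemManager, check that it exists and is a folder, then iterate over getChildren(). The "file:///tmp" URI is an illustrative assumption; getChildren() throws FileSystemException if the file does not exist or is not a folder, so callers usually guard with exists() and a type check first.

import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.FileSystemException;
import org.apache.commons.vfs2.FileSystemManager;
import org.apache.commons.vfs2.FileType;
import org.apache.commons.vfs2.VFS;

public class GetChildrenExample {
    public static void main(String[] args) throws FileSystemException {
        FileSystemManager manager = VFS.getManager();
        // Illustrative path; any URI supported by a registered VFS provider works here.
        FileObject folder = manager.resolveFile("file:///tmp");
        if (folder.exists() && folder.getType() == FileType.FOLDER) {
            for (FileObject child : folder.getChildren()) {
                System.out.println(child.getName().getBaseName());
            }
        }
    }
}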

Usage

From source file:org.obiba.opal.web.shell.reporting.ProjectReportTemplateResource.java

@GET
@Path("/reports")
public List<Opal.ReportDto> getReports() throws FileSystemException {
    getReportTemplate();
    FileObject reportFolder = getReportFolder();
    List<Opal.ReportDto> reports = Lists.newArrayList();
    if (reportFolder.exists()) {
        for (FileObject reportFile : reportFolder.getChildren()) {
            if (reportFile.getType() == FileType.FILE
                    && reportFile.getName().getBaseName().startsWith(name + "-") && reportFile.isReadable()) {
                reports.add(getReportDto(reportFile));
            }
        }
    }
    return reports;
}

From source file:org.obiba.opal.web.shell.reporting.ProjectReportTemplateResource.java

@GET
@Path("/reports/latest")
public Response getReport() throws FileSystemException {
    getReportTemplate();

    FileObject reportFolder = getReportFolder();
    if (!reportFolder.exists()) {
        return Response.status(Response.Status.NOT_FOUND).build();
    }

    FileObject lastReportFile = null;
    File lastReport = null;
    for (FileObject reportFile : reportFolder.getChildren()) {
        if (reportFile.getType() == FileType.FILE && reportFile.getName().getBaseName().startsWith(name + "-")
                && reportFile.isReadable()) {
            File report = opalRuntime.getFileSystem().getLocalFile(reportFile);
            if (lastReport == null || report.lastModified() > lastReport.lastModified()) {
                lastReport = report;
                lastReportFile = reportFile;
            }
        }
    }

    return lastReportFile == null ? Response.status(Response.Status.NOT_FOUND).build()
            : Response.ok(getReportDto(lastReportFile)).build();
}

From source file:org.ow2.proactive_grid_cloud_portal.dataspace.FileSystem.java

public static boolean isEmpty(FileObject fo) throws FileSystemException {
    fo.refresh();
    FileObject[] children = fo.getChildren();
    return children == null || children.length == 0;
}

From source file:org.ow2.proactive_grid_cloud_portal.scheduler.SchedulerStateRest.java

/**
 * Either pulls a file from the given DataSpace to the local file system or
 * lists the content of a directory if the path refers to a directory. If the
 * path points to a file, the content of that file is returned as an input
 * stream. If the path points to a directory, the returned input stream is a
 * text stream containing one line per directory entry.
 *
 * @param sessionId
 *            a valid session id
 * @param spaceName
 *            the name of the data space involved (GLOBAL or USER)
 * @param filePath
 *            the path to the file or directory whose content must be
 *            retrieved
 **/
@Override
public InputStream pullFile(@HeaderParam("sessionid") String sessionId,
        @PathParam("spaceName") String spaceName, @PathParam("filePath") String filePath)
        throws IOException, NotConnectedRestException, PermissionRestException {

    checkAccess(sessionId, "pullFile");
    Session session = dataspaceRestApi.checkSessionValidity(sessionId);

    filePath = normalizeFilePath(filePath, null);

    FileObject sourcefo = dataspaceRestApi.resolveFile(session, spaceName, filePath);

    if (!sourcefo.exists() || !sourcefo.isReadable()) {
        RuntimeException ex = new IllegalArgumentException(
                "File " + filePath + " does not exist or is not readable in space " + spaceName);
        logger.error(ex);
        throw ex;
    }

    if (sourcefo.getType().equals(FileType.FOLDER)) {
        logger.info("[pullFile] reading directory content from " + sourcefo.getURL());
        // if it's a folder we return an InputStream listing its content
        StringBuilder sb = new StringBuilder();
        String nl = System.lineSeparator();
        for (FileObject fo : sourcefo.getChildren()) {
            sb.append(fo.getName().getBaseName()).append(nl);
        }
        return IOUtils.toInputStream(sb.toString());

    } else if (sourcefo.getType().equals(FileType.FILE)) {
        logger.info("[pullFile] reading file content from " + sourcefo.getURL());
        return sourcefo.getContent().getInputStream();
    } else {
        RuntimeException ex = new IllegalArgumentException(
                "File " + filePath + " has an unsupported type " + sourcefo.getType());
        logger.error(ex);
        throw ex;
    }

}

From source file:org.pentaho.di.core.hadoop.HadoopConfigurationBootstrap.java

public synchronized List<HadoopConfigurationInfo> getHadoopConfigurationInfos()
        throws KettleException, ConfigurationException, IOException {
    List<HadoopConfigurationInfo> result = new ArrayList<>();
    FileObject hadoopConfigurationsDir = resolveHadoopConfigurationsDirectory();
    // If the folder doesn't exist, return an empty list
    if (hadoopConfigurationsDir.exists()) {
        String activeId = getActiveConfigurationId();
        String willBeActiveId = getWillBeActiveConfigurationId();
        for (FileObject childFolder : hadoopConfigurationsDir.getChildren()) {
            if (childFolder.getType() == FileType.FOLDER) {
                String id = childFolder.getName().getBaseName();
                FileObject configPropertiesFile = childFolder.getChild(CONFIG_PROPERTIES);
                if (configPropertiesFile.exists()) {
                    Properties properties = new Properties();
                    properties.load(configPropertiesFile.getContent().getInputStream());
                    result.add(new HadoopConfigurationInfo(id, properties.getProperty("name", id),
                            id.equals(activeId), willBeActiveId.equals(id)));
                }
            }
        }
    }
    return result;
}

From source file:org.pentaho.di.plugins.fileopensave.providers.vfs.VFSFileProvider.java

/**
 * @param file    the file whose children should be listed
 * @param filters file name filters applied to non-folder children
 * @return the children of the given file that pass the filters
 */
@Override
public List<VFSFile> getFiles(VFSFile file, String filters) {
    if (file.getPath() == null) {
        return getRoot(file);
    }
    List<VFSFile> files = new ArrayList<>();
    try {
        FileObject fileObject = KettleVFS.getFileObject(file.getPath(), new Variables(),
                VFSHelper.getOpts(file.getPath(), file.getConnection()));
        FileType fileType = fileObject.getType();
        if (fileType.hasChildren()) {
            FileObject[] children = fileObject.getChildren();
            for (FileObject child : children) {
                FileType fileType1 = child.getType();
                if (fileType1.hasChildren()) {
                    files.add(VFSDirectory.create(file.getPath(), child, file.getConnection()));
                } else {
                    if (Utils.matches(child.getName().getBaseName(), filters)) {
                        files.add(VFSFile.create(file.getPath(), child, file.getConnection()));
                    }
                }
            }
        }
    } catch (KettleFileException | FileSystemException ignored) {
        // File does not exist
    }
    return files;
}

From source file:org.pentaho.di.trans.steps.pentahoreporting.urlrepository.FileObjectContentLocation.java

/**
 * Lists all content entities stored in this content-location. This method filters out all files that have an
 * invalid name (according to the repository rules).
 *
 * @return the content entities for this location.
 * @throws ContentIOException if a repository error occurred.
 */
public ContentEntity[] listContents() throws ContentIOException {
    try {
        final FileObject file = getBackend();
        final FileObject[] files = file.getChildren();
        // Collect matches into a list so that filtered-out children do not leave null slots in the result.
        final List<ContentEntity> entities = new ArrayList<>();
        for (final FileObject child : files) {
            if (RepositoryUtilities.isInvalidPathName(child.getPublicURIString())) {
                continue;
            }
            if (child.isFolder() || child.isFile()) {
                entities.add(new FileObjectContentLocation(this, child));
            }
        }
        return entities.toArray(new ContentEntity[0]);
    } catch (FileSystemException e) {
        // Propagate the failure as the exception type declared by this method.
        throw new ContentIOException(e.getMessage(), e);
    }
}

From source file:org.pentaho.di.trans.steps.textfileoutput.TextFileOutputSplittingIT.java

@After
public void tearDown() throws Exception {
    transMeta = null;

    FileObject folder = getFolder();
    for (FileObject fileObject : folder.getChildren()) {
        fileObject.delete();
    }
}

From source file:org.pentaho.googledrive.vfs.test.GoogleDriveFileObjectTest.java

@Test
public void testFileObject() throws Exception {
    FileSystemManager manager = mock(FileSystemManager.class);
    GoogleDriveFileObject fileObjectMock = mock(GoogleDriveFileObject.class);
    when(manager.resolveFile(FOLDER)).thenReturn(fileObjectMock);
    when(fileObjectMock.isFolder()).thenReturn(true);
    when(fileObjectMock.exists()).thenReturn(true);
    when(fileObjectMock.delete()).thenReturn(true);
    FileObject fileObject = manager.resolveFile(FOLDER);
    fileObject.createFolder();
    assertTrue(fileObject.isFolder());
    assertTrue(fileObject.exists());
    assertTrue(fileObject.delete());
    assertNull(fileObject.getChildren());
}

From source file:org.pentaho.hadoop.shim.hsp101.HadoopShim.java

@Override
public void onLoad(HadoopConfiguration config, HadoopConfigurationFileSystemManager fsm) throws Exception {
    fsm.addProvider(config, "hdfs", config.getIdentifier(), new HDFSFileProvider());
    setDistributedCacheUtil(new DistributedCacheUtilImpl(config) {
        /**
         * Default permission for cached files
         * <p/>
         * Not using FsPermission.createImmutable due to EOFExceptions when using it with Hadoop 0.20.2
         */
        private final FsPermission CACHED_FILE_PERMISSION = new FsPermission((short) 0755);

        public void addFileToClassPath(Path file, Configuration conf) throws IOException {
            String classpath = conf.get("mapred.job.classpath.files");
            conf.set("mapred.job.classpath.files", classpath == null ? file.toString()
                    : classpath + getClusterPathSeparator() + file.toString());
            FileSystem fs = FileSystem.get(conf);
            URI uri = fs.makeQualified(file).toUri();

            DistributedCache.addCacheFile(uri, conf);
        }

        /**
         * Stages the source file or folder to a Hadoop file system and sets their permission and replication
         * value appropriately to be used with the Distributed Cache. WARNING: This will delete the contents of
         * dest before staging the archive.
         *
         * @param source    File or folder to copy to the file system. If it is a folder all contents will be
         *                  copied into dest.
         * @param fs        Hadoop file system to store the contents of the archive in
         * @param dest      Destination to copy source into. If source is a file, the new file name will be
         *                  exactly dest. If source is a folder its contents will be copied into dest. For more
         *                  info see {@link FileSystem#copyFromLocalFile(org.apache.hadoop.fs.Path,
         *                  org.apache.hadoop.fs.Path)}.
         * @param overwrite Should an existing file or folder be overwritten? If not an exception will be
         *                  thrown.
         * @throws IOException         Destination exists and is not a directory
         * @throws KettleFileException Source does not exist or destination exists and overwrite is false.
         */
        public void stageForCache(FileObject source, FileSystem fs, Path dest, boolean overwrite)
                throws IOException, KettleFileException {
            if (!source.exists()) {
                throw new KettleFileException(BaseMessages.getString(DistributedCacheUtilImpl.class,
                        "DistributedCacheUtil.SourceDoesNotExist", source));
            }

            if (fs.exists(dest)) {
                if (overwrite) {
                    // It is a directory, clear it out
                    fs.delete(dest, true);
                } else {
                    throw new KettleFileException(BaseMessages.getString(DistributedCacheUtilImpl.class,
                            "DistributedCacheUtil.DestinationExists", dest.toUri().getPath()));
                }
            }

            // Use the same replication we'd use for submitting jobs
            short replication = (short) fs.getConf().getInt("mapred.submit.replication", 10);

            copyFile(source, fs, dest, overwrite);
            fs.setReplication(dest, replication);
        }

        private void copyFile(FileObject source, FileSystem fs, Path dest, boolean overwrite)
                throws IOException {
            if (source.getType() == FileType.FOLDER) {
                fs.mkdirs(dest);
                fs.setPermission(dest, CACHED_FILE_PERMISSION);
                for (FileObject fileObject : source.getChildren()) {
                    copyFile(fileObject, fs, new Path(dest, fileObject.getName().getBaseName()), overwrite);
                }
            } else {
                try (FSDataOutputStream fsDataOutputStream = fs.create(dest, overwrite)) {
                    IOUtils.copy(source.getContent().getInputStream(), fsDataOutputStream);
                    fs.setPermission(dest, CACHED_FILE_PERMISSION);
                }
            }
        }

        public String getClusterPathSeparator() {
            return System.getProperty("hadoop.cluster.path.separator", ",");
        }
    });
}