List of usage examples for org.apache.commons.vfs2 FileObject exists
boolean exists() throws FileSystemException;
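Determines if this file exists. Before the project examples below, here is a minimal self-contained sketch of the call; the ram:// path is illustrative (the RAM provider ships with commons-vfs2), and the class name is ours, not taken from any of the projects listed.

import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.FileSystemException;
import org.apache.commons.vfs2.VFS;

public class FileObjectExistsExample {
    public static void main(String[] args) throws FileSystemException {
        // Resolve a path against the default file system manager.
        FileObject file = VFS.getManager().resolveFile("ram://example/config.properties");

        // exists() reports whether the object is present right now;
        // the check itself can fail with a FileSystemException.
        if (file.exists()) {
            System.out.println("Found: " + file.getName().getFriendlyURI());
        } else {
            file.createFile(); // create it so a subsequent check succeeds
            System.out.println("Created: " + file.getName().getFriendlyURI());
        }
    }
}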
From source file: org.pentaho.hadoop.shim.HadoopConfigurationLocator.java

/**
 * Attempt to find any Hadoop configuration as a direct descendant of the provided directory.
 *
 * @param baseDir Directory to look for Hadoop configurations in
 * @throws ConfigurationException
 */
private void findHadoopConfigurations(FileObject baseDir, ActiveHadoopConfigurationLocator activeLocator)
        throws ConfigurationException {
    configurations = new HashMap<String, HadoopConfiguration>();
    try {
        if (!baseDir.exists()) {
            throw new ConfigurationException(BaseMessages.getString(PKG,
                    "Error.HadoopConfigurationDirectoryDoesNotExist", baseDir.getURL()));
        }
        for (FileObject f : baseDir.findFiles(new FileSelector() {
            @Override
            public boolean includeFile(FileSelectInfo info) throws Exception {
                return info.getDepth() == 1 && FileType.FOLDER.equals(info.getFile().getType());
            }

            @Override
            public boolean traverseDescendents(FileSelectInfo info) throws Exception {
                return info.getDepth() == 0;
            }
        })) {
            // Only load the specified configuration (ID should match the basename; we allow case-insensitivity)
            if (f.getName().getBaseName().equalsIgnoreCase(activeLocator.getActiveConfigurationId())) {
                HadoopConfiguration config = loadHadoopConfiguration(f);
                if (config != null) {
                    configurations.put(config.getIdentifier(), config);
                }
            }
        }
    } catch (FileSystemException ex) {
        throw new ConfigurationException(BaseMessages.getString(PKG,
                "Error.UnableToLoadConfigurations", baseDir.getName().getFriendlyURI()), ex);
    }
}
From source file: org.pentaho.hadoop.shim.HadoopConfigurationLocator.java

/**
 * Parse a set of URLs from a comma-separated list of URLs. If the URL points to a directory, all jar files
 * within that directory will be returned as well.
 *
 * @param urlString Comma-separated list of URLs (relative or absolute)
 * @return List of URLs resolved from {@code urlString}
 */
protected List<URL> parseURLs(FileObject root, String urlString) {
    if (urlString == null || urlString.trim().isEmpty()) {
        return Collections.emptyList();
    }
    String[] paths = urlString.split(",");
    List<URL> urls = new ArrayList<URL>();
    for (String path : paths) {
        try {
            FileObject file = root.resolveFile(path.trim());
            if (!file.exists()) {
                file = defaultFsm.resolveFile(path.trim());
            }
            if (FileType.FOLDER.equals(file.getType())) {
                // Add directories with a trailing / so the URL ClassLoader interprets
                // them as directories
                urls.add(new URL(file.getURL().toExternalForm() + "/"));
                // Also add all jars within this directory
                urls.addAll(findJarsIn(file, 1, new HashSet<String>()));
            } else {
                urls.add(file.getURL());
            }
        } catch (Exception e) {
            // Log invalid path
            logger.error(BaseMessages.getString(PKG, "Error.InvalidClasspathEntry", path));
        }
    }
    return urls;
}
From source file: org.pentaho.hadoop.shim.HadoopConfigurationLocatorTest.java

@BeforeClass
public static void setup() throws Exception {
    // Create a test hadoop configuration "a"
    FileObject ramRoot = VFS.getManager().resolveFile(HADOOP_CONFIGURATIONS_PATH);
    FileObject aConfigFolder = ramRoot.resolveFile("a");
    if (aConfigFolder.exists()) {
        aConfigFolder.delete(new AllFileSelector());
    }
    aConfigFolder.createFolder();
    assertEquals(FileType.FOLDER, aConfigFolder.getType());

    // Create the properties file for the configuration as hadoop-configurations/a/config.properties
    configFile = aConfigFolder.resolveFile("config.properties");
    Properties p = new Properties();
    p.setProperty("name", "Test Configuration A");
    p.setProperty("classpath", "");
    p.setProperty("ignore.classes", "");
    p.setProperty("library.path", "");
    p.setProperty("required.classes", HadoopConfigurationLocatorTest.class.getName());
    p.store(configFile.getContent().getOutputStream(), "Test Configuration A");
    configFile.close();

    // Create the implementation jar
    FileObject implJar = aConfigFolder.resolveFile("a-config.jar");
    implJar.createFile();

    // Use ShrinkWrap to create the jar and write it out to VFS
    JavaArchive archive = ShrinkWrap.create(JavaArchive.class, "a-configuration.jar")
            .addAsServiceProvider(HadoopShim.class, MockHadoopShim.class)
            .addClass(MockHadoopShim.class);
    archive.as(ZipExporter.class).exportTo(implJar.getContent().getOutputStream());
}
From source file: org.pentaho.hadoop.shim.HadoopExcludeJarsTest.java

@BeforeClass
public static void setup() throws Exception {
    // Create a test hadoop configuration
    FileObject ramRoot = VFS.getManager().resolveFile(HADOOP_CONFIGURATIONS_PATH);
    if (ramRoot.exists()) {
        ramRoot.delete(new AllFileSelector());
    }
    ramRoot.createFolder();

    // Create the implementation jars
    ramRoot.resolveFile("xercesImpl-2.9.1.jar").createFile();
    ramRoot.resolveFile("xml-apis-1.3.04.jar").createFile();
    ramRoot.resolveFile("xml-apis-ext-1.3.04.jar").createFile();
    ramRoot.resolveFile("xerces-version-1.8.0.jar").createFile();
    ramRoot.resolveFile("xercesImpl2-2.9.1.jar").createFile();
    ramRoot.resolveFile("pentaho-hadoop-shims-api-61.2016.04.01-196.jar").createFile();
    ramRoot.resolveFile("commands-3.3.0-I20070605-0010.jar").createFile();
    ramRoot.resolveFile("postgresql-9.3-1102-jdbc4.jar").createFile();
    ramRoot.resolveFile("trilead-ssh2-build213.jar").createFile();
    ramRoot.resolveFile("trilead-ssh2-build215.jar").createFile();
}
From source file: org.pentaho.hadoop.shim.HadoopRunningOnClusterTest.java

@BeforeClass
public static void setup() throws Exception {
    // Create a test hadoop configuration
    FileObject ramRoot = VFS.getManager().resolveFile(CONFIG_PROPERTY_CLASSPATH);
    if (ramRoot.exists()) {
        ramRoot.delete(new AllFileSelector());
    }
    ramRoot.createFolder();

    // Create the implementation jars
    ramRoot.resolveFile("hadoop-mapreduce-client-app-2.7.0-mapr-1602.jar").createFile();
    ramRoot.resolveFile("hadoop-mapreduce-client-common-2.7.0-mapr-1602.jar").createFile();
    ramRoot.resolveFile("hadoop-mapreduce-client-contrib-2.7.0-mapr-1602.jar").createFile();
    ramRoot.resolveFile("hadoop-mapreduce-client-core-2.7.0-mapr-1602.jar").createFile();
    ramRoot.resolveFile("hadoop-mapreduce-client-hs-2.7.0-mapr-1602.jar").createFile();

    pmrFolder = tempFolder.newFolder("pmr");
    urlTestResources = Thread.currentThread().getContextClassLoader().getResource(PMR_PROPERTIES);
    Files.copy(Paths.get(urlTestResources.toURI()), Paths.get(pmrFolder.getAbsolutePath(), PMR_PROPERTIES));
}
From source file: org.pentaho.hadoop.shim.hsp101.HadoopShim.java

@Override
public void onLoad(HadoopConfiguration config, HadoopConfigurationFileSystemManager fsm) throws Exception {
    fsm.addProvider(config, "hdfs", config.getIdentifier(), new HDFSFileProvider());
    setDistributedCacheUtil(new DistributedCacheUtilImpl(config) {
        /**
         * Default permission for cached files
         * <p/>
         * Not using FsPermission.createImmutable due to EOFExceptions when using it with Hadoop 0.20.2
         */
        private final FsPermission CACHED_FILE_PERMISSION = new FsPermission((short) 0755);

        public void addFileToClassPath(Path file, Configuration conf) throws IOException {
            String classpath = conf.get("mapred.job.classpath.files");
            conf.set("mapred.job.classpath.files", classpath == null ? file.toString()
                    : classpath + getClusterPathSeparator() + file.toString());
            FileSystem fs = FileSystem.get(conf);
            URI uri = fs.makeQualified(file).toUri();
            DistributedCache.addCacheFile(uri, conf);
        }

        /**
         * Stages the source file or folder to a Hadoop file system and sets their permission and replication
         * value appropriately to be used with the Distributed Cache. WARNING: This will delete the contents of
         * dest before staging the archive.
         *
         * @param source    File or folder to copy to the file system. If it is a folder all contents will be
         *                  copied into dest.
         * @param fs        Hadoop file system to store the contents of the archive in
         * @param dest      Destination to copy source into. If source is a file, the new file name will be
         *                  exactly dest. If source is a folder its contents will be copied into dest. For more
         *                  info see {@link FileSystem#copyFromLocalFile(org.apache.hadoop.fs.Path,
         *                  org.apache.hadoop.fs.Path)}.
         * @param overwrite Should an existing file or folder be overwritten? If not an exception will be
         *                  thrown.
         * @throws IOException         Destination exists and is not a directory
         * @throws KettleFileException Source does not exist or destination exists and overwrite is false.
         */
        public void stageForCache(FileObject source, FileSystem fs, Path dest, boolean overwrite)
                throws IOException, KettleFileException {
            if (!source.exists()) {
                throw new KettleFileException(BaseMessages.getString(DistributedCacheUtilImpl.class,
                        "DistributedCacheUtil.SourceDoesNotExist", source));
            }
            if (fs.exists(dest)) {
                if (overwrite) {
                    // It is a directory, clear it out
                    fs.delete(dest, true);
                } else {
                    throw new KettleFileException(BaseMessages.getString(DistributedCacheUtilImpl.class,
                            "DistributedCacheUtil.DestinationExists", dest.toUri().getPath()));
                }
            }
            // Use the same replication we'd use for submitting jobs
            short replication = (short) fs.getConf().getInt("mapred.submit.replication", 10);
            copyFile(source, fs, dest, overwrite);
            fs.setReplication(dest, replication);
        }

        private void copyFile(FileObject source, FileSystem fs, Path dest, boolean overwrite) throws IOException {
            if (source.getType() == FileType.FOLDER) {
                fs.mkdirs(dest);
                fs.setPermission(dest, CACHED_FILE_PERMISSION);
                for (FileObject fileObject : source.getChildren()) {
                    copyFile(fileObject, fs, new Path(dest, fileObject.getName().getBaseName()), overwrite);
                }
            } else {
                try (FSDataOutputStream fsDataOutputStream = fs.create(dest, overwrite)) {
                    IOUtils.copy(source.getContent().getInputStream(), fsDataOutputStream);
                    fs.setPermission(dest, CACHED_FILE_PERMISSION);
                }
            }
        }

        public String getClusterPathSeparator() {
            return System.getProperty("hadoop.cluster.path.separator", ",");
        }
    });
}
From source file: org.pentaho.metaverse.impl.VfsLineageCollector.java

@Override
public List<String> listArtifacts(final String startingDate, final String endingDate)
        throws IllegalArgumentException {
    List<String> paths = new ArrayList<>();
    try {
        FileSystemOptions opts = new FileSystemOptions();
        FileObject lineageRootFolder = KettleVFS.getFileObject(getOutputFolder(), opts);

        FileSelector dateRangeFilter = new VfsDateRangeFilter(format, startingDate, endingDate);
        FileSelector depthFilter = new FileDepthSelector(1, 256);

        if (lineageRootFolder.exists() && lineageRootFolder.getType() == FileType.FOLDER) {
            // get the folders that come on or after the startingDate
            FileObject[] dayFolders = lineageRootFolder.findFiles(dateRangeFilter);
            for (FileObject dayFolder : dayFolders) {
                FileObject[] listThisFolder = dayFolder.findFiles(depthFilter);
                for (FileObject currentFile : listThisFolder) {
                    if (currentFile.getType() == FileType.FILE) {
                        paths.add(currentFile.getName().getPath());
                    }
                }
            }
        }
        return paths;
    } catch (Exception e) {
        throw new IllegalArgumentException(e);
    }
}
From source file: org.pentaho.metaverse.impl.VfsLineageCollector.java

@Override
public List<String> listArtifactsForFile(String pathToArtifact, String startingDate, String endingDate)
        throws IllegalArgumentException {
    List<String> paths = new ArrayList<>();
    try {
        FileSystemOptions opts = new FileSystemOptions();
        FileObject lineageRootFolder = KettleVFS.getFileObject(getOutputFolder(), opts);

        FileSelector dateRangeFilter = new VfsDateRangeFilter(format, startingDate, endingDate);
        FileSelector depthFilter = new FileDepthSelector(1, 256);

        if (lineageRootFolder.exists() && lineageRootFolder.getType() == FileType.FOLDER) {
            // get all of the date folders of lineage we have
            FileObject[] dayFolders = lineageRootFolder.findFiles(dateRangeFilter);
            for (FileObject dayFolder : dayFolders) {
                FileObject[] listThisFolder = dayFolder.findFiles(depthFilter);
                for (FileObject currentFile : listThisFolder) {
                    FileObject requested = currentFile.resolveFile(pathToArtifact);
                    if (requested.exists() && requested.getType() == FileType.FOLDER) {
                        FileObject[] requestedChildren = requested.getChildren();
                        for (FileObject requestedChild : requestedChildren) {
                            if (requestedChild.getType() == FileType.FILE) {
                                paths.add(requestedChild.getName().getPath());
                            }
                        }
                    }
                }
            }
        }
        return paths;
    } catch (Exception e) {
        throw new IllegalArgumentException(e);
    }
}
From source file: org.pentaho.platform.repository.solution.filebased.FileObjectTestHelper.java

public static FileObject mockFile(final String contents, final boolean exists) throws FileSystemException {
    FileObject fileObject = mock(FileObject.class);
    when(fileObject.exists()).thenReturn(exists);
    FileContent fileContent = mock(FileContent.class);
    when(fileObject.getContent()).thenReturn(fileContent);
    when(fileContent.getInputStream()).thenReturn(IOUtils.toInputStream(contents));
    final FileObject parent = mock(FileObject.class);
    when(fileObject.getParent()).thenReturn(parent);
    final FileName fileName = mock(FileName.class);
    when(parent.getName()).thenReturn(fileName);
    when(fileName.getURI()).thenReturn("mondrian:/catalog");
    return fileObject;
}
From source file: org.pentaho.reporting.designer.extensions.pentaho.repository.dialogs.RepositoryOpenDialog.java

public String performOpen(final AuthenticationData loginData, final String previousSelection)
        throws FileSystemException, UnsupportedEncodingException {
    fileSystemRoot = PublishUtil.createVFSConnection(VFS.getManager(), loginData);
    if (previousSelection == null) {
        setSelectedView(fileSystemRoot);
    } else {
        final FileObject view = fileSystemRoot.resolveFile(previousSelection);
        if (view == null) {
            setSelectedView(fileSystemRoot);
        } else {
            if (view.exists() == false) {
                setSelectedView(fileSystemRoot);
            } else if (view.getType() == FileType.FOLDER) {
                setSelectedView(view);
            } else {
                setSelectedView(view.getParent());
            }
        }
    }

    if (StringUtils.isEmpty(fileNameTextField.getText(), true) && previousSelection != null) {
        final String fileName = IOUtils.getInstance().getFileName(previousSelection);
        DebugLog.log("Setting filename to " + fileName);
        fileNameTextField.setText(fileName);
    }

    getConfirmAction().setEnabled(validateInputs(false));
    if (super.performEdit() == false || selectedView == null) {
        return null;
    }
    return getSelectedFile();
}