List of usage examples for org.apache.hadoop.fs FileSystem newInstance
public static FileSystem newInstance(URI uri, Configuration config) throws IOException
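Unlike FileSystem.get, newInstance always returns a new FileSystem object rather than the process-wide cached instance, so the caller is responsible for closing it. A minimal sketch of the call pattern (not taken from any of the source files below; the "hdfs://namenode:8020" URI and the /tmp path are placeholders):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NewInstanceExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // newInstance returns a fresh, uncached FileSystem, so close it when done;
        // FileSystem implements Closeable, which makes try-with-resources convenient.
        try (FileSystem fs = FileSystem.newInstance(URI.create("hdfs://namenode:8020"), conf)) {
            System.out.println("/tmp exists: " + fs.exists(new Path("/tmp")));
        }
    }
}

The examples that follow show the same call wired into real projects: plain construction, proxy-user doAs blocks, isolated class loaders, and configuration-driven implementation overrides.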
From source file: com.qubole.streamx.s3.S3Storage.java
License: Apache License
public S3Storage(Configuration hadoopConf, HdfsSinkConnectorConfig config, String url) throws IOException {
    fs = FileSystem.newInstance(URI.create(url), hadoopConf);
    this.hadoopConf = hadoopConf;
    this.url = url;
    this.config = config;
}
From source file: com.streamsets.pipeline.stage.destination.hdfs.metadataexecutor.HdfsConnectionConfig.java
License: Apache License
public void init(Stage.Context context, String prefix, List<Stage.ConfigIssue> issues) {
    conf = new Configuration();
    conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);

    if (hdfsKerberos) {
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
            UserGroupInformation.AuthenticationMethod.KERBEROS.name());
        try {
            conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
                "hdfs/_HOST@" + HadoopSecurityUtil.getDefaultRealm());
        } catch (Exception ex) {
            if (!hdfsConfigs.containsKey(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)) {
                issues.add(context.createConfigIssue(Groups.HDFS.name(), null,
                    HdfsMetadataErrors.HDFS_METADATA_001, ex.toString()));
            }
        }
    }

    if (hdfsConfDir != null && !hdfsConfDir.isEmpty()) {
        File hadoopConfigDir = new File(hdfsConfDir);
        if (!hadoopConfigDir.isAbsolute()) {
            hadoopConfigDir = new File(context.getResourcesDirectory(), hdfsConfDir).getAbsoluteFile();
        }
        if (!hadoopConfigDir.exists()) {
            issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                HdfsMetadataErrors.HDFS_METADATA_002, hadoopConfigDir.getPath()));
        } else if (!hadoopConfigDir.isDirectory()) {
            issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                HdfsMetadataErrors.HDFS_METADATA_003, hadoopConfigDir.getPath()));
        } else {
            File coreSite = new File(hadoopConfigDir, "core-site.xml");
            if (coreSite.exists()) {
                if (!coreSite.isFile()) {
                    issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                        HdfsMetadataErrors.HDFS_METADATA_004, coreSite.getPath()));
                }
                conf.addResource(new Path(coreSite.getAbsolutePath()));
            }
            File hdfsSite = new File(hadoopConfigDir, "hdfs-site.xml");
            if (hdfsSite.exists()) {
                if (!hdfsSite.isFile()) {
                    issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                        HdfsMetadataErrors.HDFS_METADATA_004, hdfsSite.getPath()));
                }
                conf.addResource(new Path(hdfsSite.getAbsolutePath()));
            }
        }
    }

    // Unless the user specified a non-empty, non-null HDFS URI, retrieve its value from the configuration.
    if (StringUtils.isEmpty(hdfsUri)) {
        hdfsUri = conf.get("fs.defaultFS");
    }

    for (Map.Entry<String, String> config : hdfsConfigs.entrySet()) {
        conf.set(config.getKey(), config.getValue());
    }

    try {
        loginUgi = HadoopSecurityUtil.getLoginUser(conf);
        userUgi = HadoopSecurityUtil.getProxyUser(hdfsUser, context, loginUgi, issues, Groups.HDFS.name(),
            JOIN.join(prefix, "hdfsUser"));
    } catch (IOException e) {
        LOG.error("Can't create UGI", e);
        issues.add(context.createConfigIssue(Groups.HDFS.name(), null,
            HdfsMetadataErrors.HDFS_METADATA_005, e.getMessage(), e));
    }

    if (!issues.isEmpty()) {
        return;
    }

    try {
        fs = getUGI().doAs(
            (PrivilegedExceptionAction<FileSystem>) () -> FileSystem.newInstance(new URI(hdfsUri), conf));
    } catch (Exception ex) {
        LOG.error("Can't retrieve FileSystem instance", ex);
        issues.add(context.createConfigIssue(Groups.HDFS.name(), null,
            HdfsMetadataErrors.HDFS_METADATA_005, ex.getMessage(), ex));
    }
}
From source file: com.streamsets.pipeline.stage.destination.hdfs.metadataxecutor.HdfsConnectionConfig.java
License: Apache License
public void init(Stage.Context context, String prefix, List<Stage.ConfigIssue> issues) {
    conf = new Configuration();
    conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);

    if (hdfsKerberos) {
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
            UserGroupInformation.AuthenticationMethod.KERBEROS.name());
        try {
            conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
                "hdfs/_HOST@" + HadoopSecurityUtil.getDefaultRealm());
        } catch (Exception ex) {
            if (!hdfsConfigs.containsKey(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)) {
                issues.add(context.createConfigIssue(Groups.HDFS.name(), null,
                    HdfsMetadataErrors.HDFS_METADATA_001, ex.toString()));
            }
        }
    }

    if (hdfsConfDir != null && !hdfsConfDir.isEmpty()) {
        File hadoopConfigDir = new File(hdfsConfDir);
        if (!hadoopConfigDir.isAbsolute()) {
            hadoopConfigDir = new File(context.getResourcesDirectory(), hdfsConfDir).getAbsoluteFile();
        }
        if (!hadoopConfigDir.exists()) {
            issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                HdfsMetadataErrors.HDFS_METADATA_002, hadoopConfigDir.getPath()));
        } else if (!hadoopConfigDir.isDirectory()) {
            issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                HdfsMetadataErrors.HDFS_METADATA_003, hadoopConfigDir.getPath()));
        } else {
            File coreSite = new File(hadoopConfigDir, "core-site.xml");
            if (coreSite.exists()) {
                if (!coreSite.isFile()) {
                    issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                        HdfsMetadataErrors.HDFS_METADATA_004, coreSite.getPath()));
                }
                conf.addResource(new Path(coreSite.getAbsolutePath()));
            }
            File hdfsSite = new File(hadoopConfigDir, "hdfs-site.xml");
            if (hdfsSite.exists()) {
                if (!hdfsSite.isFile()) {
                    issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                        HdfsMetadataErrors.HDFS_METADATA_004, hdfsSite.getPath()));
                }
                conf.addResource(new Path(hdfsSite.getAbsolutePath()));
            }
        }
    }

    // Unless the user specified a non-empty, non-null HDFS URI, retrieve its value from the configuration.
    if (StringUtils.isEmpty(hdfsUri)) {
        hdfsUri = conf.get("fs.defaultFS");
    }

    for (Map.Entry<String, String> config : hdfsConfigs.entrySet()) {
        conf.set(config.getKey(), config.getValue());
    }

    try {
        loginUgi = HadoopSecurityUtil.getLoginUser(conf);
    } catch (IOException e) {
        LOG.error("Can't create login UGI", e);
        issues.add(context.createConfigIssue(Groups.HDFS.name(), null,
            HdfsMetadataErrors.HDFS_METADATA_005, e.getMessage(), e));
    }

    if (!issues.isEmpty()) {
        return;
    }

    try {
        fs = getUGI().doAs(new PrivilegedExceptionAction<FileSystem>() {
            @Override
            public FileSystem run() throws Exception {
                return FileSystem.newInstance(new URI(hdfsUri), conf);
            }
        });
    } catch (Exception ex) {
        LOG.error("Can't retrieve FileSystem instance", ex);
        issues.add(context.createConfigIssue(Groups.HDFS.name(), null,
            HdfsMetadataErrors.HDFS_METADATA_005, ex.getMessage(), ex));
    }
}
From source file: com.thinkbiganalytics.kylo.catalog.file.DefaultCatalogFileManager.java
License: Apache License
/**
 * Executes the specified function in a separate class loader containing the jars of the specified template.
 */
@VisibleForTesting
protected <R> R isolatedFunction(@Nonnull final DataSetTemplate template, @Nonnull final Path path,
        @Nonnull final FileSystemFunction<R> function) throws IOException {
    final Configuration conf = DataSetUtil.getConfiguration(template, defaultConf);
    try (final HadoopClassLoader classLoader = new HadoopClassLoader(conf)) {
        if (template.getJars() != null) {
            log.debug("Adding jars to HadoopClassLoader: {}", template.getJars());
            classLoader.addJars(template.getJars());
        }

        log.debug("Creating FileSystem from path: {}", path);
        try (final FileSystem fs = FileSystem.newInstance(path.toUri(), conf)) {
            return function.apply(fs);
        }
    }
}
From source file: gobblin.runtime.mapreduce.MRJobLauncher.java
License: Apache License
private static FileSystem buildFileSystem(Properties jobProps, Configuration configuration) throws IOException {
    URI fsUri = URI.create(jobProps.getProperty(ConfigurationKeys.FS_URI_KEY, ConfigurationKeys.LOCAL_FS_URI));
    return FileSystem.newInstance(fsUri, configuration);
}
From source file: gobblin.source.extractor.hadoop.HadoopFsHelper.java
License: Apache License
private void createFileSystem(String uri) throws IOException, InterruptedException, URISyntaxException {
    if (this.state.getPropAsBoolean(ConfigurationKeys.SHOULD_FS_PROXY_AS_USER,
            ConfigurationKeys.DEFAULT_SHOULD_FS_PROXY_AS_USER)) {
        // Initialize file system as a proxy user.
        this.fs = new ProxiedFileSystemWrapper().getProxiedFileSystem(this.state,
            ProxiedFileSystemWrapper.AuthType.TOKEN,
            this.state.getProp(ConfigurationKeys.FS_PROXY_AS_USER_TOKEN_FILE), uri, configuration);
    } else {
        // Initialize file system as the current user.
        this.fs = FileSystem.newInstance(URI.create(uri), this.configuration);
    }
}
From source file: gobblin.util.filesystem.FileSystemFactory.java
License: Apache License
@Override
public SharedResourceFactoryResponse<FileSystem> createResource(SharedResourcesBroker<S> broker,
        ScopedConfigView<S, FileSystemKey> config) throws NotConfiguredException {
    try {
        FileSystemKey key = config.getKey();
        URI uri = key.getUri();
        Configuration hadoopConf = key.getConfiguration();

        log.info("Creating instrumented FileSystem for uri " + uri);

        Class<? extends FileSystem> fsClass = FileSystem.getFileSystemClass(uri.getScheme(), hadoopConf);
        if (InstrumentedFileSystem.class.isAssignableFrom(fsClass)) {
            InstrumentedFileSystem tmpfs = (InstrumentedFileSystem) fsClass.newInstance();
            hadoopConf = new Configuration(hadoopConf);
            String schemeKey = "fs." + uri.getScheme() + ".impl";
            hadoopConf.set(schemeKey, tmpfs.underlyingFs.getClass().getName());
        }

        FileSystem fs = FileSystem.newInstance(uri, hadoopConf);

        ServiceLoader<FileSystemInstrumentationFactory> loader =
            ServiceLoader.load(FileSystemInstrumentationFactory.class);
        for (FileSystemInstrumentationFactory instrumentationFactory : loader) {
            fs = instrumentationFactory.instrumentFileSystem(fs, broker, config);
        }

        return new ResourceInstance<>(fs);
    } catch (IOException | ReflectiveOperationException ioe) {
        throw new RuntimeException(ioe);
    }
}
From source file: gobblin.util.filesystem.InstrumentedLocalFileSystemTest.java
License: Apache License
@Test
public void testFromConfigurationOverride() throws Exception {
    Configuration configuration = new Configuration();
    configuration.set("fs.file.impl", InstrumentedLocalFileSystem.class.getName());
    FileSystem fs = FileSystem.newInstance(new URI("file:///"), configuration);
    Assert.assertTrue(fs instanceof InstrumentedLocalFileSystem);
    Assert.assertTrue(DecoratorUtils.resolveUnderlyingObject(fs) instanceof LocalFileSystem);
    Assert.assertEquals(fs.getFileStatus(new Path("/tmp")).getPath(), new Path("file:///tmp"));
    Assert.assertEquals(fs.getUri().getScheme(), "file");
}
From source file: gobblin.util.WriterUtils.java
License: Apache License
private static FileSystem getWriterFsUsingKeytab(State state, URI uri) throws IOException {
    FileSystem fs = FileSystem.newInstance(uri, new Configuration());
    try {
        Preconditions.checkArgument(state.contains(ConfigurationKeys.FS_PROXY_AS_USER_NAME),
            "Missing required property " + ConfigurationKeys.FS_PROXY_AS_USER_NAME);
        Preconditions.checkArgument(state.contains(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS),
            "Missing required property " + ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS);
        Preconditions.checkArgument(state.contains(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION),
            "Missing required property " + ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION);
        String user = state.getProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME);
        String superUser = state.getProp(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS);
        Path keytabLocation = new Path(state.getProp(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION));
        return ProxiedFileSystemCache.fromKeytab().userNameToProxyAs(user).fsURI(uri)
            .superUserKeytabLocation(keytabLocation).superUserName(superUser)
            .conf(HadoopUtils.newConfiguration()).referenceFS(fs).build();
    } catch (ExecutionException e) {
        throw new IOException(e);
    }
}
From source file: io.confluent.connect.hdfs.storage.HdfsStorage.java
License: Apache License
public HdfsStorage(Configuration conf, String url) throws IOException {
    fs = FileSystem.newInstance(URI.create(url), conf);
    this.conf = conf;
    this.url = url;
}