List of usage examples for java.net.URI.getAuthority()
public String getAuthority()
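getAuthority() returns the decoded authority component of the URI (typically user-info@host:port, the part between "//" and the path), or null when the URI has no authority, such as a relative reference or an opaque URI like mailto:. A minimal sketch of the common cases (the URI values are illustrative):

import java.net.URI;

public class GetAuthorityDemo {
    public static void main(String[] args) {
        // Full authority with user-info and port: prints "user@example.com:8080"
        System.out.println(URI.create("http://user@example.com:8080/index.html").getAuthority());

        // Host-only authority: prints "example.com"
        System.out.println(URI.create("https://example.com/path").getAuthority());

        // Relative reference: no authority, prints "null"
        System.out.println(URI.create("docs/readme.txt").getAuthority());

        // Opaque URI (no "//" part): no authority, prints "null"
        System.out.println(URI.create("mailto:user@example.com").getAuthority());
    }
}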
From source file:org.cloudfoundry.client.lib.rest.CloudControllerClientImpl.java
protected void extractUriInfo(Map<String, UUID> domains, String uri, Map<String, String> uriInfo) {
    URI newUri = URI.create(uri);
    String authority = newUri.getScheme() != null ? newUri.getAuthority() : newUri.getPath();
    for (String domain : domains.keySet()) {
        if (authority != null && authority.endsWith(domain)) {
            String previousDomain = uriInfo.get("domainName");
            if (previousDomain == null || domain.length() > previousDomain.length()) {
                // Favor most specific subdomains
                uriInfo.put("domainName", domain);
                if (domain.length() < authority.length()) {
                    uriInfo.put("host", authority.substring(0, authority.indexOf(domain) - 1));
                } else if (domain.length() == authority.length()) {
                    uriInfo.put("host", "");
                }
            }
        }
    }
    if (uriInfo.get("domainName") == null) {
        throw new IllegalArgumentException("Domain not found for URI " + uri);
    }
    if (uriInfo.get("host") == null) {
        throw new IllegalArgumentException(
                "Invalid URI " + uri + " -- host not specified for domain " + uriInfo.get("domainName"));
    }
}
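The scheme check before calling getAuthority() matters here: for a scheme-less string such as "myhost.example.com/app", java.net.URI parses the entire value as a path, so getAuthority() returns null and the method falls back to getPath(). A quick illustration of that fallback (the host and path are made up):

import java.net.URI;

class SchemeFallbackDemo {
    public static void main(String[] args) {
        URI withScheme = URI.create("http://myhost.example.com/app");
        URI bare = URI.create("myhost.example.com/app");
        System.out.println(withScheme.getAuthority()); // myhost.example.com
        System.out.println(bare.getAuthority());       // null -- no scheme means no authority
        System.out.println(bare.getPath());            // myhost.example.com/app
    }
}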
From source file:org.gvnix.web.theme.roo.addon.ThemeOperationsImpl.java
/**
 * This method will copy the contents of a bundle to a local directory if
 * the resource does not already exist in the target directory.
 *
 * TODO Duplicated code. When finding a bundle use URL and when finding local
 * files use URI. Avoid error "unknown protocol: bundle" on commands.
 *
 * @param sourceDirectory source directory. URI syntax:
 *            [scheme:][//authority][path][?query][#fragment]
 * @param targetDirectory target directory
 * @param overwrite if true copy to target dir overwriting destination file
 * @see JspOperationsImpl#copyDirectoryContents(String, String)
 */
@SuppressWarnings("unchecked")
private void copyRecursivelyURI(URI sourceDirectory, File targetDirectory, boolean overwrite) {
    Validate.notNull(sourceDirectory, "Source URI required");
    Validate.notNull(targetDirectory, "Target directory required");

    // if source and target are the same dir, do nothing
    if (targetDirectory.toURI().equals(sourceDirectory)) {
        return;
    }

    if (!targetDirectory.exists()) {
        fileManager.createDirectory(targetDirectory.getAbsolutePath());
    }

    // Set of resource URIs to be copied to target dir
    Set<URI> uris = new HashSet<URI>();

    // if source URI scheme is file:// , source files are in a local
    // repository
    if ("file".equals(sourceDirectory.getScheme())) {
        uris = FileUtils.findFilesURI(new File(sourceDirectory));
    }
    // if source URI scheme is bundle:// , we can access that bundle
    // (note the authority contains the bundle ID) and copy Theme
    // artefacts. URI example:
    // bundle://8.0:0/org/gvnix/web/theme/roo/addon/themes/theme-cit/
    else if ("bundle".equals(sourceDirectory.getScheme())) {
        String uriAuthority = sourceDirectory.getAuthority();
        long bundleId = Long.parseLong(uriAuthority.substring(0, uriAuthority.indexOf(".")));

        // iterate over bundle entries in the given URI path and add them
        // to the URIs to be copied to target dir
        Enumeration<URL> entries = context.getBundle(bundleId).findEntries(sourceDirectory.getPath(), "*.*",
                true);
        while (entries.hasMoreElements()) {
            try {
                uris.add(entries.nextElement().toURI());
            } catch (URISyntaxException e) {
                throw new IllegalStateException(
                        "Encountered an error during copying of resources for MVC Theme addon.", e);
            }
        }
    }
    // it shouldn't occur
    else {
        throw new IllegalArgumentException("Could not determine schema for resources for source dir '"
                .concat(sourceDirectory.toString()).concat("'"));
    }

    Validate.notNull(uris, "No resources found to copy in '".concat(sourceDirectory.toString()).concat("'"));

    // iterate over Theme resources and copy them with same dir layout
    for (URI uri : uris) {
        // Remove source directory prefix from absolute url: relative file path
        String filePath = uri.toString().substring(sourceDirectory.toString().length());
        if (isVersionControlSystemFile(filePath)) {
            // nothing to do if the URL is of a file from a Version Control System
            continue;
        }
        File targetFile = new File(targetDirectory, filePath);
        try {
            // only copy files, and only if target file doesn't exist or overwrite flag is true
            if (!targetFile.exists()) {
                // create file using FileManager to fire creation events
                InputStream inputStream = null;
                OutputStream outputStream = null;
                try {
                    inputStream = uri.toURL().openStream();
                    outputStream = fileManager.createFile(targetFile.getAbsolutePath()).getOutputStream();
                    IOUtils.copy(inputStream, outputStream);
                } finally {
                    IOUtils.closeQuietly(inputStream);
                    IOUtils.closeQuietly(outputStream);
                }
            }
            // if file exists and overwrite is true, update the file
            else if (overwrite) {
                InputStream inputStream = null;
                OutputStream outputStream = null;
                try {
                    inputStream = uri.toURL().openStream();
                    outputStream = fileManager.updateFile(targetFile.getAbsolutePath()).getOutputStream();
                    IOUtils.copy(inputStream, outputStream);
                } finally {
                    IOUtils.closeQuietly(inputStream);
                    IOUtils.closeQuietly(outputStream);
                }
            }
        } catch (IOException e) {
            throw new IllegalStateException(
                    "Encountered an error during copying of resources for MVC Theme addon.", e);
        }
    }
}
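In the bundle:// branch above the authority is not a host at all. For the example URI bundle://8.0:0/org/gvnix/..., getAuthority() yields "8.0:0" (a registry-based authority, so getHost() is null), and the digits before the first '.' are the OSGi bundle ID. A standalone sketch of that parse, using the URI from the comment above:

import java.net.URI;

class BundleAuthorityDemo {
    public static void main(String[] args) {
        URI bundleUri = URI.create("bundle://8.0:0/org/gvnix/web/theme/roo/addon/themes/theme-cit/");
        String uriAuthority = bundleUri.getAuthority(); // "8.0:0"
        // the portion before the first dot is the OSGi bundle ID
        long bundleId = Long.parseLong(uriAuthority.substring(0, uriAuthority.indexOf(".")));
        System.out.println(bundleId); // 8
    }
}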
From source file:org.gvnix.web.theme.roo.addon.ThemeOperationsImpl.java
/**
 * This method will copy the contents of a bundle to a local directory if
 * the resource does not already exist in the target directory.
 *
 * TODO Duplicated code. When finding a bundle use URL and when finding local
 * files use URI. Avoid error "unknown protocol: bundle" on commands.
 *
 * @param sourceDirectory source directory. URI syntax:
 *            [scheme:][//authority][path][?query][#fragment]
 * @param targetDirectory target directory
 * @param overwrite if true copy to target dir overwriting destination file
 * @see JspOperationsImpl#copyDirectoryContents(String, String)
 */
@SuppressWarnings("unchecked")
private void copyRecursivelyURL(URI sourceDirectory, File targetDirectory, boolean overwrite) {
    Validate.notNull(sourceDirectory, "Source URI required");
    Validate.notNull(targetDirectory, "Target directory required");

    // if source and target are the same dir, do nothing
    if (targetDirectory.toURI().equals(sourceDirectory)) {
        return;
    }

    if (!targetDirectory.exists()) {
        fileManager.createDirectory(targetDirectory.getAbsolutePath());
    }

    // List of resource URLs to be copied to target dir
    List<URL> urls = new ArrayList<URL>();

    // if source URI scheme is file:// , source files are in a local
    // repository
    if ("file".equals(sourceDirectory.getScheme())) {
        urls = FileUtils.findFilesURL(new File(sourceDirectory));
    }
    // if source URI scheme is bundle:// , we can access that bundle
    // (note the authority contains the bundle ID) and copy Theme
    // artefacts. URI example:
    // bundle://8.0:0/org/gvnix/web/theme/roo/addon/themes/theme-cit/
    else if ("bundle".equals(sourceDirectory.getScheme())) {
        String uriAuthority = sourceDirectory.getAuthority();
        long bundleId = Long.parseLong(uriAuthority.substring(0, uriAuthority.indexOf(".")));

        // iterate over bundle entries in the given URI path and add them
        // to URLs to be copied to target dir
        Enumeration<URL> entries = context.getBundle(bundleId).findEntries(sourceDirectory.getPath(), "*.*",
                true);
        while (entries.hasMoreElements()) {
            urls.add(entries.nextElement());
        }
    }
    // it shouldn't occur
    else {
        throw new IllegalArgumentException("Could not determine schema for resources for source dir '"
                .concat(sourceDirectory.toString()).concat("'"));
    }

    Validate.notNull(urls, "No resources found to copy in '".concat(sourceDirectory.toString()).concat("'"));

    // remove duplicates on urls
    urls = FileUtils.removeDuplicates(urls);

    // iterate over Theme resources and copy them with same dir layout
    for (URL url : urls) {
        // Remove source directory prefix from absolute url: relative file path
        String filePath = url.toString().substring(sourceDirectory.toString().length());
        if (isVersionControlSystemFile(filePath)) {
            // nothing to do if the URL is of a file from a Version Control System
            continue;
        }
        File targetFile = new File(targetDirectory, filePath);
        try {
            // only copy files, and only if target file doesn't exist or overwrite flag is true
            if (!targetFile.exists()) {
                // create file using FileManager to fire creation events
                InputStream inputStream = null;
                OutputStream outputStream = null;
                try {
                    inputStream = url.openStream();
                    outputStream = fileManager.createFile(targetFile.getAbsolutePath()).getOutputStream();
                    IOUtils.copy(inputStream, outputStream);
                } finally {
                    IOUtils.closeQuietly(inputStream);
                    IOUtils.closeQuietly(outputStream);
                }
            }
            // if file exists and overwrite is true, update the file
            else if (overwrite) {
                InputStream inputStream = null;
                OutputStream outputStream = null;
                try {
                    inputStream = url.openStream();
                    outputStream = fileManager.updateFile(targetFile.getAbsolutePath()).getOutputStream();
                    IOUtils.copy(inputStream, outputStream);
                } finally {
                    IOUtils.closeQuietly(inputStream);
                    IOUtils.closeQuietly(outputStream);
                }
            }
        } catch (IOException e) {
            throw new IllegalStateException(
                    "Encountered an error during copying of resources for MVC Theme addon.", e);
        }
    }
}
From source file:org.apache.hadoop.hive.ql.exec.Utilities.java
/**
 * Remove all temporary files and duplicate (double-committed) files from a given directory.
 *
 * @return a list of path names corresponding to should-be-created empty buckets.
 */
public static List<Path> removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] fileStats,
        DynamicPartitionCtx dpCtx, FileSinkDesc conf, Configuration hconf, Set<Path> filesKept)
        throws IOException {
    if (fileStats == null) {
        return null;
    }

    List<Path> result = new ArrayList<Path>();
    HashMap<String, FileStatus> taskIDToFile = null;
    if (dpCtx != null) {
        FileStatus parts[] = fileStats;
        for (int i = 0; i < parts.length; ++i) {
            assert parts[i].isDir() : "dynamic partition " + parts[i].getPath() + " is not a directory";
            FileStatus[] items = fs.listStatus(parts[i].getPath());

            // remove empty directory since DP insert should not generate empty partitions.
            // empty directories could be generated by crashed Task/ScriptOperator
            if (items.length == 0) {
                if (!fs.delete(parts[i].getPath(), true)) {
                    LOG.error("Cannot delete empty directory " + parts[i].getPath());
                    throw new IOException("Cannot delete empty directory " + parts[i].getPath());
                }
            }

            taskIDToFile = removeTempOrDuplicateFiles(items, fs);
            if (filesKept != null && taskIDToFile != null) {
                addFilesToPathSet(taskIDToFile.values(), filesKept);
            }
            // if the table is bucketed and enforce bucketing, we should check and generate all buckets
            if (dpCtx.getNumBuckets() > 0 && taskIDToFile != null
                    && !"tez".equalsIgnoreCase(hconf.get(ConfVars.HIVE_EXECUTION_ENGINE.varname))) {
                // refresh the file list
                items = fs.listStatus(parts[i].getPath());
                // get the missing buckets and generate empty buckets
                String taskID1 = taskIDToFile.keySet().iterator().next();
                Path bucketPath = taskIDToFile.values().iterator().next().getPath();
                for (int j = 0; j < dpCtx.getNumBuckets(); ++j) {
                    String taskID2 = replaceTaskId(taskID1, j);
                    if (!taskIDToFile.containsKey(taskID2)) {
                        // create empty bucket, file name should be derived from taskID2
                        URI bucketUri = bucketPath.toUri();
                        String path2 = replaceTaskIdFromFilename(bucketUri.getPath().toString(), j);
                        result.add(new Path(bucketUri.getScheme(), bucketUri.getAuthority(), path2));
                    }
                }
            }
        }
    } else {
        FileStatus[] items = fileStats;
        if (items.length == 0) {
            return result;
        }
        taskIDToFile = removeTempOrDuplicateFiles(items, fs);
        if (filesKept != null && taskIDToFile != null) {
            addFilesToPathSet(taskIDToFile.values(), filesKept);
        }
        if (taskIDToFile != null && taskIDToFile.size() > 0 && conf != null && conf.getTable() != null
                && (conf.getTable().getNumBuckets() > taskIDToFile.size())
                && !"tez".equalsIgnoreCase(hconf.get(ConfVars.HIVE_EXECUTION_ENGINE.varname))) {
            // get the missing buckets and generate empty buckets for non-dynamic partition
            String taskID1 = taskIDToFile.keySet().iterator().next();
            Path bucketPath = taskIDToFile.values().iterator().next().getPath();
            for (int j = 0; j < conf.getTable().getNumBuckets(); ++j) {
                String taskID2 = replaceTaskId(taskID1, j);
                if (!taskIDToFile.containsKey(taskID2)) {
                    // create empty bucket, file name should be derived from taskID2
                    URI bucketUri = bucketPath.toUri();
                    String path2 = replaceTaskIdFromFilename(bucketUri.getPath().toString(), j);
                    result.add(new Path(bucketUri.getScheme(), bucketUri.getAuthority(), path2));
                }
            }
        }
    }
    return result;
}
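The getAuthority() call here preserves the filesystem identity of the generated empty buckets: scheme and authority (for HDFS, the namenode host:port) are copied from an existing bucket file and only the file name changes. A reduced sketch with an invented bucket path, using a plain string replace as a stand-in for replaceTaskIdFromFilename:

import java.net.URI;
import org.apache.hadoop.fs.Path;

class EmptyBucketPathDemo {
    public static void main(String[] args) {
        // hypothetical existing bucket file on HDFS
        Path bucketPath = new Path("hdfs://namenode:8020/warehouse/tbl/000000_0");
        URI bucketUri = bucketPath.toUri();
        // derive the missing bucket's file name on the same filesystem
        String path2 = bucketUri.getPath().replace("000000_0", "000001_0");
        Path emptyBucket = new Path(bucketUri.getScheme(), bucketUri.getAuthority(), path2);
        System.out.println(emptyBucket); // hdfs://namenode:8020/warehouse/tbl/000001_0
    }
}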
From source file:com.bigstep.datalake.DLFileSystem.java
@Override
public synchronized void initialize(URI uri, Configuration conf) throws IOException {
    super.initialize(uri, conf);

    uri = selectDatalakeEndpointURI(uri, conf);

    /* set user pattern based on configuration file */
    UserParam.setUserPattern(conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
            DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

    kerberosIdentity = initialiseKerberosIdentity(conf);

    this.shouldUseEncryption = conf.getBoolean(FS_DL_IMPL_SHOULD_USE_ENCRYPTION_CONFIG_NAME, false);
    if (this.shouldUseEncryption) {
        initialiseAesEncryption(conf);
    }

    this.homeDirectory = conf.get(FS_DL_IMPL_HOME_DIRECTORY);
    if (homeDirectory == null)
        throw new IOException(
                "The Datalake requires a home directory to be configured in the fs.dl.impl.homeDirectory configuration variable. This is in the form /data_lake/dlxxxx");

    this.defaultEndpoint = conf.get(FS_DL_IMPL_DEFAULT_ENDPOINT);
    if (defaultEndpoint == null)
        throw new IOException(
                "The Datalake requires a default endpoint to be configured in the fs.dl.impl.defaultEndpoint configuration variable. This is in the form /data_lake/dlxxxx");

    URI defaultEndpointURI = URI.create(defaultEndpoint);

    String authority = uri.getAuthority() == null ? defaultEndpointURI.getAuthority() : uri.getAuthority();

    this.baseUri = URI.create(uri.getScheme() + "://" + authority + this.homeDirectory);
    this.nnAddrs = resolveNNAddr();

    LOG.debug("Created kerberosIdentity " + kerberosIdentity + " for " + this.baseUri);

    boolean isHA = HAUtil.isClientFailoverConfigured(conf, this.baseUri);
    boolean isLogicalUri = isHA && HAUtil.isLogicalUri(conf, this.baseUri);
    // In non-HA or non-logical URI case, the code needs to call
    // getCanonicalUri() in order to handle the case where no port is
    // specified in the URI
    this.tokenServiceName = isLogicalUri ? HAUtil.buildTokenServiceForLogicalUri(this.baseUri, getScheme())
            : SecurityUtil.buildTokenService(getCanonicalUri());

    if (!isHA) {
        this.retryPolicy = RetryUtils.getDefaultRetryPolicy(conf,
                DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY,
                DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
                DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY,
                DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT, SafeModeException.class);
    } else {
        int maxFailoverAttempts = conf.getInt(DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
                DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
        int maxRetryAttempts = conf.getInt(DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
                DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
        int failoverSleepBaseMillis = conf.getInt(DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
                DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
        int failoverSleepMaxMillis = conf.getInt(DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
                DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);

        this.retryPolicy = RetryPolicies.failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
                maxFailoverAttempts, maxRetryAttempts, failoverSleepBaseMillis, failoverSleepMaxMillis);
    }

    this.workingDir = getHomeDirectory();
    // Delegation tokens don't work with httpfs
    this.canRefreshDelegationToken = false;
    this.disallowFallbackToInsecureCluster = !conf.getBoolean(
            CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
            CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
    this.delegationToken = null;

    this.defaultFilePermissions = Short
            .decode(conf.get(FS_DL_IMPL_DEFAULT_FILE_PERMISSIONS, this.DEFAULT_FILE_PERMISSIONS));
    this.defaultUMask = Short.decode(conf.get(FS_DL_IMPL_DEFAULT_UMASK, this.DEFAULT_UMASK));

    this.transportScheme = conf.get(FS_DL_IMPL_TRANSPORT_SCHEME_CONFIG_NAME,
            FS_DL_IMPL_DEFAULT_TRANSPORT_SCHEME);

    if (!checkJCE())
        throw new IOException(JCE_ERROR);
}
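The authority fallback above is the key getAuthority() pattern in this method: if the caller's URI omits the authority, take it from the configured default endpoint before rebuilding the base URI. In isolation (the scheme and endpoint values are invented):

import java.net.URI;

class AuthorityFallbackDemo {
    public static void main(String[] args) {
        URI uri = URI.create("dl:///some/path"); // no authority supplied, getAuthority() is null
        URI defaultEndpointURI = URI.create("dl://endpoint.example.com:14000");
        String authority = uri.getAuthority() == null ? defaultEndpointURI.getAuthority()
                : uri.getAuthority();
        URI baseUri = URI.create(uri.getScheme() + "://" + authority + "/data_lake/dl1234");
        System.out.println(baseUri); // dl://endpoint.example.com:14000/data_lake/dl1234
    }
}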
From source file:org.apache98.hadoop.fs.FileSystem.java
/**
 * Check that a Path belongs to this FileSystem.
 *
 * @param path
 *            to check
 */
protected void checkPath(Path path) {
    URI uri = path.toUri();
    String thatScheme = uri.getScheme();
    if (thatScheme == null) {
        return;
    }
    URI thisUri = getCanonicalUri();
    String thisScheme = thisUri.getScheme();
    // authority and scheme are not case sensitive
    if (thisScheme.equalsIgnoreCase(thatScheme)) { // schemes match
        String thisAuthority = thisUri.getAuthority();
        String thatAuthority = uri.getAuthority();
        if (thatAuthority == null && // path's authority is null
                thisAuthority != null) { // fs has an authority
            URI defaultUri = getDefaultUri(getConf());
            if (thisScheme.equalsIgnoreCase(defaultUri.getScheme())) {
                uri = defaultUri; // schemes match, so use this uri instead
            } else {
                uri = null; // can't determine auth of the path
            }
        }
        if (uri != null) {
            // canonicalize uri before comparing with this fs
            uri = canonicalizeUri(uri);
            thatAuthority = uri.getAuthority();
            if (thisAuthority == thatAuthority || // authorities match
                    (thisAuthority != null && thisAuthority.equalsIgnoreCase(thatAuthority))) {
                return;
            }
        }
    }
    throw new IllegalArgumentException("Wrong FS: " + path + ", expected: " + this.getUri());
}
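Two details of this comparison are easy to miss: authorities, like schemes, are compared case-insensitively, and the reference check thisAuthority == thatAuthority is what lets two null authorities match. A reduced illustration (the host names are arbitrary):

import java.net.URI;

class AuthorityCompareDemo {
    public static void main(String[] args) {
        String thisAuthority = URI.create("hdfs://NameNode:8020/").getAuthority();  // "NameNode:8020"
        String thatAuthority = URI.create("hdfs://namenode:8020/a").getAuthority(); // "namenode:8020"
        boolean match = thisAuthority == thatAuthority
                || (thisAuthority != null && thisAuthority.equalsIgnoreCase(thatAuthority));
        System.out.println(match); // true; also true when both authorities are null
    }
}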
From source file:com.buaa.cfs.fs.FileSystem.java
/**
 * Check that a Path belongs to this FileSystem.
 *
 * @param path to check
 */
protected void checkPath(Path path) {
    URI uri = path.toUri();
    String thatScheme = uri.getScheme();
    if (thatScheme == null) // fs is relative
        return;
    URI thisUri = getCanonicalUri();
    String thisScheme = thisUri.getScheme();
    // authority and scheme are not case sensitive
    if (thisScheme.equalsIgnoreCase(thatScheme)) { // schemes match
        String thisAuthority = thisUri.getAuthority();
        String thatAuthority = uri.getAuthority();
        if (thatAuthority == null && // path's authority is null
                thisAuthority != null) { // fs has an authority
            URI defaultUri = getDefaultUri(getConf());
            if (thisScheme.equalsIgnoreCase(defaultUri.getScheme())) {
                uri = defaultUri; // schemes match, so use this uri instead
            } else {
                uri = null; // can't determine auth of the path
            }
        }
        if (uri != null) {
            // canonicalize uri before comparing with this fs
            uri = canonicalizeUri(uri);
            thatAuthority = uri.getAuthority();
            if (thisAuthority == thatAuthority || // authorities match
                    (thisAuthority != null && thisAuthority.equalsIgnoreCase(thatAuthority)))
                return;
        }
    }
    throw new IllegalArgumentException("Wrong FS: " + path + ", expected: " + this.getUri());
}
From source file:org.apache.hadoop.hdfs.DFSClient.java
/**
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for
 * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
 * configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
 * as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode
 * must be null.
 */
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, Configuration conf, FileSystem.Statistics stats)
        throws IOException {
    SpanReceiverHost.getInstance(conf);
    traceSampler = new SamplerBuilder(TraceUtils.wrapHadoopConf(conf)).build();
    // Copy only the required DFSClient configuration
    this.dfsClientConf = new DfsClientConf(conf);
    this.conf = conf;
    this.stats = stats;
    this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
    this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

    this.ugi = UserGroupInformation.getCurrentUser();

    this.authority = nameNodeUri == null ? "null" : nameNodeUri.getAuthority();
    this.clientName = "DFSClient_" + dfsClientConf.getTaskId() + "_" + DFSUtil.getRandom().nextInt() + "_"
            + Thread.currentThread().getId();
    int numResponseToDrop = conf.getInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
            DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
    NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
    AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
    if (numResponseToDrop > 0) {
        // This case is used for testing.
        LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY + " is set to "
                + numResponseToDrop + ", this hacked client will proactively drop responses");
        proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf, nameNodeUri, ClientProtocol.class,
                numResponseToDrop, nnFallbackToSimpleAuth);
    }

    if (proxyInfo != null) {
        this.dtService = proxyInfo.getDelegationTokenService();
        this.namenode = proxyInfo.getProxy();
    } else if (rpcNamenode != null) {
        // This case is used for testing.
        Preconditions.checkArgument(nameNodeUri == null);
        this.namenode = rpcNamenode;
        dtService = null;
    } else {
        Preconditions.checkArgument(nameNodeUri != null, "null URI");
        proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class,
                nnFallbackToSimpleAuth);
        this.dtService = proxyInfo.getDelegationTokenService();
        this.namenode = proxyInfo.getProxy();
    }

    String localInterfaces[] = conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
    localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
    if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
        LOG.debug("Using local interfaces [" + Joiner.on(',').join(localInterfaces) + "] with addresses ["
                + Joiner.on(',').join(localInterfaceAddrs) + "]");
    }

    Boolean readDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ? null
            : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
    Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ? null
            : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
    Boolean writeDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES) == null) ? null
            : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
    this.defaultReadCachingStrategy = new CachingStrategy(readDropBehind, readahead);
    this.defaultWriteCachingStrategy = new CachingStrategy(writeDropBehind, readahead);
    this.clientContext = ClientContext.get(conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
            dfsClientConf);
    this.hedgedReadThresholdMillis = conf.getLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
            DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS);
    int numThreads = conf.getInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
            DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE);
    if (numThreads > 0) {
        this.initThreadsNumForHedgedReads(numThreads);
    }
    this.saslClient = new SaslDataTransferClient(conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
            TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
}
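Here getAuthority() supplies a purely diagnostic label: the namenode's host:port is stored as the client's authority string, with a literal "null" covering the test-only case where no URI is passed. Reduced to its essence (the endpoint is invented):

import java.net.URI;

class AuthorityLabelDemo {
    static String authorityLabel(URI nameNodeUri) {
        // mirrors the guard above; nameNodeUri may legitimately be null in tests
        return nameNodeUri == null ? "null" : nameNodeUri.getAuthority();
    }

    public static void main(String[] args) {
        System.out.println(authorityLabel(URI.create("hdfs://namenode.example.com:8020"))); // namenode.example.com:8020
        System.out.println(authorityLabel(null)); // null (as a literal string)
    }
}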
From source file:org.apache.hadoop.hdfs.DFSClient.java
/**
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for
 * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
 * configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
 * as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode
 * must be null.
 */
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, Configuration conf, FileSystem.Statistics stats)
        throws IOException {
    SpanReceiverHost.getInstance(conf);
    traceSampler = new SamplerBuilder(TraceUtils.wrapHadoopConf(conf)).build();
    // Copy only the required DFSClient configuration
    this.dfsClientConf = new Conf(conf);
    if (this.dfsClientConf.useLegacyBlockReaderLocal) {
        LOG.debug("Using legacy short-circuit local reads.");
    }
    this.conf = conf;
    this.stats = stats;
    this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
    this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

    this.ugi = UserGroupInformation.getCurrentUser();

    this.authority = nameNodeUri == null ? "null" : nameNodeUri.getAuthority();
    this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" + DFSUtil.getRandom().nextInt() + "_"
            + Thread.currentThread().getId();
    int numResponseToDrop = conf.getInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
            DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
    NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
    AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
    if (numResponseToDrop > 0) {
        // This case is used for testing.
        LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY + " is set to "
                + numResponseToDrop + ", this hacked client will proactively drop responses");
        proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf, nameNodeUri, ClientProtocol.class,
                numResponseToDrop, nnFallbackToSimpleAuth);
    }

    if (proxyInfo != null) {
        this.dtService = proxyInfo.getDelegationTokenService();
        this.namenode = proxyInfo.getProxy();
    } else if (rpcNamenode != null) {
        // This case is used for testing.
        Preconditions.checkArgument(nameNodeUri == null);
        this.namenode = rpcNamenode;
        dtService = null;
    } else {
        Preconditions.checkArgument(nameNodeUri != null, "null URI");
        proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class,
                nnFallbackToSimpleAuth);
        this.dtService = proxyInfo.getDelegationTokenService();
        this.namenode = proxyInfo.getProxy();
    }

    String localInterfaces[] = conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
    localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
    if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
        LOG.debug("Using local interfaces [" + Joiner.on(',').join(localInterfaces) + "] with addresses ["
                + Joiner.on(',').join(localInterfaceAddrs) + "]");
    }

    Boolean readDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ? null
            : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
    Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ? null
            : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
    Boolean writeDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES) == null) ? null
            : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
    this.defaultReadCachingStrategy = new CachingStrategy(readDropBehind, readahead);
    this.defaultWriteCachingStrategy = new CachingStrategy(writeDropBehind, readahead);
    this.clientContext = ClientContext.get(conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
            dfsClientConf);
    this.hedgedReadThresholdMillis = conf.getLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
            DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS);
    int numThreads = conf.getInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
            DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE);
    if (numThreads > 0) {
        this.initThreadsNumForHedgedReads(numThreads);
    }
    this.saslClient = new SaslDataTransferClient(conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
            TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
}
From source file:com.mellanox.r4h.DFSClient.java
/**
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for
 * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
 * configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
 * as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode
 * must be null.
 */
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, Configuration conf, FileSystem.Statistics stats)
        throws IOException {
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
    traceSampler = new SamplerBuilder(TraceUtils.wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf))
            .build();
    // Copy only the required DFSClient configuration
    this.dfsClientConf = new DFSClientConfBridge2_7(conf);
    if (this.dfsClientConf.isUseLegacyBlockReaderLocal()) {
        LOG.debug("Using legacy short-circuit local reads.");
    }
    this.conf = conf;
    this.stats = stats;
    this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
    this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

    this.ugi = UserGroupInformation.getCurrentUser();

    this.authority = nameNodeUri == null ? "null" : nameNodeUri.getAuthority();
    this.clientName = "DFSClient_" + dfsClientConf.getTaskId() + "_" + DFSUtil.getRandom().nextInt() + "_"
            + Thread.currentThread().getId();
    provider = DFSUtil.createKeyProvider(conf);
    if (LOG.isDebugEnabled()) {
        if (provider == null) {
            LOG.debug("No KeyProvider found.");
        } else {
            LOG.debug("Found KeyProvider: " + provider.toString());
        }
    }
    int numResponseToDrop = conf.getInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
            DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
    NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
    AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
    if (numResponseToDrop > 0) {
        // This case is used for testing.
        LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY + " is set to "
                + numResponseToDrop + ", this hacked client will proactively drop responses");
        proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf, nameNodeUri, ClientProtocol.class,
                numResponseToDrop, nnFallbackToSimpleAuth);
    }

    if (proxyInfo != null) {
        this.dtService = proxyInfo.getDelegationTokenService();
        this.namenode = proxyInfo.getProxy();
    } else if (rpcNamenode != null) {
        // This case is used for testing.
        Preconditions.checkArgument(nameNodeUri == null);
        this.namenode = rpcNamenode;
        dtService = null;
    } else {
        Preconditions.checkArgument(nameNodeUri != null, "null URI");
        proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class,
                nnFallbackToSimpleAuth);
        this.dtService = proxyInfo.getDelegationTokenService();
        this.namenode = proxyInfo.getProxy();
    }

    String localInterfaces[] = conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
    localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
    if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
        LOG.debug("Using local interfaces [" + Joiner.on(',').join(localInterfaces) + "] with addresses ["
                + Joiner.on(',').join(localInterfaceAddrs) + "]");
    }

    Boolean readDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ? null
            : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
    Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ? null
            : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
    Boolean writeDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES) == null) ? null
            : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
    this.defaultReadCachingStrategy = new CachingStrategy(readDropBehind, readahead);
    this.defaultWriteCachingStrategy = new CachingStrategy(writeDropBehind, readahead);
    this.clientContext = ClientContext.get(conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
            dfsClientConf);
    this.hedgedReadThresholdMillis = conf.getLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
            DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS);
    int numThreads = conf.getInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
            DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE);
    if (numThreads > 0) {
        this.initThreadsNumForHedgedReads(numThreads);
    }
    this.saslClient = new SaslDataTransferClient(conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
            TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
}