List of usage examples for org.apache.hadoop.fs FileSystem resolvePath
public Path resolvePath(final Path p) throws IOException
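Before the collected examples, a minimal standalone sketch of calling resolvePath directly; the path /user/data and the surrounding class are illustrative assumptions, not taken from any example below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ResolvePathSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Illustrative path; on a ViewFS client this might be a mount point such as
    // viewfs://cluster/user/data that maps to an hdfs:// volume.
    Path p = new Path("/user/data");
    FileSystem fs = p.getFileSystem(conf);

    // resolvePath returns the fully-qualified path after following symlinks and
    // mount points, so the scheme and authority reflect the filesystem that
    // actually stores the data.
    Path resolved = fs.resolvePath(p);
    System.out.println("resolved to: " + resolved
        + " (scheme: " + resolved.toUri().getScheme() + ")");
  }
}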
From source file:com.ruizhan.hadoop.hdfs.Trash.java
License:Apache License
/**
 * In case of symlinks or mount points, one has to move to the appropriate
 * trash bin in the actual volume of the path p being deleted.
 *
 * Hence we get the file system of the fully-qualified resolved path and
 * then move the path p to the trash bin in that volume.
 * @param fs - the filesystem of path p
 * @param p - the path being deleted - to be moved to trash
 * @param conf - configuration
 * @return false if the item is already in the trash or trash is disabled
 * @throws IOException on error
 */
public static boolean moveToAppropriateTrash(FileSystem fs, Path p, Configuration conf) throws IOException {
  Path fullyResolvedPath = fs.resolvePath(p);
  FileSystem fullyResolvedFs = FileSystem.get(fullyResolvedPath.toUri(), conf);
  // If the trash interval is configured server side then clobber this
  // configuration so that we always respect the server configuration.
  try {
    long trashInterval = fullyResolvedFs.getServerDefaults(fullyResolvedPath).getTrashInterval();
    if (0 != trashInterval) {
      Configuration confCopy = new Configuration(conf);
      confCopy.setLong(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, trashInterval);
      conf = confCopy;
    }
  } catch (Exception e) {
    // If we cannot determine that trash is enabled server side then
    // bail rather than potentially deleting a file when trash is enabled.
    throw new IOException("Failed to get server trash configuration", e);
  }
  Trash trash = new Trash(fullyResolvedFs, conf);
  boolean success = trash.moveToTrash(fullyResolvedPath);
  if (success) {
    System.out.println("Moved: '" + p + "' to trash at: " + trash.getCurrentTrashDir());
  }
  return success;
}
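A possible caller-side sketch for the method above (assuming the usual Hadoop imports and the Trash class shown here); the path /user/alice/old-data and the default Configuration are assumptions for illustration. It falls back to a plain delete only when moveToAppropriateTrash reports that trash could not be used.

// Hypothetical caller; the path is illustrative.
Configuration conf = new Configuration();
Path toDelete = new Path("/user/alice/old-data");
FileSystem fs = toDelete.getFileSystem(conf);

if (!Trash.moveToAppropriateTrash(fs, toDelete, conf)) {
  // Trash is disabled, or the item is already in the trash: delete directly.
  fs.delete(toDelete, true /* recursive */);
}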
From source file:com.scaleoutsoftware.soss.hserver.hadoop.DistributedCacheManager.java
License:Apache License
/**
 * Set up the distributed cache by localizing the resources, and updating
 * the configuration with references to the localized resources.
 * @param conf job configuration
 * @throws IOException
 */
public void setup(Configuration conf) throws IOException {
  // If we are not the 0th worker, wait for the 0th worker to set up the cache
  if (InvocationWorker.getIgWorkerIndex() > 0 && InvocationWorker.getNumberOfWorkers() > 1) {
    try {
      InvocationWorker.getSynchronizationBarrier().waitForComplete(ACTION_NAME, SYNCHRONIZATION_WAIT_MS,
          WAIT_GRANULARITY_MS);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    return;
  }

  File workDir = new File(System.getProperty("user.dir"));

  // Generate YARN local resources objects corresponding to the distributed
  // cache configuration
  Map<String, LocalResource> localResources = new LinkedHashMap<String, LocalResource>();
  MRApps.setupDistributedCache(conf, localResources);

  // CODE CHANGE FROM ORIGINAL FILE:
  // We need to clear the resources from jar files, since they are distributed through the IG.
  Iterator<Map.Entry<String, LocalResource>> iterator = localResources.entrySet().iterator();
  while (iterator.hasNext()) {
    Entry<String, LocalResource> entry = iterator.next();
    if (entry.getKey().endsWith(".jar")) {
      iterator.remove();
    }
  }

  // Generating unique numbers for FSDownload.
  AtomicLong uniqueNumberGenerator = new AtomicLong(System.currentTimeMillis());

  // Find which resources are to be put on the local classpath
  Map<String, Path> classpaths = new HashMap<String, Path>();
  Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
  if (archiveClassPaths != null) {
    for (Path p : archiveClassPaths) {
      FileSystem remoteFS = p.getFileSystem(conf);
      p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
      classpaths.put(p.toUri().getPath().toString(), p);
    }
  }
  Path[] fileClassPaths = DistributedCache.getFileClassPaths(conf);
  if (fileClassPaths != null) {
    for (Path p : fileClassPaths) {
      FileSystem remoteFS = p.getFileSystem(conf);
      p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
      classpaths.put(p.toUri().getPath().toString(), p);
    }
  }

  // Localize the resources
  LocalDirAllocator localDirAllocator = new LocalDirAllocator(MRConfig.LOCAL_DIR);
  FileContext localFSFileContext = FileContext.getLocalFSFileContext();
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

  ExecutorService exec = null;
  try {
    ThreadFactory tf = new ThreadFactoryBuilder()
        .setNameFormat("LocalDistributedCacheManager Downloader #%d").build();
    exec = Executors.newCachedThreadPool(tf);
    Path destPath = localDirAllocator.getLocalPathForWrite(".", conf);
    Map<LocalResource, Future<Path>> resourcesToPaths = Maps.newHashMap();
    for (LocalResource resource : localResources.values()) {
      Callable<Path> download = new FSDownload(localFSFileContext, ugi, conf,
          new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet())), resource);
      Future<Path> future = exec.submit(download);
      resourcesToPaths.put(resource, future);
    }
    for (Entry<String, LocalResource> entry : localResources.entrySet()) {
      LocalResource resource = entry.getValue();
      Path path;
      try {
        path = resourcesToPaths.get(resource).get();
      } catch (InterruptedException e) {
        throw new IOException(e);
      } catch (ExecutionException e) {
        throw new IOException(e);
      }
      String pathString = path.toUri().toString();
      String link = entry.getKey();
      String target = new File(path.toUri()).getPath();
      symlink(workDir, target, link);

      if (resource.getType() == LocalResourceType.ARCHIVE) {
        localArchives.add(pathString);
      } else if (resource.getType() == LocalResourceType.FILE) {
        localFiles.add(pathString);
      } else if (resource.getType() == LocalResourceType.PATTERN) {
        // PATTERN is not currently used in local mode
        throw new IllegalArgumentException("Resource type PATTERN is not "
            + "implemented yet. " + resource.getResource());
      }
      Path resourcePath;
      try {
        resourcePath = ConverterUtils.getPathFromYarnURL(resource.getResource());
      } catch (URISyntaxException e) {
        throw new IOException(e);
      }
      LOG.info(String.format("Localized %s as %s", resourcePath, path));
      String cp = resourcePath.toUri().getPath();
      if (classpaths.keySet().contains(cp)) {
        localClasspaths.add(path.toUri().getPath().toString());
      }
    }
  } finally {
    if (exec != null) {
      exec.shutdown();
    }
  }

  // Update the configuration object with localized data.
  if (!localArchives.isEmpty()) {
    conf.set(MRJobConfig.CACHE_LOCALARCHIVES,
        StringUtils.arrayToString(localArchives.toArray(new String[localArchives.size()])));
  }
  if (!localFiles.isEmpty()) {
    conf.set(MRJobConfig.CACHE_LOCALFILES,
        StringUtils.arrayToString(localFiles.toArray(new String[localFiles.size()])));
  }
  setupCalled = true;

  // If we are the 0th worker, signal that the action is complete
  if (InvocationWorker.getIgWorkerIndex() == 0 && InvocationWorker.getNumberOfWorkers() > 1) {
    try {
      InvocationWorker.getSynchronizationBarrier().signalComplete(ACTION_NAME);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
}
From source file:org.apache.accumulo.server.master.recovery.HadoopLogCloser.java
License:Apache License
@Override
public long close(AccumuloConfiguration conf, VolumeManager fs, Path source) throws IOException {
  FileSystem ns = fs.getVolumeByPath(source).getFileSystem();

  // if path points to a viewfs path, then resolve to underlying filesystem
  if (ViewFSUtils.isViewFS(ns)) {
    Path newSource = ns.resolvePath(source);
    if (!newSource.equals(source) && newSource.toUri().getScheme() != null) {
      ns = newSource.getFileSystem(CachedConfiguration.getInstance());
      source = newSource;
    }
  }

  if (ns instanceof DistributedFileSystem) {
    DistributedFileSystem dfs = (DistributedFileSystem) ns;
    try {
      if (!dfs.recoverLease(source)) {
        log.info("Waiting for file to be closed " + source.toString());
        return conf.getTimeInMillis(Property.MASTER_LEASE_RECOVERY_WAITING_PERIOD);
      }
      log.info("Recovered lease on " + source.toString());
    } catch (FileNotFoundException ex) {
      throw ex;
    } catch (Exception ex) {
      log.warn("Error recovering lease on " + source.toString(), ex);
      ns.append(source).close();
      log.info("Recovered lease on " + source.toString() + " using append");
    }
  } else if (ns instanceof LocalFileSystem || ns instanceof RawLocalFileSystem) {
    // ignore
  } else {
    throw new IllegalStateException("Don't know how to recover a lease for " + ns.getClass().getName());
  }
  return 0;
}
From source file:org.apache.hcatalog.shims.HCatHadoopShims23.java
License:Apache License
@Override
public boolean isFileInHDFS(FileSystem fs, Path path) throws IOException {
  // In case of viewfs we need to look up where the actual file is, to know which filesystem is in use.
  // resolvePath is a sure-shot way of knowing which file system the file is on.
  return "hdfs".equals(fs.resolvePath(path).toUri().getScheme());
}
From source file:org.apache.oozie.util.ClasspathUtils.java
License:Apache License
private static void addToClasspathIfNotJar(Path[] paths, URI[] withLinks, Configuration conf,
    Map<String, String> environment, String classpathEnvVar) throws IOException {
  if (paths != null) {
    HashMap<Path, String> linkLookup = new HashMap<Path, String>();
    if (withLinks != null) {
      for (URI u : withLinks) {
        Path p = new Path(u);
        FileSystem remoteFS = p.getFileSystem(conf);
        p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
        String name = (null == u.getFragment()) ? p.getName() : u.getFragment();
        if (!name.toLowerCase(Locale.ENGLISH).endsWith(".jar")) {
          linkLookup.put(p, name);
        }
      }
    }

    for (Path p : paths) {
      FileSystem remoteFS = p.getFileSystem(conf);
      p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
      String name = linkLookup.get(p);
      if (name == null) {
        name = p.getName();
      }
      if (!name.toLowerCase(Locale.ENGLISH).endsWith(".jar")) {
        MRApps.addToEnvironment(environment, classpathEnvVar,
            ApplicationConstants.Environment.PWD.$() + Path.SEPARATOR + name, conf);
      }
    }
  }
}
From source file:org.apache.tez.client.TezClientUtils.java
License:Apache License
private static FileStatus[] getLRFileStatus(String fileName, Configuration conf) throws IOException {
  URI uri;
  try {
    uri = new URI(fileName);
  } catch (URISyntaxException e) {
    String message = "Invalid URI defined in configuration for"
        + " location of TEZ jars. providedURI=" + fileName;
    LOG.error(message);
    throw new TezUncheckedException(message, e);
  }

  Path p = new Path(uri);
  FileSystem fs = p.getFileSystem(conf);
  p = fs.resolvePath(p);

  if (fs.isDirectory(p)) {
    return fs.listStatus(p);
  } else {
    FileStatus fStatus = fs.getFileStatus(p);
    return new FileStatus[] { fStatus };
  }
}
From source file:org.apache.tez.client.TezClientUtils.java
License:Apache License
/**
 * Helper function to create a YARN LocalResource
 * @param fs FileSystem object
 * @param p Path of resource to localize
 * @param type LocalResource Type
 * @param visibility LocalResource Visibility
 * @return a YARN LocalResource for the given Path
 * @throws IOException
 */
static LocalResource createLocalResource(FileSystem fs, Path p, LocalResourceType type,
    LocalResourceVisibility visibility) throws IOException {
  LocalResource rsrc = Records.newRecord(LocalResource.class);
  FileStatus rsrcStat = fs.getFileStatus(p);
  rsrc.setResource(ConverterUtils.getYarnUrlFromPath(fs.resolvePath(rsrcStat.getPath())));
  rsrc.setSize(rsrcStat.getLen());
  rsrc.setTimestamp(rsrcStat.getModificationTime());
  rsrc.setType(type);
  rsrc.setVisibility(visibility);
  return rsrc;
}
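A hedged sketch of how such a helper might be used to register a jar as a YARN local resource (assuming the usual Hadoop/YARN imports); the jar path /apps/tez/tez-dag.jar and the resource name are assumptions for illustration, not part of TezClientUtils.

// Hypothetical caller; createLocalResource refers to the helper shown above.
Configuration conf = new Configuration();
Path amJar = new Path("/apps/tez/tez-dag.jar");   // illustrative jar location
FileSystem fs = amJar.getFileSystem(conf);

Map<String, LocalResource> amLocalResources = new HashMap<String, LocalResource>();
amLocalResources.put("tez-dag.jar",
    createLocalResource(fs, amJar, LocalResourceType.FILE, LocalResourceVisibility.PUBLIC));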
From source file:org.apache.tez.common.TezCommonUtils.java
License:Apache License
/**
 * <p>
 * This function returns the staging directory defined in the config with
 * property name <code>TezConfiguration.TEZ_AM_STAGING_DIR</code>. If the
 * property is not defined in the conf, Tez uses the value defined as
 * <code>TezConfiguration.TEZ_AM_STAGING_DIR_DEFAULT</code>. In addition, the
 * function makes sure the staging directory exists; if not, it creates the
 * directory with permission <code>TEZ_AM_DIR_PERMISSION</code>.
 * </p>
 *
 * @param conf
 *          TEZ configuration
 * @return Fully qualified staging directory
 */
public static Path getTezBaseStagingPath(Configuration conf) {
  String stagingDirStr = conf.get(TezConfiguration.TEZ_AM_STAGING_DIR,
      TezConfiguration.TEZ_AM_STAGING_DIR_DEFAULT);
  Path baseStagingDir;
  try {
    Path p = new Path(stagingDirStr);
    FileSystem fs = p.getFileSystem(conf);
    if (!fs.exists(p)) {
      mkDirForAM(fs, p);
      LOG.info("Stage directory " + p + " doesn't exist and is created");
    }
    baseStagingDir = fs.resolvePath(p);
  } catch (IOException e) {
    throw new TezUncheckedException(e);
  }
  return baseStagingDir;
}
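A short client-side sketch of pointing Tez at a custom staging directory before resolving it; the directory /user/alice/.tez/staging is an assumed value for illustration, and only the TEZ_AM_STAGING_DIR property already named in the javadoc above is used.

// Assumed staging location; resolved through the method shown above.
TezConfiguration tezConf = new TezConfiguration();
tezConf.set(TezConfiguration.TEZ_AM_STAGING_DIR, "/user/alice/.tez/staging");
Path resolvedStagingDir = TezCommonUtils.getTezBaseStagingPath(tezConf);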
From source file:org.apache.tez.dag.app.rm.container.AMContainerHelpers.java
License:Apache License
/**
 * Create a {@link LocalResource} record with all the given parameters.
 */
public static LocalResource createLocalResource(FileSystem fc, Path file, LocalResourceType type,
    LocalResourceVisibility visibility) throws IOException {
  FileStatus fstat = fc.getFileStatus(file);
  URL resourceURL = ConverterUtils.getYarnUrlFromPath(fc.resolvePath(fstat.getPath()));
  long resourceSize = fstat.getLen();
  long resourceModificationTime = fstat.getModificationTime();
  return LocalResource.newInstance(resourceURL, type, visibility, resourceSize, resourceModificationTime);
}
From source file:org.apache.tez.mapreduce.TestMRRJobsDAGApi.java
License:Apache License
private static LocalResource createLocalResource(FileSystem fc, Path file, LocalResourceType type,
    LocalResourceVisibility visibility) throws IOException {
  FileStatus fstat = fc.getFileStatus(file);
  URL resourceURL = ConverterUtils.getYarnUrlFromPath(fc.resolvePath(fstat.getPath()));
  long resourceSize = fstat.getLen();
  long resourceModificationTime = fstat.getModificationTime();
  return LocalResource.newInstance(resourceURL, type, visibility, resourceSize, resourceModificationTime);
}