List of usage examples for java.nio.file.Path.getFileSystem()
FileSystem getFileSystem();
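Before the project examples below, a minimal standalone sketch may help show what getFileSystem() returns for an ordinary path. The file name used here is made up for illustration; every Path keeps a reference to the FileSystem that created it, so no extra lookup is needed.

import java.nio.file.FileSystem;
import java.nio.file.Path;
import java.nio.file.Paths;

public class GetFileSystemDemo {
    public static void main(String[] args) {
        // Any Path carries a reference to the FileSystem that created it
        Path path = Paths.get("example.txt"); // hypothetical file name
        FileSystem fs = path.getFileSystem();

        // The file system exposes its provider, separator and attribute views
        System.out.println("provider:  " + fs.provider().getScheme()); // e.g. "file"
        System.out.println("separator: " + fs.getSeparator());
        System.out.println("views:     " + fs.supportedFileAttributeViews());
    }
}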
From source file:de.tiqsolutions.hdfs.HadoopFileSystemProvider.java
@Override
public void checkAccess(Path path, AccessMode... modes) throws IOException {
    FileSystem fs = path.getFileSystem();
    if (!HadoopFileSystem.class.isInstance(fs))
        throw new IllegalArgumentException("path");
    try {
        ((HadoopFileSystem) fs).checkAccess(path, modes);
    } catch (RemoteException e) {
        rethrowRemoteException(e, path);
    }
}
From source file:de.tiqsolutions.hdfs.HadoopFileSystemProvider.java
@Override
public void createDirectory(Path dir, FileAttribute<?>... attrs) throws IOException {
    FileSystem fs = dir.getFileSystem();
    if (!HadoopFileSystem.class.isInstance(fs))
        throw new IllegalArgumentException("dir");
    try {
        ((HadoopFileSystem) fs).createDirectory(dir, attrs);
    } catch (RemoteException e) {
        rethrowRemoteException(e, dir);
    }
}
From source file:de.tiqsolutions.hdfs.HadoopFileSystemProvider.java
@Override
public DirectoryStream<Path> newDirectoryStream(Path dir, Filter<? super Path> filter) throws IOException {
    FileSystem fs = dir.getFileSystem();
    if (!HadoopFileSystem.class.isInstance(fs))
        throw new IllegalArgumentException("dir");
    try {
        return ((HadoopFileSystem) fs).newDirectoryStream(dir, filter);
    } catch (RemoteException e) {
        rethrowRemoteException(e, dir);
        return null;
    }
}
From source file:de.tiqsolutions.hdfs.HadoopFileSystemProvider.java
@Override
public void move(Path source, Path target, CopyOption... options) throws IOException {
    FileSystem fs = source.getFileSystem();
    if (!HadoopFileSystem.class.isInstance(fs))
        throw new IllegalArgumentException("source");
    if (!fs.provider().equals(target.getFileSystem().provider()))
        throw new ProviderMismatchException();
    List<Rename> renameOptions = new ArrayList<>();
    List<CopyOption> copyOptions = Arrays.asList(options);
    if (copyOptions.contains(StandardCopyOption.REPLACE_EXISTING))
        renameOptions.add(Rename.OVERWRITE);
    try {
        ((HadoopFileSystem) fs).getFileContext().rename(((HadoopFileSystemPath) source).getPath(),
                ((HadoopFileSystemPath) target).getPath(),
                renameOptions.toArray(new Rename[renameOptions.size()]));
    } catch (RemoteException e) {
        rethrowRemoteException(e, source, target);
    }
}
From source file:de.tiqsolutions.hdfs.HadoopFileSystemProvider.java
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> options,
        FileAttribute<?>... attrs) throws IOException {
    FileSystem fs = path.getFileSystem();
    if (!HadoopFileSystem.class.isInstance(fs))
        throw new IllegalArgumentException("path");
    try {
        return ((HadoopFileSystem) fs).newByteChannel(path, options, attrs);
    } catch (RemoteException e) {
        rethrowRemoteException(e, path);
        return null;
    }
}
From source file:de.tiqsolutions.hdfs.HadoopFileSystemProvider.java
@Override
public boolean isSameFile(Path path, Path path2) throws IOException {
    if (path == null)
        throw new NullArgumentException("path");
    if (path2 == null)
        throw new NullArgumentException("path2");
    FileSystem fs = path.getFileSystem();
    if (!HadoopFileSystem.class.isInstance(fs))
        throw new IllegalArgumentException("path");
    return ((HadoopFileSystem) fs).isSameFile(path, path2);
}
From source file:com.github.zhanhb.ckfinder.connector.support.XmlConfigurationParser.java
private Path getPath(Path first, String... more) {
    return first == null ? null : first.getFileSystem().getPath(first.toString(), more);
}
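A note on this helper: Paths.get(...) always resolves against the default file system, whereas first.getFileSystem().getPath(...) stays on whatever file system first belongs to (a zip file system, for instance). A minimal sketch of the difference, assuming a hypothetical pre-existing archive.zip:

import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.nio.file.Paths;

public class SameFileSystemPathDemo {
    public static void main(String[] args) throws Exception {
        // archive.zip is a hypothetical, pre-existing zip file
        try (FileSystem zipFs = FileSystems.newFileSystem(Paths.get("archive.zip"), (ClassLoader) null)) {
            Path inZip = zipFs.getPath("/docs");
            // Resolves inside the zip, because we ask inZip's own file system
            Path rebuilt = inZip.getFileSystem().getPath(inZip.toString(), "readme.txt");
            System.out.println(rebuilt.getFileSystem() == zipFs); // true
            // Paths.get("/docs", "readme.txt") would instead target the default file system
        }
    }
}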
From source file:de.tiqsolutions.hdfs.HadoopFileSystemProvider.java
@Override
public void copy(Path source, Path target, CopyOption... options) throws IOException {
    List<CopyOption> optionList = Arrays.asList(options);
    if (!optionList.contains(StandardCopyOption.REPLACE_EXISTING)) {
        if (Files.exists(target))
            throw new java.nio.file.FileAlreadyExistsException(source.toString(), target.toString(),
                    "could not copy file to destination");
    } else {
        Files.deleteIfExists(target);
    }
    FileSystem sourceFS = source.getFileSystem();
    FileSystem targetFS = target.getFileSystem();
    if (optionList.contains(HadoopCopyOption.REMOTE_COPY) && sourceFS.equals(targetFS)) {
        remoteCopy(source, target, options);
        return;
    }
    try (SeekableByteChannel sourceChannel = sourceFS.provider().newByteChannel(source,
            EnumSet.of(StandardOpenOption.READ))) {
        Set<StandardOpenOption> openOptions = EnumSet.of(StandardOpenOption.WRITE);
        if (optionList.contains(StandardCopyOption.REPLACE_EXISTING))
            openOptions.add(StandardOpenOption.CREATE);
        else
            openOptions.add(StandardOpenOption.CREATE_NEW);
        List<FileAttribute<?>> fileAttributes = new ArrayList<>();
        if (optionList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
            Set<String> sourceAttrViews = sourceFS.supportedFileAttributeViews();
            Set<String> targetAttrViews = targetFS.supportedFileAttributeViews();
            if (sourceAttrViews.contains(PosixFileAttributeViewImpl.NAME)
                    && targetAttrViews.contains(PosixFileAttributeViewImpl.NAME)) {
                PosixFileAttributes posixAttributes = sourceFS.provider().readAttributes(source,
                        PosixFileAttributes.class);
                fileAttributes.add(PosixFilePermissions.asFileAttribute(posixAttributes.permissions()));
            }
            if (sourceAttrViews.contains(HadoopFileAttributeViewImpl.NAME)
                    && targetAttrViews.contains(HadoopFileAttributeViewImpl.NAME)) {
                final HadoopFileAttributes hdfsAttributes = sourceFS.provider().readAttributes(source,
                        HadoopFileAttributes.class);
                fileAttributes.add(new FileAttribute<Long>() {
                    @Override
                    public String name() {
                        return HadoopFileAttributeViewImpl.NAME + ":blockSize";
                    }

                    @Override
                    public Long value() {
                        return hdfsAttributes.getBlockSize();
                    }
                });
                fileAttributes.add(new FileAttribute<Short>() {
                    @Override
                    public String name() {
                        return HadoopFileAttributeViewImpl.NAME + ":replication";
                    }

                    @Override
                    public Short value() {
                        return hdfsAttributes.getReplication();
                    }
                });
            }
        }
        FileAttribute<?>[] attributes = fileAttributes.toArray(new FileAttribute<?>[fileAttributes.size()]);
        try (SeekableByteChannel targetChannel = targetFS.provider().newByteChannel(target, openOptions,
                attributes)) {
            int buffSize = getConfiguration().getInt(DFSConfigKeys.DFS_STREAM_BUFFER_SIZE_KEY,
                    DFSConfigKeys.DFS_STREAM_BUFFER_SIZE_DEFAULT);
            ByteBuffer buffer = ByteBuffer.allocate(buffSize);
            buffer.clear();
            while (sourceChannel.read(buffer) > 0) {
                buffer.flip();
                targetChannel.write(buffer);
                buffer.clear();
            }
        }
        if (optionList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
            BasicFileAttributes attrs = sourceFS.provider().readAttributes(source, BasicFileAttributes.class);
            BasicFileAttributeView view = targetFS.provider().getFileAttributeView(target,
                    BasicFileAttributeView.class);
            view.setTimes(attrs.lastModifiedTime(), attrs.lastAccessTime(), attrs.creationTime());
        }
    }
}
From source file:org.apache.taverna.robundle.Bundles.java
protected static void safeMoveOrCopy(Path source, Path destination, boolean move) throws IOException {
    // First just try to do an atomic move with overwrite
    try {
        if (move && source.getFileSystem().provider().equals(destination.getFileSystem().provider())) {
            move(source, destination, ATOMIC_MOVE, REPLACE_EXISTING);
            return;
        }
    } catch (AtomicMoveNotSupportedException ex) {
        // Do the fallback by temporary files below
    }
    destination = destination.toAbsolutePath();
    String tmpName = destination.getFileName().toString();
    Path tmpDestination = createTempFile(destination.getParent(), tmpName, ".tmp");
    Path backup = null;
    try {
        if (move) {
            /*
             * This might do a copy if filestores differ .. hence to avoid an
             * incomplete (and partially overwritten) destination, we do it
             * first to a temporary file
             */
            move(source, tmpDestination, REPLACE_EXISTING);
        } else {
            copy(source, tmpDestination, REPLACE_EXISTING);
        }
        if (exists(destination)) {
            if (isDirectory(destination))
                // ensure it is empty
                try (DirectoryStream<Path> ds = newDirectoryStream(destination)) {
                    if (ds.iterator().hasNext())
                        throw new DirectoryNotEmptyException(destination.toString());
                }
            // Keep the files for roll-back in case it goes bad
            backup = createTempFile(destination.getParent(), tmpName, ".orig");
            move(destination, backup, REPLACE_EXISTING);
        }
        // OK; let's swap over
        try {
            // prefer ATOMIC_MOVE
            move(tmpDestination, destination, REPLACE_EXISTING, ATOMIC_MOVE);
        } catch (AtomicMoveNotSupportedException ex) {
            /*
             * possibly a network file system as src/dest should be in same
             * folder
             */
            move(tmpDestination, destination, REPLACE_EXISTING);
        } finally {
            if (!exists(destination) && backup != null)
                // Restore the backup
                move(backup, destination);
        }
        // It went well, tidy up
        if (backup != null)
            deleteIfExists(backup);
    } finally {
        deleteIfExists(tmpDestination);
    }
}
From source file:org.audiveris.omr.classifier.AbstractClassifier.java
/**
 * Load model and norms from the most suitable classifier data files.
 * If user files do not exist or cannot be unmarshalled, the default files are used.
 *
 * @param fileName file name for classifier data
 * @return the model loaded
 */
protected M load(String fileName) {
    // First, try user data, if any, in local EVAL folder
    logger.debug("AbstractClassifier. Trying user data");
    {
        final Path path = WellKnowns.TRAIN_FOLDER.resolve(fileName);
        if (Files.exists(path)) {
            try {
                Path root = ZipFileSystem.open(path);
                logger.debug("loadModel...");
                M model = loadModel(root);
                logger.debug("loadNorms...");
                norms = loadNorms(root);
                logger.debug("loaded.");
                root.getFileSystem().close();
                if (!isCompatible(model, norms)) {
                    final String msg = "Obsolete classifier user data in " + path + ", trying default data";
                    logger.warn(msg);
                } else {
                    // Tell user we are not using the default
                    logger.info("Classifier data loaded from local {}", path);
                    return model; // Normal exit
                }
            } catch (Exception ex) {
                logger.warn("Load error {}", ex.toString(), ex);
                norms = null;
            }
        }
    }

    // Second, use default data (in program RES folder)
    logger.debug("AbstractClassifier. Trying default data");
    final URI uri = UriUtil.toURI(WellKnowns.RES_URI, fileName);
    try {
        // Must be a path to a true zip *file*
        final Path zipPath;
        logger.debug("uri={}", uri);
        if (uri.toString().startsWith("jar:")) {
            // We have a .zip within a .jar
            // Quick fix: copy the .zip into a separate temp file
            // TODO: investigate a better solution!
            File tmpFile = File.createTempFile("AbstractClassifier-", ".tmp");
            logger.debug("tmpFile={}", tmpFile);
            tmpFile.deleteOnExit();
            try (InputStream is = uri.toURL().openStream()) {
                FileUtils.copyInputStreamToFile(is, tmpFile);
            }
            zipPath = tmpFile.toPath();
        } else {
            zipPath = Paths.get(uri);
        }
        final Path root = ZipFileSystem.open(zipPath);
        M model = loadModel(root);
        norms = loadNorms(root);
        root.getFileSystem().close();
        if (!isCompatible(model, norms)) {
            final String msg = "Obsolete classifier default data in " + uri + ", please retrain from scratch";
            logger.warn(msg);
        } else {
            logger.info("Classifier data loaded from default uri {}", uri);
            return model; // Normal exit
        }
    } catch (Exception ex) {
        logger.warn("Load error on {} {}", uri, ex.toString(), ex);
    }

    norms = null; // No norms
    return null; // No model
}
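The load method above keeps only the root Path and later closes the underlying zip file system through root.getFileSystem().close(). The same close-through-the-path pattern can be sketched with the JDK's standard zip provider; a minimal sketch, where data.zip is a made-up archive name and the project's ZipFileSystem.open is replaced by FileSystems.newFileSystem:

import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Stream;

public class CloseThroughPathDemo {
    public static void main(String[] args) throws Exception {
        // data.zip is a hypothetical existing archive
        Path root = FileSystems.newFileSystem(Paths.get("data.zip"), (ClassLoader) null).getPath("/");
        try {
            // Any Path derived from root shares the same (closeable) FileSystem
            try (Stream<Path> entries = Files.list(root)) {
                entries.forEach(System.out::println);
            }
        } finally {
            // Closing via the Path mirrors root.getFileSystem().close() in load()
            root.getFileSystem().close();
        }
    }
}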