List of usage examples for java.nio.file Files size
public static long size(Path path) throws IOException
From source file:ec.edu.chyc.manejopersonal.managebean.GestorConvenio.java
/**
 * Loads the convenio with the given id, its project list, and a human-readable
 * size string for its uploaded file (empty string when no regular file exists).
 *
 * @param id primary key of the convenio to load
 */
private void cargarDatosConvenio(Long id) {
    convenio = convenioController.findConvenio(id);
    listaProyectos = new ArrayList<>(convenio.getProyectosCollection());
    try {
        Path pathArchivoSubido = ServerUtils.getPathConvenios().resolve(convenio.getArchivoConvenio());
        // Files.isRegularFile() already implies existence, so the original's
        // extra Files.exists() check was redundant.
        if (Files.isRegularFile(pathArchivoSubido)) {
            long size = Files.size(pathArchivoSubido);
            tamanoArchivo = ServerUtils.humanReadableByteCount(size);
        } else {
            tamanoArchivo = "";
        }
    } catch (IOException ex) {
        // Fixed copy-paste bug: the original logged under GestorContrato.class
        // even though this class is GestorConvenio.
        Logger.getLogger(GestorConvenio.class.getName()).log(Level.SEVERE, null, ex);
    }
}
From source file:org.apache.storm.daemon.logviewer.utils.DirectoryCleaner.java
/**
 * If totalSize of files exceeds either the per-worker quota or the global quota,
 * Logviewer deletes the oldest inactive log files in a worker directory or in all
 * worker dirs. We use the parameter forPerDir to switch between the two deletion modes.
 *
 * @param dirs the list of directories to be scanned for deletion
 * @param quota the per-dir quota or the total quota for all directories
 * @param forPerDir if true, deletion happens for a single dir; otherwise, for all directories globally
 * @param activeDirs only for global deletion, we want to skip the active logs in activeDirs
 * @return deletion metadata: total bytes deleted and number of files deleted
 */
public DeletionMeta deleteOldestWhileTooLarge(List<Path> dirs, long quota, boolean forPerDir,
        Set<Path> activeDirs) throws IOException {
    // First pass: sum the sizes of every file under every directory to decide
    // whether we are over quota at all.
    long totalSize = 0;
    for (Path dir : dirs) {
        try (DirectoryStream<Path> stream = getStreamForDirectory(dir)) {
            for (Path path : stream) {
                totalSize += Files.size(path);
            }
        }
    }
    LOG.debug("totalSize: {} quota: {}", totalSize, quota);
    long toDeleteSize = totalSize - quota;
    if (toDeleteSize <= 0) {
        // Under quota: nothing to delete.
        return DeletionMeta.EMPTY;
    }
    int deletedFiles = 0;
    long deletedSize = 0;
    // the oldest pq_size files in this directory will be placed in PQ, with the newest at the root
    // (reversed comparator: the head is the NEWEST of the candidates, so it can be
    // evicted when an older file is found).
    PriorityQueue<Pair<Path, FileTime>> pq = new PriorityQueue<>(PQ_SIZE,
            Comparator.comparing((Pair<Path, FileTime> p) -> p.getRight()).reversed());
    int round = 0;
    // Files that must never be deleted (active logs) or whose deletion failed;
    // skipped on subsequent scans.
    final Set<Path> excluded = new HashSet<>();
    while (toDeleteSize > 0) {
        LOG.debug("To delete size is {}, start a new round of deletion, round: {}", toDeleteSize, round);
        // Re-scan all directories each round, collecting the PQ_SIZE oldest
        // eligible files into the priority queue.
        for (Path dir : dirs) {
            try (DirectoryStream<Path> stream = getStreamForDirectory(dir)) {
                for (Path path : stream) {
                    if (!excluded.contains(path)) {
                        if (isFileEligibleToSkipDelete(forPerDir, activeDirs, dir, path)) {
                            excluded.add(path);
                        } else {
                            Pair<Path, FileTime> p = Pair.of(path, Files.getLastModifiedTime(path));
                            if (pq.size() < PQ_SIZE) {
                                pq.offer(p);
                            } else if (p.getRight().toMillis() < pq.peek().getRight().toMillis()) {
                                // Older than the newest candidate: replace it.
                                pq.poll();
                                pq.offer(p);
                            }
                        }
                    }
                }
            }
        }
        if (!pq.isEmpty()) {
            // need to reverse the order of elements in PQ to delete files from oldest to newest
            Stack<Pair<Path, FileTime>> stack = new Stack<>();
            while (!pq.isEmpty()) {
                stack.push(pq.poll());
            }
            while (!stack.isEmpty() && toDeleteSize > 0) {
                Pair<Path, FileTime> pair = stack.pop();
                Path file = pair.getLeft();
                final String canonicalPath = file.toAbsolutePath().normalize().toString();
                final long fileSize = Files.size(file);
                final long lastModified = pair.getRight().toMillis();
                //Original implementation doesn't actually check if delete succeeded or not.
                try {
                    Utils.forceDelete(file.toString());
                    LOG.info("Delete file: {}, size: {}, lastModified: {}", canonicalPath, fileSize,
                            lastModified);
                    toDeleteSize -= fileSize;
                    deletedSize += fileSize;
                    deletedFiles++;
                } catch (IOException e) {
                    // Deletion failed (e.g. permissions): never retry this file this run.
                    excluded.add(file);
                }
            }
            pq.clear();
            round++;
            if (round >= MAX_ROUNDS) {
                // Bail out after a bounded number of rounds; the remainder is
                // handled on the next cleanup interval.
                if (forPerDir) {
                    LOG.warn(
                            "Reach the MAX_ROUNDS: {} during per-dir deletion, you may have too many files in "
                                    + "a single directory : {}, will delete the rest files in next interval.",
                            MAX_ROUNDS, dirs.get(0).toAbsolutePath().normalize());
                } else {
                    LOG.warn("Reach the MAX_ROUNDS: {} during global deletion, you may have too many files, "
                            + "will delete the rest files in next interval.", MAX_ROUNDS);
                }
                break;
            }
        } else {
            // Every remaining file is excluded/active; we stay over quota.
            // NOTE(review): this branch does not break, so if toDeleteSize stays > 0
            // the while-loop spins — presumably excluded/eligibility guarantees an
            // eventual empty scan is terminal; confirm against caller behavior.
            LOG.warn("No more files able to delete this round, but {} is over quota by {} MB",
                    forPerDir ? "this directory" : "root directory", toDeleteSize * 1e-6);
        }
    }
    return new DeletionMeta(deletedSize, deletedFiles);
}
From source file:org.apache.tika.eval.io.ExtractReader.java
/**
 * Loads the list of Metadata objects from an extract file (.json or .txt,
 * optionally compressed), applying length sanity checks and the configured
 * ALTER_METADATA_LIST policy.
 *
 * @param extractFile path to the extract file; must be an existing regular file
 * @return the (possibly altered) metadata list, or null if the compression type
 *         is unsupported
 * @throws ExtractReaderException wrapping any missing-file, bad-suffix, length,
 *         I/O or parse failure
 */
public List<Metadata> loadExtract(Path extractFile) throws ExtractReaderException {
    List<Metadata> metadataList = null;
    if (extractFile == null || !Files.isRegularFile(extractFile)) {
        throw new ExtractReaderException(ExtractReaderException.TYPE.NO_EXTRACT_FILE);
    }
    FileSuffixes fileSuffixes = parseSuffixes(extractFile.getFileName().toString());
    if (fileSuffixes.txtOrJson == null) {
        throw new ExtractReaderException(ExtractReaderException.TYPE.INCORRECT_EXTRACT_FILE_SUFFIX);
    }
    // NOTE(review): this isRegularFile check duplicates the one above and can
    // never fire here.
    if (!Files.isRegularFile(extractFile)) {
        throw new ExtractReaderException(ExtractReaderException.TYPE.NO_EXTRACT_FILE);
    }
    // Reject empty files and files outside the configured min/max length window
    // (IGNORE_LENGTH disables a bound).
    long length = -1L;
    try {
        length = Files.size(extractFile);
    } catch (IOException e) {
        throw new ExtractReaderException(ExtractReaderException.TYPE.IO_EXCEPTION);
    }
    if (length == 0L) {
        throw new ExtractReaderException(ExtractReaderException.TYPE.ZERO_BYTE_EXTRACT_FILE);
    }
    if (minExtractLength > IGNORE_LENGTH && length < minExtractLength) {
        throw new ExtractReaderException(ExtractReaderException.TYPE.EXTRACT_FILE_TOO_SHORT);
    }
    if (maxExtractLength > IGNORE_LENGTH && length > maxExtractLength) {
        throw new ExtractReaderException(ExtractReaderException.TYPE.EXTRACT_FILE_TOO_LONG);
    }
    // Open the raw stream and wrap it in the decompressor matching the file
    // suffix, then decode as UTF-8.
    Reader reader = null;
    InputStream is = null;
    try {
        is = Files.newInputStream(extractFile);
        if (fileSuffixes.compression != null) {
            if (fileSuffixes.compression.equals("bz2")) {
                is = new BZip2CompressorInputStream(is);
            } else if (fileSuffixes.compression.equals("gz") || fileSuffixes.compression.equals("gzip")) {
                is = new GzipCompressorInputStream(is);
            } else if (fileSuffixes.compression.equals("zip")) {
                // NOTE(review): ZCompressorInputStream decodes Unix .Z "compress"
                // data, not .zip archives — confirm this suffix mapping is intended.
                is = new ZCompressorInputStream(is);
            } else {
                LOG.warn("Can't yet process compression of type: {}", fileSuffixes.compression);
                // Returns null (metadataList is still unset) for unknown compression.
                return metadataList;
            }
        }
        reader = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8));
    } catch (IOException e) {
        throw new ExtractReaderException(ExtractReaderException.TYPE.IO_EXCEPTION);
    }
    try {
        if (fileSuffixes.txtOrJson.equals("json")) {
            metadataList = JsonMetadataList.fromJson(reader);
            if (alterMetadataList.equals(ALTER_METADATA_LIST.FIRST_ONLY) && metadataList.size() > 1) {
                // FIRST_ONLY: drop everything but the container document's metadata.
                while (metadataList.size() > 1) {
                    metadataList.remove(metadataList.size() - 1);
                }
            } else if (alterMetadataList.equals(ALTER_METADATA_LIST.AS_IS.CONCATENATE_CONTENT_INTO_FIRST)
                    && metadataList.size() > 1) {
                // CONCATENATE_CONTENT_INTO_FIRST: merge every entry's TIKA_CONTENT
                // (space-separated) into the first metadata, then drop the rest.
                StringBuilder sb = new StringBuilder();
                Metadata containerMetadata = metadataList.get(0);
                for (int i = 0; i < metadataList.size(); i++) {
                    Metadata m = metadataList.get(i);
                    String c = m.get(RecursiveParserWrapper.TIKA_CONTENT);
                    if (c != null) {
                        sb.append(c);
                        sb.append(" ");
                    }
                }
                containerMetadata.set(RecursiveParserWrapper.TIKA_CONTENT, sb.toString());
                while (metadataList.size() > 1) {
                    metadataList.remove(metadataList.size() - 1);
                }
            }
        } else {
            metadataList = generateListFromTextFile(reader, fileSuffixes);
        }
    } catch (IOException e) {
        throw new ExtractReaderException(ExtractReaderException.TYPE.IO_EXCEPTION);
    } catch (TikaException e) {
        throw new ExtractReaderException(ExtractReaderException.TYPE.EXTRACT_PARSE_EXCEPTION);
    } finally {
        IOUtils.closeQuietly(reader);
        IOUtils.closeQuietly(is);
    }
    return metadataList;
}
From source file:com.epam.catgenome.controller.util.MultipartFileSender.java
/**
 * Serves the file at {@code filepath} to the client, with support for HTTP
 * caching validators (ETag / Last-Modified), resume headers, and single- or
 * multi-part byte-range requests (RFC 7233 style).
 *
 * @throws IOException if reading the file or writing the response fails
 */
public void serveResource() throws IOException {
    if (response == null || request == null) {
        return;
    }
    if (!Files.exists(filepath)) {
        logger.error("File doesn't exist at URI : {}", filepath.toAbsolutePath().toString());
        response.sendError(HttpServletResponse.SC_NOT_FOUND);
        return;
    }
    Long length = Files.size(filepath);
    String fileName = filepath.getFileName().toString();
    FileTime lastModifiedObj = Files.getLastModifiedTime(filepath);
    if (StringUtils.isEmpty(fileName) || lastModifiedObj == null) {
        response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        return;
    }
    // Last-modified expressed as epoch seconds via the system zone.
    long lastModified = LocalDateTime
            .ofInstant(lastModifiedObj.toInstant(), ZoneId.of(ZoneOffset.systemDefault().getId()))
            .toEpochSecond(ZoneOffset.UTC);
    //String contentType = MimeTypeUtils.probeContentType(filepath);
    // Content-type probing is disabled; contentType stays null and falls back
    // to application/octet-stream below.
    String contentType = null;
    // Validate request headers for caching ---------------------------------------------------
    if (!validateHeadersCaching(fileName, lastModified)) {
        return;
    }
    // Validate request headers for resume ----------------------------------------------------
    if (!validateHeadersResume(fileName, lastModified)) {
        return;
    }
    // Validate and process range -------------------------------------------------------------
    Range full = new Range(0, length - 1, length);
    List<Range> ranges = processRange(length, fileName, full);
    if (ranges == null) {
        return;
    }
    // Prepare and initialize response --------------------------------------------------------
    String disposition = "inline";
    // If content type is unknown, set the default value; otherwise, except for
    // images, pick inline vs. attachment based on the client's Accept header.
    if (contentType == null) {
        contentType = "application/octet-stream";
    } else if (!contentType.startsWith("image")) {
        String accept = request.getHeader("Accept");
        disposition = accept != null && HttpUtils.accepts(accept, contentType) ? "inline" : "attachment";
    }
    logger.debug("Content-Type : {}", contentType);
    // Initialize response.
    response.reset();
    response.setBufferSize(DEFAULT_BUFFER_SIZE);
    response.setHeader("Content-Type", contentType);
    response.setHeader("Content-Disposition", disposition + ";filename=\"" + fileName + "\"");
    logger.debug("Content-Disposition : {}", disposition);
    response.setHeader("Accept-Ranges", "bytes");
    // NOTE(review): the file name is used as the ETag value — confirm this is
    // strong enough for the callers (content changes don't change the tag).
    response.setHeader("ETag", fileName);
    response.setDateHeader("Last-Modified", lastModified);
    response.setDateHeader("Expires", System.currentTimeMillis() + DEFAULT_EXPIRE_TIME);
    // Send requested file (part(s)) to client ------------------------------------------------
    try (InputStream input = new BufferedInputStream(Files.newInputStream(filepath));
            OutputStream output = response.getOutputStream()) {
        if (ranges.isEmpty() || ranges.get(0) == full) {
            // Return full file.
            logger.info("Return full file");
            response.setContentType(contentType);
            response.setHeader(CONTENT_RANGE_HEADER, "bytes " + full.start + "-" + full.end + "/" + full.total);
            response.setHeader("Content-Length", String.valueOf(full.length));
            Range.copy(input, output, length, full.start, full.length);
        } else if (ranges.size() == 1) {
            // Return single part of file.
            Range r = ranges.get(0);
            logger.info("Return 1 part of file : from ({}) to ({})", r.start, r.end);
            response.setContentType(contentType);
            response.setHeader(CONTENT_RANGE_HEADER, "bytes " + r.start + "-" + r.end + "/" + r.total);
            response.setHeader("Content-Length", String.valueOf(r.length));
            response.setStatus(HttpServletResponse.SC_PARTIAL_CONTENT); // 206.
            // Copy single part range.
            Range.copy(input, output, length, r.start, r.length);
        } else {
            // Return multiple parts of file as a multipart/byteranges body.
            response.setContentType("multipart/byteranges; boundary=" + MULTIPART_BOUNDARY);
            response.setStatus(HttpServletResponse.SC_PARTIAL_CONTENT); // 206.
            // Cast back to ServletOutputStream to get the easy println methods.
            ServletOutputStream sos = (ServletOutputStream) output;
            // Copy multi part range.
            for (Range r : ranges) {
                logger.info("Return multi part of file : from ({}) to ({})", r.start, r.end);
                // Add multipart boundary and header fields for every range.
                sos.println();
                sos.println("--" + MULTIPART_BOUNDARY);
                sos.println("Content-Type: " + contentType);
                sos.println("Content-Range: bytes " + r.start + "-" + r.end + "/" + r.total);
                // Copy single part range of multi part range.
                Range.copy(input, output, length, r.start, r.length);
            }
            // End with multipart boundary.
            sos.println();
            sos.println("--" + MULTIPART_BOUNDARY + "--");
        }
    }
}
From source file:org.dcm4chee.storage.tar.TarContainerProvider.java
@Override public void writeEntriesTo(StorageContext context, List<ContainerEntry> entries, OutputStream out) throws IOException { TarArchiveOutputStream tar = new TarArchiveOutputStream(out); String checksumEntry = container.getChecksumEntry(); if (checksumEntry != null) { ByteArrayOutputStream bout = new ByteArrayOutputStream(); ContainerEntry.writeChecksumsTo(entries, bout); TarArchiveEntry tarEntry = new TarArchiveEntry(checksumEntry); tarEntry.setSize(bout.size());//from www.j av a 2 s . c om tar.putArchiveEntry(tarEntry); tar.write(bout.toByteArray()); tar.closeArchiveEntry(); } for (ContainerEntry entry : entries) { Path path = entry.getSourcePath(); TarArchiveEntry tarEntry = new TarArchiveEntry(entry.getName()); tarEntry.setModTime(Files.getLastModifiedTime(path).toMillis()); tarEntry.setSize(Files.size(path)); tar.putArchiveEntry(tarEntry); Files.copy(path, tar); tar.closeArchiveEntry(); } tar.finish(); }
From source file:com.spectralogic.ds3client.helpers.JobImpl_Test.java
/**
 * Test fixture setup: resolves each book resource on disk, records its size,
 * and registers a matching Ds3Object. Any failure aborts the test run with a
 * descriptive message.
 */
@Before
public void beforeRunningTestMethod() {
    try {
        for (final String book : FILE_NAMES) {
            final Path objPath = ResourceUtils.loadFileResource(DIR_NAME + book);
            bookSize = Files.size(objPath);
            final Ds3Object obj = new Ds3Object(book, bookSize);
            bookTitles.add(book);
            objects.add(obj);
        }
    } catch (final Throwable t) {
        // Use the throwable itself (class name + message) rather than
        // getMessage(), which can be null and would hide what actually failed.
        fail("Error running beforeRunningTestMethod: " + t);
    }
}
From source file:fr.duminy.jbackup.core.archive.FileCollector.java
private long collect(final List<SourceWithPath> collectedFiles, final Path source, final IOFileFilter directoryFilter, final IOFileFilter fileFilter, final Cancellable cancellable) throws IOException { final long[] totalSize = { 0L }; SimpleFileVisitor<Path> visitor = new SimpleFileVisitor<Path>() { @Override//w w w . j a va2 s . c o m public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException { super.preVisitDirectory(dir, attrs); if ((directoryFilter == null) || source.equals(dir) || directoryFilter.accept(dir.toFile())) { return CONTINUE; } else { return SKIP_SUBTREE; } } @Override public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { if ((cancellable != null) && cancellable.isCancelled()) { return TERMINATE; } super.visitFile(file, attrs); if (!Files.isSymbolicLink(file)) { if ((fileFilter == null) || fileFilter.accept(file.toFile())) { LOG.trace("visitFile {}", file.toAbsolutePath()); collectedFiles.add(new SourceWithPath(source, file)); totalSize[0] += Files.size(file); } } return CONTINUE; } }; Files.walkFileTree(source, visitor); return totalSize[0]; }
From source file:typicalnerd.musicarchive.client.network.FileUploader.java
/** * Uploads the file to the web service.// ww w . j ava2 s. c o m * * @param file * The file's location in the file system. * * @return * Returns the {@link FileUploadResult} in case of a successful upload. * * @throws IOException */ private FileUploadResult uploadFile(Path file) throws IOException { URL url = new URL(baseUrl + "/files"); HttpURLConnection connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/octet-stream"); connection.setRequestProperty("Content-Length", String.valueOf(Files.size(file))); connection.setRequestProperty("Accept", "application/json"); connection.setDoOutput(true); connection.setDoInput(true); try (InputStream in = new BufferedInputStream(new FileInputStream(file.toFile()))) { try (OutputStream out = connection.getOutputStream()) { // Lazy as we are, we use one of the many nice functions in the Apache // Commons library. IOUtils.copy(in, out); } } // Now it's time to read the first result. If all goes well, this was a // HTTP 201 - CREATED. If the file is already known, e.g. because hash and // name match an existing object, then HTTP 200 is returned and nothing was // changed on the server. In that case we query if there is meta data and if // so skip the upload in with the assumption that the latest version is already // on the server. If not, we continue as planned. connection.connect(); int result = connection.getResponseCode(); if (200 != result || 201 != result) { try (InputStream in = connection.getInputStream()) { ErrorResponse e = mapper.readValue(in, ErrorResponse.class); throw new UnknownServiceException("Upload of file failed. " + e.getMessage()); } } // Parse the response to get the location of where to put the meta data. // We expect a JSON response so let Jackson parse it into an expected response // object. 
FileUploadResult uploadResult = new FileUploadResult(result); try (InputStream in = connection.getInputStream()) { ObjectReader reader = mapper.readerForUpdating(uploadResult); reader.readValue(in); } return uploadResult; }
From source file:org.niord.proxy.rest.RepositoryRestService.java
/** * Streams the file specified by the path * @param path the path/* w w w . j a v a2 s . co m*/ * @param request the servlet request * @return the response */ @GET @javax.ws.rs.Path("/file/{file:.+}") public Response streamFile(@PathParam("file") String path, @Context Request request) throws IOException { Path f = getRepoFile(path); if (f == null || Files.isDirectory(f)) { log.log(Level.WARNING, "Failed streaming file: " + f); return Response.status(HttpServletResponse.SC_NOT_FOUND).entity("File not found: " + path).build(); } // When a locally maintained repository is used, fetch the file from Niord if (settings.getRepoType() == Settings.RepoType.LOCAL && Files.notExists(f)) { f = fetchNiordFile(settings.getServer() + "/rest/repo/file/" + WebUtils.encodeURIComponent(path), f); } // Check if the file exits if (f == null || Files.notExists(f)) { log.log(Level.WARNING, "Failed streaming file: " + f); return Response.status(HttpServletResponse.SC_NOT_FOUND).entity("File not found: " + path).build(); } // Set expiry to cacheTimeout minutes Date expirationDate = new Date(System.currentTimeMillis() + 1000L * 60L * cacheTimeout); String mt = fileTypes.getContentType(f); // Check for an ETag match EntityTag etag = new EntityTag("" + Files.getLastModifiedTime(f).toMillis() + "_" + Files.size(f), true); Response.ResponseBuilder responseBuilder = request.evaluatePreconditions(etag); if (responseBuilder != null) { // Etag match log.log(Level.FINE, "File unchanged. Return code 304"); return responseBuilder.expires(expirationDate).build(); } log.log(Level.FINE, "Streaming file: " + f); return Response.ok(f.toFile(), mt).expires(expirationDate).tag(etag).build(); }
From source file:org.zanata.sync.jobs.cache.RepoCacheImpl.java
private long copyDir(Path source, Path target) throws IOException { Files.createDirectories(target); AtomicLong totalSize = new AtomicLong(0); Files.walkFileTree(source, EnumSet.of(FileVisitOption.FOLLOW_LINKS), Integer.MAX_VALUE, new SimpleFileVisitor<Path>() { @Override/*w w w .j a va 2s . co m*/ public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException { Path targetdir = target.resolve(source.relativize(dir)); try { if (Files.isDirectory(targetdir) && Files.exists(targetdir)) { return CONTINUE; } Files.copy(dir, targetdir, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.COPY_ATTRIBUTES); } catch (FileAlreadyExistsException e) { if (!Files.isDirectory(targetdir)) { throw e; } } return CONTINUE; } @Override public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { if (Files.isRegularFile(file)) { totalSize.accumulateAndGet(Files.size(file), (l, r) -> l + r); } Path targetFile = target.resolve(source.relativize(file)); // only copy to target if it doesn't exist or it exist but the content is different if (!Files.exists(targetFile) || !com.google.common.io.Files.equal(file.toFile(), targetFile.toFile())) { Files.copy(file, targetFile, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.COPY_ATTRIBUTES); } return CONTINUE; } }); return totalSize.get(); }