List of usage examples for java.nio.channels FileChannel size
public abstract long size() throws IOException;
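Before the longer examples below, a minimal standalone sketch of size() in use. The file name example.txt is a placeholder, not taken from any of the sources:

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class FileChannelSizeExample {
    public static void main(String[] args) throws IOException {
        // size() reports the current size, in bytes, of the file backing the channel.
        try (FileChannel channel = FileChannel.open(Paths.get("example.txt"), StandardOpenOption.READ)) {
            System.out.println("File size: " + channel.size() + " bytes");
        }
    }
}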
From source file:com.ettrema.zsync.UploadReader.java
/**
 * Copies a Range of blocks from rc into a new offset of wc.
 *
 * @param rc A FileChannel for the input File
 * @param reloc The RelocateRange specifying the Range to be copied and its new offset
 * @param blockSize The block size used by reloc
 * @param wc The FileChannel for the output File
 * @throws IOException
 */
private static void moveRange(FileChannel rc, RelocateRange reloc, int blockSize, FileChannel wc)
        throws IOException {
    long MAX_BUFFER = 16384;
    long startBlock = reloc.getBlockRange().getStart();
    long finishBlock = reloc.getBlockRange().getFinish();
    long bytesLeft = (finishBlock - startBlock) * blockSize; // bytes left to copy
    long readAtOnce = 0; // number of bytes to attempt to read
    long bytesRead = 0; // number of bytes actually read
    long currOffset = reloc.getOffset(); // current write position

    if (finishBlock * blockSize > rc.size() || startBlock < 0) {
        throw new RuntimeException("Invalid RelocateRange: Source file does not contain blocks "
                + reloc.getBlockRange().getRange());
    }
    rc.position(startBlock * blockSize);

    while (bytesLeft > 0) {
        readAtOnce = Math.min(bytesLeft, MAX_BUFFER);
        /* Because transferFrom does not update the write channel's position,
         * it needs to be set manually */
        bytesRead = wc.transferFrom(rc, currOffset, readAtOnce);
        bytesLeft -= bytesRead;
        currOffset += bytesRead;
    }
}
From source file:Main.java
public static long getCommentLength(final FileChannel fileChannel) throws IOException {
    // End of central directory record (EOCD)
    // Offset    Bytes   Description
    // 0         4       End of central directory signature = 0x06054b50
    // 4         2       Number of this disk
    // 6         2       Disk where central directory starts
    // 8         2       Number of central directory records on this disk
    // 10        2       Total number of central directory records
    // 12        4       Size of central directory (bytes)
    // 16        4       Offset of start of central directory, relative to start of archive
    // 20        2       Comment length (n)
    // 22        n       Comment

    // For a zip with no archive comment, the end-of-central-directory record will be
    // 22 bytes long, so we expect to find the EOCD marker 22 bytes from the end.
    final long archiveSize = fileChannel.size();
    if (archiveSize < ZIP_EOCD_REC_MIN_SIZE) {
        throw new IOException("APK too small for ZIP End of Central Directory (EOCD) record");
    }

    // The ZIP End of Central Directory (EOCD) record is located at the very end of the ZIP
    // archive. The record can be identified by its 4-byte signature/magic, which is located
    // at the very beginning of the record. A complication is that the record is
    // variable-length because of the comment field.
    // The algorithm for locating the ZIP EOCD record is as follows. We search backwards from
    // the end of the buffer for the EOCD record signature. Whenever we find a signature, we
    // check that the candidate record's comment length is such that the remainder of the
    // record takes up exactly the remaining bytes in the buffer. The search is bounded
    // because the maximum size of the comment field is 65535 bytes, the field being an
    // unsigned 16-bit number.
    final long maxCommentLength = Math.min(archiveSize - ZIP_EOCD_REC_MIN_SIZE, UINT16_MAX_VALUE);
    final long eocdWithEmptyCommentStartPosition = archiveSize - ZIP_EOCD_REC_MIN_SIZE;
    for (int expectedCommentLength = 0; expectedCommentLength <= maxCommentLength;
            expectedCommentLength++) {
        final long eocdStartPos = eocdWithEmptyCommentStartPosition - expectedCommentLength;
        final ByteBuffer byteBuffer = ByteBuffer.allocate(4);
        fileChannel.position(eocdStartPos);
        fileChannel.read(byteBuffer);
        byteBuffer.order(ByteOrder.LITTLE_ENDIAN);
        if (byteBuffer.getInt(0) == ZIP_EOCD_REC_SIG) {
            final ByteBuffer commentLengthByteBuffer = ByteBuffer.allocate(2);
            fileChannel.position(eocdStartPos + ZIP_EOCD_COMMENT_LENGTH_FIELD_OFFSET);
            fileChannel.read(commentLengthByteBuffer);
            commentLengthByteBuffer.order(ByteOrder.LITTLE_ENDIAN);
            final int actualCommentLength = commentLengthByteBuffer.getShort(0);
            if (actualCommentLength == expectedCommentLength) {
                return actualCommentLength;
            }
        }
    }
    throw new IOException("ZIP End of Central Directory (EOCD) record not found");
}
From source file:com.stimulus.archiva.store.MessageStore.java
public static void copy(File source, File dest) throws IOException {
    FileChannel in = null, out = null;
    try {
        in = new FileInputStream(source).getChannel();
        out = new FileOutputStream(dest).getChannel();
        // Map the entire source file into memory and write it to the destination.
        long size = in.size();
        MappedByteBuffer buf = in.map(FileChannel.MapMode.READ_ONLY, 0, size);
        // write() may not consume the whole buffer in a single call, so loop until done.
        while (buf.hasRemaining()) {
            out.write(buf);
        }
    } finally {
        if (in != null)
            in.close();
        if (out != null)
            out.close();
    }
}
From source file:edu.stanford.epad.common.util.EPADFileUtils.java
public static File copyFile(File src, File dst) {
    FileChannel inChannel = null;
    FileChannel outChannel = null;
    try {
        inChannel = new FileInputStream(src).getChannel();
        outChannel = new FileOutputStream(dst).getChannel();
        // transferTo may copy fewer bytes than requested, so loop until the whole file is copied.
        long size = inChannel.size();
        long position = 0;
        while (position < size) {
            position += inChannel.transferTo(position, size - position, outChannel);
        }
        return dst;
    } catch (Exception e) {
        log.warning("Error copying file, from " + src.getAbsolutePath() + " to " + dst.getAbsolutePath(), e);
    } finally {
        try {
            if (inChannel != null)
                inChannel.close();
            if (outChannel != null)
                outChannel.close();
        } catch (IOException e) {
            // ignore close failures
        }
    }
    return null;
}
From source file:org.geoserver.rest.util.IOUtils.java
/**
 * Optimized version of the copy method for file channels.
 *
 * @param bufferSize size of the temp buffer to use for this copy.
 * @param source the source {@link ReadableByteChannel}.
 * @param destination the destination {@link WritableByteChannel}.
 * @throws IOException in case something bad happens.
 */
public static void copyFileChannel(int bufferSize, FileChannel source, FileChannel destination)
        throws IOException {
    inputNotNull(source, destination);
    if (!source.isOpen() || !destination.isOpen())
        throw new IllegalStateException("Source and destination channels must be open.");
    FileLock lock = null;
    try {
        lock = destination.lock();
        final long sourceSize = source.size();
        long pos = 0;
        while (pos < sourceSize) {
            // transfer at most bufferSize bytes per iteration
            final long remaining = (sourceSize - pos);
            final int mappedZoneSize = remaining >= bufferSize ? bufferSize : (int) remaining;
            // advance by the number of bytes actually transferred
            pos += destination.transferFrom(source, pos, mappedZoneSize);
        }
    } finally {
        if (lock != null) {
            try {
                lock.release();
            } catch (Throwable t) {
                if (LOGGER.isLoggable(Level.INFO))
                    LOGGER.log(Level.INFO, t.getLocalizedMessage(), t);
            }
        }
    }
}
From source file:me.carpela.network.pt.cracker.tools.ttorrent.Torrent.java
private static String hashFiles(List<File> files, int pieceLenght)
        throws InterruptedException, IOException, NoSuchAlgorithmException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(pieceLenght);
    List<Future<String>> results = new LinkedList<Future<String>>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                // Track hashing progress in 10% increments of the channel size.
                if (channel.position() / (double) channel.size() * 100f > step) {
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / pieceLenght));
    return hashes.toString();
}
From source file:RegexProperties.java
public void load(FileInputStream inStream) throws IOException, PatternSyntaxException {
    FileChannel fc = inStream.getChannel();
    ByteBuffer bb = ByteBuffer.allocate((int) fc.size());
    // Read the whole file; a single read() is not guaranteed to fill the buffer.
    while (bb.hasRemaining() && fc.read(bb) > 0) {
        // keep reading until the buffer is full or EOF
    }
    bb.flip();
    String fileContent = new String(bb.array());

    Pattern pattern = Pattern.compile("^(.*)$", Pattern.MULTILINE);
    Matcher matcher = pattern.matcher(fileContent);
    while (matcher.find()) {
        String line = matcher.group(1);
        if (line != null && !"".equals(line.trim()) && !line.startsWith("#") && !line.startsWith("!")) {
            String[] keyValue;
            if (line.indexOf("=") > 0)
                keyValue = line.split("=", 2);
            else
                keyValue = line.split(":", 2);
            // Guard against lines with no separator, where split() returns a single element.
            if (keyValue.length == 2) {
                super.put(keyValue[0].trim(), keyValue[1]);
            }
        }
    }
    fc = null;
    bb = null;
}
From source file:it.polito.tellmefirst.parsing.TXTparser.java
public String txtToText(File file) throws TMFVisibleException {
    LOG.debug("[txtToText] - BEGIN");
    String result;
    try {
        FileInputStream stream = new FileInputStream(file);
        FileChannel fc = stream.getChannel();
        MappedByteBuffer bb = fc.map(FileChannel.MapMode.READ_ONLY, 0, fc.size());
        stream.close();
        result = Charset.defaultCharset().decode(bb).toString();
    } catch (Exception e) {
        LOG.error("[txtToText] - EXCEPTION: ", e);
        throw new TMFVisibleException(
                "Problem parsing the file: the TXT document you uploaded seems malformed.");
    }
    LOG.debug("[txtToText] - END");
    return result;
}
From source file:org.lenskit.data.packed.BinaryRatingDAO.java
/**
 * Open a binary rating DAO.
 *
 * @param file The file to open.
 * @return A DAO backed by {@code file}.
 * @throws IOException If there is an error opening or reading the file.
 */
public static BinaryRatingDAO open(File file) throws IOException {
    try (FileInputStream input = new FileInputStream(file)) {
        FileChannel channel = input.getChannel();
        BinaryHeader header = BinaryHeader.read(channel);
        logger.info("Loading DAO with {} ratings of {} items from {} users", header.getRatingCount(),
                header.getItemCount(), header.getUserCount());

        // the channel position has been advanced to the end of the header
        ByteBuffer data = channel.map(FileChannel.MapMode.READ_ONLY, channel.position(),
                header.getRatingDataSize());
        channel.position(channel.position() + header.getRatingDataSize());

        ByteBuffer tableBuffer = channel.map(FileChannel.MapMode.READ_ONLY, channel.position(),
                channel.size() - channel.position());
        BinaryIndexTable utbl = BinaryIndexTable.fromBuffer(header.getUserCount(), tableBuffer);
        BinaryIndexTable itbl = BinaryIndexTable.fromBuffer(header.getItemCount(), tableBuffer);

        return new BinaryRatingDAO(file, header, data, utbl, itbl, header.getRatingCount(), Long.MAX_VALUE);
    }
}
From source file:com.buaa.cfs.io.nativeio.NativeIO.java
/**
 * Unbuffered file copy from src to dst without tainting the OS buffer cache.
 * <p>
 * On POSIX platforms: it uses FileChannel#transferTo(), which internally attempts unbuffered
 * IO on OSes with native sendfile64() support and falls back to buffered IO otherwise.
 * <p>
 * It minimizes the number of FileChannel#transferTo() calls by passing the src file size
 * directly instead of a smaller size as the 3rd parameter. This reduces the number of
 * sendfile64() system calls when native sendfile64() is supported. In the two fallback cases
 * where sendfile is not supported, FileChannel#transferTo already has its own batching of
 * size 8 MB and 8 KB, respectively.
 * <p>
 * On Windows: it uses its own native wrapper of CopyFileEx with the COPY_FILE_NO_BUFFERING
 * flag, which is supported on Windows Server 2008 and above.
 * <p>
 * Ideally, we should use FileChannel#transferTo() across both POSIX and Windows platforms.
 * Unfortunately, the wrapper (Java_sun_nio_ch_FileChannelImpl_transferTo0) used by
 * FileChannel#transferTo for unbuffered IO is not implemented on Windows. Based on
 * OpenJDK 6/7/8 source code, Java_sun_nio_ch_FileChannelImpl_transferTo0 on Windows simply
 * returns IOS_UNSUPPORTED.
 * <p>
 * Note: This simple native wrapper does minimal parameter checking before the copy and
 * consistency checking (e.g., size) after the copy. It is recommended to use a wrapper
 * function like the Storage#nativeCopyFileUnbuffered() function in hadoop-hdfs with
 * pre/post copy checks.
 *
 * @param src The source path
 * @param dst The destination path
 * @throws IOException
 */
public static void copyFileUnbuffered(File src, File dst) throws IOException {
    if (nativeLoaded && Shell.WINDOWS) {
        copyFileUnbuffered0(src.getAbsolutePath(), dst.getAbsolutePath());
    } else {
        FileInputStream fis = null;
        FileOutputStream fos = null;
        FileChannel input = null;
        FileChannel output = null;
        try {
            fis = new FileInputStream(src);
            fos = new FileOutputStream(dst);
            input = fis.getChannel();
            output = fos.getChannel();
            long remaining = input.size();
            long position = 0;
            long transferred = 0;
            while (remaining > 0) {
                transferred = input.transferTo(position, remaining, output);
                remaining -= transferred;
                position += transferred;
            }
        } finally {
            IOUtils.cleanup(LOG, output);
            IOUtils.cleanup(LOG, fos);
            IOUtils.cleanup(LOG, input);
            IOUtils.cleanup(LOG, fis);
        }
    }
}