List of usage examples for java.nio.channels FileChannel position
public abstract FileChannel position(long newPosition) throws IOException;
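Before the collected examples, a minimal sketch of the two overloads, assuming nothing beyond a throwaway temp file: the no-argument position() reports the current offset, and position(long) seeks to an absolute offset (seeking past end-of-file is legal; a later write there leaves an unspecified gap).

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class PositionDemo {
    public static void main(String[] args) throws IOException {
        Path tmp = Files.createTempFile("position-demo", ".bin");
        try (FileChannel ch = FileChannel.open(tmp,
                StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            ch.write(ByteBuffer.wrap("hello world".getBytes(StandardCharsets.US_ASCII)));
            System.out.println(ch.position()); // 11: writes advance the position

            ch.position(6);                    // absolute seek; returns the channel
            ByteBuffer dst = ByteBuffer.allocate(5);
            ch.read(dst);
            System.out.println(new String(dst.array(), StandardCharsets.US_ASCII)); // "world"
        } finally {
            Files.deleteIfExists(tmp);
        }
    }
}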
From source file:org.apache.nifi.file.FileUtils.java
/**
 * Randomly generates a sequence of bytes and overwrites the contents of the
 * file a number of times. The file is then deleted.
 *
 * @param file File to be overwritten a number of times and, ultimately, deleted
 * @param passes Number of times file should be overwritten
 * @throws IOException if something makes shredding or deleting a problem
 */
public static void shredFile(final File file, final int passes) throws IOException {
    final Random generator = new Random();
    final long fileLength = file.length();
    final int byteArraySize = (int) Math.min(fileLength, 1048576); // 1MB
    final byte[] b = new byte[byteArraySize];
    final long numOfRandomWrites = (fileLength / b.length) + 1;
    final FileOutputStream fos = new FileOutputStream(file);
    try {
        // Overwrite the file contents (passes) times
        final FileChannel channel = fos.getChannel();
        for (int i = 0; i < passes; i++) {
            generator.nextBytes(b);
            for (int j = 0; j <= numOfRandomWrites; j++) {
                fos.write(b);
            }
            fos.flush();
            channel.position(0);
        }
        // Write out "0" for each byte in the file
        Arrays.fill(b, (byte) 0);
        for (int j = 0; j < numOfRandomWrites; j++) {
            fos.write(b);
        }
        fos.flush();
        fos.close();
        // Try to delete the file a few times
        if (!FileUtils.deleteFile(file, null, 5)) {
            throw new IOException("Failed to delete file after shredding");
        }
    } finally {
        FileUtils.closeQuietly(fos);
    }
}
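A hypothetical call site for the helper above; the path and pass count are illustrative, not from the NiFi source:

// Illustrative usage only: overwrite with random data three times,
// zero-fill once, then delete.
File sensitive = new File("/tmp/session.key"); // hypothetical path
FileUtils.shredFile(sensitive, 3);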
From source file:com.remobile.file.LocalFilesystem.java
private static void copyResource(CordovaResourceApi.OpenForReadResult input, OutputStream outputStream)
        throws IOException {
    try {
        InputStream inputStream = input.inputStream;
        if (inputStream instanceof FileInputStream && outputStream instanceof FileOutputStream) {
            FileChannel inChannel = ((FileInputStream) input.inputStream).getChannel();
            FileChannel outChannel = ((FileOutputStream) outputStream).getChannel();
            long offset = 0;
            long length = input.length;
            if (input.assetFd != null) {
                offset = input.assetFd.getStartOffset();
            }
            // transferFrom() reads from the source channel's current position,
            // so set the absolute position on the input channel first.
            inChannel.position(offset);
            outChannel.transferFrom(inChannel, 0, length);
        } else {
            final int BUFFER_SIZE = 8192;
            byte[] buffer = new byte[BUFFER_SIZE];
            for (;;) {
                int bytesRead = inputStream.read(buffer, 0, BUFFER_SIZE);
                if (bytesRead <= 0) {
                    break;
                }
                outputStream.write(buffer, 0, bytesRead);
            }
        }
    } finally {
        input.inputStream.close();
        if (outputStream != null) {
            outputStream.close();
        }
    }
}
From source file:Main.java
public static long findCentralDirStartOffset(final FileChannel fileChannel, final long commentLength)
        throws IOException {
    // End of central directory record (EOCD)
    // Offset    Bytes    Description
    //      0        4    End of central directory signature = 0x06054b50
    //      4        2    Number of this disk
    //      6        2    Disk where central directory starts
    //      8        2    Number of central directory records on this disk
    //     10        2    Total number of central directory records
    //     12        4    Size of central directory (bytes)
    //     16        4    Offset of start of central directory, relative to start of archive
    //     20        2    Comment length (n)
    //     22        n    Comment
    //
    // For a zip with no archive comment, the end-of-central-directory record
    // will be 22 bytes long, so we expect to find the EOCD marker 22 bytes
    // from the end.
    final ByteBuffer zipCentralDirectoryStart = ByteBuffer.allocate(4);
    zipCentralDirectoryStart.order(ByteOrder.LITTLE_ENDIAN);
    // 6 = 2 (Comment length) + 4 (Offset of start of central directory, relative to start of archive)
    fileChannel.position(fileChannel.size() - commentLength - 6);
    fileChannel.read(zipCentralDirectoryStart);
    final long centralDirStartOffset = zipCentralDirectoryStart.getInt(0);
    return centralDirStartOffset;
}
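A sketch of driving the helper above, under the assumption that the archive carries no end-of-archive comment (so commentLength is 0); the file name is illustrative, and the snippet assumes java.nio.file.Paths and StandardOpenOption are imported:

// Illustrative only: locate the central directory of a comment-free zip.
try (FileChannel zip = FileChannel.open(Paths.get("app.apk"), StandardOpenOption.READ)) {
    long cdStart = findCentralDirStartOffset(zip, 0L);
    System.out.println("Central directory starts at offset " + cdStart);
}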
From source file:com.ettrema.zsync.UploadReader.java
/**
 * Copies a Range of blocks from rc into a new offset of wc
 *
 * @param rc A FileChannel for the input File
 * @param reloc The RelocateRange specifying the Range to be copied and its new offset
 * @param blockSize The block size used by reloc
 * @param wc The FileChannel for the output File
 * @throws IOException
 */
private static void moveRange(FileChannel rc, RelocateRange reloc, int blockSize, FileChannel wc)
        throws IOException {
    long MAX_BUFFER = 16384;
    long startBlock = reloc.getBlockRange().getStart();
    long finishBlock = reloc.getBlockRange().getFinish();
    long bytesLeft = (finishBlock - startBlock) * blockSize; // bytes left to copy
    long readAtOnce = 0; // number of bytes to attempt to read
    long bytesRead = 0; // number of bytes actually read
    long currOffset = reloc.getOffset(); // current write position

    if (finishBlock * blockSize > rc.size() || startBlock < 0) {
        throw new RuntimeException("Invalid RelocateRange: Source file does not contain blocks "
                + reloc.getBlockRange().getRange());
    }
    rc.position(startBlock * blockSize);
    while (bytesLeft > 0) {
        readAtOnce = Math.min(bytesLeft, MAX_BUFFER);
        /* Because transferFrom does not update the write channel's position,
         * it needs to be set manually */
        bytesRead = wc.transferFrom(rc, currOffset, readAtOnce);
        bytesLeft -= bytesRead;
        currOffset += bytesRead;
    }
}
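The comment inside moveRange is worth underlining: transferFrom(src, position, count) writes at an explicit position in the receiving channel and does not advance that channel's position, while it does consume (advance) the source channel's position. A minimal, self-contained sketch of that asymmetry, using throwaway temp files:

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class TransferFromDemo {
    public static void main(String[] args) throws IOException {
        Path src = Files.createTempFile("src", ".bin");
        Path dst = Files.createTempFile("dst", ".bin");
        Files.write(src, new byte[8192]); // 8 KiB of zeros to copy from
        try (FileChannel in = FileChannel.open(src, StandardOpenOption.READ);
             FileChannel out = FileChannel.open(dst, StandardOpenOption.WRITE)) {
            in.position(4096);                   // start reading half-way in
            long copied = out.transferFrom(in, 0, 4096);
            System.out.println(copied);          // up to 4096 bytes transferred
            System.out.println(in.position());   // source advanced by `copied`
            System.out.println(out.position());  // still 0: must be tracked manually
        } finally {
            Files.deleteIfExists(src);
            Files.deleteIfExists(dst);
        }
    }
}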
From source file:org.apache.tajo.storage.text.DelimitedLineReader.java
public void init() throws IOException {
    if (is != null) {
        throw new IOException(this.getClass() + " was already initialized.");
    }
    if (fs == null) {
        fs = FileScanner.getFileSystem((TajoConf) conf, fragment.getPath());
    }
    pos = startOffset = fragment.getStartKey();
    end = fragment.getEndKey();

    if (codec != null) {
        fis = fs.open(fragment.getPath());
        decompressor = CodecPool.getDecompressor(codec);
        is = new DataInputStream(codec.createInputStream(fis, decompressor));
        ByteBuf buf = BufferPool.directBuffer(bufferSize);
        lineReader = new ByteBufLineReader(new ByteBufInputChannel(is), buf);
    } else {
        if (fs instanceof LocalFileSystem) {
            File file;
            try {
                if (fragment.getPath().toUri().getScheme() != null) {
                    file = new File(fragment.getPath().toUri());
                } else {
                    file = new File(fragment.getPath().toString());
                }
            } catch (IllegalArgumentException iae) {
                throw new IOException(iae);
            }
            FileInputStream inputStream = new FileInputStream(file);
            FileChannel channel = inputStream.getChannel();
            channel.position(startOffset);
            is = inputStream;
            lineReader = new ByteBufLineReader(new LocalFileInputChannel(inputStream),
                    BufferPool.directBuffer((int) Math.min(bufferSize, fragment.getLength())));
        } else {
            fis = fs.open(fragment.getPath());
            fis.seek(startOffset);
            is = fis;
            lineReader = new ByteBufLineReader(new FSDataInputChannel(fis),
                    BufferPool.directBuffer((int) Math.min(bufferSize, fragment.getLength())));
        }
    }
    eof = false;
}
From source file:org.apache.camel.component.file.FileOperations.java
/**
 * Creates and prepares the output file channel. Positions the channel correctly if the file
 * is writable, i.e. depending on whether it should append to or override any existing content.
 */
private FileChannel prepareOutputFileChannel(File target, FileChannel out) throws IOException {
    if (endpoint.getFileExist() == GenericFileExist.Append) {
        out = new RandomAccessFile(target, "rw").getChannel();
        out = out.position(out.size());
    } else {
        // will override
        out = new FileOutputStream(target).getChannel();
    }
    return out;
}
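A compact, standalone sketch of the append branch above, assuming only a scratch file named target.txt: opening through RandomAccessFile preserves existing bytes (a plain FileOutputStream without the append flag would truncate), and position(size()) moves the write cursor to end-of-file so the next write appends.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;

public class AppendDemo {
    public static void main(String[] args) throws IOException {
        try (FileChannel ch = new RandomAccessFile("target.txt", "rw").getChannel()) {
            ch.position(ch.size()); // seek to end-of-file so the write appends
            ch.write(ByteBuffer.wrap("appended line\n".getBytes(StandardCharsets.UTF_8)));
        }
    }
}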
From source file:org.eclipse.orion.internal.server.servlets.xfer.ClientImport.java
/**
 * A put is used to send a chunk of a file.
 */
void doPut(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
    int transferred = getTransferred();
    int length = getLength();
    int headerLength = Integer.valueOf(req.getHeader(ProtocolConstants.HEADER_CONTENT_LENGTH));
    String rangeString = req.getHeader(ProtocolConstants.HEADER_CONTENT_RANGE);
    if (rangeString == null)
        rangeString = "bytes 0-" + (length - 1) + '/' + length; //$NON-NLS-1$
    ContentRange range = ContentRange.parse(rangeString);
    if (length != range.getLength()) {
        fail(req, resp, "Chunk specifies an incorrect document length");
        return;
    }
    if (range.getStartByte() > transferred) {
        fail(req, resp, "Chunk missing; Expected start byte: " + transferred);
        return;
    }
    if (range.getEndByte() < range.getStartByte()) {
        fail(req, resp, "Invalid range: " + rangeString);
        return;
    }
    int chunkSize = 1 + range.getEndByte() - range.getStartByte();
    if (chunkSize != headerLength) {
        fail(req, resp, "Content-Range doesn't agree with Content-Length");
        return;
    }
    byte[] chunk = readChunk(req, chunkSize);
    FileOutputStream fout = null;
    try {
        fout = new FileOutputStream(new File(getStorageDirectory(), FILE_DATA), true);
        FileChannel channel = fout.getChannel();
        channel.position(range.getStartByte());
        channel.write(ByteBuffer.wrap(chunk));
        channel.close();
    } finally {
        try {
            if (fout != null)
                fout.close();
        } catch (IOException e) {
            // ignore secondary failure
        }
    }
    transferred = range.getEndByte() + 1;
    setTransferred(transferred);
    save();
    if (transferred >= length) {
        completeTransfer(req, resp);
        return;
    }
    resp.setStatus(308); // Resume Incomplete
    resp.setHeader("Range", "bytes 0-" + range.getEndByte()); //$NON-NLS-1$ //$NON-NLS-2$
    setResponseLocationHeader(req, resp);
}
From source file:com.lukakama.serviio.watchservice.watcher.WatcherRunnable.java
private boolean isPathFullyAccessible(Path path) {
    if (!Files.exists(path)) {
        return false;
    }
    if (Files.isDirectory(path)) {
        DirectoryStream<Path> directoryStream = null;
        try {
            directoryStream = Files.newDirectoryStream(path);
            return true;
        } catch (IOException e) {
            log.debug("Unaccessible directory: {}", path, e);
            return false;
        } finally {
            IOUtils.closeQuietly(directoryStream);
        }
    } else {
        FileChannel fileChannel = null;
        try {
            fileChannel = FileChannel.open(path, StandardOpenOption.READ);
            fileChannel.position(Files.size(path));
            return true;
        } catch (IOException e) {
            log.debug("Unaccessible file: {}", path, e);
            return false;
        } finally {
            IOUtils.closeQuietly(fileChannel);
        }
    }
}
From source file:org.dcache.xrootd.standalone.DataServerHandler.java
/**
 * Retrieves the file descriptor obtained upon open and invokes
 * its write operation. The file descriptor will propagate necessary
 * function calls to the mover.
 *
 * @param ctx received from the netty pipeline
 * @param msg the actual request
 */
@Override
protected OkResponse<WriteRequest> doOnWrite(ChannelHandlerContext ctx, WriteRequest msg)
        throws XrootdException {
    try {
        FileChannel channel = getOpenFile(msg.getFileHandle()).getChannel();
        channel.position(msg.getWriteOffset());
        msg.getData(channel);
        return withOk(msg);
    } catch (IOException e) {
        throw new XrootdException(kXR_IOError, e.getMessage());
    }
}
From source file:org.apache.hadoop.hdfs.server.datanode.BlockSender.java
/**
 * Sends up to maxChunks chunks of data.
 *
 * When blockInPosition is >= 0, assumes 'out' is a
 * {@link SocketOutputStream} and tries
 * {@link SocketOutputStream#transferToFully(FileChannel, long, int)} to
 * send data (and updates blockInPosition).
 */
private int sendChunks(ByteBuffer pkt, int maxChunks, OutputStream out) throws IOException {
    // Sends multiple chunks in one packet with a single write().
    int len = Math.min((int) (endOffset - offset), bytesPerChecksum * maxChunks);

    // truncate len so that any partial chunks will be sent as a final packet.
    // this is not necessary for correctness, but partial chunks are
    // ones that may be recomputed and sent via buffer copy, so try to minimize
    // those bytes
    if (len > bytesPerChecksum && len % bytesPerChecksum != 0) {
        len -= len % bytesPerChecksum;
    }
    if (len == 0) {
        return 0;
    }

    int numChunks = (len + bytesPerChecksum - 1) / bytesPerChecksum;
    int packetLen = len + numChunks * checksumSize + 4;
    pkt.clear();

    // write packet header
    pkt.putInt(packetLen);
    pkt.putLong(offset);
    pkt.putLong(seqno);
    pkt.put((byte) ((offset + len >= endOffset) ? 1 : 0)); // why no ByteBuf.putBoolean()?
    pkt.putInt(len);

    int checksumOff = pkt.position();
    int checksumLen = numChunks * checksumSize;
    byte[] buf = pkt.array();

    if (checksumSize > 0 && checksumIn != null) {
        try {
            checksumIn.readFully(buf, checksumOff, checksumLen);
        } catch (IOException e) {
            LOG.warn(" Could not read or failed to verify checksum for data" + " at offset " + offset
                    + " for block " + block + " got : " + StringUtils.stringifyException(e));
            IOUtils.closeStream(checksumIn);
            checksumIn = null;
            if (corruptChecksumOk) {
                if (checksumOff < checksumLen) {
                    // Just fill the array with zeros.
                    Arrays.fill(buf, checksumOff, checksumLen, (byte) 0);
                }
            } else {
                throw e;
            }
        }
    }

    int dataOff = checksumOff + checksumLen;

    if (blockInPosition < 0) {
        // normal transfer
        IOUtils.readFully(blockIn, buf, dataOff, len);

        if (verifyChecksum) {
            int dOff = dataOff;
            int cOff = checksumOff;
            int dLeft = len;

            for (int i = 0; i < numChunks; i++) {
                checksum.reset();
                int dLen = Math.min(dLeft, bytesPerChecksum);
                checksum.update(buf, dOff, dLen);
                if (!checksum.compare(buf, cOff)) {
                    throw new ChecksumException("Checksum failed at " + (offset + len - dLeft), len);
                }
                dLeft -= dLen;
                dOff += dLen;
                cOff += checksumSize;
            }
        }

        // only recompute checksum if we can't trust the meta data due to
        // concurrent writes
        if (memoizedBlock.hasBlockChanged(len)) {
            ChecksumUtil.updateChunkChecksum(buf, checksumOff, dataOff, len, checksum);
        }

        try {
            out.write(buf, 0, dataOff + len);
        } catch (IOException e) {
            throw ioeToSocketException(e);
        }
    } else {
        try {
            // use transferTo(). Checks on out and blockIn are already done.
            SocketOutputStream sockOut = (SocketOutputStream) out;
            FileChannel fileChannel = ((FileInputStream) blockIn).getChannel();

            if (memoizedBlock.hasBlockChanged(len)) {
                fileChannel.position(blockInPosition);
                IOUtils.readFileChannelFully(fileChannel, buf, dataOff, len);
                ChecksumUtil.updateChunkChecksum(buf, checksumOff, dataOff, len, checksum);
                sockOut.write(buf, 0, dataOff + len);
            } else {
                // first write the packet
                sockOut.write(buf, 0, dataOff);
                // no need to flush. since we know out is not a buffered stream.
                sockOut.transferToFully(fileChannel, blockInPosition, len);
            }
            blockInPosition += len;
        } catch (IOException e) {
            /* exception while writing to the client (well, with transferTo(),
             * it could also be while reading from the local file). */
            throw ioeToSocketException(e);
        }
    }

    if (throttler != null) { // rebalancing so throttle
        throttler.throttle(packetLen);
    }

    return len;
}