List of usage examples for java.nio.channels.FileChannel.position()
public abstract long position() throws IOException;
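FileChannel.position() returns the channel's current byte offset within the file; the one-argument overload position(long) moves it, and read/write operations advance it implicitly. A minimal, self-contained sketch of both overloads (the temporary file and its contents are purely for illustration):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class PositionDemo {
    public static void main(String[] args) throws IOException {
        Path tmp = Files.createTempFile("position-demo", ".bin");
        Files.write(tmp, "hello world".getBytes(StandardCharsets.US_ASCII));
        try (FileChannel ch = FileChannel.open(tmp, StandardOpenOption.READ)) {
            System.out.println(ch.position()); // 0: a freshly opened channel starts at the beginning
            ByteBuffer buf = ByteBuffer.allocate(5);
            ch.read(buf);                      // reading advances the position
            System.out.println(ch.position()); // 5
            ch.position(6);                    // jump past the space
            buf.clear();
            ch.read(buf);
            System.out.println(new String(buf.array(), StandardCharsets.US_ASCII)); // "world"
        } finally {
            Files.deleteIfExists(tmp);
        }
    }
}

Note that position(long) may legally be set past end-of-file; a read at such a position simply reports end-of-stream.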
From source file:org.apache.hadoop.hdfs.server.datanode.BlockSender.java
/**
 * sendBlock() is used to read block and its metadata and stream the data to
 * either a client or to another datanode.
 *
 * @param out stream to which the block is written
 * @param baseStream optional. if non-null, <code>out</code> is assumed to
 *        be a wrapper over this stream. This enables optimizations for
 *        sending the data, e.g.
 *        {@link SocketOutputStream#transferToFully(FileChannel, long, int)}.
 * @param throttler for sending data.
 * @return total bytes read, including checksum data.
 */
long sendBlock(DataOutputStream out, OutputStream baseStream, DataTransferThrottler throttler)
        throws IOException {
    if (out == null) {
        throw new IOException("out stream is null");
    }
    final long initialOffset = offset;
    long totalRead = 0;
    OutputStream streamForSendChunks = out;
    final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
    try {
        writeChecksumHeader(out);

        int maxChunksPerPacket;
        int pktSize = PacketHeader.PKT_HEADER_LEN;
        boolean transferTo = transferToAllowed && !verifyChecksum
                && baseStream instanceof SocketOutputStream
                && blockIn instanceof FileInputStream;
        if (transferTo) {
            FileChannel fileChannel = ((FileInputStream) blockIn).getChannel();
            blockInPosition = fileChannel.position();
            streamForSendChunks = baseStream;
            maxChunksPerPacket = numberOfChunks(TRANSFERTO_BUFFER_SIZE);

            // Smaller packet size to only hold checksum when doing transferTo
            pktSize += checksumSize * maxChunksPerPacket;
        } else {
            maxChunksPerPacket = Math.max(1, numberOfChunks(HdfsConstants.IO_FILE_BUFFER_SIZE));
            // Packet size includes both checksum and data
            pktSize += (chunkSize + checksumSize) * maxChunksPerPacket;
        }

        ByteBuffer pktBuf = ByteBuffer.allocate(pktSize);

        while (endOffset > offset) {
            long len = sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo, throttler);
            offset += len;
            totalRead += len + (numberOfChunks(len) * checksumSize);
            seqno++;
        }
        try {
            // send an empty packet to mark the end of the block
            sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo, throttler);
            out.flush();
        } catch (IOException e) { // socket error
            throw ioeToSocketException(e);
        }

        sentEntireByteRange = true;
    } finally {
        if (clientTraceFmt != null) {
            final long endTime = System.nanoTime();
            ClientTraceLog.info(String.format(clientTraceFmt, totalRead, initialOffset, endTime - startTime));
        }
        close();
    }
    return totalRead;
}
From source file:org.apache.hadoop.hdfs.server.datanode.CachingBlockSender.java
/**
 * sendBlock() is used to read block and its metadata and stream the data to
 * either a client or to another datanode.
 *
 * @param out stream to which the block is written
 * @param baseStream optional. if non-null, <code>out</code> is assumed to
 *        be a wrapper over this stream. This enables optimizations for
 *        sending the data, e.g.
 *        {@link SocketOutputStream#transferToFully(FileChannel, long, int)}.
 * @param throttler for sending data.
 * @return total bytes read, including checksum data.
 */
long sendBlock(final DataOutputStream out, final OutputStream baseStream,
        final DataTransferThrottler throttler) throws IOException {
    if (out == null) {
        throw new IOException("out stream is null");
    }
    this.initialOffset = this.offset;
    long totalRead = 0;
    OutputStream streamForSendChunks = out;

    this.lastCacheDropOffset = this.initialOffset;

    if (isLongRead() && this.blockInFd != null) {
        // Advise that this file descriptor will be accessed sequentially.
        NativeIO.posixFadviseIfPossible(this.blockInFd, 0, 0, NativeIO.POSIX_FADV_SEQUENTIAL);
    }

    // Trigger readahead of beginning of file if configured.
    manageOsCache();

    // TODO: Take a closer look at this
    final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
    try {
        int maxChunksPerPacket;
        int pktSize = PacketHeader.PKT_HEADER_LEN;
        final boolean transferTo = this.transferToAllowed && !this.verifyChecksum
                && baseStream instanceof SocketOutputStream
                && this.blockIn instanceof FileInputStream;
        if (transferTo) {
            final FileChannel fileChannel = ((FileInputStream) this.blockIn).getChannel();
            this.blockInPosition = fileChannel.position();
            streamForSendChunks = baseStream;
            maxChunksPerPacket = numberOfChunks(TRANSFERTO_BUFFER_SIZE);

            // Smaller packet size to only hold checksum when doing transferTo
            pktSize += this.checksumSize * maxChunksPerPacket;
        } else {
            maxChunksPerPacket = Math.max(1, numberOfChunks(HdfsConstants.IO_FILE_BUFFER_SIZE));
            // Packet size includes both checksum and data
            pktSize += (this.chunkSize + this.checksumSize) * maxChunksPerPacket;
        }

        ByteBuffer pktBuf = ByteBuffer.allocate(pktSize);

        while (this.endOffset > this.offset) {
            manageOsCache();
            long len = sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo, throttler);
            this.offset += len;
            totalRead += len + (numberOfChunks(len) * this.checksumSize);
            this.seqno++;
        }
        try {
            // send an empty packet to mark the end of the block
            sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo, throttler);
            out.flush();
        } catch (IOException e) { // socket error
            throw ioeToSocketException(e);
        }

        this.sentEntireByteRange = true;
    } finally {
        if (this.clientTraceFmt != null) {
            final long endTime = System.nanoTime();
            ClientTraceLog.info(String.format(this.clientTraceFmt, totalRead, this.initialOffset,
                    endTime - startTime));
        }
        close();
    }
    return totalRead;
}
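Both BlockSender variants above query position() once to record where the block data begins before switching to the zero-copy transferTo path. A reduced sketch of that idiom (class and method names here are hypothetical, not HDFS API):

import java.io.FileInputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;

// Hypothetical reduction of the BlockSender idiom: remember the start
// offset once, then let transferTo stream from that offset.
final class ZeroCopySend {
    static long send(FileInputStream blockIn, WritableByteChannel out) throws IOException {
        FileChannel fileChannel = blockIn.getChannel();
        long blockInPosition = fileChannel.position(); // where the block data starts
        long remaining = fileChannel.size() - blockInPosition;
        long sent = 0;
        while (sent < remaining) {
            // transferTo may move fewer bytes than requested, so loop.
            sent += fileChannel.transferTo(blockInPosition + sent, remaining - sent, out);
        }
        return sent;
    }
}

transferTo(position, count, target) takes an explicit offset and leaves the channel's own position untouched, which is why the code records it up front and advances its own counter.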
From source file:org.red5.io.flv.FLVReader.java
/**
 * Creates FLV reader from file channel.
 *
 * @param channel
 * @throws IOException on error
 */
public FLVReader(FileChannel channel) throws IOException {
    if (null == channel) {
        log.warn("Reader was passed a null channel");
        log.debug("{}", org.apache.commons.lang.builder.ToStringBuilder.reflectionToString(this));
        // bail out early; calling isOpen() on a null channel below would throw an NPE
        return;
    }
    if (!channel.isOpen()) {
        log.warn("Reader was passed a closed channel");
        return;
    }
    this.channel = channel;
    channelSize = channel.size();
    //log.debug("Channel size: {}", channelSize);
    if (channel.position() > 0) {
        log.debug("Channel position: {}", channel.position());
        channel.position(0);
    }
    fillBuffer();
    postInitialize();
}
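The constructor uses both overloads together: position() detects that a borrowed channel has already been read from, and position(0) rewinds it so parsing starts at the FLV header. The rewind idiom in isolation (a sketch, not red5 code):

import java.io.IOException;
import java.nio.channels.FileChannel;

// Sketch: make sure a channel handed in by a caller is at the start
// of the file before parsing begins.
static void rewindIfNeeded(FileChannel channel) throws IOException {
    if (channel.position() > 0) {
        channel.position(0);
    }
}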
From source file:org.carbondata.query.aggregator.impl.CustomAggregatorHelper.java
/**
 * Below method will be used to read the level files
 *
 * @param memberFile
 * @param fileName
 * @throws IOException
 */
private void readLevelFileAndUpdateCache(File memberFile, String fileName) throws IOException {
    FileInputStream fos = null;
    FileChannel fileChannel = null;
    try {
        // create an object of FileInputStream
        fos = new FileInputStream(memberFile);
        fileChannel = fos.getChannel();
        Map<Integer, String> memberMap = surrogateKeyMap.get(fileName);
        if (null == memberMap) {
            memberMap = new HashMap<Integer, String>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
            surrogateKeyMap.put(fileName, memberMap);
        }
        long size = fileChannel.size();
        int maxKey = 0;
        ByteBuffer rowlengthToRead = null;
        int len = 0;
        ByteBuffer row = null;
        int toread = 0;
        byte[] bb = null;
        String value = null;
        int surrogateValue = 0;
        boolean enableEncoding = Boolean.valueOf(CarbonProperties.getInstance().getProperty(
                CarbonCommonConstants.ENABLE_BASE64_ENCODING,
                CarbonCommonConstants.ENABLE_BASE64_ENCODING_DEFAULT));
        while (fileChannel.position() < size) {
            rowlengthToRead = ByteBuffer.allocate(4);
            fileChannel.read(rowlengthToRead);
            rowlengthToRead.rewind();
            len = rowlengthToRead.getInt();
            if (len == 0) {
                continue;
            }
            row = ByteBuffer.allocate(len);
            fileChannel.read(row);
            row.rewind();
            toread = row.getInt();
            bb = new byte[toread];
            row.get(bb);
            if (enableEncoding) {
                value = new String(Base64.decodeBase64(bb), Charset.defaultCharset());
            } else {
                value = new String(bb, Charset.defaultCharset());
            }
            surrogateValue = row.getInt();
            memberMap.put(surrogateValue, value);
            // if the max key is less than this surrogate key, update the max key
            if (maxKey < surrogateValue) {
                maxKey = surrogateValue;
            }
        }
    } finally {
        CarbonUtil.closeStreams(fileChannel, fos);
    }
}
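The loop above walks a file of length-prefixed records, using position() against size() as the termination test; each read(ByteBuffer) advances the position implicitly. The same loop shape reduced to its essentials (record layout simplified; this is a sketch, not Carbondata code):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

// Sketch: iterate [int length][length bytes of payload] records until EOF,
// using the implicitly advancing channel position as the loop bound.
static void readRecords(Path file) throws IOException {
    try (FileChannel ch = FileChannel.open(file, StandardOpenOption.READ)) {
        long size = ch.size();
        while (ch.position() < size) {
            ByteBuffer lenBuf = ByteBuffer.allocate(4);
            ch.read(lenBuf);          // advances position by up to 4 bytes
            lenBuf.rewind();
            int len = lenBuf.getInt();
            if (len == 0) {
                continue;
            }
            ByteBuffer record = ByteBuffer.allocate(len);
            ch.read(record);          // advances position by up to len bytes
            record.rewind();
            // ... decode the record from the buffer ...
            // (a production reader would loop until each buffer is full)
        }
    }
}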
From source file:org.apache.nifi.processors.standard.TailFile.java
/**
 * Read new lines from the given FileChannel, copying it to the given Output
 * Stream. The Checksum is used in order to later determine whether or not
 * data has been consumed.
 *
 * @param reader The FileChannel to read data from
 * @param buffer the buffer to use for copying data
 * @param out the OutputStream to copy the data to
 * @param checksum the Checksum object to use in order to calculate checksum
 *            for recovery purposes
 *
 * @return The new position after the lines have been read
 * @throws java.io.IOException if an I/O error occurs.
 */
private long readLines(final FileChannel reader, final ByteBuffer buffer, final OutputStream out,
        final Checksum checksum) throws IOException {
    getLogger().debug("Reading lines starting at position {}", new Object[] { reader.position() });

    try (final ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
        long pos = reader.position();
        long rePos = pos; // position to re-read

        int num;
        int linesRead = 0;
        boolean seenCR = false;
        buffer.clear();
        while (((num = reader.read(buffer)) != -1)) {
            buffer.flip();

            for (int i = 0; i < num; i++) {
                byte ch = buffer.get(i);

                switch (ch) {
                case '\n': {
                    baos.write(ch);
                    seenCR = false;
                    baos.writeTo(out);
                    final byte[] baosBuffer = baos.toByteArray();
                    checksum.update(baosBuffer, 0, baos.size());
                    if (getLogger().isTraceEnabled()) {
                        getLogger().trace("Checksum updated to {}", new Object[] { checksum.getValue() });
                    }

                    baos.reset();
                    rePos = pos + i + 1;
                    linesRead++;
                    break;
                }
                case '\r': {
                    baos.write(ch);
                    seenCR = true;
                    break;
                }
                default: {
                    if (seenCR) {
                        seenCR = false;
                        baos.writeTo(out);
                        final byte[] baosBuffer = baos.toByteArray();
                        checksum.update(baosBuffer, 0, baos.size());
                        if (getLogger().isTraceEnabled()) {
                            getLogger().trace("Checksum updated to {}",
                                    new Object[] { checksum.getValue() });
                        }

                        linesRead++;
                        baos.reset();
                        baos.write(ch);
                        rePos = pos + i;
                    } else {
                        baos.write(ch);
                    }
                }
                }
            }

            pos = reader.position();
        }

        if (rePos < reader.position()) {
            getLogger().debug("Read {} lines; repositioning reader from {} to {}",
                    new Object[] { linesRead, pos, rePos });
            reader.position(rePos); // Ensure we can re-read if necessary
        }

        return rePos;
    }
}
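This method tracks two positions: pos, how far the channel has actually read, and rePos, the end of the last complete line. The final reader.position(rePos) rolls the channel back so a partial trailing line is re-read on the next invocation. The checkpoint-and-rollback idiom on its own (a sketch; the helper name is mine, not NiFi's):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

// Sketch: remember a safe checkpoint, read ahead, then roll back so the
// unconsumed tail is re-read on the next call.
static long consumeCompleteLines(FileChannel reader, ByteBuffer buffer) throws IOException {
    long checkpoint = reader.position(); // last position we fully consumed
    buffer.clear();
    int num = reader.read(buffer);
    if (num > 0) {
        buffer.flip();
        for (int i = 0; i < num; i++) {
            if (buffer.get(i) == '\n') {
                // end of a complete line, relative to where this read started
                checkpoint = reader.position() - num + i + 1;
            }
        }
    }
    reader.position(checkpoint); // leave any partial line for the next read
    return checkpoint;
}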
From source file:org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.java
/**
 * Sets the offset in the meta file so that the
 * last checksum will be overwritten.
 */
@Override // FsDatasetSpi
public void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams streams, int checksumSize)
        throws IOException {
    FileOutputStream file = (FileOutputStream) streams.getChecksumOut();
    FileChannel channel = file.getChannel();
    long oldPos = channel.position();
    long newPos = oldPos - checksumSize;
    if (LOG.isDebugEnabled()) {
        LOG.debug("Changing meta file offset of block " + b + " from " + oldPos + " to " + newPos);
    }
    channel.position(newPos);
}
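The idiom here is simple arithmetic on the current position: step back checksumSize bytes so the next write overwrites the stale checksum instead of appending after it. Reduced to a sketch (assuming the channel was opened for writing):

import java.io.IOException;
import java.nio.channels.FileChannel;

// Sketch: rewind the write position by n bytes so the next write
// overwrites the tail of the file rather than appending to it.
static void stepBack(FileChannel channel, int n) throws IOException {
    long oldPos = channel.position();
    channel.position(oldPos - n); // the old bytes remain until overwritten
}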
From source file:org.apache.spark.shuffle.sort.UnsafeShuffleWriter.java
/**
 * Merges spill files by using NIO's transferTo to concatenate spill partitions' bytes.
 * This is only safe when the IO compression codec and serializer support concatenation of
 * serialized streams.
 *
 * @return the partition lengths in the merged file.
 */
private long[] mergeSpillsWithTransferTo(SpillInfo[] spills, File outputFile) throws IOException {
    assert (spills.length >= 2);
    final int numPartitions = partitioner.numPartitions();
    final long[] partitionLengths = new long[numPartitions];
    final FileChannel[] spillInputChannels = new FileChannel[spills.length];
    final long[] spillInputChannelPositions = new long[spills.length];
    FileChannel mergedFileOutputChannel = null;

    boolean threwException = true;
    try {
        for (int i = 0; i < spills.length; i++) {
            spillInputChannels[i] = new FileInputStream(spills[i].file).getChannel();
        }
        // This file needs to be opened in append mode in order to work around a Linux kernel bug
        // that affects transferTo; see SPARK-3948 for more details.
        mergedFileOutputChannel = new FileOutputStream(outputFile, true).getChannel();

        long bytesWrittenToMergedFile = 0;
        for (int partition = 0; partition < numPartitions; partition++) {
            for (int i = 0; i < spills.length; i++) {
                final long partitionLengthInSpill = spills[i].partitionLengths[partition];
                long bytesToTransfer = partitionLengthInSpill;
                final FileChannel spillInputChannel = spillInputChannels[i];
                final long writeStartTime = System.nanoTime();
                while (bytesToTransfer > 0) {
                    final long actualBytesTransferred = spillInputChannel.transferTo(
                            spillInputChannelPositions[i], bytesToTransfer, mergedFileOutputChannel);
                    spillInputChannelPositions[i] += actualBytesTransferred;
                    bytesToTransfer -= actualBytesTransferred;
                }
                writeMetrics.incWriteTime(System.nanoTime() - writeStartTime);
                bytesWrittenToMergedFile += partitionLengthInSpill;
                partitionLengths[partition] += partitionLengthInSpill;
            }
        }
        // Check the position after the transferTo loop to see if it is in the right position and
        // raise an exception if it is incorrect. The position will not be increased to the expected
        // length after calling transferTo in kernel version 2.6.32. This issue is described at
        // https://bugs.openjdk.java.net/browse/JDK-7052359 and SPARK-3948.
        if (mergedFileOutputChannel.position() != bytesWrittenToMergedFile) {
            throw new IOException("Current position " + mergedFileOutputChannel.position()
                    + " does not equal expected position " + bytesWrittenToMergedFile
                    + " after transferTo. Please check your kernel version to see if it is 2.6.32, as "
                    + "there is a kernel bug which will lead to unexpected behavior when using "
                    + "transferTo. You can set spark.file.transferTo=false to disable this NIO feature.");
        }
        threwException = false;
    } finally {
        // To avoid masking exceptions that caused us to prematurely enter the finally block, only
        // throw exceptions during cleanup if threwException == false.
        for (int i = 0; i < spills.length; i++) {
            assert (spillInputChannelPositions[i] == spills[i].file.length());
            Closeables.close(spillInputChannels[i], threwException);
        }
        Closeables.close(mergedFileOutputChannel, threwException);
    }
    return partitionLengths;
}
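The closing position() check is the noteworthy part: on Linux kernel 2.6.32, transferTo can report success without the destination channel's position advancing (JDK-7052359), so the code compares the output channel's position against the byte count it believes it wrote. The guard in isolation (a sketch):

import java.io.IOException;
import java.nio.channels.FileChannel;

// Sketch: after a transferTo loop, confirm the destination channel's
// position matches the number of bytes we think were transferred.
static void verifyTransfer(FileChannel out, long expectedBytesWritten) throws IOException {
    if (out.position() != expectedBytesWritten) {
        throw new IOException("Current position " + out.position()
                + " does not equal expected position " + expectedBytesWritten
                + " after transferTo; see SPARK-3948 / JDK-7052359.");
    }
}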