Usage examples for java.nio.ByteBuffer.limit(int)
public final Buffer limit(int newLimit)
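limit(int) sets this buffer's limit: relative get/put operations and channel transfers stop there, so it is the standard way to expose only the first n bytes of a larger buffer. If the current position is greater than the new limit, the position is moved back to the new limit, and a mark beyond the new limit is discarded. A minimal sketch of the basic idiom (class and variable names are illustrative, not from the examples below):

import java.nio.ByteBuffer;

// Minimal sketch: limit() bounds how far relative reads may advance.
public class LimitBasics {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);          // position=0, limit=16, capacity=16
        buf.put(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 });    // position=8

        buf.position(0);
        buf.limit(8);                                      // expose only the 8 bytes written (same end state as buf.flip())
        while (buf.hasRemaining()) {
            System.out.print(buf.get() + " ");             // prints 1..8, stops at the limit
        }
    }
}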
From source file:org.commoncrawl.util.StreamingArcFileReader.java
/**
 * Attempts to deflate and read the next ArcFileItem from bytes available - NON-BLOCKING version
 *
 * @return Fully constructed ArcFileItem or NULL if not enough data is available to service the request
 * @throws EOFException if end of stream is reached decoding item, or generic IOException if a corrupt stream is detected
 */
public ArcFileItem getNextItem() throws IOException {
    // check state ...
    if (_readState.ordinal() <= ReadState.ReadingArcHeaderTrailer.ordinal()) {
        if (_hasHeaderItem) {
            if (readARCHeader()) {
                _crc.reset();
                _readState = ReadState.ReadingEntryHeader;
            }
        } else {
            // skip arc header
            _readState = ReadState.ReadingEntryHeader;
        }
    }
    // if reading header for entry
    if (_readState == ReadState.ReadingEntryHeader) {
        if (readHeader()) {
            _readState = ReadState.ReadingEntryData;
            // reset crc accumulator
            _crc.reset();
            // and allocate a fresh builder object ..
            _builder = new ArcFileBuilder();
        }
    }
    // if reading data for entry ...
    if (_readState == ReadState.ReadingEntryData) {
        // read header line buffer
        for (;;) {
            byte scanBuffer[] = new byte[BLOCK_SIZE];
            ByteBuffer byteBuffer = ByteBuffer.wrap(scanBuffer);

            // read up to scan buffer size of data ...
            int readAmount = readInflatedBytes(scanBuffer, 0, scanBuffer.length);

            // if we did not read any bytes ... return immediately ...
            if (readAmount == 0) {
                return null;
            } else if (readAmount != -1) {
                // update crc value ...
                _crc.update(scanBuffer, 0, readAmount);
                // update content bytes read
                _contentBytesRead += readAmount;
                // and setup buffer pointers ...
                byteBuffer.position(0);
                byteBuffer.limit(readAmount);
                // and input data into builder ...
                _builder.inputData(byteBuffer);
            }
            // -1 indicates eos
            else {
                // reset inflater ...
                resetInflater();
                // and transition to reading trailing bytes
                _readState = ReadState.ReadingEntryTrailer;
                break;
            }
        }
    }
    if (_readState == ReadState.ReadingEntryTrailer) {
        // validate crc and header length ...
        if (readTrailer()) {
            // transition to assumed state ...
            _readState = ReadState.ReadingEntryHeader;
            // get the arc file item
            ArcFileItem itemOut = _builder.finish();
            itemOut.setArcFilePos((int) _arcFileStartOffset);
            // reset builder
            _builder = null;
            // reset crc
            _crc.reset();
            // if no more data coming down the pipe...
            if (_rawInput.available() == 0 && _eosReached) {
                // transition to done state ...
                _readState = ReadState.Done;
            }
            return itemOut;
        }
    }
    return null;
}
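The limit() usage to note above: freshly inflated bytes land in a scratch array, and position(0)/limit(readAmount) turns the wrapping ByteBuffer into a window over just the valid prefix before it is handed to the builder. A hedged sketch of that idiom in isolation (readSomeBytes and consume are hypothetical stand-ins for readInflatedBytes and _builder.inputData):

import java.nio.ByteBuffer;

// Hedged sketch: wrap a scratch array, then use position(0)/limit(n) so a consumer
// sees only the n bytes that were actually filled this pass.
class ValidPrefixWindow {
    static void pump(byte[] scratch, int bytesRead /* result of a hypothetical readSomeBytes() */) {
        ByteBuffer window = ByteBuffer.wrap(scratch);
        window.position(0);
        window.limit(bytesRead);     // anything past bytesRead is stale scratch data
        consume(window);             // consumer iterates from position to limit only
    }

    static void consume(ByteBuffer b) {
        while (b.hasRemaining()) {
            b.get();                 // placeholder for real processing
        }
    }
}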
From source file:com.slytechs.capture.file.editor.AbstractRawIterator.java
public boolean verifyAdditionalRecords(final ByteBuffer buffer, final int count) throws EOFException, IOException {
    buffer.reset();

    final int MAX_HEADER_LENGTH = 24;
    final ByteBuffer view = BufferUtils.duplicate(buffer);
    final int capacity = view.capacity();
    boolean status = true;

    for (int i = 0; i < count && view.position() + MAX_HEADER_LENGTH < capacity; i++) {
        view.mark();

        long length = headerReader.readLength(view);
        int p = view.position() + (int) length;

        if (pattern.match(view) == false) {
            status = false;
            break;
        }

        view.reset();

        if (p + MAX_HEADER_LENGTH > view.capacity()) {
            break;
        }

        view.limit(p + MAX_HEADER_LENGTH);
        view.position(p);
    }

    return status;
}
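The detail worth noting above is the ordering: because position can never exceed the limit, the iterator raises the limit to p + MAX_HEADER_LENGTH first and only then jumps the position to p. A hedged sketch of that ordering (HEADER_LEN and recordLength() are illustrative assumptions, not part of the original API):

import java.nio.ByteBuffer;

// Hedged sketch: move the limit ahead of the target position first, then jump the position.
class RecordWindow {
    static final int HEADER_LEN = 24;

    static void walk(ByteBuffer data) {
        ByteBuffer view = data.duplicate();
        while (view.position() + HEADER_LEN <= view.capacity()) {
            int len = recordLength(view);
            if (len <= 0) {
                break;                               // malformed record, stop scanning
            }
            int next = view.position() + len;
            if (next + HEADER_LEN > view.capacity()) {
                break;                               // next header would run past the data
            }
            view.limit(next + HEADER_LEN);           // extend the window first ...
            view.position(next);                     // ... then advance to the next header
        }
    }

    static int recordLength(ByteBuffer header) {
        // assumed layout: a 2-byte big-endian length at the start of each header
        return Short.toUnsignedInt(header.getShort(header.position()));
    }
}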
From source file:com.tomagoyaky.jdwp.IOUtils.java
/**
 * Skips bytes from a ReadableByteChannel.
 * This implementation guarantees that it will read as many bytes
 * as possible before giving up.
 *
 * @param input ReadableByteChannel to skip
 * @param toSkip number of bytes to skip.
 * @return number of bytes actually skipped.
 * @throws IOException if there is a problem reading the ReadableByteChannel
 * @throws IllegalArgumentException if toSkip is negative
 * @since 2.2
 */
public static long skip(final ReadableByteChannel input, final long toSkip) throws IOException {
    if (toSkip < 0) {
        throw new IllegalArgumentException("Skip count must be non-negative, actual: " + toSkip);
    }
    final ByteBuffer skipByteBuffer = ByteBuffer.allocate((int) Math.min(toSkip, SKIP_BUFFER_SIZE));
    long remain = toSkip;
    while (remain > 0) {
        skipByteBuffer.position(0);
        skipByteBuffer.limit((int) Math.min(remain, SKIP_BUFFER_SIZE));
        final int n = input.read(skipByteBuffer);
        if (n == EOF) {
            break;
        }
        remain -= n;
    }
    return toSkip - remain;
}
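Here the same buffer is reused on every pass: position(0) rewinds it and limit() caps how many bytes the channel read may deliver, so the skip never overshoots. A hedged, runnable usage sketch of that capped-read loop against an in-memory channel (sizes are illustrative):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;

// Hedged sketch: reuse one buffer, cap each read with limit() to the bytes still to skip.
public class SkipDemo {
    public static void main(String[] args) throws IOException {
        ReadableByteChannel in = Channels.newChannel(new ByteArrayInputStream(new byte[100]));
        ByteBuffer scratch = ByteBuffer.allocate(32);
        long toSkip = 70, remain = toSkip;
        while (remain > 0) {
            scratch.position(0);
            scratch.limit((int) Math.min(remain, scratch.capacity())); // never read past what we need
            int n = in.read(scratch);
            if (n < 0) {
                break;                                                 // end of stream
            }
            remain -= n;
        }
        System.out.println("skipped " + (toSkip - remain) + " bytes"); // expect 70
    }
}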
From source file:com.yobidrive.diskmap.needles.NeedleManager.java
/**
 * Loads the needle pointed by the needlePointer and checks for validity (checksum, ...) and returns the next linked needle
 * @param needlePointer
 * @param needle
 * @return a chained needle if the read is successful, otherwise null
 * @throws NeedleManagerException
 */
public Needle getNeedleFromDisk(NeedlePointer needlePointer) throws NeedleManagerException {
    ByteBuffer needleBuffer = null;
    try {
        FileChannel fc = getChannel(needlePointer.getNeedleFileNumber());
        if (fc == null)
            return new Needle();
        // Position and read needle for check
        long position = needlePointer.getNeedleOffset();
        // Acquires a ByteBuffer
        if (threadBufferQ == null)
            return new Needle();
        Chrono chr = new Chrono();
        needleBuffer = threadBufferQ.take();
        chr.lap("Wait for thread buffer ", 20);
        // Finally we have a buffer
        needleBuffer.rewind();
        needleBuffer.limit(MAXKEYSIZE + MAXVERSIONSIZE + Needle.NEEDLEOVERHEAD);
        // First read header to know the data size
        int readBytes = 0, totalHeaderReadBytes = 0;
        while (readBytes >= 0 && totalHeaderReadBytes < needleBuffer.limit()) {
            readBytes = fc.read(needleBuffer, position + totalHeaderReadBytes);
            totalHeaderReadBytes += readBytes;
        }
        if (totalHeaderReadBytes <= 0)
            return new Needle();
        Needle needle = new Needle();
        if (!needle.getNeedleHeaderFromBuffer(needleBuffer)) {
            return new Needle(); // Incorrect header
        }
        // Needle Header is OK, read the rest until end of needle. Change limit to include data
        // needleBuffer.rewind() ;
        needleBuffer.position(totalHeaderReadBytes);
        // needleBuffer.limit(needle.getPostDataSize()) ;
        needleBuffer.limit(needle.getTotalSizeFromData());
        readBytes = 0;
        int totalContentReadBytes = 0;
        while (readBytes >= 0 && totalContentReadBytes < needleBuffer.limit() - totalHeaderReadBytes) {
            readBytes = fc.read(needleBuffer, position + totalHeaderReadBytes + totalContentReadBytes);
            totalContentReadBytes += readBytes;
        }
        // readBytes = fc.read(needleBuffer, position+needle.getHeaderSize()) ;
        // Parse data and verifies checksum
        // needleBuffer.rewind();
        needleBuffer.position(needle.getHeaderSize());
        if (!needle.getNeedleDataFromBuffer(needleBuffer))
            return new Needle();
        // Now needle is parsed and OK
        chr.total("Read from disk ", 20);
        return needle;
    } catch (Throwable th) {
        logger.error("Error reading needle at " + needlePointer.getFormattedNeedleFileNumber() + "/"
                + needlePointer.getFormattedNeedleOffset(), th);
        throw new NeedleManagerException();
    } finally {
        if (needleBuffer != null) {
            try {
                threadBufferQ.put(needleBuffer);
            } catch (InterruptedException ie) {
                throw new BucketTableManagerException("Error giving back needle read thread", ie);
            }
        }
    }
}
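This example uses limit() twice on the same pooled buffer: first it caps the buffer at the fixed header size so only the header is read, then it raises the limit to the full record size once the header has been parsed. A hedged sketch of that two-phase pattern (HEADER_SIZE and recordSize() are illustrative assumptions standing in for the needle header logic, and the full record is assumed to fit in the buffer):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

// Hedged sketch: limit to the fixed header first, then extend the limit to the full record.
class TwoPhaseRead {
    static final int HEADER_SIZE = 64;

    static ByteBuffer readRecord(FileChannel fc, long offset, ByteBuffer buf) throws IOException {
        buf.clear();
        buf.limit(HEADER_SIZE);                       // phase 1: header only
        int read = 0;
        while (buf.hasRemaining() && read >= 0) {
            read = fc.read(buf, offset + buf.position());
        }
        int total = recordSize(buf);                  // parse the header to learn the full size
        buf.limit(total);                             // phase 2: extend limit to the whole record
        while (buf.hasRemaining() && read >= 0) {
            read = fc.read(buf, offset + buf.position());
        }
        buf.flip();
        return buf;
    }

    static int recordSize(ByteBuffer header) {
        return header.getInt(0);                      // assumed: total size stored in the first 4 bytes
    }
}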
From source file:org.commoncrawl.io.internal.NIOHttpConnection.java
public void Writeable(NIOClientSocket theSocket) throws IOException {
    if (!theSocket.isOpen()) {
        return;
    }

    int amountWritten = 0;

    try {
        boolean contentEOF = false;

        amountWritten = 0;

        if (_outBuf.available() == 0 && _dataSource != null) {
            // read some more data from the data source
            contentEOF = _dataSource.read(_outBuf);
        }

        ByteBuffer bufferToWrite = _outBuf.read();

        if (bufferToWrite != null) {
            try {
                int amountToWrite = bufferToWrite.remaining();

                // if upload rate limiter is not null ...
                if (_uploadRateLimiter != null) {
                    // apply rate limit policy to outbound data ...
                    amountToWrite = _uploadRateLimiter.checkRateLimit(amountToWrite);
                }

                if (amountToWrite != 0) {
                    // if amount to write is less than remaining ...
                    if (amountToWrite < bufferToWrite.remaining()) {
                        // slice the buffer ...
                        ByteBuffer slicedBuffer = bufferToWrite.slice();
                        // limit to amount to write ...
                        slicedBuffer.limit(amountToWrite);
                        // and write to socket ...
                        amountWritten = _socket.write(slicedBuffer);
                        if (amountWritten >= 0) {
                            // advance source buffer manually...
                            bufferToWrite.position(bufferToWrite.position() + amountWritten);
                        }
                    } else {
                        amountWritten = _socket.write(bufferToWrite);
                    }

                    if (_uploadRateLimiter != null) {
                        _uploadRateLimiter.updateStats(amountWritten);

                        // debug output ...
                        BandwidthUtils.BandwidthStats stats = new BandwidthUtils.BandwidthStats();
                        // collect stats
                        _uploadRateLimiter.getStats(stats);
                        // dump stats ...
                        // System.out.println("Connection: "+ this+"Upload Speed:" + stats.scaledBitsPerSecond + " " + stats.scaledBitsUnits + " TotalWritten:" + (_cumilativeWritten + amountWritten) );
                        // LOG.info("Connection:" + getId()+" BytesOut:" + amountWritten +" Upload Speed:" + stats.scaledBitsPerSecond + " " + stats.scaledBitsUnits + " TotalWritten:" + (_cumilativeWritten + amountWritten));
                    }
                }
            } catch (IOException exception) {
                // LOG.error(CCStringUtils.stringifyException(e));
                throw exception;
            }

            _totalWritten += amountWritten;
            _cumilativeWritten += amountWritten;

            // System.out.println("NIOHttpConnection->wrote:" + amountWritten + "Bytes TotalWritten:" + _cumilativeWritten);

            if (bufferToWrite.remaining() > 0) {
                _outBuf.putBack(bufferToWrite);
            }
        }

        if (_totalWritten > 0 && !_outBuf.isDataAvailable() && (_dataSource == null || contentEOF)) {
            _lastReadOrWriteTime = System.currentTimeMillis();

            // transition from sending to receiving ...
            if (_state == State.SENDING_REQUEST) {
                // set up an initial last read time value here ...
                setState(State.RECEIVING_HEADERS, null);
                _selector.registerForRead(theSocket);
            }
        }
    } catch (IOException e) {
        LOG.error("Writeable for url:" + getURL() + " threw Exception:" + e.getMessage());
        setErrorType(ErrorType.IOEXCEPTION);
        setErrorDesc(StringUtils.stringifyException(e));
        setState(State.ERROR, e);
        throw e;
    }

    if (_state == State.SENDING_REQUEST) {
        _selector.registerForReadAndWrite(theSocket);
    } else if (_state.ordinal() >= State.RECEIVING_HEADERS.ordinal() && _state.ordinal() < State.DONE.ordinal()) {
        _selector.registerForRead(theSocket);
    }
}
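The limit() usage here implements a rate-limited partial write: slice() shares the source buffer's remaining bytes, so limiting the slice caps what this call may send without disturbing the source buffer's own limit, and the source position is then advanced by whatever was actually written. A hedged sketch of that idiom on its own (names are illustrative):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;

// Hedged sketch: cap a single write with a limited slice, then sync the source position.
class ThrottledWrite {
    static int writeAtMost(WritableByteChannel socket, ByteBuffer source, int allowance) throws IOException {
        if (allowance >= source.remaining()) {
            return socket.write(source);              // no cap needed, write it all
        }
        ByteBuffer slice = source.slice();            // starts at source.position(), shares content
        slice.limit(allowance);                       // cap how much this call may send
        int written = socket.write(slice);
        source.position(source.position() + written); // keep the source buffer in sync manually
        return written;
    }
}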
From source file:com.android.camera.one.v2.OneCameraZslImpl.java
/**
 * Given an image reader, extracts the JPEG image bytes and then closes the
 * reader.
 *
 * @param img the image from which to extract jpeg bytes or compress to
 *            jpeg.
 * @param degrees the angle to rotate the image clockwise, in degrees. Rotation is
 *            only applied to YUV images.
 * @return The bytes of the JPEG image. Newly allocated.
 */
private byte[] acquireJpegBytes(Image img, int degrees) {
    ByteBuffer buffer;

    if (img.getFormat() == ImageFormat.JPEG) {
        Image.Plane plane0 = img.getPlanes()[0];
        buffer = plane0.getBuffer();

        byte[] imageBytes = new byte[buffer.remaining()];
        buffer.get(imageBytes);
        buffer.rewind();
        return imageBytes;
    } else if (img.getFormat() == ImageFormat.YUV_420_888) {
        buffer = mJpegByteBufferPool.acquire();
        if (buffer == null) {
            buffer = ByteBuffer.allocateDirect(img.getWidth() * img.getHeight() * 3);
        }

        int numBytes = JpegUtilNative.compressJpegFromYUV420Image(new AndroidImageProxy(img), buffer,
                JPEG_QUALITY, degrees);

        if (numBytes < 0) {
            throw new RuntimeException("Error compressing jpeg.");
        }

        buffer.limit(numBytes);
        byte[] imageBytes = new byte[buffer.remaining()];
        buffer.get(imageBytes);
        buffer.clear();
        mJpegByteBufferPool.release(buffer);
        return imageBytes;
    } else {
        throw new RuntimeException("Unsupported image format.");
    }
}
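Here limit(numBytes) marks the end of the valid data after a compressor reports how many bytes it wrote into a pooled direct buffer, so remaining() and get() copy out exactly that many bytes, and clear() readies the buffer for reuse. A hedged, runnable sketch of the same copy-out idiom (a put() stands in for the native producer, so position(0) is added before the limit):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Hedged sketch: limit() the buffer to the producer's byte count, copy out, clear() for reuse.
public class CopyOutDemo {
    public static void main(String[] args) {
        ByteBuffer pooled = ByteBuffer.allocateDirect(64);

        // stand-in for a producer that fills the buffer and returns a byte count
        byte[] produced = "hello".getBytes(StandardCharsets.US_ASCII);
        pooled.put(produced);
        int numBytes = produced.length;

        pooled.position(0);
        pooled.limit(numBytes);                         // only the bytes the producer wrote are valid
        byte[] out = new byte[pooled.remaining()];
        pooled.get(out);                                // copy them out
        pooled.clear();                                 // position=0, limit=capacity: ready for reuse

        System.out.println(new String(out, StandardCharsets.US_ASCII)); // prints "hello"
    }
}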
From source file:org.commoncrawl.io.NIOHttpConnection.java
public void Writeable(NIOClientSocket theSocket) throws IOException {
    if (!theSocket.isOpen()) {
        return;
    }

    int amountWritten = 0;

    try {
        boolean contentEOF = false;

        amountWritten = 0;

        if (_outBuf.available() == 0 && _dataSource != null) {
            // read some more data from the data source
            contentEOF = _dataSource.read(_outBuf);
        }

        ByteBuffer bufferToWrite = _outBuf.read();

        if (bufferToWrite != null) {
            try {
                int amountToWrite = bufferToWrite.remaining();

                // if upload rate limiter is not null ...
                if (_uploadRateLimiter != null) {
                    // apply rate limit policy to outbound data ...
                    amountToWrite = _uploadRateLimiter.checkRateLimit(amountToWrite);
                }

                if (amountToWrite != 0) {
                    // if amount to write is less than remaining ...
                    if (amountToWrite < bufferToWrite.remaining()) {
                        // slice the buffer ...
                        ByteBuffer slicedBuffer = bufferToWrite.slice();
                        // limit to amount to write ...
                        slicedBuffer.limit(amountToWrite);
                        // and write to socket ...
                        amountWritten = _socket.write(slicedBuffer);
                        if (amountWritten >= 0) {
                            // advance source buffer manually...
                            bufferToWrite.position(bufferToWrite.position() + amountWritten);
                        }
                    } else {
                        amountWritten = _socket.write(bufferToWrite);
                    }

                    if (_uploadRateLimiter != null) {
                        _uploadRateLimiter.updateStats(amountWritten);

                        // debug output ...
                        BandwidthUtils.BandwidthStats stats = new BandwidthUtils.BandwidthStats();
                        // collect stats
                        _uploadRateLimiter.getStats(stats);
                        // dump stats ...
                        // System.out.println("Connection: "+ this+"Upload Speed:" +
                        // stats.scaledBitsPerSecond + " " + stats.scaledBitsUnits +
                        // " TotalWritten:" + (_cumilativeWritten + amountWritten) );
                        LOG.info("Connection:[" + getId() + "] BytesOut:" + amountWritten + " Upload Speed:"
                                + stats.scaledBitsPerSecond + " " + stats.scaledBitsUnits + " TotalWritten:"
                                + (_totalWritten + amountWritten));
                    }
                }
            } catch (IOException exception) {
                // LOG.error(CCStringUtils.stringifyException(e));
                throw exception;
            }

            _totalWritten += amountWritten;
            _cumilativeWritten += amountWritten;

            // System.out.println("NIOHttpConnection->wrote:" + amountWritten +
            // "Bytes TotalWritten:" + _cumilativeWritten);

            if (bufferToWrite.remaining() > 0) {
                _outBuf.putBack(bufferToWrite);
            }
        }

        if (_totalWritten > 0 && !_outBuf.isDataAvailable() && (_dataSource == null || contentEOF)) {
            _lastReadOrWriteTime = System.currentTimeMillis();

            // transition from sending to receiving ...
            if (_state == State.SENDING_REQUEST) {
                // set up an initial last read time value here ...
                setState(State.RECEIVING_HEADERS, null);
                _selector.registerForRead(theSocket);
            }
        }
    } catch (IOException e) {
        LOG.error("Connection:[" + getId() + "] Writeable for url:" + getURL() + " threw Exception:"
                + e.getMessage());
        setErrorType(ErrorType.IOEXCEPTION);
        setErrorDesc(StringUtils.stringifyException(e));
        setState(State.ERROR, e);
        throw e;
    }

    if (_state == State.SENDING_REQUEST) {
        _selector.registerForReadAndWrite(theSocket);
    } else if (_state.ordinal() >= State.RECEIVING_HEADERS.ordinal() && _state.ordinal() < State.DONE.ordinal()) {
        _selector.registerForRead(theSocket);
    }
}
From source file:com.mellanox.r4h.DFSInputStream.java
private synchronized ByteBuffer tryReadZeroCopy(int maxLength, EnumSet<ReadOption> opts) throws IOException {
    // Copy 'pos' and 'blockEnd' to local variables to make it easier for the
    // JVM to optimize this function.
    final long curPos = pos;
    final long curEnd = blockEnd;
    final long blockStartInFile = currentLocatedBlock.getStartOffset();
    final long blockPos = curPos - blockStartInFile;

    // Shorten this read if the end of the block is nearby.
    long length63;
    if ((curPos + maxLength) <= (curEnd + 1)) {
        length63 = maxLength;
    } else {
        length63 = 1 + curEnd - curPos;
        if (length63 <= 0) {
            if (DFSClient.LOG.isDebugEnabled()) {
                DFSClient.LOG.debug("Unable to perform a zero-copy read from offset " + curPos + " of " + src
                        + "; " + length63 + " bytes left in block. " + "blockPos=" + blockPos + "; curPos=" + curPos
                        + "; curEnd=" + curEnd);
            }
            return null;
        }
        if (DFSClient.LOG.isDebugEnabled()) {
            DFSClient.LOG.debug("Reducing read length from " + maxLength + " to " + length63
                    + " to avoid going more than one byte " + "past the end of the block. blockPos=" + blockPos
                    + "; curPos=" + curPos + "; curEnd=" + curEnd);
        }
    }
    // Make sure that don't go beyond 31-bit offsets in the MappedByteBuffer.
    int length;
    if (blockPos + length63 <= Integer.MAX_VALUE) {
        length = (int) length63;
    } else {
        long length31 = Integer.MAX_VALUE - blockPos;
        if (length31 <= 0) {
            // Java ByteBuffers can't be longer than 2 GB, because they use
            // 4-byte signed integers to represent capacity, etc.
            // So we can't mmap the parts of the block higher than the 2 GB offset.
            // FIXME: we could work around this with multiple memory maps.
            // See HDFS-5101.
            if (DFSClient.LOG.isDebugEnabled()) {
                DFSClient.LOG.debug("Unable to perform a zero-copy read from offset " + curPos + " of " + src
                        + "; 31-bit MappedByteBuffer limit " + "exceeded. blockPos=" + blockPos + ", curEnd="
                        + curEnd);
            }
            return null;
        }
        length = (int) length31;
        if (DFSClient.LOG.isDebugEnabled()) {
            DFSClient.LOG.debug("Reducing read length from " + maxLength + " to " + length
                    + " to avoid 31-bit limit. " + "blockPos=" + blockPos + "; curPos=" + curPos + "; curEnd="
                    + curEnd);
        }
    }
    final ClientMmap clientMmap = blockReader.getClientMmap(opts);
    if (clientMmap == null) {
        if (DFSClient.LOG.isDebugEnabled()) {
            DFSClient.LOG.debug("unable to perform a zero-copy read from offset " + curPos + " of " + src
                    + "; BlockReader#getClientMmap returned " + "null.");
        }
        return null;
    }
    boolean success = false;
    ByteBuffer buffer;
    try {
        seek(curPos + length);
        buffer = clientMmap.getMappedByteBuffer().asReadOnlyBuffer();
        buffer.position((int) blockPos);
        buffer.limit((int) (blockPos + length));
        getExtendedReadBuffers().put(buffer, clientMmap);
        synchronized (infoLock) {
            readStatistics.addZeroCopyBytes(length);
        }
        if (DFSClient.LOG.isDebugEnabled()) {
            DFSClient.LOG.debug("readZeroCopy read " + length + " bytes from offset " + curPos
                    + " via the zero-copy read " + "path. blockEnd = " + blockEnd);
        }
        success = true;
    } finally {
        if (!success) {
            IOUtils.closeQuietly(clientMmap);
        }
    }
    return buffer;
}
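The zero-copy part of this example is the final step: a read-only view of the memory-mapped block is bounded with position()/limit() so the caller sees exactly the requested byte range without any copying. A hedged sketch of that windowing on a plain mapped file (the file path and offsets are illustrative, and offset + length is assumed to fit inside the mapped region):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

// Hedged sketch: bound a read-only view of a mapped region with position()/limit().
class MappedWindow {
    static ByteBuffer window(Path file, long mapOffset, int mapLength, int offset, int length) throws IOException {
        try (FileChannel fc = FileChannel.open(file, StandardOpenOption.READ)) {
            MappedByteBuffer mapped = fc.map(FileChannel.MapMode.READ_ONLY, mapOffset, mapLength);
            ByteBuffer view = mapped.asReadOnlyBuffer();   // independent position/limit, shared bytes
            view.position(offset);
            view.limit(offset + length);                   // expose only the requested slice, no copy
            return view;
        }
    }
}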
From source file:org.commoncrawl.io.internal.NIOHttpConnection.java
public int Readable(NIOClientSocket theSocket) throws IOException {
    if (!theSocket.isOpen()) {
        LOG.error("Readable Called on Closed Socket");
        return -1;
    }

    int totalBytesRead = 0;
    int singleReadAmount = 0;
    boolean overflow = false;
    boolean disconnected = false;

    try {
        if (_downloadMax == -1 || _totalRead < _downloadMax) {
            do {
                ByteBuffer buffer = _inBuf.getWriteBuf();

                if (_downloadMax != -1) {
                    if (_totalRead + buffer.remaining() > _downloadMax) {
                        int overflowAmt = (_totalRead + buffer.remaining()) - _downloadMax;
                        buffer.limit(buffer.limit() - overflowAmt);
                    }
                }

                singleReadAmount = _socket.read(buffer);

                if (singleReadAmount > 0) {
                    _inBuf.write(buffer);
                    _totalRead += singleReadAmount;
                    _cumilativeRead += singleReadAmount;
                    totalBytesRead += singleReadAmount;
                }
            } while (singleReadAmount > 0 && (_downloadMax == -1 || _totalRead < _downloadMax));

            if (_downloadMax != -1 && _totalRead == _downloadMax) {
                overflow = true;
                _contentTruncated = true;
            }
        }

        if (totalBytesRead > 0) {
            // flush any written buffers .
            _inBuf.flush();
            // process incoming buffer
            processIncomingData(totalBytesRead);
        }

        if (singleReadAmount == -1 || overflow) {
            disconnected = true;

            if (getState() == State.RECEIVING_CONTENT
                    && (overflow || _contentLength == -1 || _contentLength == _downloadedContentLength)) {
                // if we are still in the middle of processing chunked data ...
                if (_chunked) {
                    // clear out existing input buffer ...
                    _inBuf.reset();
                    // and if a chunk buffer is available ...
                    if (_chunkContentBuffer != null) {
                        // take what we can get ...
                        // flush chunk buffer ...
                        _chunkContentBuffer.flush();
                        // and swap it with the real content buffer ...
                        _inBuf = _chunkContentBuffer;
                        // reset chunk state ...
                        _chunkContentBuffer = null;
                    }
                    // reset chunked flag ...
                    _chunked = false;
                    // and now, if this is NOT an overflow condidition ...
                    if (!overflow) {
                        // interpret this as an error ...
                        setErrorType(ErrorType.IOEXCEPTION);
                        setErrorDesc("Connection Closed Before Receiving Chunk Trailer");
                        setState(State.ERROR, new java.net.SocketException());
                    }
                }

                // now check one more time of we are are in the proper state ...
                if (getState() == State.RECEIVING_CONTENT) {
                    setState(State.DONE, null);
                }
            } else if (getState() != State.DONE) {
                if (getState() == State.RECEIVING_CONTENT && _downloadedContentLength != 0) {
                    LOG.warn("URL:" + _url + " POSSIBLE TRUNCATION: Read returned -1 with ContentLength:"
                            + _contentLength + " BufferSize:" + _inBuf.available() + " DownloadSize:"
                            + _downloadedContentLength + " State:" + getState() + "Context:" + _context);
                    setState(State.DONE, null);
                } else {
                    LOG.error("URL:" + _url + " Read returned -1 with ContentLength:" + _contentLength
                            + " BufferSize:" + _inBuf.available() + " DownloadSize:" + _downloadedContentLength
                            + " State:" + getState() + "Context:" + _context);
                    setErrorType(ErrorType.IOEXCEPTION);
                    setErrorDesc("Read returned -1 with ContentLength:" + _contentLength + " BufferSize:"
                            + _inBuf.available() + " DownloadSize:" + _downloadedContentLength + " State:"
                            + getState());
                    setState(State.ERROR, new java.net.SocketException());
                }
            }
        }
    } catch (IOException e) {
        LOG.error("Readable for url:" + getURL() + " threw Exception:" + e.getMessage());
        setErrorType(ErrorType.IOEXCEPTION);
        setErrorDesc(StringUtils.stringifyException(e));
        setState(State.ERROR, e);
    }

    if (_socket.isOpen()) {
        // if we data to write ...
        if (_outBuf.isDataAvailable()) {
            _selector.registerForReadAndWrite(theSocket);
        } else {
            _selector.registerForRead(theSocket);
        }
    }

    if (totalBytesRead > 0) {
        // update last read time ...
        _lastReadOrWriteTime = System.currentTimeMillis();
    }

    return (disconnected) ? -1 : totalBytesRead;
}
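The limit() usage here enforces a download cap: before each socket read, the write buffer's limit is pulled in by the would-be overflow so the total bytes read can never exceed the configured maximum. A hedged sketch of just that step (names are illustrative):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

// Hedged sketch: shrink the buffer's limit so a single read cannot push the total past the cap.
class CappedDownload {
    static long readUpTo(ReadableByteChannel socket, ByteBuffer writeBuf, long alreadyRead, long downloadMax)
            throws IOException {
        if (downloadMax != -1 && alreadyRead + writeBuf.remaining() > downloadMax) {
            int overflow = (int) ((alreadyRead + writeBuf.remaining()) - downloadMax);
            writeBuf.limit(writeBuf.limit() - overflow);   // this read can no longer overshoot the cap
        }
        int n = socket.read(writeBuf);
        return n > 0 ? alreadyRead + n : alreadyRead;
    }
}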
From source file:org.commoncrawl.io.NIOHttpConnection.java
public int Readable(NIOClientSocket theSocket) throws IOException {
    if (!theSocket.isOpen()) {
        LOG.error("Connection:[" + getId() + "] Readable Called on Closed Socket");
        return -1;
    }

    int totalBytesRead = 0;
    int singleReadAmount = 0;
    boolean overflow = false;
    boolean disconnected = false;

    try {
        if (_downloadMax == -1 || _totalRead < _downloadMax) {
            do {
                ByteBuffer buffer = _inBuf.getWriteBuf();

                if (_downloadMax != -1) {
                    if (_totalRead + buffer.remaining() > _downloadMax) {
                        int overflowAmt = (_totalRead + buffer.remaining()) - _downloadMax;
                        buffer.limit(buffer.limit() - overflowAmt);
                    }
                }

                singleReadAmount = _socket.read(buffer);

                if (singleReadAmount > 0) {
                    _inBuf.write(buffer);
                    _totalRead += singleReadAmount;
                    _cumilativeRead += singleReadAmount;
                    totalBytesRead += singleReadAmount;
                }
            } while (singleReadAmount > 0 && (_downloadMax == -1 || _totalRead < _downloadMax));

            if (_downloadMax != -1 && _totalRead == _downloadMax) {
                overflow = true;
                _contentTruncated = true;
            }
        }

        if (totalBytesRead > 0) {
            // flush any written buffers .
            _inBuf.flush();
            // process incoming buffer
            processIncomingData(totalBytesRead);
        }

        if (singleReadAmount == -1 || overflow) {
            disconnected = true;

            if (getState() == State.RECEIVING_CONTENT
                    && (overflow || _contentLength == -1 || _contentLength == _downloadedContentLength)) {
                // if we are still in the middle of processing chunked data ...
                if (_chunked) {
                    // clear out existing input buffer ...
                    _inBuf.reset();
                    // and if a chunk buffer is available ...
                    if (_chunkContentBuffer != null) {
                        // take what we can get ...
                        // flush chunk buffer ...
                        _chunkContentBuffer.flush();
                        // and swap it with the real content buffer ...
                        _inBuf = _chunkContentBuffer;
                        // reset chunk state ...
                        _chunkContentBuffer = null;
                    }
                    // reset chunked flag ...
                    _chunked = false;
                    // and now, if this is NOT an overflow condidition ...
                    if (!overflow) {
                        // interpret this as an error ...
                        setErrorType(ErrorType.IOEXCEPTION);
                        setErrorDesc("Connection Closed Before Receiving Chunk Trailer");
                        setState(State.ERROR, new java.net.SocketException());
                    }
                }

                // now check one more time of we are are in the proper state ...
                if (getState() == State.RECEIVING_CONTENT) {
                    setState(State.DONE, null);
                }
            } else if (getState() != State.DONE) {
                if (getState() == State.SENDING_REQUEST) {
                    LOG.warn("Connection:[" + getId() + "] URL:" + _url
                            + " POSSIBLE TRUNCATION: Read returned -1 with ContentLength:" + _contentLength
                            + " BufferSize:" + _inBuf.available() + " DownloadSize:" + _downloadedContentLength
                            + " PrevState:" + getState() + " Sent:" + _totalWritten + " OutBufDataAvail:"
                            + _outBuf.available() + " Context:" + _context);
                    setState(State.RECEIVING_HEADERS, null);
                    processIncomingData(0);
                } else if (getState() == State.RECEIVING_CONTENT && _downloadedContentLength != 0) {
                    LOG.warn("Connection:[" + getId() + "] URL:" + _url
                            + " POSSIBLE TRUNCATION: Read returned -1 with ContentLength:" + _contentLength
                            + " BufferSize:" + _inBuf.available() + " DownloadSize:" + _downloadedContentLength
                            + " State:" + getState() + "Context:" + _context);
                    setState(State.DONE, null);
                } else {
                    LOG.error("Connection:[" + getId() + "] URL:" + _url + " Read returned -1 with ContentLength:"
                            + _contentLength + " BufferSize:" + _inBuf.available() + " DownloadSize:"
                            + _downloadedContentLength + " State:" + getState() + "Context:" + _context);
                    setErrorType(ErrorType.IOEXCEPTION);
                    setErrorDesc("Read returned -1 with ContentLength:" + _contentLength + " BufferSize:"
                            + _inBuf.available() + " DownloadSize:" + _downloadedContentLength + " State:"
                            + getState());
                    setState(State.ERROR, new java.net.SocketException());
                }
            }
        }
    } catch (IOException e) {
        LOG.error("Connection:[" + getId() + "] Readable for url:" + getURL() + " threw Exception:"
                + e.getMessage());
        setErrorType(ErrorType.IOEXCEPTION);
        setErrorDesc(StringUtils.stringifyException(e));
        setState(State.ERROR, e);
    }

    if (_socket.isOpen()) {
        // if we data to write ...
        if (_outBuf.isDataAvailable()) {
            _selector.registerForReadAndWrite(theSocket);
        } else {
            _selector.registerForRead(theSocket);
        }
    }

    if (totalBytesRead > 0) {
        // update last read time ...
        _lastReadOrWriteTime = System.currentTimeMillis();
    }

    return (disconnected) ? -1 : totalBytesRead;
}