List of usage examples for the no-argument constructor of java.lang.ArrayIndexOutOfBoundsException:
public ArrayIndexOutOfBoundsException()
From source file:org.apache.hadoop.io.compress.bzip2.Bzip2Compressor.java
@Override public synchronized void setInput(byte[] b, int off, int len) { if (b == null) { throw new NullPointerException(); }/*from w ww . ja v a2s . c om*/ if (off < 0 || len < 0 || off > b.length - len) { throw new ArrayIndexOutOfBoundsException(); } this.userBuf = b; this.userBufOff = off; this.userBufLen = len; uncompressedDirectBufOff = 0; setInputFromSavedData(); // Reinitialize bzip2's output direct buffer. compressedDirectBuf.limit(directBufferSize); compressedDirectBuf.position(directBufferSize); }
From source file:org.apache.hadoop.io.compress.lzma.LzmaDecompressor.java
public synchronized int decompress(byte[] b, int off, int len) throws IOException { if (b == null) { throw new NullPointerException(); }/*from w w w .j a v a 2 s .c o m*/ if (off < 0 || len < 0 || off > b.length - len) { throw new ArrayIndexOutOfBoundsException(); } int n = 0; // Check if there is uncompressed data n = uncompressedDirectBuf.remaining(); if (n > 0) { n = Math.min(n, len); ((ByteBuffer) uncompressedDirectBuf).get(b, off, n); return n; } // Re-initialize the lzma's output direct buffer uncompressedDirectBuf.rewind(); uncompressedDirectBuf.limit(directBufferSize); // Decompress data if (compressedDirectBufLen > 0) { n = decompressBytesDirect(); } uncompressedDirectBuf.limit(n); // Get atmost 'len' bytes n = Math.min(n, len); ((ByteBuffer) uncompressedDirectBuf).get(b, off, n); return n; }
From source file:com.gpl_compression.lzo.LzoDecompressor.java
public synchronized void setInput(byte[] b, int off, int len) { if (b == null) { throw new NullPointerException(); }//from w ww . j a v a 2s .co m if (off < 0 || len < 0 || off > b.length - len) { throw new ArrayIndexOutOfBoundsException(); } this.userBuf = b; this.userBufOff = off; this.userBufLen = len; setInputFromSavedData(); // Reinitialize lzo's output direct-buffer uncompressedDirectBuf.limit(directBufferSize); uncompressedDirectBuf.position(directBufferSize); }
From source file:com.hadoop.compression.fourmc.zstd.ZstdStreamDecompressor.java
/**
 * Copies up to {@code len} decompressed bytes into {@code b[off..)} and returns
 * how many were copied. Pending output from a previous native call is served
 * first; otherwise one zstd streaming-decompress round is run.
 * NOTE(review): the srcPos/iBuffLen/toRead/finished bookkeeping below appears
 * order-sensitive — presumably these fields are shared with setInput(); verify
 * against the enclosing class before reworking.
 */
public synchronized int decompress(byte[] b, int off, int len) throws IOException {
    if (b == null) {
        throw new NullPointerException();
    }
    // Reject a negative offset/length or a window running past the end of b.
    if (off < 0 || len < 0 || off > b.length - len) {
        throw new ArrayIndexOutOfBoundsException();
    }
    int numBytes = 0;
    // Serve bytes still pending in the output direct-buffer before decoding more.
    numBytes = oBuff.remaining();
    if (numBytes > 0) {
        numBytes = Math.min(numBytes, len);
        ((ByteBuffer) oBuff).get(b, off, numBytes);
        return numBytes;
    }
    // Check if there is data to decompress. When an end of frame is reached,
    // decompress shall not call decompressStream without initStream.
    if (srcPos < iBuffLen || (iBuffLen == toRead && !finished)) {
        // Re-initialize the ZstdStream's output direct-buffer.
        oBuff.rewind();
        oBuff.limit(oBuffSize);
        dstPos = 0;
        // Decompress data; all the input should be consumed.
        toRead = decompressStream(dStream, oBuff, oBuffSize, iBuff, iBuffLen);
        if (Zstd.isError(toRead)) {
            throw new InternalError("ZSTD decompressStream failed, due to: " + Zstd.getErrorName(toRead));
        }
        // If toRead is 0, a whole frame has been decoded — mark the stream finished.
        finished = toRead == 0;
        // Check if all data in iBuff is consumed.
        if (srcPos >= iBuffLen) {
            srcPos = 0;
            iBuffLen = 0;
            iBuff.clear();
            // toRead being 1 is a special case, meaning:
            // 1. zstd really needs another one byte, OR
            // 2. zstd didn't flush all its data into oBuff because oBuff is small.
            // When all input is consumed and dstPos > 0, toRead == 1 only happens in
            // case 2. This quirk is eliminated in later zstd versions (>1.0.0); the
            // line below can then be removed or kept (it will simply never trigger).
            toRead = (toRead == 1 && dstPos != 0) ? 0 : toRead;
        }
        // Read at most iBuffSize next time; works even for a skippable frame
        // (toRead can be any size between 1 and 4GB-1 in a skippable frame).
        toRead = Math.min(toRead, iBuffSize);
        numBytes = oBuffLen;
        oBuff.limit(numBytes);
        // Return at most 'len' bytes.
        numBytes = Math.min(numBytes, len);
        ((ByteBuffer) oBuff).get(b, off, numBytes);
    }
    return numBytes;
}
From source file:org.rythmengine.internal.TemplateParser.java
@Override public char pop() { if (!hasRemain()) throw new ArrayIndexOutOfBoundsException(); char c = template.charAt(cursor); step(1);//w w w .j a v a2 s . c o m return c; }
From source file:com.hadoop.compression.fourmc.Lz4Decompressor.java
public synchronized int decompress(byte[] b, int off, int len) throws IOException { if (b == null) { throw new NullPointerException(); }/* w ww . ja v a2s.c o m*/ if (off < 0 || len < 0 || off > b.length - len) { throw new ArrayIndexOutOfBoundsException(); } int numBytes = 0; if (isCurrentBlockUncompressed()) { // The current block has been stored uncompressed, so just // copy directly from the input buffer. numBytes = Math.min(userBufLen, len); System.arraycopy(userBuf, userBufOff, b, off, numBytes); userBufOff += numBytes; userBufLen -= numBytes; } else { // Check if there is uncompressed data numBytes = uncompressedDirectBuf.remaining(); if (numBytes > 0) { numBytes = Math.min(numBytes, len); ((ByteBuffer) uncompressedDirectBuf).get(b, off, numBytes); return numBytes; } // Check if there is data to decompress if (compressedDirectBufLen > 0) { // Re-initialize the LZ4's output direct-buffer uncompressedDirectBuf.rewind(); uncompressedDirectBuf.limit(directBufferSize); // Decompress data numBytes = decompressBytesDirect(); uncompressedDirectBuf.limit(numBytes); // Return atmost 'len' bytes numBytes = Math.min(numBytes, len); ((ByteBuffer) uncompressedDirectBuf).get(b, off, numBytes); } } // Set 'finished' if LZ4 has consumed all user-data if (userBufLen <= 0) { finished = true; } return numBytes; }
From source file:org.apache.hadoop.io.compress.lzo.LzoCompressor.java
public synchronized void setInput(byte[] b, int off, int len) { if (b == null) { throw new NullPointerException(); }//w w w . j a va2 s . c o m if (off < 0 || len < 0 || off > b.length - len) { throw new ArrayIndexOutOfBoundsException(); } finished = false; if (len > uncompressedDirectBuf.remaining()) { // save data; now !needsInput this.userBuf = b; this.userBufOff = off; this.userBufLen = len; } else { ((ByteBuffer) uncompressedDirectBuf).put(b, off, len); uncompressedDirectBufLen = uncompressedDirectBuf.position(); } bytesread += len; }
From source file:com.hadoop.compression.lzo.LzoDecompressor.java
public synchronized void setInput(byte[] b, int off, int len) { if (!isCurrentBlockUncompressed()) { if (len > directBufferSize) { LOG.warn("Decompression will fail because compressed buffer size :" + len + " is greater than this decompressor's directBufferSize: " + directBufferSize + ". To fix this, increase the value of your " + "configuration's io.compression.codec.lzo.buffersize to be larger " + "than: " + len + "."); }// ww w .java2s . c o m } if (b == null) { throw new NullPointerException(); } if (off < 0 || len < 0 || off > b.length - len) { throw new ArrayIndexOutOfBoundsException(); } this.userBuf = b; this.userBufOff = off; this.userBufLen = len; setInputFromSavedData(); // Reinitialize lzo's output direct-buffer uncompressedDirectBuf.limit(directBufferSize); uncompressedDirectBuf.position(directBufferSize); }
From source file:org.apache.hadoop.io.compress.lzma.LzmaCompressor.java
public synchronized int compress(byte[] b, int off, int len) throws IOException { if (b == null) { throw new NullPointerException(); }//ww w . j a v a 2 s. c om if (off < 0 || len < 0 || off > b.length - len) { throw new ArrayIndexOutOfBoundsException(); } int n = 0; // Check if there is compressed data n = compressedDirectBuf.remaining(); if (n > 0) { n = Math.min(n, len); ((ByteBuffer) compressedDirectBuf).get(b, off, n); return n; } // Re-initialize the lzma's output direct buffer compressedDirectBuf.rewind(); compressedDirectBuf.limit(directBufferSize); // Compress data n = compressBytesDirect(); compressedDirectBuf.limit(n); // Get atmost 'len' bytes n = Math.min(n, len); ((ByteBuffer) compressedDirectBuf).get(b, off, n); return n; }
From source file:com.hadoop.compression.fourmc.zstd.ZstdStreamCompressor.java
/**
 * Copies up to {@code len} compressed bytes into {@code b[off..)} and returns
 * how many were copied. Pending output is drained first, then any unfinished
 * end-of-stream flush is continued, then one zstd streaming-compress round is
 * run and, when finish() was requested and all input is consumed, the frame
 * epilogue is written.
 * NOTE(review): the iBuff/srcPos/remainingToFlush bookkeeping appears
 * order-sensitive — presumably shared with setInput()/finish(); verify against
 * the enclosing class before reworking.
 */
public synchronized int compress(byte[] b, int off, int len) throws IOException {
    if (b == null) {
        throw new NullPointerException();
    }
    // Reject a negative offset/length or a window running past the end of b.
    if (off < 0 || len < 0 || off > b.length - len) {
        throw new ArrayIndexOutOfBoundsException();
    }
    // Serve compressed bytes still pending in the output buffer first.
    int n = oBuff.remaining();
    if (n > 0) {
        n = Math.min(n, len);
        ((ByteBuffer) oBuff).get(b, off, n);
        bytesWritten += n;
        return n;
    }
    // Happens when oBuffSize is small: zstd cannot flush its internal buffer in
    // one call. This path is never triggered when Zstd.cStreamInSize /
    // Zstd.cStreamOutSize are used as the input/output buffer sizes.
    if (remainingToFlush > 0) {
        oBuff.rewind();
        remainingToFlush = endStream(cStream, oBuff, 0, oBuff.capacity());
        if (Zstd.isError(remainingToFlush)) {
            throw new InternalError("Zstd endStream failed, due to: " + Zstd.getErrorName(remainingToFlush));
        }
        finished = remainingToFlush == 0;
        oBuff.limit(oBuffLen);
        n = Math.min(oBuffLen, len);
        bytesWritten += n;
        ((ByteBuffer) oBuff).get(b, off, n);
        return n;
    }
    // No staged input: pull saved user data; if still empty, we are done.
    if (0 == iBuff.position()) {
        setInputFromSavedData();
        if (0 == iBuff.position()) {
            finished = true;
            return 0;
        }
    }
    oBuff.rewind();
    oBuff.limit(oBuffSize);
    // iBuffLen == iBuffSize most of the time; iBuffLen can be < iBuffSize when
    // compress() is called after finish(). oBuff was cleared before this call.
    int toRead = compressStream(cStream, oBuff, oBuffSize, iBuff, iBuffLen);
    if (Zstd.isError(toRead)) {
        throw new InternalError("ZSTD compressStream failed, due to: " + Zstd.getErrorName(toRead));
    }
    boolean inputConsumedAll = srcPos >= iBuffLen;
    // If all the data in iBuff is consumed, reset iBuff. Otherwise its data
    // remains intact and is consumed by compressStream on the next call.
    if (inputConsumedAll) {
        iBuff.clear();
        srcPos = 0;
        iBuffLen = 0;
    }
    // finish() was called and every input byte is consumed: write the
    // end-of-frame epilogue after the bytes just produced.
    if (finish && userBufLen <= 0 && inputConsumedAll) {
        int oBuffOffset = oBuffLen;
        remainingToFlush = endStream(cStream, oBuff, oBuffOffset, oBuff.capacity() - oBuffOffset);
        if (Zstd.isError(remainingToFlush)) {
            throw new InternalError("Zstd endStream failed, due to: " + Zstd.getErrorName(remainingToFlush));
        }
        finished = remainingToFlush == 0;
    }
    oBuff.limit(oBuffLen);
    n = Math.min(oBuffLen, len);
    bytesWritten += n;
    ((ByteBuffer) oBuff).get(b, off, n);
    return n;
}