Example usage for java.nio.ByteBuffer.limit()

Introduction

On this page you can find example usages of java.nio.ByteBuffer.limit().

Prototype

public final int limit() 

Document

Returns the limit of this buffer.
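The limit is the index of the first element that must not be read or written: a newly allocated buffer's limit equals its capacity, and flip() sets the limit to the current position before rewinding the position to zero. Below is a minimal sketch of that behavior using only the JDK (the class name ByteBufferLimitDemo is ours; everything else is standard java.nio API):

import java.nio.ByteBuffer;

public class ByteBufferLimitDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        System.out.println(buf.limit());   // 16: limit starts at capacity

        buf.put(new byte[] { 1, 2, 3 });   // position advances to 3
        buf.flip();                        // limit = 3, position = 0
        System.out.println(buf.limit());   // 3: the number of readable bytes

        byte[] out = new byte[buf.limit()];
        buf.get(out);                      // reads exactly limit() bytes
    }
}

This flip-then-limit() pattern recurs throughout the examples below: after a write phase, limit() bounds exactly the data that is valid to read.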

Usage

From source file:org.commonjava.indy.httprox.handler.ProxyRequestReader.java

private int doRead(final ConduitStreamSourceChannel channel) throws IOException {
    bReq = new ByteArrayOutputStream();
    pReq = new PrintStream(bReq);

    logger.debug("Starting read: {}", channel);

    int total = 0;
    while (true) {
        ByteBuffer buf = ByteBuffer.allocate(1024);
        channel.awaitReadable(AWAIT_READABLE_IN_MILLISECONDS, TimeUnit.MILLISECONDS);

        int read = channel.read(buf); // return the number of bytes read, possibly zero, or -1

        logger.debug("Read {} bytes", read);

        if (read == -1) // return -1 if the channel has reached end-of-stream
        {
            if (total == 0) // nothing read, return -1 to indicate the EOF
            {
                return -1;
            } else {
                return total;
            }
        }

        if (read == 0) // no new bytes this time
        {
            return total;
        }

        total += read;

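        // flip() sets limit to the number of bytes just read and rewinds
        // position to 0, so limit() below is the exact payload length.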
        buf.flip();
        byte[] bbuf = new byte[buf.limit()];
        buf.get(bbuf);

        if (!headDone) {
            // allows us to stop after header read...
            final String part = new String(bbuf);
            for (final char c : part.toCharArray()) {
                switch (c) {
                case '\n': {
                    while (lastFour.size() > 3) {
                        lastFour.remove(0);
                    }

                    lastFour.add(c);
                    try {
                        if (bReq.size() > 0 && HEAD_END.equals(lastFour)) {
                            logger.debug("Detected end of request headers.");
                            headDone = true;

                            logger.trace("Proxy request header:\n{}\n", new String(bReq.toByteArray()));
                        }
                    } finally {
                        lastFour.remove(lastFour.size() - 1);
                    }
                }
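                // deliberate fall-through: '\n' is also printed and
                // permanently recorded by the default branch below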
                default: {
                    pReq.print(c);
                    lastFour.add(c);
                }
                }
            }
        } else {
            bReq.write(bbuf);
        }
    }
}

From source file:org.apache.hadoop.hbase.io.hfile.TestHFileBlock.java

protected void testPreviousOffsetInternals() throws IOException {
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
        for (boolean pread : BOOLEAN_VALUES) {
            for (boolean cacheOnWrite : BOOLEAN_VALUES) {
                Random rand = defaultRandom();
                LOG.info("testPreviousOffset:Compression algorithm: " + algo + ", pread=" + pread
                        + ", cacheOnWrite=" + cacheOnWrite);
                Path path = new Path(TEST_UTIL.getDataTestDir(), "prev_offset");
                List<Long> expectedOffsets = new ArrayList<Long>();
                List<Long> expectedPrevOffsets = new ArrayList<Long>();
                List<BlockType> expectedTypes = new ArrayList<BlockType>();
                List<ByteBuffer> expectedContents = cacheOnWrite ? new ArrayList<ByteBuffer>() : null;
                long totalSize = writeBlocks(rand, algo, path, expectedOffsets, expectedPrevOffsets,
                        expectedTypes, expectedContents);

                FSDataInputStream is = fs.open(path);
                HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(true)
                        .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag)
                        .withCompression(algo).build();
                HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, totalSize, meta);
                long curOffset = 0;
                for (int i = 0; i < NUM_TEST_BLOCKS; ++i) {
                    if (!pread) {
                        assertEquals(is.getPos(), curOffset + (i == 0 ? 0 : HConstants.HFILEBLOCK_HEADER_SIZE));
                    }

                    assertEquals(expectedOffsets.get(i).longValue(), curOffset);
                    if (detailedLogging) {
                        LOG.info("Reading block #" + i + " at offset " + curOffset);
                    }
                    HFileBlock b = hbr.readBlockData(curOffset, -1, -1, pread);
                    if (detailedLogging) {
                        LOG.info("Block #" + i + ": " + b);
                    }
                    assertEquals("Invalid block #" + i + "'s type:", expectedTypes.get(i), b.getBlockType());
                    assertEquals("Invalid previous block offset for block " + i + " of " + "type "
                            + b.getBlockType() + ":", (long) expectedPrevOffsets.get(i),
                            b.getPrevBlockOffset());
                    b.sanityCheck();
                    assertEquals(curOffset, b.getOffset());

                    // Now re-load this block knowing the on-disk size. This tests a
                    // different branch in the loader.
                    HFileBlock b2 = hbr.readBlockData(curOffset, b.getOnDiskSizeWithHeader(), -1, pread);
                    b2.sanityCheck();

                    assertEquals(b.getBlockType(), b2.getBlockType());
                    assertEquals(b.getOnDiskSizeWithoutHeader(), b2.getOnDiskSizeWithoutHeader());
                    assertEquals(b.getOnDiskSizeWithHeader(), b2.getOnDiskSizeWithHeader());
                    assertEquals(b.getUncompressedSizeWithoutHeader(), b2.getUncompressedSizeWithoutHeader());
                    assertEquals(b.getPrevBlockOffset(), b2.getPrevBlockOffset());
                    assertEquals(curOffset, b2.getOffset());
                    assertEquals(b.getBytesPerChecksum(), b2.getBytesPerChecksum());
                    assertEquals(b.getOnDiskDataSizeWithHeader(), b2.getOnDiskDataSizeWithHeader());
                    assertEquals(0, HFile.getChecksumFailuresCount());

                    curOffset += b.getOnDiskSizeWithHeader();

                    if (cacheOnWrite) {
                        // In the cache-on-write mode we store uncompressed bytes so we
                        // can compare them to what was read by the block reader.
                        // b's buffer has header + data + checksum while
                        // expectedContents have header + data only
                        ByteBuffer bufRead = b.getBufferWithHeader();
                        ByteBuffer bufExpected = expectedContents.get(i);
                        boolean bytesAreCorrect = Bytes.compareTo(bufRead.array(), bufRead.arrayOffset(),
                                bufRead.limit() - b.totalChecksumBytes(), bufExpected.array(),
                                bufExpected.arrayOffset(), bufExpected.limit()) == 0;
                        String wrongBytesMsg = "";

                        if (!bytesAreCorrect) {
                            // Optimization: only construct an error message in case we
                            // will need it.
                            wrongBytesMsg = "Expected bytes in block #" + i + " (algo=" + algo + ", pread="
                                    + pread + ", cacheOnWrite=" + cacheOnWrite + "):\n";
                            wrongBytesMsg += Bytes.toStringBinary(bufExpected.array(),
                                    bufExpected.arrayOffset(), Math.min(32, bufExpected.limit()))
                                    + ", actual:\n" + Bytes.toStringBinary(bufRead.array(),
                                            bufRead.arrayOffset(), Math.min(32, bufRead.limit()));
                            if (detailedLogging) {
                                LOG.warn("expected header" + HFileBlock.toStringHeader(bufExpected)
                                        + "\nfound    header" + HFileBlock.toStringHeader(bufRead));
                                LOG.warn("bufread offset " + bufRead.arrayOffset() + " limit " + bufRead.limit()
                                        + " expected offset " + bufExpected.arrayOffset() + " limit "
                                        + bufExpected.limit());
                                LOG.warn(wrongBytesMsg);
                            }
                        }
                        assertTrue(wrongBytesMsg, bytesAreCorrect);
                    }
                }

                assertEquals(curOffset, fs.getFileStatus(path).getLen());
                is.close();
            }
        }
    }
}

From source file:org.apache.nifi.processors.standard.TestPutSQL.java

private String fixedSizeByteArrayAsASCIIString(int length) {
    byte[] bBinary = RandomUtils.nextBytes(length);
    ByteBuffer bytes = ByteBuffer.wrap(bBinary);
    StringBuffer sbBytes = new StringBuffer();
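    // Absolute get(i) does not advance the buffer's position; iterating
    // from position() to limit() visits every remaining readable byte.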
    for (int i = bytes.position(); i < bytes.limit(); i++)
        sbBytes.append((char) bytes.get(i));

    return sbBytes.toString();
}

From source file:dk.statsbiblioteket.util.LineReaderTest.java

public void testNIO() throws Exception {
    byte[] INITIAL = new byte[] { 1, 2, 3, 4 };
    byte[] EXTRA = new byte[] { 5, 6, 7, 8 };
    byte[] FULL = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 };
    byte[] FIFTH = new byte[] { 87 };
    byte[] FULL_WITH_FIFTH = new byte[] { 1, 2, 3, 4, 87, 6, 7, 8 };

    // Create temp-file with content
    File temp = createTempFile();
    FileOutputStream fileOut = new FileOutputStream(temp, true);
    fileOut.write(INITIAL);
    fileOut.close();

    checkContent("The plain test-file should be correct", temp, INITIAL);
    {
        // Read the 4 bytes
        RandomAccessFile input = new RandomAccessFile(temp, "r");
        FileChannel channelIn = input.getChannel();
        ByteBuffer buffer = ByteBuffer.allocate(4096);
        channelIn.position(0);
        assertEquals("Buffer read should read full length", INITIAL.length, channelIn.read(buffer));
        buffer.position(0);

        checkContent("Using buffer should produce the right bytes", INITIAL, buffer);
        channelIn.close();
        input.close();
    }
    {
        // Fill new buffer
        ByteBuffer outBuffer = ByteBuffer.allocate(4096);
        outBuffer.put(EXTRA);
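        // flip() makes the buffer readable: limit becomes the number of
        // bytes written (EXTRA.length) and position is reset to 0.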
        outBuffer.flip();
        assertEquals("The limit of the outBuffer should be correct", EXTRA.length, outBuffer.limit());

        // Append new buffer to end
        RandomAccessFile output = new RandomAccessFile(temp, "rw");
        FileChannel channelOut = output.getChannel();
        channelOut.position(INITIAL.length);
        assertEquals("All bytes should be written", EXTRA.length, channelOut.write(outBuffer));
        channelOut.close();
        output.close();
        checkContent("The resulting file should have the full output", temp, FULL);
    }

    {
        // Fill single byte buffer
        ByteBuffer outBuffer2 = ByteBuffer.allocate(4096);
        outBuffer2.put(FIFTH);
        outBuffer2.flip();
        assertEquals("The limit of the second outBuffer should be correct", FIFTH.length, outBuffer2.limit());

        // Insert byte in the middle
        RandomAccessFile output2 = new RandomAccessFile(temp, "rw");
        FileChannel channelOut2 = output2.getChannel();
        channelOut2.position(4);
        assertEquals("The FIFTH should be written", FIFTH.length, channelOut2.write(outBuffer2));
        channelOut2.close();
        output2.close();
        checkContent("The resulting file with fifth should be complete", temp, FULL_WITH_FIFTH);
    }
}

From source file:de.rwhq.btree.LeafNode.java

public void prependEntriesFromOtherPage(final LeafNode<K, V> source, final int num) {
    // checks
    if (num < 0)
        throw new IllegalArgumentException("num must be >= 0");

    if (num > source.getNumberOfEntries())
        throw new IllegalArgumentException("the source leaf has not enough entries");

    if (getNumberOfEntries() + num > maxEntries)
        throw new IllegalArgumentException(
                "not enough space in this leaf to prepend " + num + " entries from other leaf");

    if (getNumberOfEntries() > 0 && comparator.compare(source.getLastLeafKey(), getFirstLeafKey()) > 0) {
        throw new IllegalStateException(
                "the last key of the provided source leaf is larger than this leafs first key");
    }

    final ByteBuffer buffer = rawPage().bufferForWriting(0);

    // make space in this leaf, move all elements to the right
    final int totalSize = num * (keySerializer.getSerializedLength() + valueSerializer.getSerializedLength());
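    // buffer.limit() here spans the whole page (assuming bufferForWriting(0)
    // returns a page-sized buffer), so everything between the header and the
    // end of the page shifts right by totalSize bytes.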
    final int byteToMove = buffer.limit() - Header.size() - totalSize;
    System.arraycopy(buffer.array(), Header.size(), buffer.array(), Header.size() + totalSize, byteToMove);

    // copy from other to us
    final int sourceOffset = source.getOffsetForKeyPos(source.getNumberOfEntries() - num);
    System.arraycopy(source.rawPage().bufferForWriting(0).array(), sourceOffset, buffer.array(), Header.size(),
            totalSize);

    // update headers, also sets modified
    source.setNumberOfEntries(source.getNumberOfEntries() - num);
    setNumberOfEntries(getNumberOfEntries() + num);
}

From source file:com.ery.ertc.estorm.util.Bytes.java

/**
 * Converts the given byte buffer to a printable representation, from the
 * index 0 (inclusive) to the limit (exclusive), regardless of the current
 * position. The position and the other index parameters are not changed.
 *
 * @param buf
 *            a byte buffer
 * @return a string representation of the buffer's binary contents
 * @see #toBytes(ByteBuffer)
 * @see #getBytes(ByteBuffer)
 */
public static String toStringBinary(ByteBuffer buf) {
    if (buf == null)
        return "null";
    if (buf.hasArray()) {
        return toStringBinary(buf.array(), buf.arrayOffset(), buf.limit());
    }
    return toStringBinary(toBytes(buf));
}

From source file:ome.io.nio.RomioPixelBuffer.java

/**
 * Implemented as specified by {@link PixelBuffer} I/F.
 * @see PixelBuffer#setTimepoint(ByteBuffer, Integer)
 */
public void setTimepoint(ByteBuffer buffer, Integer t) throws IOException, DimensionsOutOfBoundsException {
    throwIfReadOnly();
    Long offset = getTimepointOffset(t);
    Integer size = getTimepointSize();
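    // limit() gives the number of readable bytes supplied by the caller,
    // which must match the size of one timepoint exactly.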
    if (buffer.limit() != size) {
        // Handle the size mismatch.
        if (buffer.limit() < size)
            throw new BufferUnderflowException();
        throw new BufferOverflowException();
    }

    setRegion(size, offset, buffer);
}

From source file:org.apache.hadoop.hbase.io.hfile.LruBlockCache.java

private int compare(Cacheable left, Cacheable right) {
    ByteBuffer l = ByteBuffer.allocate(left.getSerializedLength());
    left.serialize(l);
    ByteBuffer r = ByteBuffer.allocate(right.getSerializedLength());
    right.serialize(r);
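    // Each buffer was allocated to the exact serialized length, so
    // limit() == capacity == the number of valid bytes to compare.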
    return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(), r.array(), r.arrayOffset(), r.limit());
}

From source file:org.apache.hadoop.hbase.io.hfile.TestHFilePerformance.java

public void timeReading(String fileType, int keyLength, int valueLength, long rows, int method)
        throws IOException {
    System.out.println("Reading file of type: " + fileType);
    Path path = new Path(ROOT_DIR, fileType + ".Performance");
    System.out.println("Input file size: " + fs.getFileStatus(path).getLen());
    long totalBytesRead = 0;

    ByteBuffer val;
    ByteBuffer key;

    startTime();
    FSDataInputStream fin = fs.open(path);

    if ("HFile".equals(fileType)) {
        HFile.Reader reader = HFile.createReaderFromStream(path, fs.open(path), fs.getFileStatus(path).getLen(),
                new CacheConfig(conf), conf);
        reader.loadFileInfo();
        switch (method) {

        case 0:
        case 1:
        default: {
            HFileScanner scanner = reader.getScanner(false, false);
            scanner.seekTo();
            for (long l = 0; l < rows; l++) {
                key = scanner.getKey();
                val = scanner.getValue();
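                // getKey()/getValue() return buffers whose limit() marks the
                // key/value length, so limit() counts the bytes read per row.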
                totalBytesRead += key.limit() + val.limit();
                scanner.next();
            }
        }
            break;
        }
        reader.close();
    } else if ("SequenceFile".equals(fileType)) {

        SequenceFile.Reader reader;
        reader = new SequenceFile.Reader(fs, path, new Configuration());

        if (reader.getCompressionCodec() != null) {
            printlnWithTimestamp("Compression codec class: " + reader.getCompressionCodec().getClass());
        } else
            printlnWithTimestamp("Compression codec class: " + "none");

        BytesWritable keyBsw = new BytesWritable();
        BytesWritable valBsw = new BytesWritable();

        for (long l = 0; l < rows; l++) {
            reader.next(keyBsw, valBsw);
            totalBytesRead += keyBsw.getSize() + valBsw.getSize();
        }
        reader.close();

        //TODO make a tests for other types of SequenceFile reading scenarios

    } else {
        throw new IOException("File Type not supported.");
    }

    //printlnWithTimestamp("Closing reader");
    fin.close();
    stopTime();
    //printlnWithTimestamp("Finished close");

    printlnWithTimestamp("Finished in " + getIntervalMillis() + "ms");
    printlnWithTimestamp("Data read: ");
    printlnWithTimestamp("  rate  = " + totalBytesRead / getIntervalMillis() * 1000 / 1024 / 1024 + "MB/s");
    printlnWithTimestamp("  total = " + totalBytesRead + "B");

    printlnWithTimestamp("File read: ");
    printlnWithTimestamp(
            "  rate  = " + fs.getFileStatus(path).getLen() / getIntervalMillis() * 1000 / 1024 / 1024 + "MB/s");
    printlnWithTimestamp("  total = " + fs.getFileStatus(path).getLen() + "B");

    //TODO uncomment this for final committing so test files is removed.
    //fs.delete(path, true);
}

From source file:de.rwhq.btree.InnerNode.java

/**
 * @param serializedKey
 * @param pageId
 * @param posOfKeyForInsert
 */
private void insertKeyPointerPageIdAtPosition(final byte[] serializedKey, final Integer pageId,
        final int posOfKeyForInsert) {

    final KeyStruct thisKeyStruct = new KeyStruct(posOfKeyForInsert);
    final ByteBuffer buf = rawPage().bufferForWriting(thisKeyStruct.getOffset());

    final int spaceNeededForInsert = getSizeOfPageId() + keySerializer.getSerializedLength();
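    // Shift the tail of the page (position() up to limit()) right by the
    // space needed, opening a gap for the new key and page id.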
    System.arraycopy(buf.array(), buf.position(), buf.array(), buf.position() + spaceNeededForInsert,
            buf.limit() - buf.position() - spaceNeededForInsert);

    buf.put(serializedKey);
    buf.putInt(pageId);

    setNumberOfKeys(getNumberOfKeys() + 1);
    rawPage().sync();
}