Example usage for java.nio ByteBuffer capacity

List of usage examples for java.nio ByteBuffer capacity

Introduction

On this page you can find example usages of java.nio ByteBuffer's capacity() method.

Prototype

public final int capacity() 

Source Link

Document

Returns the capacity of this buffer.

Usage

From source file:org.getspout.spoutapi.packet.PacketAddonData.java

public PacketAddonData(AddonPacket packet) {
    // Serializes the given addon packet into a raw byte array and decides
    // whether the payload should be compressed before sending.
    this.packet = packet;
    SpoutOutputStream stream = new SpoutOutputStream();
    try {
        packet.write(stream);
    } catch (Exception e) {
        // Surface serialization failures instead of silently swallowing them
        // (consistent with org.getspout.spout.packet.PacketAddonData). A failed
        // write simply leaves the buffer empty, producing an empty payload.
        e.printStackTrace();
    }
    ByteBuffer buffer = stream.getRawBuffer();
    // Bytes written so far = total capacity minus the space still remaining.
    data = new byte[buffer.capacity() - buffer.remaining()];
    System.arraycopy(buffer.array(), 0, data, 0, data.length);
    // Payloads larger than 512 bytes are worth compressing.
    needsCompression = data.length > 512;
}

From source file:org.apache.hadoop.hbase.io.BoundedByteBufferPool.java

public void putBuffer(ByteBuffer bb) {
    // Returns a buffer to the reservoir and updates the running-average
    // capacity. Buffers larger than the cache cap are simply dropped so the
    // pool never retains oversized allocations.
    if (bb.capacity() > this.maxByteBufferSizeToCache) {
        return;
    }
    boolean accepted;
    int observedAverage = 0;
    lock.lock();
    try {
        accepted = this.buffers.offer(bb);
        if (accepted) {
            this.totalReservoirCapacity += bb.capacity();
            // size() is at least 1 here because the offer just succeeded.
            observedAverage = this.totalReservoirCapacity / this.buffers.size();
        }
    } finally {
        lock.unlock();
    }
    if (accepted) {
        // Only ratchet the running average upward, and never past the cap.
        if (observedAverage > this.runningAverage && observedAverage < this.maxByteBufferSizeToCache) {
            this.runningAverage = observedAverage;
        }
    } else {
        LOG.warn("At capacity: " + this.buffers.size());
    }
}

From source file:org.apache.tajo.storage.thirdparty.orc.ByteBufferAllocatorPool.java

public void putBuffer(ByteBuffer buffer) {
    // Stashes the buffer back into the tree matching its direct-ness. Keys
    // combine capacity with an ever-increasing generation counter so multiple
    // buffers of the same capacity can coexist in the map.
    TreeMap<Key, ByteBuffer> tree = getBufferTree(buffer.isDirect());
    for (;;) {
        Key candidate = new Key(buffer.capacity(), currentGeneration++);
        if (tree.containsKey(candidate)) {
            // Key collision is possible only if generations repeat; bump the
            // generation and try again.
            continue;
        }
        tree.put(candidate, buffer);
        return;
    }
}

From source file:net.jradius.freeradius.FreeRadiusListener.java

public JRadiusEvent parseRequest(ListenerRequest listenerRequest, ByteBuffer notUsed, InputStream in)
        throws Exception {
    // Reads one framed FreeRadius request from the stream into a pooled
    // request object. Frame layout as read below: [u32 frameLen (includes its
    // own 4 bytes)][u32 nameLen][name][u8 type][u8 packetCount][packets]
    // [u32 configLen][config attributes].
    FreeRadiusRequest request = (FreeRadiusRequest) requestObjectPool.borrowObject();
    request.setBorrowedFromPool(requestObjectPool);

    // The leading length field counts itself; subtract its 4 bytes.
    int totalLength = (int) (RadiusFormat.readUnsignedInt(in) - 4);
    int readOffset = 0;

    ByteBuffer buffer = request.buffer_in;

    // Reject frames that are negative or too large for the pooled buffer.
    if (totalLength < 0 || totalLength > buffer.capacity()) {
        return null;
    }

    buffer.clear();
    byte[] payload = buffer.array();

    // InputStream.read may return fewer bytes than requested; loop until the
    // whole frame is in, or bail out on end-of-stream.
    while (readOffset < totalLength) {
        int result = in.read(payload, readOffset, totalLength - readOffset);
        if (result < 0)
            return null;
        readOffset += result;
    }

    buffer.limit(totalLength);

    long nameLength = RadiusFormat.getUnsignedInt(buffer);

    // An implausible name length means the peer closed or corrupted the
    // keep-alive connection.
    if (nameLength < 0 || nameLength > 1024) {
        throw new RadiusException("KeepAlive rlm_jradius connection has been closed");
    }

    byte[] nameBytes = new byte[(int) nameLength];
    buffer.get(nameBytes);

    int messageType = RadiusFormat.getUnsignedByte(buffer);
    int packetCount = RadiusFormat.getUnsignedByte(buffer);

    RadiusPacket rp[] = PacketFactory.parse(buffer, packetCount);

    long length = RadiusFormat.getUnsignedInt(buffer);

    // The declared attribute-list length must fit in what remains of the frame.
    if (length > buffer.remaining()) {
        throw new RadiusException("bad length");
    }

    AttributeList configItems = new AttributeList();
    format.unpackAttributes(configItems, buffer, (int) length, true);

    // NOTE(review): new String(nameBytes) uses the platform default charset —
    // presumably the peer sends ASCII/UTF-8; confirm against the sender.
    request.setConfigItems(configItems);
    request.setSender(new String(nameBytes));
    request.setType(messageType);
    request.setPackets(rp);

    return request;
}

From source file:org.apache.hadoop.io.ElasticByteBufferPool.java

@Override
@Override
public synchronized void putBuffer(ByteBuffer buffer) {
    // Returns a buffer to the pool, indexed by (capacity, insertion time).
    TreeMap<Key, ByteBuffer> tree = getBufferTree(buffer.isDirect());
    Key candidate = new Key(buffer.capacity(), System.nanoTime());
    // nanoTime keys are nearly always unique; on the rare collision (e.g. a
    // clock with coarse granularity) retry with a fresh timestamp until the
    // key is unused.
    while (tree.containsKey(candidate)) {
        candidate = new Key(buffer.capacity(), System.nanoTime());
    }
    tree.put(candidate, buffer);
}

From source file:voldemort.common.nio.ByteBufferBackedInputStream.java

public ByteBufferBackedInputStream(ByteBuffer buffer, MutableLong sizeTracker) {
    // Wraps the given buffer and records its capacity against the shared
    // size tracker; the tracker update is skipped when no buffer is supplied.
    this.buffer = buffer;
    this.sizeTracker = sizeTracker;
    if (buffer != null) {
        this.sizeTracker.add(buffer.capacity());
    }
}

From source file:org.apache.hadoop.ipc.ServerRpcSSLEngineImpl.java

private ByteBuffer enlargeUnwrappedBuffer(ByteBuffer buffer, byte missedByte) {
    // Grows the unwrapped-data buffer by doubling its capacity (capped at
    // maxUnWrappedDataLength), copying the existing contents plus the single
    // byte that did not fit into the old buffer.
    buffer.flip();
    ByteBuffer newBuffer = ByteBuffer.allocate(Math.min(buffer.capacity() * 2, maxUnWrappedDataLength));
    // NOTE(review): if buffer.capacity() already equals maxUnWrappedDataLength,
    // the new buffer is the same size and the puts below would throw
    // BufferOverflowException — callers presumably stay under the cap; verify.
    newBuffer.put(buffer);
    newBuffer.put(missedByte);
    // Removed the dead "buffer = null" assignment: nulling a local parameter
    // has no effect on the caller and does not help garbage collection here.
    return newBuffer;
}

From source file:com.act.lcms.v2.fullindex.BuilderTest.java

@Test
public void testAppendOrRealloc() throws Exception {
    // Exercises Utils.appendOrRealloc: appends that fit leave the capacity
    // unchanged, while appends that overflow double the capacity.
    ByteBuffer dest = ByteBuffer.allocate(4);
    assertEquals("Initial buffer capacity matches expected", 4, dest.capacity());

    dest = Utils.appendOrRealloc(dest, ByteBuffer.wrap(new byte[] { 'a', 'b', 'c', 'd' })); // No need to flip w/ wrap().
    checkCapacityAndPosition(dest, "Post-append (fits) buffer capacity matches expected", 4,
            "Post-append (fits) buffer position matches expected", 4);

    dest = Utils.appendOrRealloc(dest, ByteBuffer.wrap(new byte[] { 'e' }));
    checkCapacityAndPosition(dest, "Post-append (too large) buffer capacity has doubled", 8,
            "Post-append (too large) buffer position matches expected", 5);

    dest = Utils.appendOrRealloc(dest, ByteBuffer.wrap(new byte[] { 'f', 'g', 'h' }));
    checkCapacityAndPosition(dest, "Post-append (fits) buffer capacity matches expected", 8,
            "Post-append (fits) buffer position matches expected", 8);

    dest = Utils.appendOrRealloc(dest, ByteBuffer.wrap(new byte[] { 'i' }));
    checkCapacityAndPosition(dest, "Post-append (too large) buffer capacity has doubled", 16,
            "Post-append (too large) buffer position matches expected", 9);
}

/** Asserts a buffer's capacity and position with the supplied failure messages. */
private static void checkCapacityAndPosition(ByteBuffer buffer, String capacityMessage, int expectedCapacity,
        String positionMessage, int expectedPosition) {
    assertEquals(capacityMessage, expectedCapacity, buffer.capacity());
    assertEquals(positionMessage, expectedPosition, buffer.position());
}

From source file:org.getspout.spout.packet.PacketAddonData.java

public PacketAddonData(AddonPacket packet) {
    // Serializes the addon packet inside the client sandbox and captures the
    // written bytes, flagging large payloads for compression.
    this.packet = packet;
    SpoutOutputStream stream = new SpoutOutputStream();

    // Run the packet's write() under the sandbox, restoring the previous
    // sandbox state afterwards only if it was not already enabled.
    boolean sandboxed = SpoutClient.isSandboxed();
    SpoutClient.enableSandbox();
    try {
        packet.write(stream);
    } catch (Exception e) {
        e.printStackTrace();
    }
    if (!sandboxed) {
        SpoutClient.disableSandbox();
    }

    ByteBuffer buffer = stream.getRawBuffer();
    // Bytes written so far = capacity minus the space still remaining.
    // Bulk-copy from the backing array instead of reading one byte at a time,
    // consistent with org.getspout.spoutapi.packet.PacketAddonData.
    byte[] raw = new byte[buffer.capacity() - buffer.remaining()];
    System.arraycopy(buffer.array(), 0, raw, 0, raw.length);
    data = raw;
    // Payloads larger than 512 bytes are worth compressing.
    needsCompression = data.length > 512;
}

From source file:pl.allegro.tech.hermes.consumers.consumer.sender.http.ByteBufferEntityTest.java

@Test
public void testBasics() throws Exception {
    final ByteBuffer bytes = ByteBuffer.wrap("Message content".getBytes(Consts.ASCII));
    final ByteBufferEntity httpentity = new ByteBufferEntity(bytes);

    Assert.assertEquals(bytes.capacity(), httpentity.getContentLength());
    Assert.assertNotNull(httpentity.getContent());
    Assert.assertTrue(httpentity.isRepeatable());
    Assert.assertFalse(httpentity.isStreaming());
}