Example usage for java.nio ByteBuffer capacity

Introduction

On this page you can find usage examples for java.nio.ByteBuffer.capacity().

Prototype

public final int capacity() 

Document

Returns the capacity of this buffer.
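
A minimal sketch of the method's behavior (the class name CapacityDemo is ours): capacity() is fixed when the buffer is allocated and never changes, while position and limit move as data is written.

import java.nio.ByteBuffer;

public class CapacityDemo {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(16);
        System.out.println(buffer.capacity()); // 16
        buffer.put(new byte[4]);               // position advances to 4
        buffer.flip();                         // limit = 4, position = 0
        System.out.println(buffer.capacity()); // still 16: capacity never changes
        System.out.println(buffer.limit());    // 4
    }
}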

Usage

From source file:com.offbynull.portmapper.natpmp.NatPmpReceiver.java

/**
 * Start listening for NAT-PMP events. This method blocks until {@link #stop() } is called.
 * @param listener listener to notify of events
 * @throws IOException if socket error occurs
 * @throws NullPointerException if any argument is {@code null}
 */
public void start(NatPmpEventListener listener) throws IOException {
    Validate.notNull(listener);

    MulticastSocket socket = null;
    try {
        final InetAddress group = InetAddress.getByName("224.0.0.1"); // NOPMD
        final int port = 5350;
        final InetSocketAddress groupAddress = new InetSocketAddress(group, port);

        socket = new MulticastSocket(port);

        if (!currentSocket.compareAndSet(null, socket)) {
            IOUtils.closeQuietly(socket);
            return;
        }

        socket.setReuseAddress(true);

        Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();
        while (interfaces.hasMoreElements()) {
            NetworkInterface networkInterface = interfaces.nextElement();
            Enumeration<InetAddress> addrs = networkInterface.getInetAddresses();
            while (addrs.hasMoreElements()) { // make sure at least one IPv4 address is bound to the interface
                InetAddress addr = addrs.nextElement();

                try {
                    if (addr instanceof Inet4Address) {
                        socket.joinGroup(groupAddress, networkInterface);
                    }
                } catch (IOException ioe) { // NOPMD
                    // occurs with certain interfaces
                    // do nothing
                }
            }
        }

        ByteBuffer buffer = ByteBuffer.allocate(12);
        DatagramPacket data = new DatagramPacket(buffer.array(), buffer.capacity());

        while (true) {
            buffer.clear();
            socket.receive(data);
            buffer.position(data.getLength());
            buffer.flip();

            if (!data.getAddress().equals(gatewayAddress)) { // data isn't from our gateway, ignore
                continue;
            }

            if (buffer.remaining() != 12) { // data isn't the expected size, ignore
                continue;
            }

            int version = buffer.get(0);
            if (version != 0) { // data doesn't have the correct version, ignore
                continue;
            }

            int opcode = buffer.get(1) & 0xFF;
            if (opcode != 128) { // data doesn't have the correct op, ignore
                continue;
            }

            int resultCode = buffer.getShort(2) & 0xFFFF;
            switch (resultCode) {
            case 0:
                break;
            default:
                continue; // data doesn't have a successful result, ignore
            }

            listener.publicAddressUpdated(new ExternalAddressNatPmpResponse(buffer));
        }

    } catch (IOException ioe) {
        if (currentSocket.get() == null) {
            return; // IOException caused by interruption/stop, so return without propagating the error
        }

        throw ioe;
    } finally {
        IOUtils.closeQuietly(socket);
        currentSocket.set(null);
    }
}
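
The method above backs a DatagramPacket with the ByteBuffer's array and uses capacity() as the packet length. The same receive idiom stripped of the NAT-PMP specifics (a sketch; the class wrapper and method name are ours, the 12-byte size comes from the snippet):

import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.nio.ByteBuffer;

public final class DatagramReceiveIdiom {
    static ByteBuffer receiveOnce(DatagramSocket socket) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocate(12);
        // the packet writes directly into the buffer's backing array,
        // bounded by capacity()
        DatagramPacket data = new DatagramPacket(buffer.array(), buffer.capacity());
        socket.receive(data);
        buffer.position(data.getLength()); // record how many bytes arrived
        buffer.flip();                     // expose [0, bytesReceived) for reading
        return buffer;
    }
}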

From source file:cn.ac.ncic.mastiff.io.coding.DeltaBinaryBitPackingZigZarIntReader.java

public void ensureDecompress() throws IOException {
    org.apache.hadoop.io.compress.Decompressor decompressor = this.compressAlgo.getDecompressor();
    InputStream is = this.compressAlgo.createDecompressionStream(inBuf, decompressor, 0);
    ByteBuffer buf = ByteBuffer.allocate(decompressedSize);
    IOUtils.readFully(is, buf.array(), 0, buf.capacity());
    is.close();
    this.compressAlgo.returnDecompressor(decompressor);
    inBuf.reset(buf.array(), offset, buf.capacity());
}
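
Here capacity() doubles as the exact number of bytes to read. The same read-exactly-capacity pattern using only JDK classes (a sketch; Hadoop's IOUtils.readFully behaves like DataInputStream.readFully below, and the class and method names are ours):

import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;

public final class ReadExactly {
    public static ByteBuffer readExactly(InputStream is, int size) throws IOException {
        ByteBuffer buf = ByteBuffer.allocate(size);
        // blocks until buf.capacity() bytes have been read, or throws EOFException
        new DataInputStream(is).readFully(buf.array(), 0, buf.capacity());
        return buf;
    }
}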

From source file:jext2.SymlinkInode.java

@NotThreadSafe(useLock = true)
private void writeSlowSymlink(String link, int size) throws JExt2Exception, NoSpaceLeftOnDevice, FileTooLarge {
    ByteBuffer buf = ByteBuffer.allocate(Ext2fsDataTypes.getStringByteLength(link));
    Ext2fsDataTypes.putString(buf, link, buf.capacity(), 0);
    buf.rewind();
    writeData(buf, 0);
}
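
The buffer is sized so that capacity() equals the encoded length of the link string. The same sizing pattern with plain JDK calls (a sketch assuming UTF-8; Ext2fsDataTypes.getStringByteLength and putString are jext2-specific, and the class name here is ours):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public final class StringToBuffer {
    public static ByteBuffer encode(String link) {
        byte[] bytes = link.getBytes(StandardCharsets.UTF_8);
        ByteBuffer buf = ByteBuffer.allocate(bytes.length); // capacity() == encoded length
        buf.put(bytes);
        buf.rewind(); // position back to 0 so a consumer can read all capacity() bytes
        return buf;
    }
}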

From source file:com.linkedin.haivvreo.AvroDeserializer.java

private Object deserializeList(Object datum, Schema recordSchema, ListTypeInfo columnType)
        throws HaivvreoException {
    // Need to check the original schema to see if this is actually a Fixed.
    if (recordSchema.getType().equals(Schema.Type.FIXED)) {
        // We're faking out Hive to work through a type system impedance mismatch. Pull out the backing array and convert to a list.
        GenericData.Fixed fixed = (GenericData.Fixed) datum;
        List<Byte> asList = new ArrayList<Byte>(fixed.bytes().length);
        for (int j = 0; j < fixed.bytes().length; j++) {
            asList.add(fixed.bytes()[j]);
        }
        return asList;
    } else if (recordSchema.getType().equals(Schema.Type.BYTES)) {
        // This is going to be slow... hold on.
        ByteBuffer bb = (ByteBuffer) datum;
        List<Byte> asList = new ArrayList<Byte>(bb.capacity());
        byte[] array = bb.array();
        for (int j = 0; j < array.length; j++) {
            asList.add(array[j]);
        }
        return asList;
    } else { // An actual list, deserialize its values
        List listData = (List) datum;
        Schema listSchema = recordSchema.getElementType();
        List<Object> listContents = new ArrayList<Object>(listData.size());
        for (Object obj : listData) {
            listContents.add(worker(obj, listSchema, columnType.getListElementTypeInfo()));
        }
        return listContents;
    }
}
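
Note that bb.array() requires an array-backed buffer and ignores position and limit, which is why the loop is bounded by array.length rather than bb.capacity(). A defensive variant (our sketch, not from the Haivvreo code) copies via remaining() and works for any buffer:

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public final class ByteBufferToList {
    public static List<Byte> toList(ByteBuffer bb) {
        ByteBuffer copy = bb.duplicate(); // leaves the caller's position untouched
        List<Byte> asList = new ArrayList<Byte>(copy.remaining());
        while (copy.hasRemaining()) {
            asList.add(copy.get());
        }
        return asList;
    }
}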

From source file:cn.ac.ncic.mastiff.io.coding.DeltaBinaryArrayZigZarByteReader.java

public void ensureDecompress() throws IOException {
    //  if (compressAlgo != null) {
    org.apache.hadoop.io.compress.Decompressor decompressor = this.compressAlgo.getDecompressor();
    InputStream is = this.compressAlgo.createDecompressionStream(inBuf, decompressor, 0);
    ByteBuffer buf = ByteBuffer.allocate(decompressedSize);
    IOUtils.readFully(is, buf.array(), 0, buf.capacity());
    is.close();
    this.compressAlgo.returnDecompressor(decompressor);
    inBuf.reset(buf.array(), offset, buf.capacity());
}

From source file:com.serenegiant.media.TLMediaEncoder.java

/**
 * Read raw bit stream from the specified intermediate file.
 * @param in
 * @param header
 * @param buffer
 * @param readBuffer
 * @throws IOException
 * @throws BufferOverflowException
 */
/*package*/static ByteBuffer readStream(final DataInputStream in, final TLMediaFrameHeader header,
        ByteBuffer buffer, final byte[] readBuffer) throws IOException {

    readHeader(in, header);
    if ((buffer == null) || header.size > buffer.capacity()) {
        buffer = ByteBuffer.allocateDirect(header.size);
    }
    buffer.clear();
    final int max_bytes = Math.min(readBuffer.length, header.size);
    int read_bytes;
    for (int i = header.size; i > 0; i -= read_bytes) {
        read_bytes = in.read(readBuffer, 0, Math.min(i, max_bytes));
        if (read_bytes <= 0)
            break;
        buffer.put(readBuffer, 0, read_bytes);
    }
    buffer.flip();
    return buffer;
}
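
Because readStream reallocates whenever header.size exceeds buffer.capacity(), it may return a different buffer than the one passed in, so callers must keep the returned reference. A call-site sketch (variable names are assumptions):

// reuse the same buffer across frames; a frame larger than buffer.capacity()
// transparently replaces it with a bigger direct buffer
buffer = readStream(in, header, buffer, readBuffer);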

From source file:com.cloudera.recordbreaker.hive.borrowed.AvroDeserializer.java

private Object deserializeList(Object datum, Schema recordSchema, ListTypeInfo columnType)
        throws AvroSerdeException {
    // Need to check the original schema to see if this is actually a Fixed.
    if (recordSchema.getType().equals(Schema.Type.FIXED)) {
        // We're faking out Hive to work through a type system impedance mismatch.
        // Pull out the backing array and convert to a list.
        GenericData.Fixed fixed = (GenericData.Fixed) datum;
        List<Byte> asList = new ArrayList<Byte>(fixed.bytes().length);
        for (int j = 0; j < fixed.bytes().length; j++) {
            asList.add(fixed.bytes()[j]);
        }
        return asList;
    } else if (recordSchema.getType().equals(Schema.Type.BYTES)) {
        // This is going to be slow... hold on.
        ByteBuffer bb = (ByteBuffer) datum;
        List<Byte> asList = new ArrayList<Byte>(bb.capacity());
        byte[] array = bb.array();
        for (int j = 0; j < array.length; j++) {
            asList.add(array[j]);
        }
        return asList;
    } else { // An actual list, deserialize its values
        List listData = (List) datum;
        Schema listSchema = recordSchema.getElementType();
        List<Object> listContents = new ArrayList<Object>(listData.size());
        for (Object obj : listData) {
            listContents.add(worker(obj, listSchema, columnType.getListElementTypeInfo()));
        }
        return listContents;
    }
}

From source file:cn.ac.ncic.mastiff.io.coding.RunLengthEncodingByteReader.java

public void ensureDecompress() throws IOException {
    org.apache.hadoop.io.compress.Decompressor decompressor = this.compressAlgo.getDecompressor();
    InputStream is = this.compressAlgo.createDecompressionStream(inBuf, decompressor, 0);
    ByteBuffer buf = ByteBuffer.allocate(decompressedSize);
    IOUtils.readFully(is, buf.array(), 0, buf.capacity());
    is.close();
    this.compressAlgo.returnDecompressor(decompressor);
    inBuf.reset(buf.array(), offset, buf.capacity());
}

From source file:org.apache.hadoop.ipc.RpcSSLEngineAbstr.java

private ByteBuffer enlargeBuffer(ByteBuffer buffer, int sessionProposedCapacity) {
    if (sessionProposedCapacity > buffer.capacity()) {
        return ByteBuffer.allocate(sessionProposedCapacity);
    } else {
        return ByteBuffer.allocate(buffer.capacity() * 2);
    }
}
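
Growing like this is the usual response to BUFFER_OVERFLOW from SSLEngine.wrap/unwrap. A call-site sketch, written as a sibling method in the same class (our assumption of how enlargeBuffer is used, not taken from the Hadoop source):

ByteBuffer unwrapOnce(SSLEngine engine, ByteBuffer netBuffer, ByteBuffer appBuffer) throws SSLException {
    SSLEngineResult result = engine.unwrap(netBuffer, appBuffer);
    if (result.getStatus() == SSLEngineResult.Status.BUFFER_OVERFLOW) {
        // the application buffer is too small: grow it and let the caller retry
        appBuffer = enlargeBuffer(appBuffer, engine.getSession().getApplicationBufferSize());
    }
    return appBuffer;
}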

From source file:org.cloudata.core.commitlog.pipe.BufferPool.java

public void returnBuffer(ByteBuffer[] bufferArray) {
    synchronized (bufferMap) {
        for (ByteBuffer buf : bufferArray) {
            buf.clear();
            TreeSet<PoolEntry> entrySet = bufferMap.get(buf.capacity());
            if (entrySet == null) {
                entrySet = new TreeSet<PoolEntry>();
                bufferMap.put(buf.capacity(), entrySet);
            }
            entrySet.add(new PoolEntry(buf));
            poolMonitor.increaseBuffered(buf.capacity());
        }
    }
}
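
The pool keys buffers by capacity() so a request can be served with an exact-size match. A complementary borrow-side sketch (our assumption; Cloudata's actual lookup method is not shown in this snippet):

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Map;

public final class SimpleBufferPool {
    private final Map<Integer, Deque<ByteBuffer>> pool = new HashMap<Integer, Deque<ByteBuffer>>();

    public synchronized ByteBuffer take(int capacity) {
        Deque<ByteBuffer> byCapacity = pool.get(capacity);
        ByteBuffer buf = (byCapacity == null) ? null : byCapacity.pollFirst();
        return buf != null ? buf : ByteBuffer.allocate(capacity);
    }

    public synchronized void give(ByteBuffer buf) {
        buf.clear(); // reset position/limit before pooling, as returnBuffer does above
        pool.computeIfAbsent(buf.capacity(), k -> new ArrayDeque<ByteBuffer>()).addLast(buf);
    }
}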