Example usage for io.netty.buffer ByteBuf getInt

List of usage examples for io.netty.buffer ByteBuf getInt

Introduction

This page collects usage examples for io.netty.buffer ByteBuf getInt.

Prototype

public abstract int getInt(int index);

Document

Gets a 32-bit integer at the specified absolute index in this buffer.
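
As a quick, self-contained illustration (a minimal sketch, not taken from the listings below; the class name GetIntExample is purely illustrative), getInt reads at an absolute offset and, unlike readInt, does not move the reader index:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class GetIntExample {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(8);
        buf.writeInt(42);                        // writerIndex advances to 4
        int value = buf.getInt(0);               // absolute read at index 0, returns 42
        System.out.println(value);               // 42
        System.out.println(buf.readerIndex());   // still 0: getInt does not consume bytes
        buf.release();
    }
}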

Usage

From source file:io.nebo.thrift.DefaultThriftFrameDecoder.java

License:Apache License

protected ByteBuf tryDecodeFramedMessage(ChannelHandlerContext ctx, Channel channel, ByteBuf buffer,
        boolean stripFraming) {
    // Framed messages are prefixed by the size of the frame (which doesn't include the
    // framing itself).

    int messageStartReaderIndex = buffer.readerIndex();
    int messageContentsOffset;

    if (stripFraming) {
        messageContentsOffset = messageStartReaderIndex + MESSAGE_FRAME_SIZE;
    } else {
        messageContentsOffset = messageStartReaderIndex;
    }

    // The full message is larger by the size of the frame size prefix
    int messageLength = buffer.getInt(messageStartReaderIndex) + MESSAGE_FRAME_SIZE;
    int messageContentsLength = messageStartReaderIndex + messageLength - messageContentsOffset;

    if (messageContentsLength > maxFrameSize) {
        throw new TooLongFrameException("Maximum frame size of " + maxFrameSize + " exceeded");
    }

    if (messageLength == 0) {
        // Zero-sized frame: just ignore it and return nothing
        buffer.readerIndex(messageContentsOffset);
        return null;
    } else if (buffer.readableBytes() < messageLength) {
        // Full message isn't available yet, return nothing for now
        return null;
    } else {
        // Full message is available, return it
        ByteBuf messageBuffer = extractFrame(buffer, messageContentsOffset, messageContentsLength);
        buffer.readerIndex(messageStartReaderIndex + messageLength);
        return messageBuffer;
    }
}
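
The decoder above peeks at the 4-byte length prefix with getInt, so the reader index only advances once a complete frame is available; until then the method just returns null and waits for more bytes. For context, here is a stripped-down sketch of the same peek-before-consume pattern inside a Netty ByteToMessageDecoder (the class name LengthPrefixPeeker is hypothetical and not part of the source above):

import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;

import java.util.List;

public class LengthPrefixPeeker extends ByteToMessageDecoder {
    @Override
    protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
        if (in.readableBytes() < 4) {
            return;                                     // length prefix not fully received yet
        }
        int frameLength = in.getInt(in.readerIndex());  // peek, do not consume
        if (in.readableBytes() < 4 + frameLength) {
            return;                                     // wait for the rest of the frame
        }
        in.skipBytes(4);                                // consume the prefix...
        out.add(in.readRetainedSlice(frameLength));     // ...and emit the frame body
    }
}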

From source file:io.reactiverse.pgclient.impl.codec.DataTypeCodec.java

License:Apache License

private static Integer binaryDecodeINT4(int index, int len, ByteBuf buff) {
    return buff.getInt(index);
}

From source file:io.reactiverse.pgclient.impl.codec.DataTypeCodec.java

License:Apache License

private static LocalDate binaryDecodeDATE(int index, int len, ByteBuf buff) {
    return LOCAL_DATE_EPOCH.plus(buff.getInt(index), ChronoUnit.DAYS);
}

From source file:io.reactiverse.pgclient.impl.codec.DataTypeCodec.java

License:Apache License

private static OffsetTime binaryDecodeTIMETZ(int index, int len, ByteBuf buff) {
    // micros to nanos
    return OffsetTime.of(LocalTime.ofNanoOfDay(buff.getLong(index) * 1000),
            // zone offset in seconds (should we change it to UTC ?)
            ZoneOffset.ofTotalSeconds(-buff.getInt(index + 8)));
}
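
Here getLong(index) reads the time of day in microseconds (multiplied by 1000 to get nanoseconds), and getInt(index + 8) reads the zone offset in seconds. The negation is needed because PostgreSQL's binary timetz format encodes the offset with the opposite sign convention (positive values west of UTC) to java.time's ZoneOffset (positive values east of UTC).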

From source file:io.reactiverse.pgclient.impl.codec.DataTypeCodec.java

License:Apache License

private static Interval binaryDecodeINTERVAL(int index, int len, ByteBuf buff) {
    Duration duration = Duration.of(buff.getLong(index), ChronoUnit.MICROS);
    final long hours = duration.toHours();
    duration = duration.minusHours(hours);
    final long minutes = duration.toMinutes();
    duration = duration.minusMinutes(minutes);
    final long seconds = NANOSECONDS.toSeconds(duration.toNanos());
    duration = duration.minusSeconds(seconds);
    final long microseconds = NANOSECONDS.toMicros(duration.toNanos());
    int days = buff.getInt(index + 8);
    int months = buff.getInt(index + 12);
    Period monthYear = Period.of(0, months, days).normalized();
    return new Interval(monthYear.getYears(), monthYear.getMonths(), monthYear.getDays(), (int) hours,
            (int) minutes, (int) seconds, (int) microseconds);
}
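
The binary interval value is laid out as an 8-byte microsecond count for the time part, followed by a 4-byte day count at offset 8 and a 4-byte month count at offset 12, which is what the getLong call and the two getInt calls pick apart before the result is reassembled into calendar fields.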

From source file:io.reactiverse.pgclient.impl.codec.DataTypeCodec.java

License:Apache License

private static <T> T[] binaryDecodeArray(IntFunction<T[]> supplier, DataType type, int index, int len,
        ByteBuf buff) {
    if (len == 12) {
        return supplier.apply(0);
    }
    int dim = buff.getInt(index); // read ndim
    index += 4;
    index += 4; // skip dataoffset
    index += 4; // skip elemtype
    int length = buff.getInt(index); // read dimensions
    index += 4;
    index += 4; // skip lower bnds
    if (dim != 1) {
        logger.warn("Only arrays of dimension 1 are supported");
        return null;
    }
    T[] array = supplier.apply(length);
    for (int i = 0; i < array.length; i++) {
        int l = buff.getInt(index);
        index += 4;
        if (l != -1) {
            array[i] = (T) decodeBinary(type, index, l, buff);
            index += l;
        }
    }
    return array;
}
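
The index arithmetic follows PostgreSQL's binary array layout: a 12-byte header of ndim, dataoffset/flags and element OID (4 bytes each), then per dimension a 4-byte length and a 4-byte lower bound, and finally one 4-byte length per element (-1 marking SQL NULL) followed by that element's payload. A payload of exactly 12 bytes is header-only, which is why len == 12 is treated as an empty array; only one-dimensional arrays are handled here.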

From source file:net.tomp2p.storage.Data.java

License:Apache License

/**
 * Reads the header. Does not modify the buffer positions if header could
 * not be fully read.
 * 
 * Header format:
 * <pre>
 * 1 byte - header
 * 1 or 4 bytes - length
 * 4 or 0 bytes - ttl (hasTTL)
 * 1 or 0 bytes - number of basedon keys (hasBasedOn)
 * n x 20 bytes - basedon keys (hasBasedOn, number of basedon keys)
 * 2 or 0 bytes - length of public key (hasPublicKey)
 * n bytes - public key (hasPublicKey, length of public key)
 * </pre>
 * 
 * 
 * @param buf
 *            The buffer to read from
 * @return The data object, may be partially filled
 */
public static Data decodeHeader(final ByteBuf buf, final SignatureFactory signatureFactory) {
    // 2 is the smallest packet size: one byte would be enough to decode the
    // header, but we always need a second byte as well, so we wait until at
    // least 2 bytes are readable.
    if (buf.readableBytes() < Utils.BYTE_BYTE_SIZE + Utils.BYTE_BYTE_SIZE) {
        return null;
    }
    final int header = buf.getUnsignedByte(buf.readerIndex());
    final Data.Type type = Data.type(header);

    //Data length
    final int length;
    final int indexLength = Utils.BYTE_BYTE_SIZE;
    final int indexTTL;
    switch (type) {
    case SMALL:
        length = buf.getUnsignedByte(buf.readerIndex() + indexLength);
        indexTTL = indexLength + Utils.BYTE_BYTE_SIZE;
        break;
    case LARGE:
        indexTTL = indexLength + Utils.INTEGER_BYTE_SIZE;
        if (buf.readableBytes() < indexTTL) {
            return null;
        }
        length = buf.getInt(buf.readerIndex() + indexLength);
        break;
    default:
        throw new IllegalArgumentException("unknown type");
    }

    //TTL
    final int ttl;
    final int indexBasedOnNr;
    if (hasTTL(header)) {
        indexBasedOnNr = indexTTL + Utils.INTEGER_BYTE_SIZE;
        if (buf.readableBytes() < indexBasedOnNr) {
            return null;
        }
        ttl = buf.getInt(buf.readerIndex() + indexTTL);
    } else {
        ttl = -1;
        indexBasedOnNr = indexTTL;
    }

    //Nr BasedOn + basedon
    final int numBasedOn;
    final int indexPublicKeySize;
    final int indexBasedOn;
    final Set<Number160> basedOn = new HashSet<Number160>();
    if (hasBasedOn(header)) {
        // get # of based on keys
        indexBasedOn = indexBasedOnNr + Utils.BYTE_BYTE_SIZE;
        if (buf.readableBytes() < indexBasedOn) {
            return null;
        }
        numBasedOn = buf.getUnsignedByte(buf.readerIndex() + indexBasedOnNr) + 1;
        indexPublicKeySize = indexBasedOn + (numBasedOn * Number160.BYTE_ARRAY_SIZE);
        if (buf.readableBytes() < indexPublicKeySize) {
            return null;
        }
        //get basedon
        int index = buf.readerIndex() + indexBasedOnNr + Utils.BYTE_BYTE_SIZE;
        final byte[] me = new byte[Number160.BYTE_ARRAY_SIZE];
        for (int i = 0; i < numBasedOn; i++) {
            buf.getBytes(index, me);
            index += Number160.BYTE_ARRAY_SIZE;
            basedOn.add(new Number160(me));
        }

    } else {
        // no based on keys
        indexPublicKeySize = indexBasedOnNr;
        numBasedOn = 0;
    }

    //public key and size
    final int publicKeySize;
    final int indexPublicKey;
    final int indexEnd;
    final PublicKey publicKey;
    if (hasPublicKey(header)) {
        indexPublicKey = indexPublicKeySize + Utils.SHORT_BYTE_SIZE;
        if (buf.readableBytes() < indexPublicKey) {
            return null;
        }
        publicKeySize = buf.getUnsignedShort(buf.readerIndex() + indexPublicKeySize);
        indexEnd = indexPublicKey + publicKeySize;
        if (buf.readableBytes() < indexEnd) {
            return null;
        }
        //get public key
        buf.skipBytes(indexPublicKeySize);
        publicKey = signatureFactory.decodePublicKey(buf);
    } else {
        publicKeySize = 0;
        indexPublicKey = indexPublicKeySize;
        buf.skipBytes(indexPublicKey);
        publicKey = null;
    }

    //now we have read the header and the length
    final Data data = new Data(header, length);
    data.ttlSeconds = ttl;
    data.basedOnSet = basedOn;
    data.publicKey = publicKey;
    return data;
}
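
All of the header fields are read with absolute getters at buf.readerIndex() plus a running offset, and the method returns null whenever readableBytes() falls short of the next offset. The reader index is only moved (via skipBytes) once the complete header is known to be present, which is what the Javadoc means by not modifying the buffer positions on a partial read.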

From source file:org.apache.bookkeeper.bookie.storage.ldb.ReadCacheTest.java

License:Apache License

@Test
public void multipleSegments() {
    // Test with multiple smaller segments
    ReadCache cache = new ReadCache(UnpooledByteBufAllocator.DEFAULT, 10 * 1024, 2 * 1024);

    assertEquals(0, cache.count());
    assertEquals(0, cache.size());

    for (int i = 0; i < 10; i++) {
        ByteBuf entry = Unpooled.wrappedBuffer(new byte[1024]);
        entry.setInt(0, i);
        cache.put(1, i, entry);
    }

    for (int i = 0; i < 10; i++) {
        ByteBuf res = cache.get(1, i);
        assertEquals(1, res.refCnt());

        assertEquals(1024, res.readableBytes());
        assertEquals(i, res.getInt(0));
    }

    assertEquals(10, cache.count());
    assertEquals(10 * 1024, cache.size());

    // Putting one more entry, should trigger the 1st segment rollover
    ByteBuf entry = Unpooled.wrappedBuffer(new byte[1024]);
    cache.put(2, 0, entry);

    assertEquals(9, cache.count());
    assertEquals(9 * 1024, cache.size());

    cache.close();
}

From source file:org.apache.distributedlog.LogRecordSet.java

License:Apache License

public static int numRecords(LogRecord record) throws IOException {
    checkArgument(record.isRecordSet(), "record is not a recordset");
    ByteBuf buffer = record.getPayloadBuf();
    int metadata = buffer.getInt(METADATA_OFFSET);
    int version = (metadata & METADATA_VERSION_MASK);
    if (version != VERSION) {
        throw new IOException(String.format("Version mismatch while reading. Received: %d," + " Required: %d",
                version, VERSION));
    }
    return buffer.getInt(COUNT_OFFSET);
}

From source file:org.apache.drill.common.util.DecimalUtility.java

License:Apache License

public static BigDecimal getBigDecimalFromByteBuf(ByteBuf data, int startIndex, int nDecimalDigits, int scale,
        boolean truncateScale) {

    // For sparse decimal type we have padded zeroes at the end, strip them while converting to BigDecimal.
    int actualDigits;

    // Initialize the BigDecimal, first digit in the ByteBuf has the sign so mask it out
    BigInteger decimalDigits = BigInteger.valueOf((data.getInt(startIndex)) & 0x7FFFFFFF);

    BigInteger base = BigInteger.valueOf(DIGITS_BASE);

    for (int i = 1; i < nDecimalDigits; i++) {

        BigInteger temp = BigInteger.valueOf(data.getInt(startIndex + (i * integerSize)));
        decimalDigits = decimalDigits.multiply(base);
        decimalDigits = decimalDigits.add(temp);
    }

    // Truncate any additional padding we might have added
    if (truncateScale && scale > 0 && (actualDigits = scale % MAX_DIGITS) != 0) {
        BigInteger truncate = BigInteger.valueOf((int) Math.pow(10, (MAX_DIGITS - actualDigits)));
        decimalDigits = decimalDigits.divide(truncate);
    }

    // set the sign
    if ((data.getInt(startIndex) & 0x80000000) != 0) {
        decimalDigits = decimalDigits.negate();
    }

    BigDecimal decimal = new BigDecimal(decimalDigits, scale);

    return decimal;
}
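
In this sparse decimal representation the magnitude is stored as nDecimalDigits 32-bit words, each holding one digit group in base DIGITS_BASE, with the sign carried in the most significant bit of the first word. That is why the first word is masked with 0x7FFFFFFF before seeding the BigInteger, the loop multiplies by DIGITS_BASE and adds each subsequent word, and the 0x80000000 test at the end negates the result. For example, assuming DIGITS_BASE is 10^9, the two words 5 and 123456789 combine to a magnitude of 5 * 10^9 + 123456789 = 5123456789.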