Example usage for io.netty.buffer ByteBuf setInt

List of usage examples for io.netty.buffer ByteBuf setInt

Introduction

In this page you can find the example usage for io.netty.buffer ByteBuf setInt.

Prototype

public abstract ByteBuf setInt(int index, int value);

Source Link

Document

Sets the specified 32-bit integer at the specified absolute index in this buffer.

Usage

From source file:org.apache.arrow.memory.TestEndianess.java

License:Apache License

/**
 * Verifies that Arrow buffers store integers little-endian: writing the
 * 32-bit value 35 at offset 0 must place the low-order byte (35) first,
 * followed by three zero bytes.
 */
@Test
public void testLittleEndian() {
    final BufferAllocator a = new RootAllocator(10000);
    final ByteBuf b = a.buffer(4);
    b.setInt(0, 35);
    // JUnit convention is assertEquals(expected, actual); the original had
    // the arguments swapped, which produces misleading failure messages.
    assertEquals(35, b.getByte(0));
    assertEquals(0, b.getByte(1));
    assertEquals(0, b.getByte(2));
    assertEquals(0, b.getByte(3));
    b.release();
    a.close();
}

From source file:org.apache.bookkeeper.bookie.storage.ldb.ReadCacheTest.java

License:Apache License

/**
 * Exercises ReadCache rollover across multiple small segments: a 10 KB cache
 * split into 2 KB segments holds ten 1 KB entries; inserting an eleventh
 * rolls the oldest segment over, dropping one cached entry.
 */
@Test
public void multipleSegments() {
    final int entrySize = 1024;
    final int entryCount = 10;
    ReadCache cache = new ReadCache(UnpooledByteBufAllocator.DEFAULT, 10 * 1024, 2 * 1024);

    // A freshly created cache is empty.
    assertEquals(0, cache.count());
    assertEquals(0, cache.size());

    for (int entryId = 0; entryId < entryCount; entryId++) {
        ByteBuf payload = Unpooled.wrappedBuffer(new byte[entrySize]);
        payload.setInt(0, entryId);
        cache.put(1, entryId, payload);
    }

    for (int entryId = 0; entryId < entryCount; entryId++) {
        ByteBuf cached = cache.get(1, entryId);
        assertEquals(1, cached.refCnt());

        assertEquals(entrySize, cached.readableBytes());
        assertEquals(entryId, cached.getInt(0));
    }

    assertEquals(entryCount, cache.count());
    assertEquals(entryCount * entrySize, cache.size());

    // One more entry triggers rollover of the first segment.
    cache.put(2, 0, Unpooled.wrappedBuffer(new byte[entrySize]));

    assertEquals(9, cache.count());
    assertEquals(9 * entrySize, cache.size());

    cache.close();
}

From source file:org.apache.distributedlog.EnvelopedEntryWriter.java

License:Apache License

/**
 * Seals the accumulated entry buffer for transmission.
 *
 * <p>When enveloping is disabled the staging buffer is returned as-is
 * (retained). Otherwise the envelope header — version, flags, decompressed
 * and compressed sizes — is stamped in place; when a compression codec is
 * configured, the payload after the header is compressed first and the
 * header is written into the compressed copy instead.
 *
 * @return a retained buffer ready to hand off for transmit
 */
private ByteBuf finalizeBuffer() {
    if (!envelopeBeforeTransmit) {
        // No envelope requested: hand the caller a retained reference.
        return buffer.retain();
    }

    final int payloadOffset = HEADER_LENGTH;
    final int payloadLen = buffer.readableBytes() - HEADER_LENGTH;

    if (Type.NONE == codec) {
        // Uncompressed: stamp the header directly into the staging buffer.
        buffer.setByte(VERSION_OFFSET, CURRENT_VERSION);
        buffer.setInt(FLAGS_OFFSET, flags);
        // Without a codec, compressed and decompressed sizes are equal.
        buffer.setInt(DECOMPRESSED_SIZE_OFFSET, payloadLen);
        buffer.setInt(COMPRESSED_SIZE_OFFSET, payloadLen);
        return buffer.retain();
    }

    // Compress the payload (header excluded), then stamp the header into
    // the newly produced buffer.
    CompressionCodec compressor = CompressionUtils.getCompressionCodec(codec);
    ByteBuf rawPayload = buffer.slice(payloadOffset, payloadLen);
    ByteBuf envelope = compressor.compress(rawPayload, HEADER_LENGTH);
    envelope.setByte(VERSION_OFFSET, CURRENT_VERSION);
    envelope.setInt(FLAGS_OFFSET, flags);
    envelope.setInt(DECOMPRESSED_SIZE_OFFSET, payloadLen);
    envelope.setInt(COMPRESSED_SIZE_OFFSET, envelope.readableBytes() - HEADER_LENGTH);
    return envelope;
}

From source file:org.apache.distributedlog.EnvelopedRecordSetWriter.java

License:Apache License

/**
 * Finalizes the record-set buffer: fills in the header (count plus
 * decompressed/compressed sizes) and, when a codec is configured,
 * compresses the payload and writes the header — including the metadata
 * word — into the compressed copy.
 *
 * <p>NOTE(review): the uncompressed path does not write METADATA_OFFSET —
 * presumably it was written when the staging buffer was initialized;
 * confirm against the constructor.
 *
 * @return a retained (uncompressed) or newly produced (compressed) buffer
 *         whose header reflects the final payload
 */
ByteBuf createBuffer() {
    final int payloadOffset = HEADER_LEN;
    final int payloadLen = buffer.readableBytes() - HEADER_LEN;

    if (Type.NONE.code() == codecCode) {
        // No compression: header fields go straight into the staging buffer.
        buffer.setInt(COUNT_OFFSET, count);
        buffer.setInt(DECOMPRESSED_SIZE_OFFSET, payloadLen);
        buffer.setInt(COMPRESSED_SIZE_OFFSET, payloadLen);
        return buffer.retain();
    }

    // Compress everything after the header, then write the header fields
    // into the compressed buffer.
    CompressionCodec compressor = CompressionUtils.getCompressionCodec(codec);
    ByteBuf rawPayload = buffer.slice(payloadOffset, payloadLen);
    ByteBuf sealed = compressor.compress(rawPayload, HEADER_LEN);
    sealed.setInt(METADATA_OFFSET, metadata);
    sealed.setInt(COUNT_OFFSET, count);
    sealed.setInt(DECOMPRESSED_SIZE_OFFSET, payloadLen);
    sealed.setInt(COMPRESSED_SIZE_OFFSET, sealed.readableBytes() - HEADER_LEN);

    return sealed;
}

From source file:org.apache.drill.common.util.DecimalUtility.java

License:Apache License

/**
 * Encodes {@code input} into Drill's "sparse" decimal representation inside
 * {@code data}: {@code nDecimalDigits} 32-bit words starting at byte offset
 * {@code startIndex}, each word holding one base-1,000,000,000 digit (up to
 * MAX_DIGITS decimal digits). Integer digits fill the leading words, the
 * scaled fractional digits fill the trailing words, and a negative sign is
 * recorded in the most-significant bit of the first word.
 *
 * @param input          value to encode; rounded to {@code scale} fractional
 *                       digits with HALF_UP before encoding
 * @param data           destination buffer, written via absolute setInt calls
 *                       (reader/writer indices are untouched)
 * @param startIndex     absolute byte offset of the first digit word
 * @param scale          number of fractional decimal digits to keep
 * @param precision      unused in this method body — presumably validated by
 *                       the caller; TODO confirm
 * @param nDecimalDigits number of digit words in the layout; assumes the
 *                       integer part fits in nDecimalDigits - roundUp(scale)
 *                       words (destIndex underflow is not checked here —
 *                       TODO confirm callers validate precision)
 */
public static void getSparseFromBigDecimal(BigDecimal input, ByteBuf data, int startIndex, int scale,
        int precision, int nDecimalDigits) {

    // Zero every digit word up front so untouched slots read as 0.
    for (int i = 0; i < nDecimalDigits; i++) {
        data.setInt(startIndex + (i * integerSize), 0);
    }

    boolean sign = false;

    if (input.signum() == -1) {
        // Work with the absolute value; the sign bit is re-applied at the end.
        sign = true;
        input = input.abs();
    }

    // Round the input to the requested scale (HALF_UP).
    input = input.setScale(scale, BigDecimal.ROUND_HALF_UP);

    // Separate out the integer part.
    BigDecimal integerPart = input.setScale(0, BigDecimal.ROUND_DOWN);

    // Integer digits are written right-to-left, ending just before the
    // roundUp(scale) words reserved for the fractional part.
    int destIndex = nDecimalDigits - roundUp(scale) - 1;

    // We use base-1-billion digits for our internal representation.
    BigDecimal base = new BigDecimal(DIGITS_BASE);

    while (integerPart.compareTo(BigDecimal.ZERO) == 1) {
        // Store the remainder as one base-1e9 digit, least significant first.
        data.setInt(startIndex + (destIndex * integerSize), (integerPart.remainder(base)).intValue());
        destIndex--;
        // Divide by base 1 billion to move to the next digit.
        integerPart = (integerPart.divide(base)).setScale(0, BigDecimal.ROUND_DOWN);
    }

    /* Sparse representation contains padding of additional zeroes
     * so each digit contains MAX_DIGITS for ease of arithmetic.
     */
    int actualDigits;
    if ((actualDigits = (scale % MAX_DIGITS)) != 0) {
        // Pad the scale up to a multiple of MAX_DIGITS with trailing zeroes.
        scale = scale + (MAX_DIGITS - actualDigits);
        input = input.setScale(scale, BigDecimal.ROUND_DOWN);
    }

    // Separate out the fractional part, shifted left into an integer value.
    BigDecimal fractionalPart = input.remainder(BigDecimal.ONE).movePointRight(scale);

    destIndex = nDecimalDigits - 1;

    while (scale > 0) {
        // Peel off the next MAX_DIGITS (9) decimal digits and store them.
        fractionalPart = fractionalPart.movePointLeft(MAX_DIGITS);
        BigDecimal temp = fractionalPart.remainder(BigDecimal.ONE);

        data.setInt(startIndex + (destIndex * integerSize), (temp.unscaledValue().intValue()));
        destIndex--;

        fractionalPart = fractionalPart.setScale(0, BigDecimal.ROUND_DOWN);
        scale -= MAX_DIGITS;
    }

    // Set the negative sign bit in the first word.
    if (sign == true) {
        data.setInt(startIndex, data.getInt(startIndex) | 0x80000000);
    }

}

From source file:org.apache.drill.common.util.DecimalUtility.java

License:Apache License

/**
 * Stores one 32-bit digit of a decimal value at slot {@code index} of the
 * digit array that begins at absolute byte offset {@code start}.
 *
 * @param buffer destination buffer, written at an absolute offset
 * @param start  byte offset of the first digit slot
 * @param index  zero-based slot number; each slot is 4 bytes (one int) wide
 * @param value  digit value to store
 */
public static void setInteger(ByteBuf buffer, int start, int index, int value) {
    final int slotOffset = start + (index * 4); // 4 bytes per int slot
    buffer.setInt(slotOffset, value);
}

From source file:org.apache.drill.exec.memory.TestEndianess.java

License:Apache License

/**
 * Verifies that Drill buffers store integers little-endian: writing the
 * 32-bit value 35 at offset 0 must place the low-order byte (35) first,
 * followed by three zero bytes.
 */
@Test
public void testLittleEndian() {
    final BufferAllocator a = new RootAllocator(DrillConfig.getMaxDirectMemory());
    final ByteBuf b = a.buffer(4);
    b.setInt(0, 35);
    // JUnit convention is assertEquals(expected, actual); the original had
    // the arguments swapped, which produces misleading failure messages.
    assertEquals(35, b.getByte(0));
    assertEquals(0, b.getByte(1));
    assertEquals(0, b.getByte(2));
    assertEquals(0, b.getByte(3));
    b.release();
    DrillAutoCloseables.closeNoChecked(a);
}

From source file:org.apache.drill.exec.util.DecimalUtility.java

License:Apache License

/**
 * Encodes {@code input} into Drill's "sparse" decimal representation inside
 * {@code data}: {@code nDecimalDigits} 32-bit words starting at byte offset
 * {@code startIndex}, each word holding one base-1,000,000,000 digit (up to
 * MAX_DIGITS decimal digits). Integer digits fill the leading words, the
 * scaled fractional digits fill the trailing words, and a negative sign is
 * recorded in the most-significant bit of the first word.
 *
 * @param input          value to encode; rounded to {@code scale} fractional
 *                       digits with HALF_UP before encoding
 * @param data           destination buffer, written via absolute setInt calls
 *                       (reader/writer indices are untouched)
 * @param startIndex     absolute byte offset of the first digit word
 * @param scale          number of fractional decimal digits to keep
 * @param precision      unused in this method body — presumably validated by
 *                       the caller; TODO confirm
 * @param nDecimalDigits number of digit words in the layout; assumes the
 *                       integer part fits in nDecimalDigits - roundUp(scale)
 *                       words (destIndex underflow is not checked here —
 *                       TODO confirm callers validate precision)
 */
public static void getSparseFromBigDecimal(BigDecimal input, ByteBuf data, int startIndex, int scale,
        int precision, int nDecimalDigits) {

    // Zero every digit word up front so untouched slots read as 0.
    for (int i = 0; i < nDecimalDigits; i++) {
        data.setInt(startIndex + (i * INTEGER_SIZE), 0);
    }

    boolean sign = false;

    if (input.signum() == -1) {
        // Work with the absolute value; the sign bit is re-applied at the end.
        sign = true;
        input = input.abs();
    }

    // Round the input to the requested scale (HALF_UP).
    input = input.setScale(scale, BigDecimal.ROUND_HALF_UP);

    // Separate out the integer part.
    BigDecimal integerPart = input.setScale(0, BigDecimal.ROUND_DOWN);

    // Integer digits are written right-to-left, ending just before the
    // roundUp(scale) words reserved for the fractional part.
    int destIndex = nDecimalDigits - roundUp(scale) - 1;

    // We use base-1-billion digits for our internal representation.
    BigDecimal base = new BigDecimal(DIGITS_BASE);

    while (integerPart.compareTo(BigDecimal.ZERO) == 1) {
        // Store the remainder as one base-1e9 digit, least significant first.
        data.setInt(startIndex + (destIndex * INTEGER_SIZE), (integerPart.remainder(base)).intValue());
        destIndex--;
        // Divide by base 1 billion to move to the next digit.
        integerPart = (integerPart.divide(base)).setScale(0, BigDecimal.ROUND_DOWN);
    }

    /* Sparse representation contains padding of additional zeroes
     * so each digit contains MAX_DIGITS for ease of arithmetic.
     */
    int actualDigits;
    if ((actualDigits = (scale % MAX_DIGITS)) != 0) {
        // Pad the scale up to a multiple of MAX_DIGITS with trailing zeroes.
        scale = scale + (MAX_DIGITS - actualDigits);
        input = input.setScale(scale, BigDecimal.ROUND_DOWN);
    }

    // Separate out the fractional part, shifted left into an integer value.
    BigDecimal fractionalPart = input.remainder(BigDecimal.ONE).movePointRight(scale);

    destIndex = nDecimalDigits - 1;

    while (scale > 0) {
        // Peel off the next MAX_DIGITS (9) decimal digits and store them.
        fractionalPart = fractionalPart.movePointLeft(MAX_DIGITS);
        BigDecimal temp = fractionalPart.remainder(BigDecimal.ONE);

        data.setInt(startIndex + (destIndex * INTEGER_SIZE), (temp.unscaledValue().intValue()));
        destIndex--;

        fractionalPart = fractionalPart.setScale(0, BigDecimal.ROUND_DOWN);
        scale -= MAX_DIGITS;
    }

    // Set the negative sign bit in the first word.
    if (sign == true) {
        data.setInt(startIndex, data.getInt(startIndex) | 0x80000000);
    }

}

From source file:org.apache.flink.runtime.query.netty.message.KvStateRequestSerializer.java

License:Apache License

/**
 * Allocates a buffer and serializes the KvState request failure into it.
 *
 * @param alloc ByteBuf allocator for the buffer to serialize message into
 * @param requestId ID of the request responding to
 * @param cause Failure cause/*from  w  ww.jav  a2s  . com*/
 * @return Serialized KvState request failure message
 * @throws IOException Serialization failures are forwarded
 */
/**
 * Allocates a buffer and serializes a KvState request failure into it.
 *
 * <p>Frame layout: 4-byte frame length (excluding itself), message header,
 * the 8-byte request ID, then the Java-serialized cause.
 *
 * @param alloc ByteBuf allocator for the buffer to serialize the message into
 * @param requestId ID of the request being responded to
 * @param cause failure cause to serialize
 * @return serialized KvState request failure message
 * @throws IOException serialization failures are forwarded
 */
public static ByteBuf serializeKvStateRequestFailure(ByteBufAllocator alloc, long requestId, Throwable cause)
        throws IOException {

    ByteBuf buf = alloc.ioBuffer();

    // Reserve 4 bytes for the frame length; patched once the size is known.
    buf.writeInt(0);

    writeHeader(buf, KvStateRequestType.REQUEST_FAILURE);

    buf.writeLong(requestId);

    // Java-serialize the cause directly into the buffer; both streams are
    // closed so everything is flushed before the length is computed.
    try (ByteBufOutputStream bbos = new ByteBufOutputStream(buf);
            ObjectOutputStream oos = new ObjectOutputStream(bbos)) {

        oos.writeObject(cause);
    }

    // Patch the frame length (excluding the 4 length bytes themselves).
    buf.setInt(0, buf.readableBytes() - 4);

    return buf;
}

From source file:org.apache.flink.runtime.query.netty.message.KvStateRequestSerializer.java

License:Apache License

/**
 * Allocates a buffer and serializes the server failure into it.
 *
 * <p>The cause must not be or contain any user types as causes.
 *
 * @param alloc ByteBuf allocator for the buffer to serialize message into
 * @param cause Failure cause//from   w  ww . j  a  v a2s  .  co m
 * @return Serialized server failure message
 * @throws IOException Serialization failures are forwarded
 */
/**
 * Allocates a buffer and serializes the server failure into it.
 *
 * <p>The cause must not be or contain any user types as causes.
 *
 * <p>Frame layout: 4-byte frame length (excluding itself), message header,
 * then the Java-serialized cause.
 *
 * @param alloc ByteBuf allocator for the buffer to serialize the message into
 * @param cause failure cause to serialize
 * @return serialized server failure message
 * @throws IOException serialization failures are forwarded
 */
public static ByteBuf serializeServerFailure(ByteBufAllocator alloc, Throwable cause) throws IOException {
    ByteBuf buf = alloc.ioBuffer();

    // Reserve 4 bytes for the frame length; patched once the size is known.
    buf.writeInt(0);

    writeHeader(buf, KvStateRequestType.SERVER_FAILURE);

    // Java-serialize the cause directly into the buffer; both streams are
    // closed so everything is flushed before the length is computed.
    try (ByteBufOutputStream bbos = new ByteBufOutputStream(buf);
            ObjectOutputStream oos = new ObjectOutputStream(bbos)) {

        oos.writeObject(cause);
    }

    // Patch the frame length (excluding the 4 length bytes themselves).
    buf.setInt(0, buf.readableBytes() - 4);

    return buf;
}