Example usage for io.netty.buffer ByteBuf duplicate

Introduction

This page collects example usages of io.netty.buffer.ByteBuf.duplicate() from open-source projects.

Prototype

public abstract ByteBuf duplicate();

Document

Returns a buffer which shares the whole region of this buffer. Modifying the content of the returned buffer or this buffer affects the other's content, while the two buffers maintain separate indexes and marks.
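
As a quick illustration of these semantics (a minimal, self-contained sketch, not taken from any of the projects below), the duplicate shares its content with the original buffer but keeps its own reader and writer indexes:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class DuplicateDemo {
    public static void main(String[] args) {
        ByteBuf original = Unpooled.buffer();
        original.writeBytes("hello".getBytes());

        ByteBuf dup = original.duplicate();
        dup.readByte();                                 // moves only the duplicate's reader index
        System.out.println(original.readerIndex());     // 0
        System.out.println(dup.readerIndex());          // 1

        dup.setByte(0, 'H');                            // content is shared between the two views
        System.out.println((char) original.getByte(0)); // H
    }
}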

Usage

From source file: org.apache.camel.component.netty4.DatagramPacketByteArrayCodecTest.java

License: Apache License

@Test
public void testDecoder() {
    ByteBuf buf = Unpooled.buffer();
    buf.writeBytes(VALUE.getBytes());
    ByteBuf input = buf.duplicate();
    AddressedEnvelope<Object, InetSocketAddress> addressedEnvelop = new DefaultAddressedEnvelope<Object, InetSocketAddress>(
            input, new InetSocketAddress(8888));
    EmbeddedChannel channel = new EmbeddedChannel(
            ChannelHandlerFactories.newByteArrayDecoder("udp").newChannelHandler());
    Assert.assertTrue(channel.writeInbound(addressedEnvelop));
    Assert.assertTrue(channel.finish());
    AddressedEnvelope<Object, InetSocketAddress> result = (AddressedEnvelope) channel.readInbound();
    Assert.assertEquals(result.recipient().getPort(), addressedEnvelop.recipient().getPort());
    Assert.assertTrue(result.content() instanceof byte[]);
    Assert.assertEquals(VALUE, new String((byte[]) result.content()));
    Assert.assertNull(channel.readInbound());
}
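
Note that duplicate() does not increment the reference count; the view shares it with buf, so once the decoder releases the consumed envelope, buf is released as well. A retain() would be needed if the test wanted to keep using buf afterwards.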

From source file: org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.java

License: Apache License

private <A> void flush0(final A attachment, final CompletionHandler<Long, ? super A> handler,
        boolean syncBlock) {
    if (state != State.STREAMING) {
        handler.failed(new IOException("stream already broken"), attachment);
        return;
    }
    int dataLen = buf.readableBytes();
    final long ackedLength = nextPacketOffsetInBlock + dataLen;
    if (ackedLength == locatedBlock.getBlock().getNumBytes()) {
        // no new data, just return
        handler.completed(locatedBlock.getBlock().getNumBytes(), attachment);
        return;
    }
    Promise<Void> promise = eventLoop.newPromise();
    promise.addListener(new FutureListener<Void>() {

        @Override
        public void operationComplete(Future<Void> future) throws Exception {
            if (future.isSuccess()) {
                locatedBlock.getBlock().setNumBytes(ackedLength);
                handler.completed(ackedLength, attachment);
            } else {
                handler.failed(future.cause(), attachment);
            }
        }
    });
    Callback c = waitingAckQueue.peekLast();
    if (c != null && ackedLength == c.ackedLength) {
        // just append it to the tail of the waiting ack queue; do not issue a new hflush request.
        waitingAckQueue.addLast(new Callback(promise, ackedLength, Collections.<Channel>emptyList()));
        return;
    }
    int chunkLen = summer.getBytesPerChecksum();
    int trailingPartialChunkLen = dataLen % chunkLen;
    int numChecks = dataLen / chunkLen + (trailingPartialChunkLen != 0 ? 1 : 0);
    int checksumLen = numChecks * summer.getChecksumSize();
    ByteBuf checksumBuf = alloc.directBuffer(checksumLen);
    summer.calculateChunkedSums(buf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen));
    checksumBuf.writerIndex(checksumLen);
    PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock, nextPacketSeqno,
            false, dataLen, syncBlock);
    int headerLen = header.getSerializedSize();
    ByteBuf headerBuf = alloc.buffer(headerLen);
    header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
    headerBuf.writerIndex(headerLen);

    waitingAckQueue.addLast(new Callback(promise, ackedLength, datanodeList));
    for (Channel ch : datanodeList) {
        ch.write(headerBuf.duplicate().retain());
        ch.write(checksumBuf.duplicate().retain());
        ch.writeAndFlush(buf.duplicate().retain());
    }
    checksumBuf.release();
    headerBuf.release();
    ByteBuf newBuf = alloc.directBuffer().ensureWritable(trailingPartialChunkLen);
    if (trailingPartialChunkLen != 0) {
        buf.readerIndex(dataLen - trailingPartialChunkLen).readBytes(newBuf, trailingPartialChunkLen);
    }
    buf.release();
    this.buf = newBuf;
    nextPacketOffsetInBlock += dataLen - trailingPartialChunkLen;
    nextPacketSeqno++;
}
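
The duplicate().retain() pairs above are the idiomatic way to fan the same bytes out to several channels: each channel gets its own view with independent indexes, and each retain() bumps the shared reference count so that every write can release its copy without invalidating the others. On Netty 4.1 and later the loop can be written more compactly; the following sketch assumes the 4.1 retainedDuplicate() API:

for (Channel ch : datanodeList) {
    ch.write(headerBuf.retainedDuplicate());   // duplicate view plus refCnt increment in one call
    ch.write(checksumBuf.retainedDuplicate());
    ch.writeAndFlush(buf.retainedDuplicate());
}
// the local references are still held, so release them once the writes are queued
checksumBuf.release();
headerBuf.release();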

From source file: org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.java

License: Apache License

private void endBlock(Promise<Void> promise, long size) {
    if (state != State.STREAMING) {
        promise.tryFailure(new IOException("stream already broken"));
        return;
    }
    if (!waitingAckQueue.isEmpty()) {
        promise.tryFailure(new IllegalStateException("should call flush first before calling close"));
        return;
    }
    state = State.CLOSING;
    PacketHeader header = new PacketHeader(4, size, nextPacketSeqno, true, 0, false);
    buf.release();
    buf = null;
    int headerLen = header.getSerializedSize();
    ByteBuf headerBuf = alloc.buffer(headerLen);
    header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
    headerBuf.writerIndex(headerLen);
    waitingAckQueue.add(new Callback(promise, size, datanodeList));
    for (Channel ch : datanodeList) {
        ch.writeAndFlush(headerBuf.duplicate().retain());
    }
    headerBuf.release();
}

From source file: org.asynchttpclient.util.ByteBufUtils.java

License: Open Source License

public static String byteBuf2String(ByteBuf buf, Charset charset)
        throws UTFDataFormatException, IndexOutOfBoundsException, CharacterCodingException {

    int byteLen = buf.readableBytes();

    if (charset.equals(StandardCharsets.US_ASCII)) {
        return Utf8Reader.readUtf8(buf, byteLen);
    } else if (charset.equals(StandardCharsets.UTF_8)) {
        try {
            return Utf8Reader.readUtf8(buf.duplicate(), (int) (byteLen * 1.4));
        } catch (IndexOutOfBoundsException e) {
            // try again with 3 bytes per char
            return Utf8Reader.readUtf8(buf, byteLen * 3);
        }
    } else {
        return byteBuffersToString(buf.nioBuffers(), charset);
    }
}
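
The duplicate is what makes the retry safe: the first, optimistically sized attempt reads from a duplicate, so if it overruns its estimate and throws IndexOutOfBoundsException, only the duplicate's reader index has moved, and the untouched buf can be handed to the second attempt with the conservative 3-bytes-per-char bound.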

From source file: org.hawkular.metrics.clients.ptrans.collectd.packet.PacketDecodingTest.java

License: Apache License

static ByteBuf createValuesPartBuffer(Values values) {
    List<Number> data = values.getData();
    ListIterator<Number> dataIterator = data.listIterator();
    List<DataType> dataTypes = values.getDataTypes();
    ListIterator<DataType> dataTypeIterator = dataTypes.listIterator();

    ByteBuf payloadBuffer = Unpooled.buffer();

    while (dataTypeIterator.hasNext()) {
        payloadBuffer.writeByte(dataTypeIterator.next().getId());
    }

    dataTypeIterator = dataTypes.listIterator();
    while (dataIterator.hasNext()) {
        DataType dataType = dataTypeIterator.next();
        Number number = dataIterator.next();
        switch (dataType) {
        case COUNTER:
        case ABSOLUTE:
            BigInteger bigInteger = (BigInteger) number;
            payloadBuffer.writeBytes(bigInteger.toByteArray());
            break;
        case DERIVE:
            payloadBuffer.writeLong((Long) number);
            break;
        case GAUGE:
            payloadBuffer.writeLong(ByteBufUtil.swapLong(Double.doubleToLongBits((Double) number)));
            break;
        default:
            fail("Unknown data type: " + dataType);
        }
    }

    ByteBuf headerBuffer = Unpooled.buffer();
    headerBuffer.writeShort(VALUES.getId());
    headerBuffer.writeShort(6 + payloadBuffer.writerIndex());
    headerBuffer.writeShort(data.size());

    ByteBuf buffer = Unpooled.buffer();
    buffer.writeBytes(headerBuffer.duplicate()).writeBytes(payloadBuffer.duplicate());
    return buffer;
}
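
The duplicates in the final writeBytes chain are deliberate: ByteBuf.writeBytes(ByteBuf src) consumes src by advancing its reader index, so copying from duplicates leaves headerBuffer and payloadBuffer fully readable for any further use by the caller.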

From source file: org.hawkular.metrics.clients.ptrans.collectd.packet.PacketDecodingTest.java

License: Apache License

private void shouldDecodePart(PartType partType, ByteBuf buffer, Class<? extends Part> partClass,
        Matcher<Object> matcher) {

    DatagramPacket datagramPacket = new DatagramPacket(buffer.duplicate(), DUMMY_ADDRESS);

    EmbeddedChannel channel = new EmbeddedChannel(new CollectdPacketDecoder());
    assertTrue("Expected a CollectdPacket", channel.writeInbound(datagramPacket));

    Object output = channel.readInbound();
    assertEquals(CollectdPacket.class, output.getClass());

    CollectdPacket collectdPacket = (CollectdPacket) output;
    List<Part> parts = collectdPacket.getParts();
    assertEquals("Expected only one part in the packet", 1, parts.size());

    Part part = parts.iterator().next();
    assertEquals(partClass, part.getClass());
    assertEquals(partType, part.getPartType());
    assertThat(part.getValue(), matcher);

    assertNull("Expected just one CollectdPacket", channel.readInbound());
}
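
Handing the DatagramPacket a duplicate lets the decoder consume its own view while the caller's reader index on buffer stays untouched. Note, though, that duplicate() shares the reference count, so the pipeline's eventual release of the packet still counts against buffer.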

From source file: org.opendaylight.sxp.core.service.UpdateExportTaskTest.java

License: Open Source License

@Before
public void init() throws Exception {
    connection = mock(SxpConnection.class);
    Context context = PowerMockito.mock(Context.class);
    when(connection.getContext()).thenReturn(context);
    ByteBuf byteBuf = mock(ByteBuf.class);
    when(byteBuf.duplicate()).thenReturn(byteBuf);
    when(byteBuf.capacity()).thenReturn(10);
    PowerMockito.when(context.executeUpdateMessageStrategy(any(SxpConnection.class), anyList(), anyList(),
            any(SxpBindingFilter.class))).thenReturn(byteBuf);
    byteBuffs = new ByteBuf[] { byteBuf };
    parttions = new BiFunction[] { (c, f) -> byteBuf };
    atomicInteger = new AtomicInteger(1);
    exportTask = new UpdateExportTask(connection, byteBuffs, parttions, atomicInteger);
}
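
Stubbing byteBuf.duplicate() to return the mock itself works here because the task under test only needs a ByteBuf-shaped handle to pass along; no real index bookkeeping is exercised by the test.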

From source file: org.rhq.metrics.netty.collectd.packet.PacketDecodingTest.java

License: Apache License

static ByteBuf createValuesPartBuffer(Values values) {
    Number[] data = values.getData();
    DataType[] dataTypes = values.getDataTypes();
    ByteBuf payloadBuffer = Unpooled.buffer();
    for (int i = 0; i < data.length; i++) {
        payloadBuffer.writeByte(dataTypes[i].getId());
    }
    for (int i = 0; i < data.length; i++) {
        DataType dataType = dataTypes[i];
        switch (dataType) {
        case COUNTER:
        case ABSOLUTE:
            BigInteger bigInteger = (BigInteger) data[i];
            payloadBuffer.writeBytes(bigInteger.toByteArray());
            break;
        case DERIVE:
            payloadBuffer.writeLong((Long) data[i]);
            break;
        case GAUGE:
            payloadBuffer.writeLong(ByteBufUtil.swapLong(Double.doubleToLongBits((Double) data[i])));
            break;
        default:
            fail("Unknown data type: " + dataType);
        }
    }

    ByteBuf headerBuffer = Unpooled.buffer();
    headerBuffer.writeShort(VALUES.getId());
    headerBuffer.writeShort(6 + payloadBuffer.writerIndex());
    headerBuffer.writeShort(data.length);

    ByteBuf buffer = Unpooled.buffer();
    buffer.writeBytes(headerBuffer.duplicate()).writeBytes(payloadBuffer.duplicate());
    return buffer;
}

From source file: org.rhq.metrics.netty.collectd.packet.PacketDecodingTest.java

License: Apache License

private void shouldDecodePart(PartType partType, ByteBuf buffer, Class<? extends Part> partClass,
        Matcher<Object> matcher) {

    DatagramPacket datagramPacket = new DatagramPacket(buffer.duplicate(), DUMMY_ADDRESS);

    EmbeddedChannel channel = new EmbeddedChannel(new CollectdPacketDecoder());
    assertTrue("Expected a CollectdPacket", channel.writeInbound(datagramPacket));

    Object output = channel.readInbound();
    assertEquals(CollectdPacket.class, output.getClass());

    CollectdPacket collectdPacket = (CollectdPacket) output;
    Part[] parts = collectdPacket.getParts();
    assertEquals("Expected only one part in the packet", 1, parts.length);

    Part part = parts[0];
    assertEquals(partClass, part.getClass());
    assertEquals(partType, part.getPartType());
    assertThat(part.getValue(), matcher);

    assertNull("Expected just one CollectdPacket", channel.readInbound());
}

From source file: org.wso2.carbon.mss.internal.router.InternalHttpResponder.java

License: Open Source License

private InputSupplier<InputStream> createContentSupplier(ByteBuf content) {
    final ByteBuf responseContent = content.duplicate(); // Have independent pointers.
    responseContent.markReaderIndex();
    return new HttpResponderInputSupplier(responseContent).invoke();
}
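
The duplicate provides the "independent pointers" the comment refers to: the responder can mark and later reset its own reader index without disturbing the position of the original content buffer, while the bytes themselves remain shared.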