Example usage for io.netty.buffer ByteBuf nioBuffer

Introduction

On this page you can find example usages of io.netty.buffer ByteBuf nioBuffer, collected from open-source projects.

Prototype

public abstract ByteBuffer nioBuffer(int index, int length);

Document

Exposes this buffer's sub-region as an NIO ByteBuffer.
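
As a minimal, self-contained sketch of that contract (assuming an Unpooled heap buffer; names are illustrative): the returned ByteBuffer shares its content with the ByteBuf but keeps its own position and limit, so reading or writing through the view never moves the ByteBuf's readerIndex or writerIndex.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class NioBufferSketch {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(16);
        buf.writeBytes("hello world".getBytes(StandardCharsets.US_ASCII));

        // Expose bytes 0..4 ("hello") as an NIO view of buf's content.
        ByteBuffer view = buf.nioBuffer(0, 5);
        byte[] out = new byte[view.remaining()];
        view.get(out);

        System.out.println(new String(out, StandardCharsets.US_ASCII)); // hello
        System.out.println(buf.readerIndex()); // still 0: the view has its own position

        buf.release(); // ByteBufs are reference-counted
    }
}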

Usage

From source file: org.apache.directory.server.dhcp.netty.Dhcp6Handler.java

@Override
protected void channelRead0(ChannelHandlerContext ctx, DatagramPacket msg) throws Exception {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Incomming DHCP : {}, from: {}", ByteBufUtil.hexDump(msg.content()), msg.sender());
    }/*www. ja va 2s  .  co  m*/

    final Dhcp6Message incomingMsg;
    try {
        incomingMsg = dhcp6MessageDecoder.decode(msg.content().nioBuffer());
    } catch (final Dhcp6Exception.UnknownMsgException e) {
        LOG.warn("Unknown DHCP message type: {}. Ignoring", ByteBufUtil.hexDump(msg.content()), e);
        return;
    }

    final Optional<Dhcp6Message> reply = dhcpService
            .getReplyFor(new Dhcp6RequestContext(msg.sender().getAddress()), incomingMsg);

    if (reply.isPresent()) {
        LOG.debug("Responding with message: {}", reply.get());

        // TODO what size to allocate the buffer to ?
        ByteBuf buf = ctx.alloc().buffer(1024);
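        // The NIO view covers the writable region and shares memory with buf,
        // but writing through it does not advance buf's writerIndex, so the
        // index is bumped manually once the encoder has filled the view.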
        ByteBuffer buffer = buf.nioBuffer(buf.writerIndex(), buf.writableBytes());
        dhcp6MessageEncoder.encode(buffer, reply.get());
        buffer.flip();
        buf.writerIndex(buf.writerIndex() + buffer.remaining());
        DatagramPacket packet = new DatagramPacket(buf, msg.sender());
        ctx.write(packet);
    } else {
        LOG.warn("No response from DHCP service received for: {}. Ignoring.", incommingMsg);
    }
}

From source file: org.apache.directory.server.dhcp.netty.DhcpHandler.java

@Override
protected void channelRead0(ChannelHandlerContext ctx, DatagramPacket msg) throws Exception {
    DhcpMessage request = decoder.decode(msg.content().nioBuffer());

    DhcpRequestContext context = interfaceManager.newRequestContext(
            (InetSocketAddress) ctx.channel().localAddress(), msg.sender(), msg.recipient(), request);
    if (context == null) {
        debug("IGNQUERY", msg.sender(), msg.recipient(), request);
        return;
    }
    // debug("READ", msg.sender(), msg.recipient(), request);

    MDCUtils.init(context, request);
    try {
        DhcpMessage reply = dhcpService.getReplyFor(context, request);
        if (reply == null) {
            debug("NOREPLY", msg.sender(), msg.recipient(), request);
            return;
        }

        InterfaceAddress localAddress = interfaceManager.getResponseInterface(request.getRelayAgentAddress(),
                request.getCurrentClientAddress(), msg.sender().getAddress(), reply);
        if (localAddress == null) {
            debug("NOIFACE", msg.recipient(), msg.sender(), reply);
            return;
        }

        debug("READ", msg.sender(), msg.recipient(), request);

        InetSocketAddress isa = DhcpInterfaceUtils.determineMessageDestination(request, reply, localAddress,
                msg.sender().getPort());

        ByteBuf buf = ctx.alloc().buffer(1024);
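        // Same pattern as in Dhcp6Handler: encode through an NIO view of the
        // writable region, flip it, then advance writerIndex by the number of
        // bytes produced.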
        ByteBuffer buffer = buf.nioBuffer(buf.writerIndex(), buf.writableBytes());
        encoder.encode(buffer, reply);
        buffer.flip();
        buf.writerIndex(buf.writerIndex() + buffer.remaining());
        DatagramPacket packet = new DatagramPacket(buf, isa);
        debug("WRITE", packet.sender(), packet.recipient(), reply);
        ctx.write(packet, ctx.voidPromise());
    } finally {
        MDCUtils.fini();
    }
}

From source file: org.apache.distributedlog.io.LZ4CompressionCodec.java

License: Apache License

@Override
public ByteBuf compress(ByteBuf uncompressed, int headerLen) {
    checkNotNull(uncompressed);
    checkArgument(uncompressed.readableBytes() > 0);

    int uncompressedLen = uncompressed.readableBytes();
    int maxLen = compressor.maxCompressedLength(uncompressedLen);

    // get the source bytebuffer
    ByteBuffer uncompressedNio = uncompressed.nioBuffer(uncompressed.readerIndex(), uncompressedLen);
    ByteBuf compressed = PooledByteBufAllocator.DEFAULT.buffer(maxLen + headerLen, maxLen + headerLen);
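    // The destination view starts at headerLen, reserving the first headerLen
    // bytes of 'compressed' for a header the caller fills in later.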
    ByteBuffer compressedNio = compressed.nioBuffer(headerLen, maxLen);

    int compressedLen = compressor.compress(uncompressedNio, uncompressedNio.position(), uncompressedLen,
            compressedNio, compressedNio.position(), maxLen);
    compressed.writerIndex(compressedLen + headerLen);

    return compressed;
}

From source file: org.apache.distributedlog.io.LZ4CompressionCodec.java

License: Apache License

@Override
// No separate compressed-length parameter is needed here: the fast decompressor reads exactly as many input bytes as it takes to produce decompressedSize bytes of output.
public ByteBuf decompress(ByteBuf compressed, int decompressedSize) {
    checkNotNull(compressed);
    checkArgument(compressed.readableBytes() >= 0);
    checkArgument(decompressedSize >= 0);

    ByteBuf uncompressed = PooledByteBufAllocator.DEFAULT.buffer(decompressedSize, decompressedSize);
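    // Both NIO views share memory with their ByteBufs: the decompressor fills
    // uncompressedNio, and writerIndex is set by hand afterwards because
    // writes through a view never advance a ByteBuf's indexes.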
    ByteBuffer uncompressedNio = uncompressed.nioBuffer(0, decompressedSize);
    ByteBuffer compressedNio = compressed.nioBuffer(compressed.readerIndex(), compressed.readableBytes());

    decompressor.decompress(compressedNio, compressedNio.position(), uncompressedNio,
            uncompressedNio.position(), uncompressedNio.remaining());
    uncompressed.writerIndex(decompressedSize);
    return uncompressed;
}

From source file: org.apache.drill.exec.store.parquet.ParquetDirectByteBufferAllocator.java

License: Apache License

@Override
public ByteBuffer allocate(int sz) {
    ByteBuf bb = allocator.buffer(sz);
    ByteBuffer b = bb.nioBuffer(0, sz);
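    // The NIO view is handed out to Parquet, while the backing ByteBuf is
    // remembered under a key derived from the view so the allocator can find
    // and free it when the buffer is released.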
    final Key key = new Key(b);
    allocatedBuffers.put(key, bb);
    logger.debug("ParquetDirectByteBufferAllocator: Allocated {} bytes. Allocated ByteBuffer id: {}", sz,
            key.hash);
    return b;
}

From source file: org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.java

License: Apache License

private <A> void flush0(final A attachment, final CompletionHandler<Long, ? super A> handler,
        boolean syncBlock) {
    if (state != State.STREAMING) {
        handler.failed(new IOException("stream already broken"), attachment);
        return;
    }
    int dataLen = buf.readableBytes();
    final long ackedLength = nextPacketOffsetInBlock + dataLen;
    if (ackedLength == locatedBlock.getBlock().getNumBytes()) {
        // no new data, just return
        handler.completed(locatedBlock.getBlock().getNumBytes(), attachment);
        return;
    }
    Promise<Void> promise = eventLoop.newPromise();
    promise.addListener(new FutureListener<Void>() {

        @Override
        public void operationComplete(Future<Void> future) throws Exception {
            if (future.isSuccess()) {
                locatedBlock.getBlock().setNumBytes(ackedLength);
                handler.completed(ackedLength, attachment);
            } else {
                handler.failed(future.cause(), attachment);
            }
        }
    });
    Callback c = waitingAckQueue.peekLast();
    if (c != null && ackedLength == c.ackedLength) {
        // just append it to the tail of the waiting ack queue; do not issue a new hflush request.
        waitingAckQueue.addLast(new Callback(promise, ackedLength, Collections.<Channel>emptyList()));
        return;
    }
    int chunkLen = summer.getBytesPerChecksum();
    int trailingPartialChunkLen = dataLen % chunkLen;
    int numChecks = dataLen / chunkLen + (trailingPartialChunkLen != 0 ? 1 : 0);
    int checksumLen = numChecks * summer.getChecksumSize();
    ByteBuf checksumBuf = alloc.directBuffer(checksumLen);
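    // The summer reads the packet data through one NIO view and writes the
    // checksums through another; checksumBuf's writerIndex is then set
    // explicitly, since writes through a view do not move it.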
    summer.calculateChunkedSums(buf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen));
    checksumBuf.writerIndex(checksumLen);
    PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock, nextPacketSeqno,
            false, dataLen, syncBlock);
    int headerLen = header.getSerializedSize();
    ByteBuf headerBuf = alloc.buffer(headerLen);
    header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
    headerBuf.writerIndex(headerLen);

    waitingAckQueue.addLast(new Callback(promise, ackedLength, datanodeList));
    for (Channel ch : datanodeList) {
        ch.write(headerBuf.duplicate().retain());
        ch.write(checksumBuf.duplicate().retain());
        ch.writeAndFlush(buf.duplicate().retain());
    }
    checksumBuf.release();
    headerBuf.release();
    ByteBuf newBuf = alloc.directBuffer().ensureWritable(trailingPartialChunkLen);
    if (trailingPartialChunkLen != 0) {
        buf.readerIndex(dataLen - trailingPartialChunkLen).readBytes(newBuf, trailingPartialChunkLen);
    }
    buf.release();
    this.buf = newBuf;
    nextPacketOffsetInBlock += dataLen - trailingPartialChunkLen;
    nextPacketSeqno++;
}

From source file: org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.java

License: Apache License

private void endBlock(Promise<Void> promise, long size) {
    if (state != State.STREAMING) {
        promise.tryFailure(new IOException("stream already broken"));
        return;
    }
    if (!waitingAckQueue.isEmpty()) {
        promise.tryFailure(new IllegalStateException("should call flush first before calling close"));
        return;
    }
    state = State.CLOSING;
    PacketHeader header = new PacketHeader(4, size, nextPacketSeqno, true, 0, false);
    buf.release();
    buf = null;
    int headerLen = header.getSerializedSize();
    ByteBuf headerBuf = alloc.buffer(headerLen);
    header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
    headerBuf.writerIndex(headerLen);
    waitingAckQueue.add(new Callback(promise, size, datanodeList));
    for (Channel ch : datanodeList) {
        ch.writeAndFlush(headerBuf.duplicate().retain());
    }
    headerBuf.release();
}

From source file: org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutput.java

License: Apache License

private void setupReceiver(final int timeoutMs) {
    SimpleChannelInboundHandler<PipelineAckProto> ackHandler = new SimpleChannelInboundHandler<PipelineAckProto>() {

        @Override
        public boolean isSharable() {
            return true;
        }

        @Override
        protected void channelRead0(final ChannelHandlerContext ctx, PipelineAckProto ack) throws Exception {
            final Status reply = getStatus(ack);
            if (reply != Status.SUCCESS) {
                failed(ctx.channel(), new Supplier<Throwable>() {

                    @Override
                    public Throwable get() {
                        return new IOException("Bad response " + reply + " for block " + locatedBlock.getBlock()
                                + " from datanode " + ctx.channel().remoteAddress());
                    }
                });
                return;
            }
            if (PipelineAck.isRestartOOBStatus(reply)) {
                failed(ctx.channel(), new Supplier<Throwable>() {

                    @Override
                    public Throwable get() {
                        return new IOException("Restart response " + reply + " for block "
                                + locatedBlock.getBlock() + " from datanode " + ctx.channel().remoteAddress());
                    }
                });
                return;
            }
            if (ack.getSeqno() == HEART_BEAT_SEQNO) {
                return;
            }
            completed(ctx.channel());
        }

        @Override
        public void channelInactive(final ChannelHandlerContext ctx) throws Exception {
            failed(ctx.channel(), new Supplier<Throwable>() {

                @Override
                public Throwable get() {
                    return new IOException("Connection to " + ctx.channel().remoteAddress() + " closed");
                }
            });
        }

        @Override
        public void exceptionCaught(ChannelHandlerContext ctx, final Throwable cause) throws Exception {
            failed(ctx.channel(), new Supplier<Throwable>() {

                @Override
                public Throwable get() {
                    return cause;
                }
            });
        }

        @Override
        public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
            if (evt instanceof IdleStateEvent) {
                IdleStateEvent e = (IdleStateEvent) evt;
                if (e.state() == IdleState.READER_IDLE) {
                    failed(ctx.channel(), new Supplier<Throwable>() {

                        @Override
                        public Throwable get() {
                            return new IOException("Timeout(" + timeoutMs + "ms) waiting for response");
                        }
                    });
                } else if (e.state() == IdleState.WRITER_IDLE) {
                    PacketHeader heartbeat = new PacketHeader(4, 0, HEART_BEAT_SEQNO, false, 0, false);
                    int len = heartbeat.getSerializedSize();
                    ByteBuf buf = alloc.buffer(len);
                    heartbeat.putInBuffer(buf.nioBuffer(0, len));
                    buf.writerIndex(len);
                    ctx.channel().writeAndFlush(buf);
                }
                return;
            }
            super.userEventTriggered(ctx, evt);
        }

    };
    for (Channel ch : datanodeList) {
        ch.pipeline().addLast(new IdleStateHandler(timeoutMs, timeoutMs / 2, 0, TimeUnit.MILLISECONDS),
                new ProtobufVarint32FrameDecoder(), new ProtobufDecoder(PipelineAckProto.getDefaultInstance()),
                ackHandler);
        ch.config().setAutoRead(true);
    }
}

From source file: org.apache.hive.spark.client.rpc.KryoMessageCodec.java

License: Apache License

@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    if (in.readableBytes() < 4) {
        return;
    }

    in.markReaderIndex();
    int msgSize = in.readInt();
    checkSize(msgSize);

    if (in.readableBytes() < msgSize) {
        // Incomplete message in buffer.
        in.resetReaderIndex();
        return;
    }

    try {
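        // nioBuffer(readerIndex, msgSize) exposes exactly this message's bytes
        // without copying. Kryo reads through the view, and since that never
        // moves in's readerIndex, skipBytes(msgSize) advances it in the
        // finally block whether or not decoding succeeded.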
        ByteBuffer nioBuffer = maybeDecrypt(in.nioBuffer(in.readerIndex(), msgSize));
        Input kryoIn = new Input(new ByteBufferInputStream(nioBuffer));

        Object msg = kryos.get().readClassAndObject(kryoIn);
        LOG.debug("Decoded message of type {} ({} bytes)", msg != null ? msg.getClass().getName() : msg,
                msgSize);
        out.add(msg);
    } finally {
        in.skipBytes(msgSize);
    }
}

From source file: org.apache.pulsar.client.impl.MessageCrypto.java

License: Apache License

public synchronized ByteBuf encrypt(Set<String> encKeys, CryptoKeyReader keyReader,
        MessageMetadata.Builder msgMetadata, ByteBuf payload) throws PulsarClientException {

    if (encKeys.isEmpty()) {
        return payload;
    }

    // Update message metadata with encrypted data key
    for (String keyName : encKeys) {
        if (encryptedDataKeyMap.get(keyName) == null) {
            // Attempt to load the key. This will allow us to load keys as soon as
            // a new key is added to producer config
            addPublicKeyCipher(keyName, keyReader);
        }
        EncryptionKeyInfo keyInfo = encryptedDataKeyMap.get(keyName);
        if (keyInfo != null) {
            if (keyInfo.getMetadata() != null && !keyInfo.getMetadata().isEmpty()) {
                List<KeyValue> kvList = new ArrayList<KeyValue>();
                keyInfo.getMetadata().forEach((key, value) -> {
                    kvList.add(KeyValue.newBuilder().setKey(key).setValue(value).build());
                });
                msgMetadata.addEncryptionKeys(EncryptionKeys.newBuilder().setKey(keyName)
                        .setValue(ByteString.copyFrom(keyInfo.getKey())).addAllMetadata(kvList).build());
            } else {
                msgMetadata.addEncryptionKeys(EncryptionKeys.newBuilder().setKey(keyName)
                        .setValue(ByteString.copyFrom(keyInfo.getKey())).build());
            }
        } else {
            // We should never reach here.
            log.error("{} Failed to find encrypted Data key for key {}.", logCtx, keyName);
        }

    }

    // Create gcm param
    // TODO: Replace random with counter and periodic refreshing based on timer/counter value
    secureRandom.nextBytes(iv);
    GCMParameterSpec gcmParam = new GCMParameterSpec(tagLen, iv);

    // Update message metadata with encryption param
    msgMetadata.setEncryptionParam(ByteString.copyFrom(iv));

    ByteBuf targetBuf = null;
    try {
        // Encrypt the data
        cipher.init(Cipher.ENCRYPT_MODE, dataKey, gcmParam);

        ByteBuffer sourceNioBuf = payload.nioBuffer(payload.readerIndex(), payload.readableBytes());

        int maxLength = cipher.getOutputSize(payload.readableBytes());
        targetBuf = PooledByteBufAllocator.DEFAULT.buffer(maxLength, maxLength);
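        // targetNioBuf shares memory with targetBuf; doFinal writes the
        // ciphertext through the view, and writerIndex is set from its return
        // value because writes through a view never move the ByteBuf's indexes.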
        ByteBuffer targetNioBuf = targetBuf.nioBuffer(0, maxLength);

        int bytesStored = cipher.doFinal(sourceNioBuf, targetNioBuf);
        targetBuf.writerIndex(bytesStored);

    } catch (IllegalBlockSizeException | BadPaddingException | InvalidKeyException
            | InvalidAlgorithmParameterException | ShortBufferException e) {

        if (targetBuf != null) {
            targetBuf.release();
        }
        log.error("{} Failed to encrypt message. {}", logCtx, e);
        throw new PulsarClientException.CryptoException(e.getMessage());

    }

    payload.release();
    return targetBuf;
}