List of usage examples for io.netty.buffer CompositeByteBuf writerIndex
@Override public CompositeByteBuf writerIndex(int writerIndex)
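This method sets the absolute writer index of the composite buffer. In Netty 4.x, addComponent(ByteBuf) does not advance the writer index, which is why every usage below follows its addComponent calls with an explicit writerIndex(...). A minimal standalone sketch of that pattern (assuming Netty 4.x and the Unpooled allocator; not taken from the source files below):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;

public class CompositeWriterIndexExample {
    public static void main(String[] args) {
        ByteBuf header = Unpooled.wrappedBuffer(new byte[] { 0x01, 0x02 });
        ByteBuf payload = Unpooled.wrappedBuffer(new byte[] { 0x03, 0x04, 0x05 });

        // addComponent(ByteBuf) does not advance the writer index, so the
        // composite still reports zero readable bytes after both calls.
        CompositeByteBuf composite = Unpooled.compositeBuffer(2);
        composite.addComponent(header);
        composite.addComponent(payload);
        System.out.println(composite.readableBytes()); // 0

        // Setting the writer index to the combined component size makes the
        // full contents readable, mirroring the usages below.
        composite.writerIndex(header.readableBytes() + payload.readableBytes());
        System.out.println(composite.readableBytes()); // 5

        composite.release();
    }
}

Netty 4.1 also provides addComponent(true, buf) and addComponents(true, ...), which advance the writer index automatically and make the explicit writerIndex(...) call unnecessary.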
From source file: org.apache.drill.exec.rpc.RpcEncoder.java
License: Apache License

@Override
protected void encode(ChannelHandlerContext ctx, OutboundRpcMessage msg, List<Object> out) throws Exception {
    if (RpcConstants.EXTRA_DEBUGGING) {
        logger.debug("Rpc Encoder called with msg {}", msg);
    }

    if (!ctx.channel().isOpen()) {
        //output.add(ctx.alloc().buffer(0));
        logger.debug("Channel closed, skipping encode.");
        msg.release();
        return;
    }

    try {
        if (RpcConstants.EXTRA_DEBUGGING) {
            logger.debug("Encoding outbound message {}", msg);
        }

        // first we build the RpcHeader
        RpcHeader header = RpcHeader.newBuilder()
                .setMode(msg.mode)
                .setCoordinationId(msg.coordinationId)
                .setRpcType(msg.rpcType).build();

        // figure out the full length
        int headerLength = header.getSerializedSize();
        int protoBodyLength = msg.pBody.getSerializedSize();
        int rawBodyLength = msg.getRawBodySize();
        int fullLength = HEADER_TAG_LENGTH + getRawVarintSize(headerLength) + headerLength
                + PROTOBUF_BODY_TAG_LENGTH + getRawVarintSize(protoBodyLength) + protoBodyLength;

        if (rawBodyLength > 0) {
            fullLength += (RAW_BODY_TAG_LENGTH + getRawVarintSize(rawBodyLength) + rawBodyLength);
        }

        ByteBuf buf = ctx.alloc().buffer();
        OutputStream os = new ByteBufOutputStream(buf);
        CodedOutputStream cos = CodedOutputStream.newInstance(os);

        // write full length first (this is length delimited stream).
        cos.writeRawVarint32(fullLength);

        // write header
        cos.writeRawVarint32(HEADER_TAG);
        cos.writeRawVarint32(headerLength);
        header.writeTo(cos);

        // write protobuf body length and body
        cos.writeRawVarint32(PROTOBUF_BODY_TAG);
        cos.writeRawVarint32(protoBodyLength);
        msg.pBody.writeTo(cos);

        // if exists, write data body and tag.
        if (msg.getRawBodySize() > 0) {
            if (RpcConstants.EXTRA_DEBUGGING) {
                logger.debug("Writing raw body of size {}", msg.getRawBodySize());
            }

            cos.writeRawVarint32(RAW_BODY_TAG);
            cos.writeRawVarint32(rawBodyLength);
            cos.flush(); // need to flush so that dbody goes after if cos is caching.

            CompositeByteBuf cbb = new CompositeByteBuf(buf.alloc(), true, msg.dBodies.length + 1);
            cbb.addComponent(buf);
            int bufLength = buf.readableBytes();
            for (ByteBuf b : msg.dBodies) {
                cbb.addComponent(b);
                bufLength += b.readableBytes();
            }
            cbb.writerIndex(bufLength);
            out.add(cbb);
        } else {
            cos.flush();
            out.add(buf);
        }

        if (RpcConstants.SOME_DEBUGGING) {
            logger.debug("Wrote message length {}:{} bytes (head:body). Message: " + msg,
                    getRawVarintSize(fullLength), fullLength);
        }
        if (RpcConstants.EXTRA_DEBUGGING) {
            logger.debug("Sent message. Ending writer index was {}.", buf.writerIndex());
        }
    } finally {
        // make sure to release Rpc Messages underlying byte buffers.
        //msg.release();
    }
}
From source file: org.apache.helix.ipc.netty.NettyHelixIPCService.java
License: Apache License

/**
 * Sends a message to all partitions with a given state in the cluster.
 */
@Override
public void send(HelixAddress destination, int messageType, UUID messageId, ByteBuf message) {
    if (LOG.isTraceEnabled()) {
        LOG.trace("Sending " + messageId);
    }

    // Send message
    try {
        // Get list of channels
        List<Channel> channels = channelMap.get(destination.getSocketAddress());
        if (channels == null) {
            synchronized (channelMap) {
                channels = channelMap.get(destination.getSocketAddress());
                if (channels == null) {
                    channels = new ArrayList<Channel>(config.getNumConnections());
                    for (int i = 0; i < config.getNumConnections(); i++) {
                        channels.add(null);
                    }
                    channelMap.put(destination.getSocketAddress(), channels);
                }
            }
        }

        // Pick the channel for this scope
        int idx = (Integer.MAX_VALUE & destination.getScope().hashCode()) % channels.size();
        Channel channel = channels.get(idx);
        if (channel == null || !channel.isOpen()) {
            synchronized (channelMap) {
                channel = channels.get(idx);
                if (channel == null || !channel.isOpen()) {
                    channel = clientBootstrap.connect(destination.getSocketAddress()).sync().channel();
                    channels.set(idx, channel);
                    statChannelOpen.inc();
                }
            }
        }

        // Compute total length
        int headerLength = NUM_LENGTH_FIELDS * (Integer.SIZE / 8)
                + (Integer.SIZE / 8) * 2 // version, type
                + (Long.SIZE / 8) * 2 // 128 bit UUID
                + getLength(destination.getScope().getCluster())
                + getLength(destination.getScope().getResource())
                + getLength(destination.getScope().getPartition())
                + getLength(destination.getScope().getState())
                + getLength(config.getInstanceName())
                + getLength(destination.getInstanceName());
        int messageLength = message == null ? 0 : message.readableBytes();

        // Build message header
        ByteBuf headerBuf = channel.alloc().buffer(headerLength);
        headerBuf.writeInt(MESSAGE_VERSION).writeInt(messageType)
                .writeLong(messageId.getMostSignificantBits())
                .writeLong(messageId.getLeastSignificantBits());
        writeStringWithLength(headerBuf, destination.getScope().getCluster());
        writeStringWithLength(headerBuf, destination.getScope().getResource());
        writeStringWithLength(headerBuf, destination.getScope().getPartition());
        writeStringWithLength(headerBuf, destination.getScope().getState());
        writeStringWithLength(headerBuf, config.getInstanceName());
        writeStringWithLength(headerBuf, destination.getInstanceName());

        // Compose message header and payload
        headerBuf.writeInt(messageLength);
        CompositeByteBuf fullByteBuf = channel.alloc().compositeBuffer(2);
        fullByteBuf.addComponent(headerBuf);
        fullByteBuf.writerIndex(headerBuf.readableBytes());
        if (message != null) {
            fullByteBuf.addComponent(message);
            fullByteBuf.writerIndex(fullByteBuf.writerIndex() + message.readableBytes());
        }

        // Send
        NettyHelixIPCBackPressureHandler backPressureHandler = channel.pipeline()
                .get(NettyHelixIPCBackPressureHandler.class);
        backPressureHandler.waitUntilWritable(channel);
        channel.writeAndFlush(fullByteBuf);
        statTxMsg.mark();
        statTxBytes.mark(fullByteBuf.readableBytes());
    } catch (Exception e) {
        statError.inc();
        throw new IllegalStateException("Could not send message to " + destination, e);
    }
}
From source file: org.cloudfoundry.reactor.util.MultipartHttpOutbound.java
License: Apache License

public Mono<Void> done() {
    AsciiString boundary = generateMultipartBoundary();

    ByteBufAllocator allocator = this.outbound.delegate().alloc();
    CompositeByteBuf bodyBuf = allocator.compositeBuffer();
    this.partConsumers
            .forEach(partConsumer -> bodyBuf.addComponent(getPart(allocator, boundary, partConsumer)));
    bodyBuf.addComponent(getCloseDelimiter(allocator, boundary));

    return this.outbound.removeTransferEncodingChunked()
            .addHeader(CONTENT_TYPE, MULTIPART_FORM_DATA.concat(BOUNDARY_PREAMBLE).concat(boundary))
            .addHeader(CONTENT_LENGTH, String.valueOf(bodyBuf.capacity()))
            .sendOne(bodyBuf.writerIndex(bodyBuf.capacity()));
}
From source file: org.cloudfoundry.reactor.util.MultipartHttpOutbound.java
License: Apache License

private static ByteBuf getPart(ByteBufAllocator allocator, AsciiString boundary,
        Consumer<PartHttpOutbound> partConsumer) {
    PartHttpOutbound part = new PartHttpOutbound();
    partConsumer.accept(part);

    CompositeByteBuf body = allocator.compositeBuffer();
    body.addComponent(getDelimiter(allocator, boundary));
    body.addComponent(getHeaders(allocator, part.getHeaders()));
    body.addComponent(getData(allocator, part.getInputStream()));

    return body.writerIndex(body.capacity());
}
From source file: org.dcache.xrootd.protocol.messages.ReadVResponse.java
License: Open Source License

@Override
public void writeTo(ChannelHandlerContext ctx, ChannelPromise promise) {
    checkState(refCnt() > 0);

    CompositeByteBuf buffer = ctx.alloc().compositeBuffer(2 * length + 1);
    ByteBuf header = ctx.alloc().buffer(8);
    header.writeShort(request.getStreamId());
    header.writeShort(stat);
    header.writeInt(getDataLength());
    buffer.addComponent(header);
    for (int i = 0; i < length; i++) {
        header = ctx.alloc().buffer(READ_LIST_HEADER_SIZE);
        header.writeInt(requests[index + i].getFileHandle());
        header.writeInt(data[index + i].readableBytes());
        header.writeLong(requests[index + i].getOffset());
        buffer.addComponent(header);
        buffer.addComponent(data[index + i].retain());
    }
    buffer.writerIndex(buffer.capacity());
    ctx.write(buffer, promise);
    release();
}
From source file: org.ebayopensource.scc.cache.NettyResponseDeserializer.java
License: Apache License

@Override
public FullHttpResponse deserialize(CacheResponse cacheResp) {
    CompositeByteBuf byteBuf = UnpooledByteBufAllocator.DEFAULT.compositeBuffer();
    if (cacheResp.getContent() != null) {
        byteBuf.capacity(cacheResp.getContent().length);
        byteBuf.setBytes(0, cacheResp.getContent());
        byteBuf.writerIndex(cacheResp.getContent().length);
    }

    DefaultFullHttpResponse response = new DefaultFullHttpResponse(
            HttpVersion.valueOf(cacheResp.getProtocalVersion()),
            new HttpResponseStatus(cacheResp.getCode(), cacheResp.getReasonPhrase()), byteBuf, true);

    HttpHeaders headers = response.headers();
    List<CacheEntry<String, String>> cacheHeaders = cacheResp.getHeaders();
    for (Entry<String, String> entry : cacheHeaders) {
        headers.add(entry.getKey(), entry.getValue());
    }

    HttpHeaders trailingHeaders = response.trailingHeaders();
    List<CacheEntry<String, String>> cacheTrailingHeaders = cacheResp.getTrailingHeaders();
    for (Entry<String, String> entry : cacheTrailingHeaders) {
        trailingHeaders.add(entry.getKey(), entry.getValue());
    }

    return response;
}
From source file: org.eclipse.milo.opcua.sdk.client.DataTypeDictionaryReader.java
License: Open Source License

private CompletableFuture<ByteBuf> readFragments(NodeId nodeId, CompositeByteBuf fragmentBuffer,
        int fragmentSize, int index) {

    Preconditions.checkArgument(fragmentSize > 0, "fragmentSize=" + fragmentSize);

    String indexRange = fragmentSize <= 1 ? String.valueOf(index)
            : String.format("%d:%d", index, index + fragmentSize - 1);

    CompletableFuture<DataValue> valueFuture = readNode(
            new ReadValueId(nodeId, AttributeId.Value.uid(), indexRange, QualifiedName.NULL_VALUE));

    return valueFuture.thenComposeAsync(value -> {
        StatusCode statusCode = value.getStatusCode();

        if (statusCode == null || statusCode.isGood()) {
            ByteString fragmentBytes = (ByteString) value.getValue().getValue();

            if (fragmentBytes != null) {
                int bytesRead = fragmentBytes.length();

                if (bytesRead > 0) {
                    fragmentBuffer.addComponent(Unpooled.wrappedBuffer(fragmentBytes.bytesOrEmpty()));
                    fragmentBuffer.writerIndex(fragmentBuffer.writerIndex() + bytesRead);
                }

                if (bytesRead < fragmentSize) {
                    // A partial fragment means this is the last read that will
                    // succeed; don't bother trying to read the next fragment.
                    return completedFuture(fragmentBuffer);
                } else if (bytesRead > fragmentSize) {
                    // Some servers don't support index range properly and just
                    // return the entire contents. When this happens, we can assume
                    // we've read everything there is to read.
                    // An edge case where the dictionary size is exactly equal to the
                    // fragment size still exists. In this case we must hope the server
                    // properly terminates the subsequent request with something like
                    // Bad_IndexRangeNoData or else the infinite loop could still happen.
                    return completedFuture(fragmentBuffer);
                } else {
                    return readFragments(nodeId, fragmentBuffer, fragmentSize, index + bytesRead);
                }
            } else {
                logger.warn("Read a null type dictionary " + "fragment at indexRange=\"%s\"", indexRange);

                return completedFuture(fragmentBuffer);
            }
        } else {
            if (statusCode.getValue() != StatusCodes.Bad_IndexRangeNoData) {
                logger.warn("Reading type dictionary fragments expected to "
                        + "terminate with Bad_IndexRangeNoData but got {}", statusCode);
            }

            return completedFuture(fragmentBuffer);
        }
    });
}
From source file: org.eclipse.milo.opcua.stack.client.transport.uasc.UascClientMessageHandler.java
License: Open Source License

private void sendOpenSecureChannelRequest(ChannelHandlerContext ctx, SecurityTokenRequestType requestType) {
    ByteString clientNonce = secureChannel.isSymmetricSigningEnabled()
            ? NonceUtil.generateNonce(secureChannel.getSecurityPolicy())
            : ByteString.NULL_VALUE;

    secureChannel.setLocalNonce(clientNonce);

    RequestHeader header = new RequestHeader(null, DateTime.now(), uint(0), uint(0), null,
            config.getRequestTimeout(), null);

    OpenSecureChannelRequest request = new OpenSecureChannelRequest(header, uint(PROTOCOL_VERSION),
            requestType, secureChannel.getMessageSecurityMode(), secureChannel.getLocalNonce(),
            config.getChannelLifetime());

    serializationQueue.encode((binaryEncoder, chunkEncoder) -> {
        ByteBuf messageBuffer = BufferUtil.pooledBuffer();

        try {
            binaryEncoder.setBuffer(messageBuffer);
            binaryEncoder.writeMessage(null, request);

            checkMessageSize(messageBuffer);

            chunkEncoder.encodeAsymmetric(secureChannel, requestIdSequence.getAndIncrement(),
                    messageBuffer, MessageType.OpenSecureChannel, new ChunkEncoder.Callback() {
                        @Override
                        public void onEncodingError(UaException ex) {
                            logger.error("Error encoding {}: {}", request, ex.getMessage(), ex);
                            ctx.close();
                        }

                        @Override
                        public void onMessageEncoded(List<ByteBuf> messageChunks, long requestId) {
                            CompositeByteBuf chunkComposite = BufferUtil.compositeBuffer();

                            for (ByteBuf chunk : messageChunks) {
                                chunkComposite.addComponent(chunk);
                                chunkComposite.writerIndex(chunkComposite.writerIndex() + chunk.readableBytes());
                            }

                            ctx.writeAndFlush(chunkComposite, ctx.voidPromise());

                            ChannelSecurity channelSecurity = secureChannel.getChannelSecurity();

                            long currentTokenId = -1L;
                            if (channelSecurity != null) {
                                currentTokenId = channelSecurity.getCurrentToken().getTokenId().longValue();
                            }

                            long previousTokenId = -1L;
                            if (channelSecurity != null) {
                                previousTokenId = channelSecurity.getPreviousToken()
                                        .map(token -> token.getTokenId().longValue()).orElse(-1L);
                            }

                            logger.debug(
                                    "Sent OpenSecureChannelRequest ({}, id={}, currentToken={}, previousToken={}).",
                                    request.getRequestType(), secureChannel.getChannelId(),
                                    currentTokenId, previousTokenId);
                        }
                    });
        } finally {
            messageBuffer.release();
        }
    });
}
From source file: org.eclipse.milo.opcua.stack.client.transport.uasc.UascClientMessageHandler.java
License: Open Source License

private void sendCloseSecureChannelRequest(ChannelHandlerContext ctx, CloseSecureChannelRequest request) {
    serializationQueue.encode((binaryEncoder, chunkEncoder) -> {
        ByteBuf messageBuffer = BufferUtil.pooledBuffer();

        try {
            binaryEncoder.setBuffer(messageBuffer);
            binaryEncoder.writeMessage(null, request);

            checkMessageSize(messageBuffer);

            chunkEncoder.encodeSymmetric(secureChannel, requestIdSequence.getAndIncrement(),
                    messageBuffer, MessageType.CloseSecureChannel, new ChunkEncoder.Callback() {
                        @Override
                        public void onEncodingError(UaException ex) {
                            logger.error("Error encoding {}: {}", request, ex.getMessage(), ex);
                            ctx.close();
                        }

                        @Override
                        public void onMessageEncoded(List<ByteBuf> messageChunks, long requestId) {
                            CompositeByteBuf chunkComposite = BufferUtil.compositeBuffer();

                            for (ByteBuf chunk : messageChunks) {
                                chunkComposite.addComponent(chunk);
                                chunkComposite.writerIndex(chunkComposite.writerIndex() + chunk.readableBytes());
                            }

                            ctx.writeAndFlush(chunkComposite).addListener(future -> ctx.close());

                            secureChannel.setChannelId(0);
                        }
                    });
        } catch (UaSerializationException e) {
            handshakeFuture.completeExceptionally(e);
            ctx.close();
        } finally {
            messageBuffer.release();
        }
    });
}
From source file: org.eclipse.milo.opcua.stack.client.transport.uasc.UascClientMessageHandler.java
License: Open Source License

@Override
protected void encode(ChannelHandlerContext ctx, UaTransportRequest request, ByteBuf buffer) {
    serializationQueue.encode((binaryEncoder, chunkEncoder) -> {
        ByteBuf messageBuffer = BufferUtil.pooledBuffer();

        try {
            binaryEncoder.setBuffer(messageBuffer);
            binaryEncoder.writeMessage(null, request.getRequest());

            checkMessageSize(messageBuffer);

            chunkEncoder.encodeSymmetric(secureChannel, requestIdSequence.getAndIncrement(),
                    messageBuffer, MessageType.SecureMessage, new ChunkEncoder.Callback() {
                        @Override
                        public void onEncodingError(UaException ex) {
                            logger.error("Error encoding {}: {}", request.getRequest(), ex.getMessage(), ex);
                            ctx.close();
                        }

                        @Override
                        public void onMessageEncoded(List<ByteBuf> messageChunks, long requestId) {
                            pending.put(requestId, request);

                            // No matter how we complete, make sure the entry in pending is removed.
                            // This covers the case where the request fails due to a timeout in the
                            // transport layer as well as normal completion.
                            request.getFuture().whenComplete((r, x) -> pending.remove(requestId));

                            CompositeByteBuf chunkComposite = BufferUtil.compositeBuffer();

                            for (ByteBuf chunk : messageChunks) {
                                chunkComposite.addComponent(chunk);
                                chunkComposite.writerIndex(chunkComposite.writerIndex() + chunk.readableBytes());
                            }

                            ctx.writeAndFlush(chunkComposite, ctx.voidPromise());
                        }
                    });
        } catch (UaSerializationException e) {
            request.getFuture().completeExceptionally(e);
        } finally {
            messageBuffer.release();
        }
    });
}