List of usage examples for io.netty.buffer.ByteBuf.retain()
@Override public abstract ByteBuf retain();
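retain() increments the buffer's reference count by one; every retain() must eventually be matched by a release(), and the buffer is only deallocated when the count drops to zero. A minimal standalone sketch of that contract (buffer contents are illustrative):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.CharsetUtil;

public class RetainReleaseSketch {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.copiedBuffer("hello", CharsetUtil.UTF_8);
        System.out.println(buf.refCnt()); // 1

        buf.retain();                     // hand-off to another consumer: refCnt == 2
        System.out.println(buf.refCnt()); // 2

        buf.release();                    // consumer is done: refCnt == 1
        buf.release();                    // original owner is done: refCnt == 0, buffer deallocated
        System.out.println(buf.refCnt()); // 0
    }
}

The examples below show where real projects take that extra reference: slices that must outlive their parent, messages passed across handler boundaries, and buffers shared between a send queue and a channel write.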
From source file:com.spotify.netty4.handler.codec.zmtp.ZMTPMessage.java
License:Apache License
/**
 * Create a new {@link ZMTPMessage} with the front frame removed.
 */
public ZMTPMessage pop() {
    if (this.frames.length == 0) {
        throw new IllegalStateException("empty message");
    }
    final ByteBuf[] frames = new ByteBuf[this.frames.length - 1];
    System.arraycopy(this.frames, 1, frames, 0, frames.length);
    for (final ByteBuf f : frames) {
        f.retain();
    }
    return new ZMTPMessage(frames);
}
From source file:com.spotify.netty4.handler.codec.zmtp.ZMTPMessageDecoder.java
License:Apache License
@Override
public void content(final ChannelHandlerContext ctx, final ByteBuf data, final List<Object> out) {
    // Wait for more data?
    if (data.readableBytes() < frameLength) {
        return;
    }
    if (frameLength == 0) {
        frames.add(DELIMITER);
        return;
    }
    final ByteBuf frame = data.readSlice(frameLength);
    frame.retain();
    frames.add(frame);
}
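The readSlice(...).retain() pair above is a common pattern: a slice does not get its own reference count, it shares the parent's, so retaining the slice keeps the underlying data alive after the pipeline releases the parent buffer. A rough standalone illustration (plain buffers, not the ZMTP types):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class SliceRetainSketch {
    public static void main(String[] args) {
        ByteBuf data = Unpooled.wrappedBuffer(new byte[]{1, 2, 3, 4, 5, 6});

        // Slice the first 4 bytes; the slice shares data's memory and reference count.
        ByteBuf frame = data.readSlice(4);
        frame.retain();                            // shared refCnt is now 2

        data.release();                            // the caller drops its reference; the data stays alive
        System.out.println(frame.readableBytes()); // 4, still readable

        frame.release();                           // last reference dropped, buffer deallocated
    }
}

Netty 4.1 also offers readRetainedSlice(length), which performs the slice and the retain in one call.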
From source file:com.twitter.http2.HttpStreamDecoder.java
License:Apache License
@Override
protected void decode(ChannelHandlerContext ctx, Object msg, List<Object> out) throws Exception {
    if (msg instanceof HttpHeadersFrame) {
        HttpHeadersFrame httpHeadersFrame = (HttpHeadersFrame) msg;
        int streamId = httpHeadersFrame.getStreamId();
        StreamedHttpMessage message = messageMap.get(streamId);

        if (message == null) {
            if (httpHeadersFrame.headers().contains(":status")) {
                // If a client receives a reply with a truncated header block,
                // reply with a RST_STREAM frame with error code INTERNAL_ERROR.
                if (httpHeadersFrame.isTruncated()) {
                    HttpRstStreamFrame httpRstStreamFrame =
                            new DefaultHttpRstStreamFrame(streamId, HttpErrorCode.INTERNAL_ERROR);
                    out.add(httpRstStreamFrame);
                    return;
                }
                try {
                    StreamedHttpResponse response = createHttpResponse(httpHeadersFrame);
                    if (httpHeadersFrame.isLast()) {
                        HttpHeaders.setContentLength(response, 0);
                        response.getContent().close();
                    } else {
                        // Response body will follow in a series of Data Frames
                        if (!HttpHeaders.isContentLengthSet(response)) {
                            HttpHeaders.setTransferEncodingChunked(response);
                        }
                        messageMap.put(streamId, response);
                    }
                    out.add(response);
                } catch (Exception e) {
                    // If a client receives a SYN_REPLY without valid getStatus and version headers
                    // the client must reply with a RST_STREAM frame indicating a PROTOCOL_ERROR
                    HttpRstStreamFrame httpRstStreamFrame =
                            new DefaultHttpRstStreamFrame(streamId, HttpErrorCode.PROTOCOL_ERROR);
                    ctx.writeAndFlush(httpRstStreamFrame);
                    out.add(httpRstStreamFrame);
                }
            } else {
                // If a client sends a request with a truncated header block, the server must
                // reply with a HTTP 431 REQUEST HEADER FIELDS TOO LARGE reply.
                if (httpHeadersFrame.isTruncated()) {
                    httpHeadersFrame = new DefaultHttpHeadersFrame(streamId);
                    httpHeadersFrame.setLast(true);
                    httpHeadersFrame.headers().set(":status",
                            HttpResponseStatus.REQUEST_HEADER_FIELDS_TOO_LARGE.code());
                    ctx.writeAndFlush(httpHeadersFrame);
                    return;
                }
                try {
                    message = createHttpRequest(httpHeadersFrame);
                    if (httpHeadersFrame.isLast()) {
                        message.setDecoderResult(DecoderResult.SUCCESS);
                        message.getContent().close();
                    } else {
                        // Request body will follow in a series of Data Frames
                        messageMap.put(streamId, message);
                    }
                    out.add(message);
                } catch (Exception e) {
                    // If a client sends a SYN_STREAM without all of the method, url (host and path),
                    // scheme, and version headers the server must reply with a HTTP 400 BAD REQUEST reply.
                    // Also sends HTTP 400 BAD REQUEST reply if header name/value pairs are invalid
                    httpHeadersFrame = new DefaultHttpHeadersFrame(streamId);
                    httpHeadersFrame.setLast(true);
                    httpHeadersFrame.headers().set(":status", HttpResponseStatus.BAD_REQUEST.code());
                    ctx.writeAndFlush(httpHeadersFrame);
                }
            }
        } else {
            LastHttpContent trailer = trailerMap.remove(streamId);
            if (trailer == null) {
                trailer = new DefaultLastHttpContent();
            }
            // Ignore trailers in a truncated HEADERS frame.
            if (!httpHeadersFrame.isTruncated()) {
                for (Map.Entry<String, String> e : httpHeadersFrame.headers()) {
                    trailer.trailingHeaders().add(e.getKey(), e.getValue());
                }
            }
            if (httpHeadersFrame.isLast()) {
                messageMap.remove(streamId);
                message.addContent(trailer);
            } else {
                trailerMap.put(streamId, trailer);
            }
        }
    } else if (msg instanceof HttpDataFrame) {
        HttpDataFrame httpDataFrame = (HttpDataFrame) msg;
        int streamId = httpDataFrame.getStreamId();
        StreamedHttpMessage message = messageMap.get(streamId);

        // If message is not in map discard Data Frame.
        if (message == null) {
            return;
        }

        ByteBuf content = httpDataFrame.content();
        if (content.isReadable()) {
            content.retain();
            message.addContent(new DefaultHttpContent(content));
        }

        if (httpDataFrame.isLast()) {
            messageMap.remove(streamId);
            message.addContent(LastHttpContent.EMPTY_LAST_CONTENT);
            message.setDecoderResult(DecoderResult.SUCCESS);
        }
    } else if (msg instanceof HttpRstStreamFrame) {
        HttpRstStreamFrame httpRstStreamFrame = (HttpRstStreamFrame) msg;
        int streamId = httpRstStreamFrame.getStreamId();
        StreamedHttpMessage message = messageMap.remove(streamId);
        if (message != null) {
            message.getContent().close();
            message.setDecoderResult(DecoderResult.failure(CANCELLATION_EXCEPTION));
        }
    } else {
        // HttpGoAwayFrame
        out.add(msg);
    }
}
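The content.retain() in the HttpDataFrame branch follows the same ownership rule: the data frame (and the buffer it holds) is released upstream once this handler is done with it, while the DefaultHttpContent added to the streamed message needs the bytes to stay alive. A standalone sketch of that re-wrapping step, with illustrative data:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.handler.codec.http.DefaultHttpContent;
import io.netty.handler.codec.http.HttpContent;
import io.netty.util.CharsetUtil;

public class RewrapContentSketch {
    public static void main(String[] args) {
        // Stand-in for a frame payload that the pipeline will release after decoding.
        ByteBuf frameContent = Unpooled.copiedBuffer("chunk", CharsetUtil.UTF_8);

        // The new HttpContent holds the same buffer, so take an extra reference for it.
        HttpContent chunk = new DefaultHttpContent(frameContent.retain());

        // The original frame's owner releases its reference...
        frameContent.release();

        // ...but the re-wrapped chunk still has a live reference to the bytes.
        System.out.println(chunk.content().toString(CharsetUtil.UTF_8)); // "chunk"
        chunk.release(); // releasing the HttpContent releases the underlying buffer
    }
}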
From source file:com.uber.tchannel.codecs.CodecUtils.java
License:Open Source License
public static ByteBuf readArg(ByteBuf buffer) {
    if (buffer.readableBytes() < TFrame.FRAME_SIZE_LENGTH) {
        return null;
    }
    int len = buffer.readUnsignedShort();
    if (len > buffer.readableBytes()) {
        throw new UnsupportedOperationException("wrong read index for args");
    } else if (len == 0) {
        return Unpooled.EMPTY_BUFFER;
    }

    /* Read a slice, retain a copy */
    ByteBuf arg = buffer.readSlice(len);
    arg.retain();

    return arg;
}
From source file:com.uber.tchannel.codecs.TFrameCodec.java
License:Open Source License
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws Exception {
    msg.retain();
    out.add(decode(msg));
}
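TFrameCodec retains the inbound buffer before handing it to its other decode(...) overload because Netty's message-to-message base codecs release the inbound message once decode(...) returns. A minimal sketch of that rule, using a hypothetical FrameDecoder and Frame type (not the tchannel classes):

import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.MessageToMessageDecoder;

import java.util.List;

// Hypothetical decoder: wraps the raw bytes in an application-level Frame object.
public class FrameDecoder extends MessageToMessageDecoder<ByteBuf> {

    // Illustrative value type that keeps a reference to the buffer it was decoded from.
    public static final class Frame {
        final ByteBuf payload;
        Frame(ByteBuf payload) { this.payload = payload; }
    }

    @Override
    protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) {
        // MessageToMessageDecoder releases msg after decode() returns, so a decoded
        // object that keeps pointing at msg must take its own reference first.
        out.add(new Frame(msg.retain()));
    }
}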
From source file:com.yahoo.pulsar.broker.service.persistent.PersistentReplicator.java
License:Apache License
@Override
public void readEntriesComplete(List<Entry> entries, Object ctx) {
    if (log.isDebugEnabled()) {
        log.debug("[{}][{} -> {}] Read entries complete of {} messages", topicName, localCluster,
                remoteCluster, entries.size());
    }

    if (readBatchSize < MaxReadBatchSize) {
        int newReadBatchSize = Math.min(readBatchSize * 2, MaxReadBatchSize);
        if (log.isDebugEnabled()) {
            log.debug("[{}][{} -> {}] Increasing read batch size from {} to {}", topicName, localCluster,
                    remoteCluster, readBatchSize, newReadBatchSize);
        }
        readBatchSize = newReadBatchSize;
    }

    readFailureBackoff.reduceToHalf();

    boolean atLeastOneMessageSentForReplication = false;

    try {
        // This flag is set to true when we skip atleast one local message,
        // in order to skip remaining local messages.
        boolean isLocalMessageSkippedOnce = false;
        for (int i = 0; i < entries.size(); i++) {
            Entry entry = entries.get(i);
            int length = entry.getLength();
            ByteBuf headersAndPayload = entry.getDataBuffer();
            MessageImpl msg;
            try {
                msg = MessageImpl.deserialize(headersAndPayload);
            } catch (Throwable t) {
                log.error("[{}][{} -> {}] Failed to deserialize message at {} (buffer size: {}): {}",
                        topicName, localCluster, remoteCluster, entry.getPosition(), length,
                        t.getMessage(), t);
                cursor.asyncDelete(entry.getPosition(), this, entry.getPosition());
                entry.release();
                continue;
            }

            if (msg.isReplicated()) {
                // Discard messages that were already replicated into this region
                cursor.asyncDelete(entry.getPosition(), this, entry.getPosition());
                entry.release();
                msg.recycle();
                continue;
            }

            if (msg.hasReplicateTo() && !msg.getReplicateTo().contains(remoteCluster)) {
                if (log.isDebugEnabled()) {
                    log.debug("[{}][{} -> {}] Skipping message at {} / msg-id: {}: replicateTo {}",
                            topicName, localCluster, remoteCluster, entry.getPosition(),
                            msg.getMessageId(), msg.getReplicateTo());
                }
                cursor.asyncDelete(entry.getPosition(), this, entry.getPosition());
                entry.release();
                msg.recycle();
                continue;
            }

            if (msg.isExpired(messageTTLInSeconds)) {
                msgExpired.recordEvent(0 /* no value stat */);
                if (log.isDebugEnabled()) {
                    log.debug("[{}][{} -> {}] Discarding expired message at {} / msg-id: {}", topicName,
                            localCluster, remoteCluster, entry.getPosition(), msg.getMessageId());
                }
                cursor.asyncDelete(entry.getPosition(), this, entry.getPosition());
                entry.release();
                msg.recycle();
                continue;
            }

            if (state.get() != State.Started || isLocalMessageSkippedOnce) {
                // The producer is not ready yet after having stopped/restarted. Drop the message because it will
                // recovered when the producer is ready
                if (log.isDebugEnabled()) {
                    log.debug("[{}][{} -> {}] Dropping read message at {} because producer is not ready",
                            topicName, localCluster, remoteCluster, entry.getPosition());
                }
                isLocalMessageSkippedOnce = true;
                entry.release();
                msg.recycle();
                continue;
            }

            // Increment pending messages for messages produced locally
            pendingMessagesUpdater.incrementAndGet(this);

            msgOut.recordEvent(headersAndPayload.readableBytes());

            msg.setReplicatedFrom(localCluster);

            headersAndPayload.retain();

            producer.sendAsync(msg, ProducerSendCallback.create(this, entry, msg));
            atLeastOneMessageSentForReplication = true;
        }
    } catch (Exception e) {
        log.error("[{}][{} -> {}] Unexpected exception: {}", topicName, localCluster, remoteCluster,
                e.getMessage(), e);
    }

    havePendingRead = FALSE;

    if (atLeastOneMessageSentForReplication && !isWritable()) {
        // Don't read any more entries until the current pending entries are persisted
        if (log.isDebugEnabled()) {
            log.debug("[{}][{} -> {}] Pausing replication traffic. at-least-one: {} is-writable: {}",
                    topicName, localCluster, remoteCluster, atLeastOneMessageSentForReplication,
                    isWritable());
        }
    } else {
        readMoreEntries();
    }
}
From source file:com.yahoo.pulsar.client.impl.ProducerImpl.java
License:Apache License
public void sendAsync(Message message, SendCallback callback) {
    checkArgument(message instanceof MessageImpl);

    if (!isValidProducerState(callback)) {
        return;
    }

    if (!canEnqueueRequest(callback)) {
        return;
    }

    MessageImpl msg = (MessageImpl) message;
    MessageMetadata.Builder msgMetadata = msg.getMessageBuilder();
    ByteBuf payload = msg.getDataBuffer();

    // If compression is enabled, we are compressing, otherwise it will simply use the same buffer
    int uncompressedSize = payload.readableBytes();
    ByteBuf compressedPayload = payload;
    // batch will be compressed when closed
    if (!isBatchMessagingEnabled()) {
        compressedPayload = compressor.encode(payload);
        payload.release();
    }

    if (!msg.isReplicated() && msgMetadata.hasProducerName()) {
        callback.sendComplete(
                new PulsarClientException.InvalidMessageException("Cannot re-use the same message"));
        compressedPayload.release();
        return;
    }

    try {
        synchronized (this) {
            long sequenceId = msgIdGeneratorUpdater.getAndIncrement(this);
            if (!msgMetadata.hasPublishTime()) {
                msgMetadata.setPublishTime(System.currentTimeMillis());

                checkArgument(!msgMetadata.hasProducerName());
                checkArgument(!msgMetadata.hasSequenceId());

                msgMetadata.setProducerName(producerName);
                msgMetadata.setSequenceId(sequenceId);
                if (conf.getCompressionType() != CompressionType.NONE) {
                    msgMetadata.setCompression(convertCompressionType(conf.getCompressionType()));
                    msgMetadata.setUncompressedSize(uncompressedSize);
                }
            }

            if (isBatchMessagingEnabled()) {
                // handle boundary cases where message being added would exceed
                // batch size and/or max message size
                if (batchMessageContainer.hasSpaceInBatch(msg)) {
                    batchMessageContainer.add(msg, callback);
                    payload.release();
                    if (batchMessageContainer.numMessagesInBatch == maxNumMessagesInBatch
                            || batchMessageContainer.currentBatchSizeBytes >= BatchMessageContainer.MAX_MESSAGE_BATCH_SIZE_BYTES) {
                        batchMessageAndSend();
                    }
                } else {
                    doBatchSendAndAdd(msg, callback, payload);
                }
            } else {
                ByteBuf cmd = sendMessage(producerId, sequenceId, 1, msgMetadata.build(), compressedPayload);
                msgMetadata.recycle();

                final OpSendMsg op = OpSendMsg.create(msg, cmd, sequenceId, callback);
                op.setNumMessagesInBatch(1);
                op.setBatchSizeByte(payload.readableBytes());
                pendingMessages.put(op);

                if (isConnected()) {
                    // If we do have a connection, the message is sent immediately, otherwise we'll try again
                    // once a new connection is established
                    cmd.retain();
                    cnx().ctx().channel().eventLoop().execute(WriteInEventLoopCallback.create(this, cnx(), op));
                    stats.updateNumMsgsSent(op.numMessagesInBatch, op.batchSizeByte);
                } else {
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] [{}] Connection is not ready -- sequenceId {}", topic, producerName,
                                sequenceId);
                    }
                }
            }
        }
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        semaphore.release();
        callback.sendComplete(new PulsarClientException(ie));
    } catch (Throwable t) {
        semaphore.release();
        callback.sendComplete(new PulsarClientException(t));
    }
}
From source file:com.yahoo.pulsar.client.impl.ProducerImpl.java
License:Apache License
private void batchMessageAndSend() {
    if (log.isDebugEnabled()) {
        log.debug("[{}] [{}] Batching the messages from the batch container with {} messages", topic,
                producerName, batchMessageContainer.numMessagesInBatch);
    }
    OpSendMsg op = null;
    int numMessagesInBatch = 0;
    try {
        if (!batchMessageContainer.isEmpty()) {
            numMessagesInBatch = batchMessageContainer.numMessagesInBatch;
            ByteBuf compressedPayload = batchMessageContainer.getCompressedBatchMetadataAndPayload();
            long sequenceId = batchMessageContainer.sequenceId;
            ByteBuf cmd = sendMessage(producerId, sequenceId, batchMessageContainer.numMessagesInBatch,
                    batchMessageContainer.setBatchAndBuild(), compressedPayload);

            op = OpSendMsg.create(batchMessageContainer.messages, cmd, sequenceId,
                    batchMessageContainer.firstCallback);

            op.setNumMessagesInBatch(batchMessageContainer.numMessagesInBatch);
            op.setBatchSizeByte(batchMessageContainer.currentBatchSizeBytes);

            batchMessageContainer.clear();

            pendingMessages.put(op);

            if (isConnected()) {
                // If we do have a connection, the message is sent immediately, otherwise we'll try again
                // once a new connection is established
                cmd.retain();
                cnx().ctx().channel().eventLoop().execute(WriteInEventLoopCallback.create(this, cnx(), op));
                stats.updateNumMsgsSent(numMessagesInBatch, op.batchSizeByte);
            } else {
                if (log.isDebugEnabled()) {
                    log.debug("[{}] [{}] Connection is not ready -- sequenceId {}", topic, producerName,
                            sequenceId);
                }
            }
        }
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        semaphore.release(numMessagesInBatch);
        if (op != null) {
            op.callback.sendComplete(new PulsarClientException(ie));
        }
    } catch (Throwable t) {
        semaphore.release(numMessagesInBatch);
        log.warn("[{}] [{}] error while closing out batch -- {}", topic, producerName, t);
        if (op != null) {
            op.callback.sendComplete(new PulsarClientException(t));
        }
    }
}
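In both ProducerImpl snippets, cmd.retain() is taken because the serialized command is owned twice: once by the OpSendMsg kept in pendingMessages for a possible resend, and once by the channel write, which takes ownership of (and eventually releases) the reference it is handed. A simplified standalone sketch of that split ownership using EmbeddedChannel (buffer contents and the resend scenario are illustrative):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.util.CharsetUtil;

public class RetainBeforeWriteSketch {
    public static void main(String[] args) {
        EmbeddedChannel channel = new EmbeddedChannel();
        ByteBuf cmd = Unpooled.copiedBuffer("serialized-command", CharsetUtil.UTF_8);

        // Give one reference to the channel (the write path owns and releases it)
        // and keep our own reference queued for a possible resend after a reconnect.
        channel.writeAndFlush(cmd.retain());

        // Simulate the transport consuming and releasing the written buffer.
        ByteBuf written = channel.readOutbound();
        written.release();

        // Our pending copy is still alive and could be written again.
        System.out.println(cmd.refCnt()); // 1
        cmd.release();                    // drop it once the send is acknowledged
        channel.finish();
    }
}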
From source file:com.yahoo.pulsar.common.api.Commands.java
License:Apache License
public static ByteBuf deSerializeSingleMessageInBatch(ByteBuf uncompressedPayload,
        PulsarApi.SingleMessageMetadata.Builder singleMessageMetadataBuilder, int index, int batchSize)
        throws IOException {
    int singleMetaSize = (int) uncompressedPayload.readUnsignedInt();
    int writerIndex = uncompressedPayload.writerIndex();
    int beginIndex = uncompressedPayload.readerIndex() + singleMetaSize;
    uncompressedPayload.writerIndex(beginIndex);
    ByteBufCodedInputStream stream = ByteBufCodedInputStream.get(uncompressedPayload);
    PulsarApi.SingleMessageMetadata singleMessageMetadata = singleMessageMetadataBuilder.mergeFrom(stream, null)
            .build();

    int singleMessagePayloadSize = singleMessageMetadata.getPayloadSize();

    uncompressedPayload.markReaderIndex();
    ByteBuf singleMessagePayload = uncompressedPayload.slice(uncompressedPayload.readerIndex(),
            singleMessagePayloadSize);
    singleMessagePayload.retain();
    uncompressedPayload.writerIndex(writerIndex);
    uncompressedPayload.resetReaderIndex();
    // reader now points to beginning of payload read; so move it past message payload just read
    if (index < batchSize) {
        uncompressedPayload.readerIndex(uncompressedPayload.readerIndex() + singleMessagePayloadSize);
    }
    return singleMessagePayload;
}
From source file:com.yahoo.pulsar.common.api.DoubleByteBuf.java
License:Apache License
public static ByteBuf get(ByteBuf b1, ByteBuf b2) {
    DoubleByteBuf buf = RECYCLER.get();
    buf.setRefCnt(1);
    // Make sure the buffers are not deallocated as long as we hold them. Also, buffers can get
    // retained/released outside of DoubleByteBuf scope
    buf.b1 = b1.retain();
    buf.b2 = b2.retain();
    buf.setIndex(0, b1.readableBytes() + b2.readableBytes());
    return toLeakAwareBuffer(buf);
}
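DoubleByteBuf retains both components so that its own lifetime is independent of the callers' references to b1 and b2. The same idea can be sketched with Netty's built-in composite buffers; the sketch below assumes the usual ownership convention that Unpooled.wrappedBuffer(...) takes over the references it is given and releases the components when the composite itself is released, so the extra retain() is what lets the original owners keep using their buffers (contents are illustrative):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.CharsetUtil;

public class TwoBufferAggregateSketch {
    public static void main(String[] args) {
        ByteBuf header = Unpooled.copiedBuffer("header", CharsetUtil.UTF_8);
        ByteBuf body = Unpooled.copiedBuffer("body", CharsetUtil.UTF_8);

        // The composite owns one reference per component, so retain first if the
        // original owners will keep using (and eventually releasing) header and body.
        ByteBuf combined = Unpooled.wrappedBuffer(header.retain(), body.retain());

        System.out.println(combined.toString(CharsetUtil.UTF_8)); // "headerbody"

        combined.release(); // drops the references held on behalf of the composite
        header.release();   // original owners drop their own references
        body.release();
    }
}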