List of usage examples for io.netty.buffer Unpooled EMPTY_BUFFER
ByteBuf EMPTY_BUFFER
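For orientation, here is a minimal standalone sketch (not drawn from any of the projects listed below) of what Unpooled.EMPTY_BUFFER provides: a shared ByteBuf constant with zero capacity and zero readable bytes, typically written when a frame or body must carry no payload. The class name EmptyBufferDemo is illustrative only.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

// Minimal sketch; EmptyBufferDemo is a made-up name for illustration.
public class EmptyBufferDemo {
    public static void main(String[] args) {
        ByteBuf empty = Unpooled.EMPTY_BUFFER;
        System.out.println(empty.capacity());       // 0
        System.out.println(empty.readableBytes());  // 0
        System.out.println(empty.isReadable());     // false
        // The constant is shared rather than pooled, so releasing it here is harmless.
        empty.release();
    }
}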
From source file:org.acmsl.katas.antlr4netty.InterpreterServerChannelHandler.java
License:Open Source License
/**
 * {@inheritDoc}
 */
@Override
public void channelReadComplete(final ChannelHandlerContext ctx) throws Exception {
    ctx.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE);
}
From source file:org.aotorrent.common.Piece.java
License:Apache License
public ByteBuf read(int offset, int length) throws IOException {
    //LOGGER.debug("READ is complete = " + isComplete());
    if (isComplete()) {
        ByteBuf bb = (softBuffer != null) ? softBuffer.get() : null;
        if (bb == null) {
            bb = torrent.getFileStorage().read(index, pieceLength);
            softBuffer = new SoftReference<>(bb);
            torrent.missCache();
        } else {
            torrent.hitCache();
        }
        ByteBuf buf = Unpooled.buffer(length, length);
        bb.readerIndex(offset);
        bb.readBytes(buf);
        return buf;
    } else {
        return Unpooled.EMPTY_BUFFER;
    }
}
From source file:org.apache.flink.runtime.io.network.netty.OutboundEnvelopeEncoder.java
License:Apache License
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
    Envelope env = (Envelope) msg;

    ByteBuf buf = ctx.alloc().directBuffer();

    encode(env, buf);

    if (buf.isReadable()) {
        ctx.write(buf, promise);
    } else {
        buf.release();
        ctx.write(Unpooled.EMPTY_BUFFER, promise);
    }
}
From source file:org.apache.hadoop.hdfs.server.datanode.web.SimpleHttpProxyHandler.java
License:Apache License
private static void closeOnFlush(Channel ch) {
    if (ch.isActive()) {
        ch.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE);
    }
}
From source file:org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.java
License:Apache License
private static void writeContinueHeader(ChannelHandlerContext ctx) {
    DefaultHttpResponse r = new DefaultFullHttpResponse(HTTP_1_1, CONTINUE, Unpooled.EMPTY_BUFFER);
    ctx.writeAndFlush(r);
}
From source file:org.apache.jackrabbit.oak.plugins.segment.standby.server.StandbyServerHandler.java
License:Apache License
@Override
public void channelRead0(ChannelHandlerContext ctx, String payload) throws Exception {
    state = "got message";

    String request = Messages.extractMessageFrom(payload);
    InetSocketAddress client = (InetSocketAddress) ctx.channel().remoteAddress();
    if (!clientAllowed(client)) {
        log.warn("Got request from client " + client
                + " which is not in the allowed ip ranges! Request will be ignored.");
    } else {
        String clientID = Messages.extractClientFrom(payload);
        observer.gotMessageFrom(clientID, request, client);
        if (Messages.GET_HEAD.equalsIgnoreCase(request)) {
            RecordId r = headId();
            if (r != null) {
                ctx.writeAndFlush(r);
                return;
            }
        } else if (request.startsWith(Messages.GET_SEGMENT)) {
            String sid = request.substring(Messages.GET_SEGMENT.length());
            log.debug("request segment id {}", sid);
            UUID uuid = UUID.fromString(sid);

            Segment s = null;

            for (int i = 0; i < 10; i++) {
                try {
                    s = store.readSegment(new SegmentId(store.getTracker(),
                            uuid.getMostSignificantBits(), uuid.getLeastSignificantBits()));
                } catch (IllegalRepositoryStateException e) {
                    // segment not found
                    log.debug("waiting for segment. Got exception: " + e.getMessage());
                    TimeUnit.MILLISECONDS.sleep(2000);
                }
                if (s != null) break;
            }

            if (s != null) {
                log.debug("sending segment " + sid + " to " + client);
                ctx.writeAndFlush(s);
                observer.didSendSegmentBytes(clientID, s.size());
                return;
            }
        } else if (request.startsWith(Messages.GET_BLOB)) {
            String bid = request.substring(Messages.GET_BLOB.length());
            log.debug("request blob id {}", bid);
            Blob b = store.readBlob(bid);
            log.debug("sending blob " + bid + " to " + client);
            ctx.writeAndFlush(b);
            observer.didSendBinariesBytes(clientID, Math.max(0, (int) b.length()));
            return;
        } else {
            log.warn("Unknown request {}, ignoring.", request);
        }
    }
    ctx.writeAndFlush(Unpooled.EMPTY_BUFFER);
}
From source file:org.apache.pulsar.client.impl.CompactedOutBatchMessageTest.java
License:Apache License
@Test
public void testCompactedOutMessages() throws Exception {
    final String topic1 = "persistent://my-property/my-ns/my-topic";

    MessageMetadata metadata = MessageMetadata.newBuilder().setProducerName("foobar").setSequenceId(1)
            .setPublishTime(1).setNumMessagesInBatch(3).build();

    // build a buffer with 4 messages; all but the third ("key3") are compacted out
    ByteBuf batchBuffer = Unpooled.buffer(1000);
    Commands.serializeSingleMessageInBatchWithPayload(
            SingleMessageMetadata.newBuilder().setCompactedOut(true).setPartitionKey("key1"),
            Unpooled.EMPTY_BUFFER, batchBuffer);
    Commands.serializeSingleMessageInBatchWithPayload(
            SingleMessageMetadata.newBuilder().setCompactedOut(true).setPartitionKey("key2"),
            Unpooled.EMPTY_BUFFER, batchBuffer);
    Commands.serializeSingleMessageInBatchWithPayload(
            SingleMessageMetadata.newBuilder().setCompactedOut(false).setPartitionKey("key3"),
            Unpooled.EMPTY_BUFFER, batchBuffer);
    Commands.serializeSingleMessageInBatchWithPayload(
            SingleMessageMetadata.newBuilder().setCompactedOut(true).setPartitionKey("key4"),
            Unpooled.EMPTY_BUFFER, batchBuffer);

    try (ConsumerImpl<byte[]> consumer = (ConsumerImpl<byte[]>) pulsarClient.newConsumer().topic(topic1)
            .subscriptionName("my-subscriber-name").subscribe()) {
        // shove it in the sideways
        consumer.receiveIndividualMessagesFromBatch(metadata, 0, batchBuffer,
                MessageIdData.newBuilder().setLedgerId(1234).setEntryId(567).build(), consumer.cnx());

        Message<?> m = consumer.receive();
        assertEquals(((BatchMessageIdImpl) m.getMessageId()).getLedgerId(), 1234);
        assertEquals(((BatchMessageIdImpl) m.getMessageId()).getEntryId(), 567);
        assertEquals(((BatchMessageIdImpl) m.getMessageId()).getBatchIndex(), 2);
        assertEquals(m.getKey(), "key3");

        assertEquals(consumer.numMessagesInQueue(), 0);
    }
}
From source file:org.apache.pulsar.client.impl.RawBatchConverter.java
License:Apache License
/**
 * Take a batched message and a filter, and return a message containing only the sub-messages
 * which match the filter. Returns an empty optional if no messages match.
 *
 * This takes ownership of the passed-in message; if the returned optional is not empty,
 * ownership of that message is passed back to the caller as well.
 */
public static Optional<RawMessage> rebatchMessage(RawMessage msg, BiPredicate<String, MessageId> filter)
        throws IOException {
    checkArgument(msg.getMessageIdData().getBatchIndex() == -1);

    ByteBuf payload = msg.getHeadersAndPayload();
    MessageMetadata metadata = Commands.parseMessageMetadata(payload);
    ByteBuf batchBuffer = PooledByteBufAllocator.DEFAULT.buffer(payload.capacity());

    CompressionType compressionType = metadata.getCompression();
    CompressionCodec codec = CompressionCodecProvider.getCompressionCodec(compressionType);
    int uncompressedSize = metadata.getUncompressedSize();
    ByteBuf uncompressedPayload = codec.decode(payload, uncompressedSize);
    try {
        int batchSize = metadata.getNumMessagesInBatch();
        int messagesRetained = 0;

        SingleMessageMetadata.Builder emptyMetadataBuilder = SingleMessageMetadata.newBuilder()
                .setCompactedOut(true);
        for (int i = 0; i < batchSize; i++) {
            SingleMessageMetadata.Builder singleMessageMetadataBuilder = SingleMessageMetadata.newBuilder();
            ByteBuf singleMessagePayload = Commands.deSerializeSingleMessageInBatch(uncompressedPayload,
                    singleMessageMetadataBuilder, 0, batchSize);
            MessageId id = new BatchMessageIdImpl(msg.getMessageIdData().getLedgerId(),
                    msg.getMessageIdData().getEntryId(), msg.getMessageIdData().getPartition(), i);
            if (!singleMessageMetadataBuilder.hasPartitionKey()) {
                messagesRetained++;
                Commands.serializeSingleMessageInBatchWithPayload(singleMessageMetadataBuilder,
                        singleMessagePayload, batchBuffer);
            } else if (filter.test(singleMessageMetadataBuilder.getPartitionKey(), id)
                    && singleMessagePayload.readableBytes() > 0) {
                messagesRetained++;
                Commands.serializeSingleMessageInBatchWithPayload(singleMessageMetadataBuilder,
                        singleMessagePayload, batchBuffer);
            } else {
                Commands.serializeSingleMessageInBatchWithPayload(emptyMetadataBuilder,
                        Unpooled.EMPTY_BUFFER, batchBuffer);
            }
            singleMessageMetadataBuilder.recycle();
            singleMessagePayload.release();
        }
        emptyMetadataBuilder.recycle();

        if (messagesRetained > 0) {
            int newUncompressedSize = batchBuffer.readableBytes();
            ByteBuf compressedPayload = codec.encode(batchBuffer);

            MessageMetadata.Builder metadataBuilder = metadata.toBuilder();
            metadataBuilder.setUncompressedSize(newUncompressedSize);
            MessageMetadata newMetadata = metadataBuilder.build();

            ByteBuf metadataAndPayload = Commands.serializeMetadataAndPayload(Commands.ChecksumType.Crc32c,
                    newMetadata, compressedPayload);
            Optional<RawMessage> result = Optional
                    .of(new RawMessageImpl(msg.getMessageIdData(), metadataAndPayload));
            metadataBuilder.recycle();
            newMetadata.recycle();
            metadataAndPayload.release();
            compressedPayload.release();
            return result;
        } else {
            return Optional.empty();
        }
    } finally {
        batchBuffer.release();
        metadata.recycle();
        msg.close();
    }
}
From source file:org.apache.pulsar.client.impl.RawMessageImpl.java
License:Apache License
@Override
public void close() {
    headersAndPayload.release();
    headersAndPayload = Unpooled.EMPTY_BUFFER;
}
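The close() above releases the real payload and then points the field at EMPTY_BUFFER, so any later access sees an empty buffer instead of a released one. A generic sketch of that sentinel pattern follows; the PayloadHolder class and its members are illustrative and not part of the Pulsar source.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

// Illustrative holder: EMPTY_BUFFER is used as a post-release sentinel so
// callers never touch a deallocated buffer.
final class PayloadHolder implements AutoCloseable {
    private ByteBuf payload;

    PayloadHolder(ByteBuf payload) {
        this.payload = payload;
    }

    ByteBuf payload() {
        return payload;
    }

    @Override
    public void close() {
        payload.release();
        // Reading the shared empty constant yields zero bytes instead of an
        // IllegalReferenceCountException from a released buffer.
        payload = Unpooled.EMPTY_BUFFER;
    }
}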
From source file:org.apache.pulsar.compaction.CompactedTopicTest.java
License:Apache License
/**
 * Build a compacted ledger, and return the id of the ledger, the position of the different
 * entries in the ledger, and a list of gaps, and the entry which should be returned after the gap.
 */
private Triple<Long, List<Pair<MessageIdData, Long>>, List<Pair<MessageIdData, Long>>> buildCompactedLedger(
        BookKeeper bk, int count) throws Exception {
    LedgerHandle lh = bk.createLedger(1, 1, Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE,
            Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD);
    List<Pair<MessageIdData, Long>> positions = new ArrayList<>();
    List<Pair<MessageIdData, Long>> idsInGaps = new ArrayList<>();

    AtomicLong ledgerIds = new AtomicLong(10L);
    AtomicLong entryIds = new AtomicLong(0L);
    CompletableFuture.allOf(IntStream.range(0, count).mapToObj((i) -> {
        List<MessageIdData> idsInGap = new ArrayList<MessageIdData>();
        if (r.nextInt(10) == 1) {
            long delta = r.nextInt(10) + 1;
            idsInGap.add(MessageIdData.newBuilder().setLedgerId(ledgerIds.get())
                    .setEntryId(entryIds.get() + 1).build());
            ledgerIds.addAndGet(delta);
            entryIds.set(0);
        }
        long delta = r.nextInt(5);
        if (delta != 0) {
            idsInGap.add(MessageIdData.newBuilder().setLedgerId(ledgerIds.get())
                    .setEntryId(entryIds.get() + 1).build());
        }
        MessageIdData id = MessageIdData.newBuilder().setLedgerId(ledgerIds.get())
                .setEntryId(entryIds.addAndGet(delta + 1)).build();

        @Cleanup
        RawMessage m = new RawMessageImpl(id, Unpooled.EMPTY_BUFFER);

        CompletableFuture<Void> f = new CompletableFuture<>();
        ByteBuf buffer = m.serialize();

        lh.asyncAddEntry(buffer, (rc, ledger, eid, ctx) -> {
            if (rc != BKException.Code.OK) {
                f.completeExceptionally(BKException.create(rc));
            } else {
                positions.add(Pair.of(id, eid));
                idsInGap.forEach((gid) -> idsInGaps.add(Pair.of(gid, eid)));
                f.complete(null);
            }
        }, null);
        return f;
    }).toArray(CompletableFuture[]::new)).get();
    lh.close();
    return Triple.of(lh.getId(), positions, idsInGaps);
}