List of usage examples for io.netty.buffer.ByteBuf#writerIndex
public abstract ByteBuf writerIndex(int writerIndex);
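Before the project examples, a minimal standalone sketch of the contract: a ByteBuf keeps independent readerIndex and writerIndex cursors, normal write operations advance writerIndex automatically, and calling writerIndex(int) moves the cursor directly. The class name and buffer sizes here are illustrative only.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class WriterIndexSketch {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(16);       // readerIndex = 0, writerIndex = 0
        buf.writeInt(42);                        // write ops advance writerIndex to 4
        System.out.println(buf.readableBytes()); // 4

        // Setting writerIndex directly exposes bytes [readerIndex, writerIndex)
        // as readable without copying anything; the newly exposed bytes contain
        // whatever is already in the backing memory.
        buf.writerIndex(8);
        System.out.println(buf.readableBytes()); // 8

        // The new index must lie in [readerIndex, capacity()]; otherwise
        // writerIndex(int) throws IndexOutOfBoundsException.
        buf.release();
    }
}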
From source file:org.apache.bookkeeper.util.ByteBufListTest.java
License:Apache License
@Test
public void testEncoder() throws Exception {
    ByteBuf b1 = PooledByteBufAllocator.DEFAULT.heapBuffer(128, 128);
    b1.writerIndex(b1.capacity()); // mark the whole buffer as written
    ByteBuf b2 = PooledByteBufAllocator.DEFAULT.heapBuffer(128, 128);
    b2.writerIndex(b2.capacity());

    ByteBufList buf = ByteBufList.get(b1, b2);
    ChannelHandlerContext ctx = new MockChannelHandlerContext();
    ByteBufList.ENCODER.write(ctx, buf, null);

    // the encoder releases the list and, with it, both component buffers
    assertEquals(buf.refCnt(), 0);
    assertEquals(b1.refCnt(), 0);
    assertEquals(b2.refCnt(), 0);
}
From source file:org.apache.distributedlog.io.LZ4CompressionCodec.java
License:Apache License
@Override
public ByteBuf compress(ByteBuf uncompressed, int headerLen) {
    checkNotNull(uncompressed);
    checkArgument(uncompressed.readableBytes() > 0);
    int uncompressedLen = uncompressed.readableBytes();
    int maxLen = compressor.maxCompressedLength(uncompressedLen);
    // get the source bytebuffer
    ByteBuffer uncompressedNio = uncompressed.nioBuffer(uncompressed.readerIndex(), uncompressedLen);
    ByteBuf compressed = PooledByteBufAllocator.DEFAULT.buffer(maxLen + headerLen, maxLen + headerLen);
    ByteBuffer compressedNio = compressed.nioBuffer(headerLen, maxLen);
    int compressedLen = compressor.compress(uncompressedNio, uncompressedNio.position(), uncompressedLen,
            compressedNio, compressedNio.position(), maxLen);
    compressed.writerIndex(compressedLen + headerLen);
    return compressed;
}
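One idiom above recurs in most of the remaining examples and is worth spelling out: writing through the ByteBuffer returned by nioBuffer() bypasses the ByteBuf's own cursors, so the code must call writerIndex(...) afterwards to declare how many bytes are now valid. A minimal sketch of the idiom; the helper name fillViaNio and the 64-byte size are illustrative, not taken from any of the projects listed here.

static ByteBuf fillViaNio() {
    ByteBuf out = PooledByteBufAllocator.DEFAULT.buffer(64, 64);
    ByteBuffer nio = out.nioBuffer(0, 64); // shares memory but keeps its own position
    nio.putLong(123L);                     // advances only the ByteBuffer's position
    // out.readableBytes() is still 0 here: the ByteBuf never saw the write.
    out.writerIndex(Long.BYTES);           // publish the 8 written bytes to the ByteBuf
    return out;
}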
From source file:org.apache.distributedlog.io.LZ4CompressionCodec.java
License:Apache License
@Override
// length parameter is ignored here because of the way the fastDecompressor works.
public ByteBuf decompress(ByteBuf compressed, int decompressedSize) {
    checkNotNull(compressed);
    checkArgument(compressed.readableBytes() >= 0);
    checkArgument(decompressedSize >= 0);
    ByteBuf uncompressed = PooledByteBufAllocator.DEFAULT.buffer(decompressedSize, decompressedSize);
    ByteBuffer uncompressedNio = uncompressed.nioBuffer(0, decompressedSize);
    ByteBuffer compressedNio = compressed.nioBuffer(compressed.readerIndex(), compressed.readableBytes());
    decompressor.decompress(compressedNio, compressedNio.position(),
            uncompressedNio, uncompressedNio.position(), uncompressedNio.remaining());
    uncompressed.writerIndex(decompressedSize);
    return uncompressed;
}
From source file:org.apache.drill.exec.vector.VectorTrimmer.java
License:Apache License
public static void trim(ByteBuf data, int idx) {
    data.writerIndex(idx);
    if (data instanceof DrillBuf) {
        // data.capacity(idx);
        data.writerIndex(idx);
    }
}
From source file:org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.java
License:Apache License
private <A> void flush0(final A attachment, final CompletionHandler<Long, ? super A> handler,
        boolean syncBlock) {
    if (state != State.STREAMING) {
        handler.failed(new IOException("stream already broken"), attachment);
        return;
    }
    int dataLen = buf.readableBytes();
    final long ackedLength = nextPacketOffsetInBlock + dataLen;
    if (ackedLength == locatedBlock.getBlock().getNumBytes()) {
        // no new data, just return
        handler.completed(locatedBlock.getBlock().getNumBytes(), attachment);
        return;
    }
    Promise<Void> promise = eventLoop.newPromise();
    promise.addListener(new FutureListener<Void>() {

        @Override
        public void operationComplete(Future<Void> future) throws Exception {
            if (future.isSuccess()) {
                locatedBlock.getBlock().setNumBytes(ackedLength);
                handler.completed(ackedLength, attachment);
            } else {
                handler.failed(future.cause(), attachment);
            }
        }
    });
    Callback c = waitingAckQueue.peekLast();
    if (c != null && ackedLength == c.ackedLength) {
        // just append it to the tail of waiting ack queue, do not issue new hflush request.
        waitingAckQueue.addLast(new Callback(promise, ackedLength, Collections.<Channel>emptyList()));
        return;
    }
    int chunkLen = summer.getBytesPerChecksum();
    int trailingPartialChunkLen = dataLen % chunkLen;
    int numChecks = dataLen / chunkLen + (trailingPartialChunkLen != 0 ? 1 : 0);
    int checksumLen = numChecks * summer.getChecksumSize();
    ByteBuf checksumBuf = alloc.directBuffer(checksumLen);
    summer.calculateChunkedSums(buf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen));
    // the checksums were written through the NIO view, so set writerIndex explicitly
    checksumBuf.writerIndex(checksumLen);
    PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock,
            nextPacketSeqno, false, dataLen, syncBlock);
    int headerLen = header.getSerializedSize();
    ByteBuf headerBuf = alloc.buffer(headerLen);
    header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
    headerBuf.writerIndex(headerLen);
    waitingAckQueue.addLast(new Callback(promise, ackedLength, datanodeList));
    // each channel gets a retained duplicate, since the channel write releases its argument
    for (Channel ch : datanodeList) {
        ch.write(headerBuf.duplicate().retain());
        ch.write(checksumBuf.duplicate().retain());
        ch.writeAndFlush(buf.duplicate().retain());
    }
    // drop the local references now that every channel holds its own
    checksumBuf.release();
    headerBuf.release();
    ByteBuf newBuf = alloc.directBuffer().ensureWritable(trailingPartialChunkLen);
    if (trailingPartialChunkLen != 0) {
        buf.readerIndex(dataLen - trailingPartialChunkLen).readBytes(newBuf, trailingPartialChunkLen);
    }
    buf.release();
    this.buf = newBuf;
    nextPacketOffsetInBlock += dataLen - trailingPartialChunkLen;
    nextPacketSeqno++;
}
From source file:org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.java
License:Apache License
private void endBlock(Promise<Void> promise, long size) {
    if (state != State.STREAMING) {
        promise.tryFailure(new IOException("stream already broken"));
        return;
    }
    if (!waitingAckQueue.isEmpty()) {
        promise.tryFailure(new IllegalStateException("should call flush first before calling close"));
        return;
    }
    state = State.CLOSING;
    PacketHeader header = new PacketHeader(4, size, nextPacketSeqno, true, 0, false);
    buf.release();
    buf = null;
    int headerLen = header.getSerializedSize();
    ByteBuf headerBuf = alloc.buffer(headerLen);
    header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
    headerBuf.writerIndex(headerLen);
    waitingAckQueue.add(new Callback(promise, size, datanodeList));
    for (Channel ch : datanodeList) {
        ch.writeAndFlush(headerBuf.duplicate().retain());
    }
    headerBuf.release();
}
From source file:org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutput.java
License:Apache License
private void setupReceiver(final int timeoutMs) {
    SimpleChannelInboundHandler<PipelineAckProto> ackHandler =
            new SimpleChannelInboundHandler<PipelineAckProto>() {

        @Override
        public boolean isSharable() {
            return true;
        }

        @Override
        protected void channelRead0(final ChannelHandlerContext ctx, PipelineAckProto ack) throws Exception {
            final Status reply = getStatus(ack);
            if (reply != Status.SUCCESS) {
                failed(ctx.channel(), new Supplier<Throwable>() {

                    @Override
                    public Throwable get() {
                        return new IOException("Bad response " + reply + " for block "
                                + locatedBlock.getBlock() + " from datanode "
                                + ctx.channel().remoteAddress());
                    }
                });
                return;
            }
            if (PipelineAck.isRestartOOBStatus(reply)) {
                failed(ctx.channel(), new Supplier<Throwable>() {

                    @Override
                    public Throwable get() {
                        return new IOException("Restart response " + reply + " for block "
                                + locatedBlock.getBlock() + " from datanode "
                                + ctx.channel().remoteAddress());
                    }
                });
                return;
            }
            if (ack.getSeqno() == HEART_BEAT_SEQNO) {
                return;
            }
            completed(ctx.channel());
        }

        @Override
        public void channelInactive(final ChannelHandlerContext ctx) throws Exception {
            failed(ctx.channel(), new Supplier<Throwable>() {

                @Override
                public Throwable get() {
                    return new IOException("Connection to " + ctx.channel().remoteAddress() + " closed");
                }
            });
        }

        @Override
        public void exceptionCaught(ChannelHandlerContext ctx, final Throwable cause) throws Exception {
            failed(ctx.channel(), new Supplier<Throwable>() {

                @Override
                public Throwable get() {
                    return cause;
                }
            });
        }

        @Override
        public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
            if (evt instanceof IdleStateEvent) {
                IdleStateEvent e = (IdleStateEvent) evt;
                if (e.state() == IdleState.READER_IDLE) {
                    failed(ctx.channel(), new Supplier<Throwable>() {

                        @Override
                        public Throwable get() {
                            return new IOException("Timeout(" + timeoutMs + "ms) waiting for response");
                        }
                    });
                } else if (e.state() == IdleState.WRITER_IDLE) {
                    PacketHeader heartbeat = new PacketHeader(4, 0, HEART_BEAT_SEQNO, false, 0, false);
                    int len = heartbeat.getSerializedSize();
                    ByteBuf buf = alloc.buffer(len);
                    heartbeat.putInBuffer(buf.nioBuffer(0, len));
                    buf.writerIndex(len);
                    ctx.channel().writeAndFlush(buf);
                }
                return;
            }
            super.userEventTriggered(ctx, evt);
        }
    };
    for (Channel ch : datanodeList) {
        ch.pipeline().addLast(
                new IdleStateHandler(timeoutMs, timeoutMs / 2, 0, TimeUnit.MILLISECONDS),
                new ProtobufVarint32FrameDecoder(),
                new ProtobufDecoder(PipelineAckProto.getDefaultInstance()),
                ackHandler);
        ch.config().setAutoRead(true);
    }
}
From source file:org.apache.pulsar.client.impl.MessageCrypto.java
License:Apache License
public synchronized ByteBuf encrypt(Set<String> encKeys, CryptoKeyReader keyReader,
        MessageMetadata.Builder msgMetadata, ByteBuf payload) throws PulsarClientException {

    if (encKeys.isEmpty()) {
        return payload;
    }

    // Update message metadata with encrypted data key
    for (String keyName : encKeys) {
        if (encryptedDataKeyMap.get(keyName) == null) {
            // Attempt to load the key. This will allow us to load keys as soon as
            // a new key is added to producer config
            addPublicKeyCipher(keyName, keyReader);
        }
        EncryptionKeyInfo keyInfo = encryptedDataKeyMap.get(keyName);
        if (keyInfo != null) {
            if (keyInfo.getMetadata() != null && !keyInfo.getMetadata().isEmpty()) {
                List<KeyValue> kvList = new ArrayList<KeyValue>();
                keyInfo.getMetadata().forEach((key, value) -> {
                    kvList.add(KeyValue.newBuilder().setKey(key).setValue(value).build());
                });
                msgMetadata.addEncryptionKeys(EncryptionKeys.newBuilder().setKey(keyName)
                        .setValue(ByteString.copyFrom(keyInfo.getKey())).addAllMetadata(kvList).build());
            } else {
                msgMetadata.addEncryptionKeys(EncryptionKeys.newBuilder().setKey(keyName)
                        .setValue(ByteString.copyFrom(keyInfo.getKey())).build());
            }
        } else {
            // We should never reach here.
            log.error("{} Failed to find encrypted Data key for key {}.", logCtx, keyName);
        }
    }

    // Create gcm param
    // TODO: Replace random with counter and periodic refreshing based on timer/counter value
    secureRandom.nextBytes(iv);
    GCMParameterSpec gcmParam = new GCMParameterSpec(tagLen, iv);

    // Update message metadata with encryption param
    msgMetadata.setEncryptionParam(ByteString.copyFrom(iv));

    ByteBuf targetBuf = null;
    try {
        // Encrypt the data
        cipher.init(Cipher.ENCRYPT_MODE, dataKey, gcmParam);
        ByteBuffer sourceNioBuf = payload.nioBuffer(payload.readerIndex(), payload.readableBytes());
        int maxLength = cipher.getOutputSize(payload.readableBytes());
        targetBuf = PooledByteBufAllocator.DEFAULT.buffer(maxLength, maxLength);
        ByteBuffer targetNioBuf = targetBuf.nioBuffer(0, maxLength);
        int bytesStored = cipher.doFinal(sourceNioBuf, targetNioBuf);
        targetBuf.writerIndex(bytesStored);
    } catch (IllegalBlockSizeException | BadPaddingException | InvalidKeyException
            | InvalidAlgorithmParameterException | ShortBufferException e) {
        // cipher.init() can fail before targetBuf is allocated, so guard the release
        if (targetBuf != null) {
            targetBuf.release();
        }
        log.error("{} Failed to encrypt message. {}", logCtx, e);
        throw new PulsarClientException.CryptoException(e.getMessage());
    }

    payload.release();
    return targetBuf;
}
From source file:org.apache.pulsar.client.impl.MessageCrypto.java
License:Apache License
private ByteBuf decryptData(SecretKey dataKeySecret, MessageMetadata msgMetadata, ByteBuf payload) {

    // unpack iv and encrypted data
    ByteString ivString = msgMetadata.getEncryptionParam();
    ivString.copyTo(iv, 0);

    GCMParameterSpec gcmParams = new GCMParameterSpec(tagLen, iv);
    ByteBuf targetBuf = null;
    try {
        cipher.init(Cipher.DECRYPT_MODE, dataKeySecret, gcmParams);
        ByteBuffer sourceNioBuf = payload.nioBuffer(payload.readerIndex(), payload.readableBytes());
        int maxLength = cipher.getOutputSize(payload.readableBytes());
        targetBuf = PooledByteBufAllocator.DEFAULT.buffer(maxLength, maxLength);
        ByteBuffer targetNioBuf = targetBuf.nioBuffer(0, maxLength);
        int decryptedSize = cipher.doFinal(sourceNioBuf, targetNioBuf);
        targetBuf.writerIndex(decryptedSize);
    } catch (InvalidKeyException | InvalidAlgorithmParameterException | IllegalBlockSizeException
            | BadPaddingException | ShortBufferException e) {
        log.error("{} Failed to decrypt message {}", logCtx, e.getMessage());
        if (targetBuf != null) {
            targetBuf.release();
            targetBuf = null;
        }
    }

    return targetBuf;
}
From source file:org.apache.pulsar.common.api.ByteBufPairTest.java
License:Apache License
@Test
public void testDoubleByteBuf() throws Exception {
    ByteBuf b1 = PooledByteBufAllocator.DEFAULT.heapBuffer(128, 128);
    b1.writerIndex(b1.capacity()); // each buffer now reports 128 readable bytes
    ByteBuf b2 = PooledByteBufAllocator.DEFAULT.heapBuffer(128, 128);
    b2.writerIndex(b2.capacity());

    ByteBufPair buf = ByteBufPair.get(b1, b2);
    assertEquals(buf.readableBytes(), 256);
    assertEquals(buf.getFirst(), b1);
    assertEquals(buf.getSecond(), b2);

    assertEquals(buf.refCnt(), 1);
    assertEquals(b1.refCnt(), 1);
    assertEquals(b2.refCnt(), 1);

    // releasing the pair releases both component buffers
    buf.release();
    assertEquals(buf.refCnt(), 0);
    assertEquals(b1.refCnt(), 0);
    assertEquals(b2.refCnt(), 0);
}