List of usage examples for io.netty.channel.ChannelHandlerContext.alloc()
ByteBufAllocator alloc();
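alloc() returns the ByteBufAllocator configured for the channel, which is the preferred way to obtain buffers inside a handler because the allocator may be pooled and/or direct depending on the channel configuration. Before the per-project examples below, here is a minimal self-contained sketch of the typical allocate-write-flush pattern; the handler class and the "PING" payload are illustrative assumptions, not taken from any of the source files listed here.

import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import java.nio.charset.StandardCharsets;

// Hypothetical handler, for illustration only: allocates a length-prefixed
// message from the channel's configured allocator and flushes it.
public class GreetingHandler extends ChannelInboundHandlerAdapter {
    @Override
    public void channelActive(ChannelHandlerContext ctx) {
        byte[] payload = "PING".getBytes(StandardCharsets.UTF_8);
        // ctx.alloc() returns the channel's ByteBufAllocator; the buffer it
        // hands back may be pooled and/or direct depending on configuration.
        ByteBuf buf = ctx.alloc().buffer(4 + payload.length);
        buf.writeInt(payload.length);
        buf.writeBytes(payload);
        // writeAndFlush() takes ownership of the buffer and releases it once
        // it has been written, so no manual release is needed here.
        ctx.writeAndFlush(buf);
    }
}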
From source file:org.apache.hadoop.hbase.security.NettyHBaseSaslRpcClientHandler.java
License:Apache License
private void writeResponse(ChannelHandlerContext ctx, byte[] response) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Will send token of size " + response.length + " from initSASLContext.");
    }
    ctx.writeAndFlush(
        ctx.alloc().buffer(4 + response.length).writeInt(response.length).writeBytes(response));
}
From source file:org.apache.hadoop.hbase.security.SaslClientHandler.java
License:Apache License
/**
 * Write SASL token
 * @param ctx to write to
 * @param saslToken to write
 */
private void writeSaslToken(final ChannelHandlerContext ctx, byte[] saslToken) {
    ByteBuf b = ctx.alloc().buffer(4 + saslToken.length);
    b.writeInt(saslToken.length);
    b.writeBytes(saslToken, 0, saslToken.length);
    ctx.writeAndFlush(b).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                exceptionCaught(ctx, future.cause());
            }
        }
    });
}
From source file:org.apache.hadoop.hbase.security.SaslWrapHandler.java
License:Apache License
@Override
public void flush(ChannelHandlerContext ctx) throws Exception {
    if (queue.isEmpty()) {
        return;
    }
    ByteBuf buf = null;
    try {
        ChannelPromise promise = ctx.newPromise();
        int readableBytes = queue.readableBytes();
        buf = queue.remove(readableBytes, promise);
        byte[] bytes = new byte[readableBytes];
        buf.readBytes(bytes);
        byte[] wrapperBytes = saslClient.wrap(bytes, 0, bytes.length);
        ChannelPromise lenPromise = ctx.newPromise();
        ctx.write(ctx.alloc().buffer(4).writeInt(wrapperBytes.length), lenPromise);
        ChannelPromise contentPromise = ctx.newPromise();
        ctx.write(Unpooled.wrappedBuffer(wrapperBytes), contentPromise);
        PromiseCombiner combiner = new PromiseCombiner();
        combiner.addAll(lenPromise, contentPromise);
        combiner.finish(promise);
        ctx.flush();
    } finally {
        if (buf != null) {
            ReferenceCountUtil.safeRelease(buf);
        }
    }
}
From source file:org.apache.hadoop.hdfs.server.datanode.web.dtp.DtpHttp2FrameListener.java
License:Apache License
@Override
public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers,
        int streamDependency, short weight, boolean exclusive, int padding, boolean endStream)
        throws Http2Exception {
    encoder.writeHeaders(ctx, streamId,
        new DefaultHttp2Headers().status(HttpResponseStatus.OK.codeAsText()), 0, false,
        ctx.newPromise());
    encoder.writeData(ctx, streamId,
        ctx.alloc().buffer().writeBytes("HTTP/2 DTP".getBytes(StandardCharsets.UTF_8)), 0, true,
        ctx.newPromise());
}
From source file:org.apache.hadoop.hive.llap.ChannelOutputStream.java
License:Apache License
public ChannelOutputStream(ChannelHandlerContext chc, String id, int bufSize, int maxOutstandingWrites) {
    this.chc = chc;
    this.id = id;
    this.bufSize = bufSize;
    this.buf = chc.alloc().buffer(bufSize);
    this.maxPendingWrites = maxOutstandingWrites;
}
From source file:org.apache.hama.ipc.AsyncServer.java
License:Apache License
/**
 * This is a wrapper around {@link WritableByteChannel#write(ByteBuffer)}. If
 * the amount of data is large, it writes to the channel in smaller chunks.
 * This avoids the JDK creating many direct buffers as the size of the buffer
 * increases, and also minimizes the extra copies in the NIO layer that would
 * result from the multiple write operations required to write a large buffer.
 *
 * @see WritableByteChannel#write(ByteBuffer)
 *
 * @param ctx
 * @param buffer
 */
private void channelWrite(ChannelHandlerContext ctx, ByteBuffer buffer) {
    try {
        ByteBuf buf = ctx.alloc().buffer();
        buf.writeBytes(buffer.array());
        ctx.writeAndFlush(buf);
    } catch (Throwable e) {
        e.printStackTrace();
    }
}
From source file:org.apache.hyracks.http.server.ChunkedNettyOutputStream.java
License:Apache License
public ChunkedNettyOutputStream(ChannelHandlerContext ctx, int chunkSize, ChunkedResponse response) {
    this.response = response;
    this.ctx = ctx;
    buffer = ctx.alloc().buffer(chunkSize);
    // register listener for channel closed
    ctx.channel().closeFuture().addListener(futureListener -> {
        synchronized (ChunkedNettyOutputStream.this) {
            ChunkedNettyOutputStream.this.notifyAll();
        }
    });
}
From source file:org.apache.hyracks.http.server.HttpRequestCapacityController.java
License:Apache License
public static void reject(ChannelHandlerContext ctx) {
    HttpResponseEncoder encoder = new HttpResponseEncoder();
    ChannelPromise promise = ctx.newPromise();
    promise.addListener(ChannelFutureListener.CLOSE);
    DefaultFullHttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1,
        HttpResponseStatus.SERVICE_UNAVAILABLE);
    try {
        encoder.write(ctx, response, ctx.voidPromise());
        ctx.writeAndFlush(ctx.alloc().buffer(0), promise);
    } catch (Throwable th) { // NOSONAR
        try {
            LOGGER.log(Level.SEVERE, "Failure during request rejection", th);
        } catch (Throwable loggingFailure) { // NOSONAR
        }
        PromiseNotificationUtil.tryFailure(promise, th, null);
    }
}
From source file:org.apache.tajo.rpc.MonitorClientHandler.java
License:Apache License
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
    // Initialize the message.
    ping = ctx.alloc().buffer(RpcConstants.PING_PACKET.length())
        .writeBytes(RpcConstants.PING_PACKET.getBytes(Charset.defaultCharset()));
    IdleStateHandler handler = ctx.pipeline().get(IdleStateHandler.class);
    if (handler != null && handler.getWriterIdleTimeInMillis() > 0) {
        enableMonitor = true;
    }
    super.channelActive(ctx);
}
From source file:org.apache.tajo.rpc.MonitorServerHandler.java
License:Apache License
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
    // Initialize the message.
    ping = ctx.alloc().directBuffer(4)
        .writeBytes(RpcConstants.PING_PACKET.getBytes(Charset.defaultCharset()));
    super.channelActive(ctx);
}