List of usage examples for io.netty.channel.ChannelHandlerContext.pipeline()
ChannelPipeline pipeline();
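ChannelHandlerContext.pipeline() returns the ChannelPipeline the handler belongs to. The recurring pattern in the examples below is a handler that reconfigures that pipeline from inside an event method, typically removing itself once its negotiation or setup step is finished. Here is a minimal sketch of that pattern; the HandshakeHandler class and its isHandshakeComplete() check are hypothetical names for illustration, not part of Netty or of the projects below.

import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;

// Hypothetical handler that removes itself from the pipeline once its job is done.
public class HandshakeHandler extends SimpleChannelInboundHandler<ByteBuf> {

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) {
        if (isHandshakeComplete(msg)) {
            // ctx.pipeline() is the pipeline this handler was added to; removing
            // `this` reconfigures it so later traffic skips the handshake step.
            ctx.pipeline().remove(this);
        }
        // Pass the message on either way; retain() because
        // SimpleChannelInboundHandler releases it after channelRead0 returns.
        ctx.fireChannelRead(msg.retain());
    }

    // Hypothetical predicate standing in for real handshake logic.
    private boolean isHandshakeComplete(ByteBuf msg) {
        return msg.readableBytes() > 0;
    }
}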
From source file: org.apache.giraph.comm.netty.handler.SaslServerHandler.java
License: Apache License
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    if (LOG.isDebugEnabled()) {
        LOG.debug("messageReceived: Got " + msg.getClass());
    }
    WritableRequest writableRequest = (WritableRequest) msg;
    // Simulate a closed connection on the first request (if desired)
    // TODO: Move out into a separate, dedicated handler.
    if (closeFirstRequest && !ALREADY_CLOSED_FIRST_REQUEST) {
        LOG.info("messageReceived: Simulating closing channel on first " +
            "request " + writableRequest.getRequestId() + " from " +
            writableRequest.getClientId());
        setAlreadyClosedFirstRequest();
        ctx.close();
        return;
    }

    if (writableRequest.getType() == RequestType.SASL_TOKEN_MESSAGE_REQUEST) {
        // initialize server-side SASL functionality, if we haven't yet
        // (in which case we are looking at the first SASL message from the
        // client).
        SaslNettyServer saslNettyServer =
            ctx.attr(NettyServer.CHANNEL_SASL_NETTY_SERVERS).get();
        if (saslNettyServer == null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("No saslNettyServer for " + ctx.channel() +
                    " yet; creating now, with secret manager: " + secretManager);
            }
            try {
                saslNettyServer = new SaslNettyServer(secretManager, AuthMethod.SIMPLE);
            } catch (IOException ioe) { //TODO:
                throw new RuntimeException(ioe);
            }
            ctx.attr(NettyServer.CHANNEL_SASL_NETTY_SERVERS).set(saslNettyServer);
        } else {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Found existing saslNettyServer on server:" +
                    ctx.channel().localAddress() + " for client " +
                    ctx.channel().remoteAddress());
            }
        }

        ((SaslTokenMessageRequest) writableRequest).processToken(saslNettyServer);
        // Send response to client.
        ctx.write(writableRequest);

        if (saslNettyServer.isComplete()) {
            // If authentication of client is complete, we will also send a
            // SASL-Complete message to the client.
            if (LOG.isDebugEnabled()) {
                LOG.debug("SASL authentication is complete for client with " +
                    "username: " + saslNettyServer.getUserName());
            }
            SaslCompleteRequest saslComplete = new SaslCompleteRequest();
            ctx.write(saslComplete);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Removing SaslServerHandler from pipeline since SASL " +
                    "authentication is complete.");
            }
            ctx.pipeline().remove(this);
        }
        ctx.flush();
        // do not send upstream to other handlers: no further action needs to be
        // done for SASL_TOKEN_MESSAGE_REQUEST requests.
        return;
    } else {
        // Client should not be sending other-than-SASL messages before
        // SaslServerHandler has removed itself from the pipeline. Such non-SASL
        // requests will be denied by the Authorize channel handler (the next
        // handler upstream in the server pipeline) if SASL authentication has
        // not completed.
        LOG.warn("Sending upstream an unexpected non-SASL message : " + writableRequest);
        ctx.fireChannelRead(msg);
    }
}
From source file: org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.java
License: Apache License
private static void processWriteBlockResponse(Channel channel, final DatanodeInfo dnInfo,
        final Promise<Channel> promise, final int timeoutMs) {
    channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS),
        new ProtobufVarint32FrameDecoder(),
        new ProtobufDecoder(BlockOpResponseProto.getDefaultInstance()),
        new SimpleChannelInboundHandler<BlockOpResponseProto>() {

            @Override
            protected void channelRead0(ChannelHandlerContext ctx, BlockOpResponseProto resp)
                    throws Exception {
                Status pipelineStatus = resp.getStatus();
                if (PipelineAck.isRestartOOBStatus(pipelineStatus)) {
                    throw new IOException("datanode " + dnInfo + " is restarting");
                }
                String logInfo = "ack with firstBadLink as " + resp.getFirstBadLink();
                if (resp.getStatus() != Status.SUCCESS) {
                    if (resp.getStatus() == Status.ERROR_ACCESS_TOKEN) {
                        throw new InvalidBlockTokenException("Got access token error" +
                            ", status message " + resp.getMessage() + ", " + logInfo);
                    } else {
                        throw new IOException("Got error" + ", status=" + resp.getStatus().name() +
                            ", status message " + resp.getMessage() + ", " + logInfo);
                    }
                }
                // success
                ChannelPipeline p = ctx.pipeline();
                for (ChannelHandler handler; (handler = p.removeLast()) != null;) {
                    // do not remove all handlers because we may have wrap or unwrap handlers at
                    // the header of pipeline.
                    if (handler instanceof IdleStateHandler) {
                        break;
                    }
                }
                // Disable auto read here. Enable it after we setup the streaming pipeline in
                // FanOutOneBLockAsyncDFSOutput.
                ctx.channel().config().setAutoRead(false);
                promise.trySuccess(ctx.channel());
            }

            @Override
            public void channelInactive(ChannelHandlerContext ctx) throws Exception {
                promise.tryFailure(new IOException("connection to " + dnInfo + " is closed"));
            }

            @Override
            public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
                if (evt instanceof IdleStateEvent && ((IdleStateEvent) evt).state() == READER_IDLE) {
                    promise.tryFailure(
                        new IOException("Timeout(" + timeoutMs + "ms) waiting for response"));
                } else {
                    super.userEventTriggered(ctx, evt);
                }
            }

            @Override
            public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
                promise.tryFailure(cause);
            }
        });
}
From source file: org.apache.hadoop.hbase.ipc.BufferCallBeforeInitHandler.java
License: Apache License
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
    if (evt instanceof BufferCallEvent) {
        BufferCallEvent bcEvt = (BufferCallBeforeInitHandler.BufferCallEvent) evt;
        switch (bcEvt.action) {
        case FLUSH:
            for (Call call : id2Call.values()) {
                ctx.write(call);
            }
            break;
        case FAIL:
            for (Call call : id2Call.values()) {
                call.setException(bcEvt.error);
            }
            break;
        }
        ctx.flush();
        ctx.pipeline().remove(this);
    } else if (evt instanceof CallEvent) {
        // just remove the call for now until we add other call event other than timeout and cancel.
        id2Call.remove(((CallEvent) evt).call.id);
    } else {
        ctx.fireUserEventTriggered(evt);
    }
}
From source file: org.apache.hadoop.hbase.security.NettyHBaseRpcConnectionHeaderHandler.java
License: Apache License
@Override
protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception {
    // read the ConnectionHeaderResponse from server
    int len = msg.readInt();
    byte[] buff = new byte[len];
    msg.readBytes(buff);

    RPCProtos.ConnectionHeaderResponse connectionHeaderResponse =
        RPCProtos.ConnectionHeaderResponse.parseFrom(buff);

    // Get the CryptoCipherMeta, update the HBaseSaslRpcClient for Crypto Cipher
    if (connectionHeaderResponse.hasCryptoCipherMeta()) {
        CryptoAES cryptoAES = EncryptionUtil.createCryptoAES(
            connectionHeaderResponse.getCryptoCipherMeta(), conf);
        // replace the Sasl handler with Crypto AES handler
        setupCryptoAESHandler(ctx.pipeline(), cryptoAES);
    }
    saslPromise.setSuccess(true);
}
From source file: org.apache.hadoop.hbase.security.NettyHBaseSaslRpcClientHandler.java
License: Apache License
private void tryComplete(ChannelHandlerContext ctx) {
    if (!saslRpcClient.isComplete()) {
        return;
    }
    saslRpcClient.setupSaslHandler(ctx.pipeline());
    setCryptoAESOption();
    saslPromise.setSuccess(true);
}
From source file: org.apache.hadoop.hbase.security.SaslClientHandler.java
License: Apache License
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    ByteBuf in = (ByteBuf) msg;

    // If not complete, try to negotiate
    if (!saslClient.isComplete()) {
        while (!saslClient.isComplete() && in.isReadable()) {
            readStatus(in);
            int len = in.readInt();
            if (firstRead) {
                firstRead = false;
                if (len == SaslUtil.SWITCH_TO_SIMPLE_AUTH) {
                    if (!fallbackAllowed) {
                        throw new IOException("Server asks us to fall back to SIMPLE auth, " +
                            "but this client is configured to only allow secure connections.");
                    }
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Server asks us to fall back to simple auth.");
                    }
                    saslClient.dispose();
                    ctx.pipeline().remove(this);
                    successfulConnectHandler.onSuccess(ctx.channel());
                    return;
                }
            }
            saslToken = new byte[len];
            if (LOG.isDebugEnabled()) {
                LOG.debug("Will read input token of size " + saslToken.length +
                    " for processing by initSASLContext");
            }
            in.readBytes(saslToken);

            saslToken = evaluateChallenge(saslToken);
            if (saslToken != null) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Will send token of size " + saslToken.length +
                        " from initSASLContext.");
                }
                writeSaslToken(ctx, saslToken);
            }
        }

        if (saslClient.isComplete()) {
            String qop = (String) saslClient.getNegotiatedProperty(Sasl.QOP);
            if (LOG.isDebugEnabled()) {
                LOG.debug("SASL client context established. Negotiated QoP: " + qop);
            }
            boolean useWrap = qop != null && !"auth".equalsIgnoreCase(qop);
            if (!useWrap) {
                ctx.pipeline().remove(this);
            }
            successfulConnectHandler.onSuccess(ctx.channel());
        }
    } else {
        // Normal wrapped reading
        try {
            int length = in.readInt();
            if (LOG.isDebugEnabled()) {
                LOG.debug("Actual length is " + length);
            }
            saslToken = new byte[length];
            in.readBytes(saslToken);
        } catch (IndexOutOfBoundsException e) {
            return;
        }
        try {
            ByteBuf b = ctx.channel().alloc().buffer(saslToken.length);
            b.writeBytes(saslClient.unwrap(saslToken, 0, saslToken.length));
            ctx.fireChannelRead(b);
        } catch (SaslException se) {
            try {
                saslClient.dispose();
            } catch (SaslException ignored) {
                LOG.debug("Ignoring SASL exception", ignored);
            }
            throw se;
        }
    }
}
From source file: org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.java
License: Apache License
private static void processWriteBlockResponse(Channel channel, final DatanodeInfo dnInfo,
        final Promise<Channel> promise, final int timeoutMs) {
    channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS),
        new ProtobufVarint32FrameDecoder(),
        new ProtobufDecoder(BlockOpResponseProto.getDefaultInstance()),
        new SimpleChannelInboundHandler<BlockOpResponseProto>() {

            @Override
            protected void channelRead0(ChannelHandlerContext ctx, BlockOpResponseProto resp)
                    throws Exception {
                Status pipelineStatus = resp.getStatus();
                if (PipelineAck.isRestartOOBStatus(pipelineStatus)) {
                    throw new IOException("datanode " + dnInfo + " is restarting");
                }
                String logInfo = "ack with firstBadLink as " + resp.getFirstBadLink();
                if (resp.getStatus() != Status.SUCCESS) {
                    if (resp.getStatus() == Status.ERROR_ACCESS_TOKEN) {
                        throw new InvalidBlockTokenException("Got access token error" +
                            ", status message " + resp.getMessage() + ", " + logInfo);
                    } else {
                        throw new IOException("Got error" + ", status=" + resp.getStatus().name() +
                            ", status message " + resp.getMessage() + ", " + logInfo);
                    }
                }
                // success
                ChannelPipeline p = ctx.pipeline();
                while (p.first() != null) {
                    p.removeFirst();
                }
                // Disable auto read here. Enable it after we setup the streaming pipeline in
                // FanOutOneBLockAsyncDFSOutput.
                ctx.channel().config().setAutoRead(false);
                promise.trySuccess(ctx.channel());
            }

            @Override
            public void channelInactive(ChannelHandlerContext ctx) throws Exception {
                promise.tryFailure(new IOException("connection to " + dnInfo + " is closed"));
            }

            @Override
            public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
                if (evt instanceof IdleStateEvent &&
                        ((IdleStateEvent) evt).state() == IdleState.READER_IDLE) {
                    promise.tryFailure(
                        new IOException("Timeout(" + timeoutMs + "ms) waiting for response"));
                } else {
                    super.userEventTriggered(ctx, evt);
                }
            }

            @Override
            public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
                promise.tryFailure(cause);
            }
        });
}
From source file: org.apache.hadoop.hdfs.server.datanode.web.PortUnificationServerHandler.java
License: Apache License
private void configureHttp1(ChannelHandlerContext ctx) {
    ctx.pipeline().addLast(new HttpServerCodec(), new ChunkedWriteHandler(),
        new URLDispatcher(proxyHost, conf, confForCreate));
}
From source file: org.apache.hadoop.hdfs.server.datanode.web.PortUnificationServerHandler.java
License: Apache License
private void configureHttp2(ChannelHandlerContext ctx) {
    ctx.pipeline().addLast(new DtpHttp2Handler());
}
From source file: org.apache.hadoop.hdfs.server.datanode.web.PortUnificationServerHandler.java
License: Apache License
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    if (in.readableBytes() < MAGIC_HEADER_LENGTH) {
        return;
    }
    if (ByteBufUtil.equals(in, 0, HTTP2_CLIENT_CONNECTION_PREFACE, 0, MAGIC_HEADER_LENGTH)) {
        configureHttp2(ctx);
    } else {
        configureHttp1(ctx);
    }
    ctx.pipeline().remove(this);
}