Usage examples for io.netty.channel.ChannelOutboundHandlerAdapter

ChannelOutboundHandlerAdapter is a skeletal implementation of ChannelOutboundHandler: each outbound operation (bind, connect, write, flush, close, and so on) simply forwards to the next handler in the ChannelPipeline, so a subclass overrides only the operations it needs to intercept. The examples below are collected from open-source projects.
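A minimal sketch of the usual subclassing pattern (the class name and log lines are illustrative, not taken from any example below):

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOutboundHandlerAdapter;
import io.netty.channel.ChannelPromise;

public final class AuditingOutboundHandler extends ChannelOutboundHandlerAdapter {
    @Override
    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
        // Observe or transform the outbound message, then pass it along;
        // omitting ctx.write() would silently stall the pipeline.
        System.out.println("outbound: " + msg);
        ctx.write(msg, promise);
    }

    @Override
    public void flush(ChannelHandlerContext ctx) throws Exception {
        System.out.println("flushing");
        ctx.flush();
    }
}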
From source file:com.zextras.modules.chat.server.EventSenderImpl.java
License:Open Source License
private Channel getChannel() throws IOException {
    if (mChannel == null || !mChannel.isOpen()) {
        mChannel = mLocalXmppConnectionProvider.openConnection(mHost,
                LocalServerDestination.DEFAULT_LOCAL_XMPP_PORT,
                new ChannelOutboundHandlerAdapter());
    }
    return mChannel;
}
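Here the bare new ChannelOutboundHandlerAdapter() acts as a transparent placeholder handler: the adapter's default implementation of every outbound method just delegates to the context, e.g. for write (paraphrasing Netty's own source):

@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
    ctx.write(msg, promise);
}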
From source file:io.grpc.alts.internal.TsiFrameHandlerTest.java
License:Apache License
@Test
public void flushAfterCloseShouldWork() throws InterruptedException {
    ByteBuf msg = Unpooled.copiedBuffer("message after handshake failed", CharsetUtil.UTF_8);
    channel.write(msg);
    channel.pipeline().addFirst(new ChannelOutboundHandlerAdapter() {
        @Override
        public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception {
            // We have to call flush while doing a close, since close() tears down the pipeline
            // immediately after.
            channel.flush();
            super.close(ctx, promise);
        }
    });

    assertThat(channel.outboundMessages()).isEmpty();
    channel.close().sync();

    Object actual = channel.readOutbound();
    assertWithMessage("pending write should be flushed on close").that(actual).isEqualTo(msg);
    channel.checkException();
}
From source file:io.grpc.netty.WriteBufferingAndExceptionHandlerTest.java
License:Apache License
@Test
public void writesBuffered() throws Exception {
    final AtomicBoolean handlerAdded = new AtomicBoolean();
    final AtomicBoolean flush = new AtomicBoolean();
    final AtomicReference<Object> write = new AtomicReference<>();
    final WriteBufferingAndExceptionHandler handler =
            new WriteBufferingAndExceptionHandler(new ChannelOutboundHandlerAdapter() {
                @Override
                public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
                    assertFalse(handlerAdded.getAndSet(true));
                    super.handlerAdded(ctx);
                }

                @Override
                public void flush(ChannelHandlerContext ctx) throws Exception {
                    assertFalse(flush.getAndSet(true));
                    super.flush(ctx);
                }

                @Override
                public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
                    assertNull(write.getAndSet(msg));
                    promise.setSuccess();
                }
            });

    LocalAddress addr = new LocalAddress("local");
    ChannelFuture cf = new Bootstrap()
            .channel(LocalChannel.class)
            .handler(handler)
            .group(group)
            .register();
    chan = cf.channel();
    cf.sync();
    ChannelFuture sf = new ServerBootstrap()
            .channel(LocalServerChannel.class)
            .childHandler(new ChannelHandlerAdapter() {})
            .group(group)
            .bind(addr);
    server = sf.channel();
    sf.sync();

    assertTrue(handlerAdded.get());
    chan.write(new Object());
    chan.connect(addr).sync();
    assertNull(write.get());
    chan.flush();
    assertNull(write.get());
    assertFalse(flush.get());
    assertThat(chan.pipeline().context(handler)).isNotNull();
    chan.eventLoop().submit(new Runnable() {
        @Override
        public void run() {
            handler.writeBufferedAndRemove(chan.pipeline().context(handler));
        }
    }).sync();
    assertThat(chan.pipeline().context(handler)).isNull();
    assertThat(write.get().getClass()).isSameInstanceAs(Object.class);
    assertTrue(flush.get());
    assertThat(chan.pipeline()).doesNotContain(handler);
}
From source file:io.reactivex.netty.protocol.http.sse.SseOverHttpServerPipelineConfigurator.java
License:Apache License
@Override
public void configureNewPipeline(ChannelPipeline pipeline) {
    serverPipelineConfigurator.configureNewPipeline(pipeline);
    pipeline.addLast(SSE_ENCODER_HANDLER_NAME, SERVER_SENT_EVENT_ENCODER);
    pipeline.addLast(SSE_RESPONSE_HEADERS_COMPLETER, new ChannelOutboundHandlerAdapter() {
        @Override
        public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
            if (HttpServerResponse.class.isAssignableFrom(msg.getClass())) {
                @SuppressWarnings("rawtypes")
                HttpServerResponse rxResponse = (HttpServerResponse) msg;
                String contentTypeHeader = rxResponse.getHeaders().get(CONTENT_TYPE);
                if (null == contentTypeHeader) {
                    rxResponse.getHeaders().set(CONTENT_TYPE, "text/event-stream");
                }
            }
            super.write(ctx, msg, promise);
        }
    });
}
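The handler above fills in a missing Content-Type header as the response passes outbound. The same lazy defaulting works against plain Netty HTTP messages; a sketch assuming Netty 4.1's HttpHeaderNames (the pipeline variable and handler name are illustrative, not from this project):

pipeline.addLast("sse-headers", new ChannelOutboundHandlerAdapter() {
    @Override
    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
        if (msg instanceof HttpResponse) {
            HttpResponse response = (HttpResponse) msg;
            // Only default the header; never clobber an explicit value.
            if (!response.headers().contains(HttpHeaderNames.CONTENT_TYPE)) {
                response.headers().set(HttpHeaderNames.CONTENT_TYPE, "text/event-stream");
            }
        }
        super.write(ctx, msg, promise);
    }
});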
From source file:io.viewserver.network.netty.websocket.NettyWebSocketEndpoint.java
License:Apache License
@Override
public ServerBootstrap getServerBootstrap(EventLoopGroup parentGroup, EventLoopGroup childGroup,
        ChannelHandler handler) {
    if (this.uri.getScheme().equals("wss")) {
        if (keyCertChainFile == null) {
            log.warn("No certificate provided for WSS endpoint - will use self-signed");
            try {
                SelfSignedCertificate certificate = new SelfSignedCertificate();
                keyCertChainFile = certificate.certificate();
                keyFile = certificate.privateKey();
                usingSelfSignedCertificate = true;
            } catch (CertificateException e) {
                throw new RuntimeException(e);
            }
        }
        try {
            serverSslContext = SslContextBuilder.forServer(keyCertChainFile, keyFile, keyPassword).build();
        } catch (SSLException e) {
            throw new RuntimeException(e);
        }
    } else if (!this.uri.getScheme().equals("ws")) {
        throw new IllegalArgumentException("Invalid scheme '" + uri.getScheme() + "' for web socket endpoint");
    }

    ServerBootstrap server = new ServerBootstrap();
    server.group(parentGroup, childGroup).channel(NioServerSocketChannel.class)
            .childHandler(new ChannelInitializer<Channel>() {
                @Override
                protected void initChannel(Channel ch) throws Exception {
                    ChannelPipeline pipeline = ch.pipeline();
                    if (serverSslContext != null) {
                        pipeline.addLast(serverSslContext.newHandler(ch.alloc()));
                    }
                    pipeline.addLast(new HttpServerCodec());
                    pipeline.addLast(new HttpObjectAggregator(65536));
                    // pipeline.addLast(new WebSocketServerCompressionHandler());
                    pipeline.addLast("websocket", new WebSocketServerProtocolHandler("/"));
                    pipeline.addLast(new ChannelInboundHandlerAdapter() {
                        @Override
                        public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
                            if (evt == WebSocketServerProtocolHandler.ServerHandshakeStateEvent.HANDSHAKE_COMPLETE) {
                                ChannelPipeline pipeline = ctx.channel().pipeline();
                                pipeline.addAfter("websocket", "ws-decoder-xx",
                                        new MessageToMessageDecoder<BinaryWebSocketFrame>() {
                                            @Override
                                            protected void decode(ChannelHandlerContext ctx,
                                                    BinaryWebSocketFrame msg, List<Object> out) throws Exception {
                                                out.add(msg.content().retain());
                                            }
                                        });
                                pipeline.addAfter("websocket", "ws-encoder-xx",
                                        new MessageToMessageEncoder<ByteBuf>() {
                                            @Override
                                            protected void encode(ChannelHandlerContext ctx, ByteBuf msg,
                                                    List<Object> out) throws Exception {
                                                out.add(new BinaryWebSocketFrame(msg).retain());
                                            }
                                        });
                            }
                            super.userEventTriggered(ctx, evt);
                        }
                    });
                    pipeline.addLast("frameDecoder", new ChannelInboundHandlerAdapter());
                    pipeline.addLast("frameEncoder", new ChannelOutboundHandlerAdapter());
                    pipeline.addLast(handler);
                }
            });
    server.bind(uri.getPort());
    return server;
}
From source file:io.viewserver.network.netty.websocket.NettyWebSocketEndpoint.java
License:Apache License
@Override
public IClient getClient(EventLoopGroup eventLoopGroup, ChannelHandler handler) {
    SslContext sslContext;
    if (this.uri.getScheme().equals("wss")) {
        try {
            SslContextBuilder builder = SslContextBuilder.forClient();
            if (bypassCertificateChecks || usingSelfSignedCertificate) {
                builder.trustManager(InsecureTrustManagerFactory.INSTANCE);
            }
            sslContext = builder.build();
        } catch (SSLException e) {
            throw new RuntimeException(e);
        }
    } else {
        sslContext = null;
    }

    Bootstrap bootstrap = new Bootstrap();
    WebSocketClientHandshaker handshaker = WebSocketClientHandshakerFactory.newHandshaker(uri,
            WebSocketVersion.V13, null, false, new DefaultHttpHeaders());
    bootstrap.group(eventLoopGroup).channel(NioSocketChannel.class).handler(new ChannelInitializer<Channel>() {
        @Override
        protected void initChannel(Channel ch) throws Exception {
            ChannelPipeline pipeline = ch.pipeline();
            if (sslContext != null) {
                pipeline.addLast(sslContext.newHandler(ch.alloc(), uri.getHost(), uri.getPort()));
            }
            pipeline.addLast(new HttpClientCodec());
            pipeline.addLast(new HttpObjectAggregator(1 << 30));
            pipeline.addLast("websocket", new WebSocketClientProtocolHandler(handshaker));
            pipeline.addLast(new ChannelInboundHandlerAdapter() {
                @Override
                public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
                    if (evt == WebSocketClientProtocolHandler.ClientHandshakeStateEvent.HANDSHAKE_COMPLETE) {
                        ChannelPipeline pipeline = ctx.channel().pipeline();
                        pipeline.addAfter("websocket", "ws-decoder-xx",
                                new MessageToMessageDecoder<BinaryWebSocketFrame>() {
                                    @Override
                                    protected void decode(ChannelHandlerContext ctx, BinaryWebSocketFrame msg,
                                            List<Object> out) throws Exception {
                                        out.add(msg.content().retain());
                                    }
                                });
                        pipeline.addAfter("websocket", "ws-encoder-xx",
                                new MessageToMessageEncoder<ByteBuf>() {
                                    @Override
                                    protected void encode(ChannelHandlerContext ctx, ByteBuf msg,
                                            List<Object> out) throws Exception {
                                        out.add(new BinaryWebSocketFrame(msg).retain());
                                    }
                                });
                    }
                    super.userEventTriggered(ctx, evt);
                }
            });
            pipeline.addLast("frameDecoder", new ChannelInboundHandlerAdapter());
            pipeline.addLast("frameEncoder", new ChannelOutboundHandlerAdapter());
            pipeline.addLast(handler);
        }
    });
    return () -> bootstrap.connect(uri.getHost(), uri.getPort());
}
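In both WebSocket examples the named no-op entries ("frameDecoder", "frameEncoder") serve as transparent, addressable placeholders, while the real frame codecs are spliced in around the "websocket" entry once the handshake completes. A named slot can also be swapped wholesale with ChannelPipeline.replace; a self-contained sketch using EmbeddedChannel (all names illustrative, not from this project):

EmbeddedChannel ch = new EmbeddedChannel();
ch.pipeline().addLast("frameEncoder", new ChannelOutboundHandlerAdapter()); // transparent placeholder
// Later, swap the placeholder for a real outbound handler in the same slot:
ch.pipeline().replace("frameEncoder", "frameEncoder", new ChannelOutboundHandlerAdapter() {
    @Override
    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
        ctx.write("framed:" + msg, promise);
    }
});
ch.writeOutbound("payload");
// ch.readOutbound() now returns "framed:payload"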
From source file:org.apache.flink.runtime.io.network.netty.ClientTransportErrorHandlingTest.java
License:Apache License
/**
 * Verifies that failed client requests via {@link PartitionRequestClient} are correctly
 * attributed to the respective {@link RemoteInputChannel}.
 */
@Test
public void testExceptionOnWrite() throws Exception {
    NettyProtocol protocol = new NettyProtocol() {
        @Override
        public ChannelHandler[] getServerChannelHandlers() {
            return new ChannelHandler[0];
        }

        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new PartitionRequestProtocol(mock(ResultPartitionProvider.class),
                    mock(TaskEventDispatcher.class), mock(NetworkBufferPool.class)).getClientChannelHandlers();
        }
    };

    // We need a real server and client in this test, because Netty's EmbeddedChannel is
    // not failing the ChannelPromise of failed writes.
    NettyServerAndClient serverAndClient = initServerAndClient(protocol, createConfig());

    Channel ch = connect(serverAndClient);
    PartitionRequestClientHandler handler = getClientHandler(ch);

    // Last outbound handler throws Exception after 1st write
    ch.pipeline().addFirst(new ChannelOutboundHandlerAdapter() {
        int writeNum = 0;

        @Override
        public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
            if (writeNum >= 1) {
                throw new RuntimeException("Expected test exception.");
            }
            writeNum++;
            ctx.write(msg, promise);
        }
    });

    PartitionRequestClient requestClient = new PartitionRequestClient(ch, handler, mock(ConnectionID.class),
            mock(PartitionRequestClientFactory.class));

    // Create input channels
    RemoteInputChannel[] rich = new RemoteInputChannel[] { createRemoteInputChannel(),
            createRemoteInputChannel() };

    final CountDownLatch sync = new CountDownLatch(1);

    // Do this with explicit synchronization. Otherwise this is not robust against slow timings
    // of the callback (e.g. we cannot just verify that it was called once, because there is
    // a chance that we do this too early).
    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            sync.countDown();
            return null;
        }
    }).when(rich[1]).onError(isA(LocalTransportException.class));

    // First request is successful
    ChannelFuture f = requestClient.requestSubpartition(new ResultPartitionID(), 0, rich[0], 0);
    assertTrue(f.await().isSuccess());

    // Second request is *not* successful
    f = requestClient.requestSubpartition(new ResultPartitionID(), 0, rich[1], 0);
    assertFalse(f.await().isSuccess());

    // Only the second channel should be notified about the error
    verify(rich[0], times(0)).onError(any(LocalTransportException.class));

    // Wait for the notification
    if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
        fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis()
                + " ms to be notified about the channel error.");
    }

    shutdown(serverAndClient);
}
From source file:org.apache.flink.runtime.io.network.netty.OutboundConnectionQueueTest.java
License:Apache License
/**
 * Verifies that concurrent enqueue and close events are handled correctly.
 */
private void doTestConcurrentEnqueueAndClose(final int numProducers, final int numEnvelopesPerProducer,
        final int minSleepTimeMs, final int maxSleepTimeMs) throws Exception {

    final InetAddress bindHost = InetAddress.getLocalHost();
    final int bindPort = 20000;

    // Testing concurrent enqueue and close requires real TCP channels,
    // because Netty's testing EmbeddedChannel does not implement the
    // same threading model as the NioEventLoopGroup (for example there
    // is no difference between being IN and OUTSIDE of the event loop
    // thread).
    final ServerBootstrap in = new ServerBootstrap();
    in.group(new NioEventLoopGroup(1)).channel(NioServerSocketChannel.class).localAddress(bindHost, bindPort)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    channel.pipeline().addLast(new ChannelInboundHandlerAdapter());
                }
            });

    final Bootstrap out = new Bootstrap();
    out.group(new NioEventLoopGroup(1)).channel(NioSocketChannel.class)
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    channel.pipeline().addLast(new ChannelOutboundHandlerAdapter());
                }
            }).option(ChannelOption.TCP_NODELAY, false).option(ChannelOption.SO_KEEPALIVE, true);

    in.bind().sync();

    // --------------------------------------------------------------------
    // The testing pipeline looks as follows:
    // - Test Verification Handler [OUT]
    // - Test Control Handler [IN]
    // - Idle State Handler [IN/OUT] [added by OutboundConnectionQueue]
    // - Outbound queue (SUT) [IN] [added by OutboundConnectionQueue]

    channel = out.connect(bindHost, bindPort).sync().channel();
    queue = new OutboundConnectionQueue(channel, receiver, connectionManager, 0);

    controller = new TestControlHandler(true);
    verifier = new TestVerificationHandler();

    channel.pipeline().addFirst("Test Control Handler", controller);
    channel.pipeline().addFirst("Test Verification Handler", verifier);

    // --------------------------------------------------------------------

    final Random rand = new Random(RANDOM_SEED);

    // Every producer works on their local reference of the queue and only
    // updates it to the new channel when enqueue returns false, which
    // should only happen if the channel has been closed.
    final ConcurrentMap<ChannelID, OutboundConnectionQueue> producerQueues =
            new ConcurrentHashMap<ChannelID, OutboundConnectionQueue>();

    final ChannelID[] ids = new ChannelID[numProducers];

    for (int i = 0; i < numProducers; i++) {
        ids[i] = new ChannelID();
        producerQueues.put(ids[i], queue);
    }

    final CountDownLatch receivedAllEnvelopesLatch = verifier.waitForEnvelopes(numEnvelopesPerProducer - 1,
            ids);

    final List<Channel> closedChannels = new ArrayList<Channel>();

    // --------------------------------------------------------------------

    final Runnable closer = new Runnable() {
        @Override
        public void run() {
            while (receivedAllEnvelopesLatch.getCount() != 0) {
                try {
                    controller.fireIdle();

                    // Test two idle events arriving "closely" after each other
                    if (rand.nextBoolean()) {
                        controller.fireIdle();
                    }

                    Thread.sleep(minSleepTimeMs / 2);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }
    };

    final Runnable[] producers = new Runnable[numProducers];

    for (int i = 0; i < numProducers; i++) {
        final int index = i;

        producers[i] = new Runnable() {
            @Override
            public void run() {
                final JobID jid = new JobID();
                final ChannelID cid = ids[index];

                for (int j = 0; j < numEnvelopesPerProducer; j++) {
                    OutboundConnectionQueue localQueue = producerQueues.get(cid);

                    try {
                        // This code path is handled by the NetworkConnectionManager
                        // in production to enqueue the envelope either to the current
                        // channel or a new one if it was closed.
                        while (!localQueue.enqueue(new Envelope(j, jid, cid))) {
                            synchronized (lock) {
                                if (localQueue == queue) {
                                    closedChannels.add(channel);

                                    channel = out.connect(bindHost, bindPort).sync().channel();
                                    queue = new OutboundConnectionQueue(channel, receiver, connectionManager,
                                            0);

                                    channel.pipeline().addFirst("Test Control Handler", controller);
                                    channel.pipeline().addFirst("Test Verification Handler", verifier);
                                }
                            }

                            producerQueues.put(cid, queue);
                            localQueue = queue;
                        }

                        int sleepTime = rand.nextInt((maxSleepTimeMs - minSleepTimeMs) + 1) + minSleepTimeMs;
                        Thread.sleep(sleepTime);
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        };
    }

    for (int i = 0; i < numProducers; i++) {
        new Thread(producers[i], "Producer " + i).start();
    }

    new Thread(closer, "Closer").start();

    // --------------------------------------------------------------------

    while (receivedAllEnvelopesLatch.getCount() != 0) {
        receivedAllEnvelopesLatch.await();
    }

    // Final close, if the last close didn't make it.
    synchronized (lock) {
        if (channel != null) {
            controller.fireIdle();
        }
    }

    verifier.waitForClose();

    // If the producers do not sleep after each envelope, the close
    // should not make it through and no channel should have been
    // added to the list of closed channels
    if (minSleepTimeMs == 0 && maxSleepTimeMs == 0) {
        Assert.assertEquals(0, closedChannels.size());
    }

    for (Channel ch : closedChannels) {
        Assert.assertFalse(ch.isOpen());
    }

    System.out.println(closedChannels.size() + " channels were closed during execution.");

    out.group().shutdownGracefully().sync();
    in.group().shutdownGracefully().sync();
}
From source file:org.apache.hadoop.hbase.ipc.TestAsyncIPC.java
License:Apache License
@Override
protected AsyncRpcClient createRpcClientRTEDuringConnectionSetup(Configuration conf) {
    setConf(conf);
    return new AsyncRpcClient(conf, HConstants.CLUSTER_ID_DEFAULT, null,
            new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addFirst(new ChannelOutboundHandlerAdapter() {
                        @Override
                        public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise)
                                throws Exception {
                            promise.setFailure(new RuntimeException("Injected fault"));
                        }
                    });
                }
            });
}
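This initializer, and the TestIPC example that follows, injects a fault by failing the promise inside write(...), so the message never reaches the wire. The same idea can be exercised in isolation with Netty's EmbeddedChannel; a minimal sketch (assumed setup, not from the HBase tests):

EmbeddedChannel ch = new EmbeddedChannel(new ChannelOutboundHandlerAdapter() {
    @Override
    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
        // Fail every outbound write without forwarding it toward the transport.
        promise.setFailure(new RuntimeException("Injected fault"));
    }
});
ChannelFuture f = ch.writeAndFlush("hello");
assert !f.isSuccess();
assert "Injected fault".equals(f.cause().getMessage());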
From source file:org.apache.hadoop.hbase.ipc.TestIPC.java
License:Apache License
@Test
public void testRTEDuringAsyncBlockingConnectionSetup() throws Exception {
    Configuration conf = HBaseConfiguration.create();

    TestRpcServer rpcServer = new TestRpcServer();
    AsyncRpcClient client = new AsyncRpcClient(conf, HConstants.CLUSTER_ID_DEFAULT, null,
            new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addFirst(new ChannelOutboundHandlerAdapter() {
                        @Override
                        public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise)
                                throws Exception {
                            promise.setFailure(new RuntimeException("Injected fault"));
                        }
                    });
                }
            });
    try {
        rpcServer.start();
        InetSocketAddress address = rpcServer.getListenerAddress();
        MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo");
        EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build();

        BlockingRpcChannel channel = client.createBlockingRpcChannel(
                ServerName.valueOf(address.getHostName(), address.getPort(), System.currentTimeMillis()),
                User.getCurrent(), 0);

        channel.callBlockingMethod(md, new PayloadCarryingRpcController(), param,
                md.getOutputType().toProto());
        fail("Expected an exception to have been thrown!");
    } catch (Exception e) {
        LOG.info("Caught expected exception: " + e.toString());
        assertTrue(StringUtils.stringifyException(e).contains("Injected fault"));
    } finally {
        client.close();
        rpcServer.stop();
    }
}