Example usage for io.netty.channel ChannelFuture cause

Introduction

On this page you can find usage examples for the io.netty.channel.ChannelFuture cause() method, collected from open source projects.

Prototype

Throwable cause();

Document

Returns the cause of the failed I/O operation if the I/O operation has failed, or null if the operation succeeded or has not completed yet.
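
A minimal sketch of the typical pattern follows (the class ConnectExample and the method connectAndLog are illustrative names, not taken from any of the sources below): register a ChannelFutureListener and, once the future is done and isSuccess() is false, read cause() to obtain the Throwable that failed the operation. While the future is still pending, or after it has succeeded, cause() returns null.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;

public class ConnectExample {

    static void connectAndLog(Bootstrap bootstrap, String host, int port) {
        bootstrap.connect(host, port).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) {
                if (future.isSuccess()) {
                    System.out.println("Connected to " + host + ":" + port);
                } else {
                    // The operation failed, so cause() is guaranteed to be non-null here.
                    future.cause().printStackTrace();
                }
            }
        });
    }
}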

Usage

From source file: io.codis.nedis.NedisClientBuilder.java

License: Apache License

public Future<NedisClientImpl> connect(SocketAddress remoteAddress) {
    validateGroupConfig();
    Bootstrap b = new Bootstrap().group(group).channel(channelClass).handler(new ChannelInitializer<Channel>() {

        @Override
        protected void initChannel(Channel ch) throws Exception {
            ch.pipeline().addLast(new RedisResponseDecoder(),
                    new RedisDuplexHandler(TimeUnit.MILLISECONDS.toNanos(timeoutMs)));
        }

    });
    if (timeoutMs > 0) {
        b.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, (int) Math.min(Integer.MAX_VALUE, timeoutMs));
    }
    ChannelFuture f = b.connect(remoteAddress);
    final Promise<NedisClientImpl> promise = f.channel().eventLoop().newPromise();
    f.addListener(new ChannelFutureListener() {

        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
                promise.trySuccess(new NedisClientImpl(future.channel(), pool));
            } else {
                promise.tryFailure(future.cause());
            }
        }
    });
    return promise;
}
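
In this example the listener bridges the connect future to a Netty Promise: on success it completes the promise with a ready client, and on failure it forwards future.cause() so that callers of connect() observe the original connection error.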

From source file: io.flood.rpc.network.AcceptorImpl.java

License: Apache License

private void bind(final SocketAddress address) {
    id = SocketIdGenerator.nextId();
    boot = new ServerBootstrap();
    boot.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .childHandler(new ChannelInitializer<SocketChannel>() {

                protected void initChannel(SocketChannel ch) throws Exception {
                    LinkedHashMap<String, ChannelHandler> handlers = getHandlers();
                    for (Map.Entry<String, ChannelHandler> entry : handlers.entrySet()) {
                        ch.pipeline().addLast(entry.getKey(), entry.getValue());
                    }
                    ch.pipeline().addLast("messageDecoder", new MessageDecoder());
                    ch.pipeline().addLast("messageEncoder", new MessageEncoder());
                    ch.pipeline().addLast("messageHandler", new MessageHandler(AcceptorImpl.this));
                }
            });

    try {
        final ChannelFuture future = boot.bind(address).sync();

        if (future.isSuccess()) {
            id = SocketIdGenerator.nextId();
            LOG.info(format(id, "socket bind success({})"), address);
            ConnectionManager.put(id, Connection.Type.Server, future.channel());
            onBindCompleted(future);
            future.channel().closeFuture()
                    .addListener(new GenericFutureListener<io.netty.util.concurrent.Future<? super Void>>() {

                        public void operationComplete(Future<? super Void> completefuture) throws Exception {
                            LOG.info(format(id, "socket closed()"));
                            serverChannel.remove(future.channel());
                        }

                    });
        }

        else {
            LOG.error(format(id, "socket bind failed({})"), address, future.cause());
        }

    } catch (InterruptedException e) {
        e.printStackTrace();
    }

}
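
Note that bind(address).sync() rethrows the failure cause if the bind fails, so when sync() returns normally the future has already succeeded; the else branch that logs future.cause() is effectively unreachable and would only fire if await() were used instead.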

From source file: io.gomint.proxprox.network.DownstreamConnection.java

License: BSD License

/**
 * Create a new AbstractConnection to a server.
 *
 * @param proxProx           The proxy instance
 * @param upstreamConnection The upstream connection which requested to connect to this downstream
 * @param ip                 The ip of the server we want to connect to
 * @param port               The port of the server we want to connect to
 */
DownstreamConnection(ProxProxProxy proxProx, UpstreamConnection upstreamConnection, String ip, int port) {
    this.upstreamConnection = upstreamConnection;
    this.proxProx = proxProx;

    this.ip = ip;
    this.port = port;

    // Check if we use UDP or TCP for downstream connections
    if (proxProx.getConfig().isUseTCP()) {
        io.netty.bootstrap.Bootstrap bootstrap = Initializer.buildBootstrap(this.upstreamConnection, ip, port,
                new Consumer<ConnectionHandler>() {
                    @Override
                    public void accept(ConnectionHandler connectionHandler) {
                        DownstreamConnection.this.tcpConnection = connectionHandler;

                        // There are no batches in TCP
                        connectionHandler.onData(DownstreamConnection.this::handlePacket);

                        connectionHandler.whenDisconnected(new Consumer<Void>() {
                            @Override
                            public void accept(Void aVoid) {
                                if (upstreamConnection.isConnected()) {
                                    LOGGER.info("Disconnected downstream...");
                                    if (!DownstreamConnection.this.manualClose) {
                                        DownstreamConnection.this.close(true, "Server disconnected");

                                        // Check if we need to disconnect upstream
                                        if (DownstreamConnection.this
                                                .equals(upstreamConnection.getDownStream())) {
                                            if (upstreamConnection.getPendingDownStream() != null
                                                    || upstreamConnection.connectToLastKnown()) {
                                                return;
                                            } else {
                                                upstreamConnection.disconnect("The Server has gone down");
                                            }
                                        } else {
                                            upstreamConnection.resetPendingDownStream();
                                        }
                                    }
                                }
                            }
                        });

                        DownstreamConnection.this.upstreamConnection
                                .onDownStreamConnected(DownstreamConnection.this);
                    }
                });
        bootstrap.connect(this.ip, this.port).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture channelFuture) throws Exception {
                if (!channelFuture.isSuccess()) {
                    LOGGER.warn("Could not connect to {}:{}", DownstreamConnection.this.ip,
                            DownstreamConnection.this.port, channelFuture.cause());
                    DownstreamConnection.this.upstreamConnection.resetPendingDownStream();
                }
            }
        });
    } else {
        this.initDecompressor();
        this.connection = new ClientSocket();
        this.connection.setMojangModificationEnabled(true);
        this.connection.setEventHandler((socket, socketEvent) -> {
            LOGGER.debug("Got socketEvent: " + socketEvent.getType().name());
            switch (socketEvent.getType()) {
            case CONNECTION_ATTEMPT_SUCCEEDED:
                // We got accepted *yay*
                DownstreamConnection.this.setup();
                DownstreamConnection.this.upstreamConnection.onDownStreamConnected(DownstreamConnection.this);
                break;

            case CONNECTION_CLOSED:
            case CONNECTION_DISCONNECTED:
                LOGGER.info("Disconnected downstream...");
                if (!DownstreamConnection.this.manualClose) {
                    DownstreamConnection.this.updateIncoming(socketEvent.getConnection());
                    DownstreamConnection.this.close(true, "Raknet disconnected");

                    // Check if we need to disconnect upstream
                    if (DownstreamConnection.this.equals(upstreamConnection.getDownStream())) {
                        if (upstreamConnection.getPendingDownStream() != null
                                || upstreamConnection.connectToLastKnown()) {
                            return;
                        } else {
                            upstreamConnection.disconnect("The Server has gone down");
                        }
                    } else {
                        upstreamConnection.resetPendingDownStream();
                    }
                }

                break;

            default:
                break;
            }
        });

        try {
            this.connection.initialize();
        } catch (SocketException e) {
            LOGGER.warn("Could not connect to {}:{}", this.ip, this.port, e);
        }

        this.connection.connect(ip, port);
    }
}
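
In the TCP branch above, the connect listener checks isSuccess() and, on failure, logs the endpoint together with channelFuture.cause() (passed as the last SLF4J argument so it is rendered as the log entry's throwable) before resetting the pending downstream connection.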

From source file: io.grpc.netty.NettyClientHandler.java

License: Apache License

private void createStreamTraced(final int streamId, final NettyClientStream.TransportState stream,
        final Http2Headers headers, boolean isGet, final boolean shouldBeCountedForInUse,
        final ChannelPromise promise) {
    // Create an intermediate promise so that we can intercept the failure reported back to the
    // application.
    ChannelPromise tempPromise = ctx().newPromise();
    encoder().writeHeaders(ctx(), streamId, headers, 0, isGet, tempPromise)
            .addListener(new ChannelFutureListener() {
                @Override
                public void operationComplete(ChannelFuture future) throws Exception {
                    if (future.isSuccess()) {
                        // The http2Stream will be null in case a stream buffered in the encoder
                        // was canceled via RST_STREAM.
                        Http2Stream http2Stream = connection().stream(streamId);
                        if (http2Stream != null) {
                            stream.getStatsTraceContext().clientOutboundHeaders();
                            http2Stream.setProperty(streamKey, stream);

                            // This delays the in-use state until the I/O completes, which technically may
                            // be later than we would like.
                            if (shouldBeCountedForInUse) {
                                inUseState.updateObjectInUse(http2Stream, true);
                            }

                            // Attach the client stream to the HTTP/2 stream object as user data.
                            stream.setHttp2Stream(http2Stream);
                        }
                        // Otherwise, the stream has been cancelled and Netty is sending a
                        // RST_STREAM frame which causes it to purge pending writes from the
                        // flow-controller and delete the http2Stream. The stream listener has already
                        // been notified of cancellation so there is nothing to do.

                        // Just forward on the success status to the original promise.
                        promise.setSuccess();
                    } else {
                        final Throwable cause = future.cause();
                        if (cause instanceof StreamBufferingEncoder.Http2GoAwayException) {
                            StreamBufferingEncoder.Http2GoAwayException e = (StreamBufferingEncoder.Http2GoAwayException) cause;
                            lifecycleManager.notifyShutdown(statusFromGoAway(e.errorCode(), e.debugData()));
                            promise.setFailure(lifecycleManager.getShutdownThrowable());
                        } else {
                            promise.setFailure(cause);
                        }
                    }
                }
            });
}
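
Here future.cause() is inspected to special-case Http2GoAwayException: a stream rejected by a GOAWAY fails the original promise with the transport's shutdown status rather than with the raw encoder exception.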

From source file: io.grpc.netty.NettyClientHandler.java

License: Apache License

/**
 * Sends a PING frame. If a ping operation is already outstanding, the callback in the message is
 * registered to be called when the existing operation completes, and no new frame is sent.
 */
private void sendPingFrameTraced(ChannelHandlerContext ctx, SendPingCommand msg, ChannelPromise promise) {
    // Don't check lifecycleManager.getShutdownStatus() since we want to allow pings after shutdown
    // but before termination. After termination, messages will no longer arrive because the
    // pipeline clears all handlers on channel close.

    PingCallback callback = msg.callback();
    Executor executor = msg.executor();
    // we only allow one outstanding ping at a time, so just add the callback to
    // any outstanding operation
    if (ping != null) {
        promise.setSuccess();
        ping.addCallback(callback, executor);
        return;
    }

    // Use a new promise to prevent calling the callback twice on write failure: here and in
    // NettyClientTransport.ping(). It may appear strange, but it will behave the same as if
    // ping != null above.
    promise.setSuccess();
    promise = ctx().newPromise();
    // set outstanding operation
    long data = USER_PING_PAYLOAD;
    Stopwatch stopwatch = stopwatchFactory.get();
    stopwatch.start();
    ping = new Http2Ping(data, stopwatch);
    ping.addCallback(callback, executor);
    // and then write the ping
    encoder().writePing(ctx, false, USER_PING_PAYLOAD, promise);
    ctx.flush();
    final Http2Ping finalPing = ping;
    promise.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
                transportTracer.reportKeepAliveSent();
            } else {
                Throwable cause = future.cause();
                if (cause instanceof ClosedChannelException) {
                    cause = lifecycleManager.getShutdownThrowable();
                    if (cause == null) {
                        cause = Status.UNKNOWN.withDescription("Ping failed but for unknown reason.")
                                .withCause(future.cause()).asException();
                    }
                }
                finalPing.failed(cause);
                if (ping == finalPing) {
                    ping = null;
                }
            }
        }
    });
}
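
This example rewrites the failure cause: a ClosedChannelException is replaced by the transport's recorded shutdown throwable, or by a Status.UNKNOWN exception if none is recorded, so that the ping callback receives a more descriptive error than the bare channel closure.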

From source file: io.grpc.netty.NettyClientHandlerTest.java

License: Apache License

@Test
public void receivedGoAwayShouldCancelBufferedStream() throws Exception {
    // Force the stream to be buffered.
    receiveMaxConcurrentStreams(0);
    ChannelFuture future = enqueue(newCreateStreamCommand(grpcHeaders, streamTransportState));
    channelRead(goAwayFrame(0));
    assertTrue(future.isDone());
    assertFalse(future.isSuccess());
    Status status = Status.fromThrowable(future.cause());
    assertEquals(Status.Code.UNAVAILABLE, status.getCode());
    assertEquals("HTTP/2 error code: NO_ERROR\nReceived Goaway", status.getDescription());
}
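
This test and the two that follow convert the failed stream-creation future's cause() into a gRPC Status via Status.fromThrowable and assert on its code and description.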

From source file: io.grpc.netty.NettyClientHandlerTest.java

License: Apache License

@Test
public void receivedGoAwayShouldFailUnknownBufferedStreams() throws Exception {
    receiveMaxConcurrentStreams(0);

    ChannelFuture future = enqueue(newCreateStreamCommand(grpcHeaders, streamTransportState));

    // Read a GOAWAY that indicates our stream was never processed by the server.
    channelRead(goAwayFrame(0, 8 /* Cancel */, Unpooled.copiedBuffer("this is a test", UTF_8)));
    assertTrue(future.isDone());
    assertFalse(future.isSuccess());
    Status status = Status.fromThrowable(future.cause());
    assertEquals(Status.CANCELLED.getCode(), status.getCode());
    assertEquals("HTTP/2 error code: CANCEL\nReceived Goaway\nthis is a test", status.getDescription());
}

From source file: io.grpc.netty.NettyClientHandlerTest.java

License: Apache License

@Test
public void receivedGoAwayShouldFailNewStreams() throws Exception {
    // Read a GOAWAY that indicates our stream was never processed by the server.
    channelRead(goAwayFrame(0, 8 /* Cancel */, Unpooled.copiedBuffer("this is a test", UTF_8)));

    // Now try to create a stream.
    ChannelFuture future = enqueue(newCreateStreamCommand(grpcHeaders, streamTransportState));
    assertTrue(future.isDone());
    assertFalse(future.isSuccess());
    Status status = Status.fromThrowable(future.cause());
    assertEquals(Status.CANCELLED.getCode(), status.getCode());
    assertEquals("HTTP/2 error code: CANCEL\nReceived Goaway\nthis is a test", status.getDescription());
}

From source file: io.grpc.netty.NettyClientHandlerTest.java

License: Apache License

@Test
public void nonExistentStream() throws Exception {
    Status status = Status.INTERNAL.withDescription("zz");

    lifecycleManager.notifyShutdown(status);
    // Stream creation can race with the transport shutting down, with the create command already
    // enqueued.
    ChannelFuture future1 = createStream();
    future1.await();
    assertNotNull(future1.cause());
    assertThat(Status.fromThrowable(future1.cause()).getCode()).isEqualTo(status.getCode());

    ChannelFuture future2 = enqueue(new CancelClientStreamCommand(streamTransportState, status));
    future2.sync();
}
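
Here the test awaits the future rather than syncing it, then asserts that cause() is non-null and maps to the status used to shut the transport down; future2.sync() would rethrow if the cancel command itself failed.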

From source file: io.grpc.netty.NettyClientTransport.java

License: Apache License

@SuppressWarnings("unchecked")
@Override
public Runnable start(Listener transportListener) {
    lifecycleManager = new ClientTransportLifecycleManager(
            Preconditions.checkNotNull(transportListener, "listener"));
    EventLoop eventLoop = group.next();
    if (keepAliveTimeNanos != KEEPALIVE_TIME_NANOS_DISABLED) {
        keepAliveManager = new KeepAliveManager(new ClientKeepAlivePinger(this), eventLoop, keepAliveTimeNanos,
                keepAliveTimeoutNanos, keepAliveWithoutCalls);
    }

    handler = NettyClientHandler.newHandler(lifecycleManager, keepAliveManager, flowControlWindow,
            maxHeaderListSize, GrpcUtil.STOPWATCH_SUPPLIER, tooManyPingsRunnable, transportTracer,
            eagAttributes, authorityString);
    NettyHandlerSettings.setAutoWindow(handler);

    ChannelHandler negotiationHandler = negotiator.newHandler(handler);

    Bootstrap b = new Bootstrap();
    b.attr(LOGGER_KEY, channelLogger);
    b.group(eventLoop);
    b.channelFactory(channelFactory);
    // For non-socket based channel, the option will be ignored.
    b.option(SO_KEEPALIVE, true);
    // For non-epoll based channel, the option will be ignored.
    if (keepAliveTimeNanos != KEEPALIVE_TIME_NANOS_DISABLED) {
        ChannelOption<Integer> tcpUserTimeout = Utils.maybeGetTcpUserTimeoutOption();
        if (tcpUserTimeout != null) {
            b.option(tcpUserTimeout, (int) TimeUnit.NANOSECONDS.toMillis(keepAliveTimeoutNanos));
        }
    }
    for (Map.Entry<ChannelOption<?>, ?> entry : channelOptions.entrySet()) {
        // Every entry in the map is obtained from
        // NettyChannelBuilder#withOption(ChannelOption<T> option, T value)
        // so it is safe to pass the key-value pair to b.option().
        b.option((ChannelOption<Object>) entry.getKey(), entry.getValue());
    }

    ChannelHandler bufferingHandler = new WriteBufferingAndExceptionHandler(negotiationHandler);

    /**
     * We don't use a ChannelInitializer in the client bootstrap because its "initChannel" method
     * is executed in the event loop and we need this handler to be in the pipeline immediately so
     * that it may begin buffering writes.
     */
    b.handler(bufferingHandler);
    ChannelFuture regFuture = b.register();
    if (regFuture.isDone() && !regFuture.isSuccess()) {
        channel = null;
        // Initialization has failed badly. All new streams should be made to fail.
        Throwable t = regFuture.cause();
        if (t == null) {
            t = new IllegalStateException("Channel is null, but future doesn't have a cause");
        }
        statusExplainingWhyTheChannelIsNull = Utils.statusFromThrowable(t);
        // Use a Runnable since lifecycleManager calls transportListener
        return new Runnable() {
            @Override
            public void run() {
                // NOTICE: we are not calling lifecycleManager from the event loop. But there isn't really
                // an event loop in this case, so nothing should be accessing the lifecycleManager. We
                // could use GlobalEventExecutor (which is what regFuture would use for notifying
                // listeners in this case), but avoiding on-demand thread creation in an error case seems
                // a good idea and is probably clearer threading.
                lifecycleManager.notifyTerminated(statusExplainingWhyTheChannelIsNull);
            }
        };
    }
    channel = regFuture.channel();
    // Start the write queue as soon as the channel is constructed
    handler.startWriteQueue(channel);
    // This write will have no effect, yet it will only complete once the negotiationHandler
    // flushes any pending writes. We need it to be staged *before* the `connect` so that
    // the channel can't have been closed yet, removing all handlers. This write will sit in the
    // AbstractBufferingHandler's buffer, and will either be flushed on a successful connection,
    // or failed if the connection fails.
    channel.writeAndFlush(NettyClientHandler.NOOP_MESSAGE).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                // Need to notify of this failure, because NettyClientHandler may not have been added to
                // the pipeline before the error occurred.
                lifecycleManager.notifyTerminated(Utils.statusFromThrowable(future.cause()));
            }
        }
    });
    // Start the connection operation to the server.
    SocketAddress localAddress = localSocketPicker.createSocketAddress(remoteAddress, eagAttributes);
    if (localAddress != null) {
        channel.connect(remoteAddress, localAddress);
    } else {
        channel.connect(remoteAddress);
    }

    if (keepAliveManager != null) {
        keepAliveManager.onTransportStarted();
    }

    return null;
}
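
This last example also shows the defensive side of cause(): when channel registration fails without recording a cause, the code substitutes an IllegalStateException so a meaningful status can still be reported, and the no-op write's listener later converts future.cause() into a Status to terminate the transport.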