Example usage for io.netty.channel ChannelFuture isSuccess

Introduction

On this page you can find example usage for io.netty.channel ChannelFuture isSuccess.

Prototype

boolean isSuccess();

Source Link

Document

Returns true if and only if the I/O operation was completed successfully.
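
As a quick orientation before the project examples below, here is a minimal sketch of the usual pattern: register a ChannelFutureListener on the future returned by an outbound operation and branch on isSuccess(), reading cause() on failure. The class name, host, port, and messages are placeholders chosen for illustration; they do not come from any of the projects quoted in the Usage section.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

public class IsSuccessExample {
    public static void main(String[] args) {
        NioEventLoopGroup group = new NioEventLoopGroup(1);
        try {
            Bootstrap bootstrap = new Bootstrap().group(group).channel(NioSocketChannel.class)
                    .handler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // No handlers are needed for this simple connectivity check.
                        }
                    });

            // "example.com" and port 80 are placeholder values.
            ChannelFuture future = bootstrap.connect("example.com", 80);
            future.addListener(new ChannelFutureListener() {
                @Override
                public void operationComplete(ChannelFuture f) {
                    if (f.isSuccess()) {
                        // The connect completed successfully; the channel is ready for use.
                        System.out.println("Connected to " + f.channel().remoteAddress());
                        f.channel().close();
                    } else {
                        // On failure, cause() describes why the operation did not complete.
                        System.err.println("Connect failed: " + f.cause());
                    }
                }
            });

            // Wait briefly for the channel to close (on success or failure) before shutting down.
            future.channel().closeFuture().awaitUninterruptibly(5000);
        } finally {
            group.shutdownGracefully();
        }
    }
}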

Usage

From source file: org.apache.hadoop.hdfs.server.datanode.web.SimpleHttpProxyHandler.java

License: Apache License

@Override
public void channelRead0(final ChannelHandlerContext ctx, final HttpRequest req) {
    uri = req.getUri();
    final Channel client = ctx.channel();
    Bootstrap proxiedServer = new Bootstrap().group(client.eventLoop()).channel(NioSocketChannel.class)
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ChannelPipeline p = ch.pipeline();
                    p.addLast(new HttpRequestEncoder(), new Forwarder(uri, client));
                }
            });
    ChannelFuture f = proxiedServer.connect(host);
    proxiedChannel = f.channel();
    f.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
                ctx.channel().pipeline().remove(HttpResponseEncoder.class);
                HttpRequest newReq = new DefaultFullHttpRequest(HTTP_1_1, req.getMethod(), req.getUri());
                newReq.headers().add(req.headers());
                newReq.headers().set(CONNECTION, Values.CLOSE);
                future.channel().writeAndFlush(newReq);
            } else {
                DefaultHttpResponse resp = new DefaultHttpResponse(HTTP_1_1, INTERNAL_SERVER_ERROR);
                resp.headers().set(CONNECTION, Values.CLOSE);
                LOG.info("Proxy " + uri + " failed. Cause: ", future.cause());
                ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
                client.close();
            }
        }
    });
}

From source file: org.apache.hive.spark.client.rpc.Rpc.java

License: Apache License

/**
 * Creates an RPC client for a server running on the given remote host and port.
 *
 * @param config RPC configuration data.
 * @param eloop Event loop for managing the connection.
 * @param host Host name or IP address to connect to.
 * @param port Port where server is listening.
 * @param clientId The client ID that identifies the connection.
 * @param secret Secret for authenticating the client with the server.
 * @param dispatcher Dispatcher used to handle RPC calls.
 * @return A future that can be used to monitor the creation of the RPC object.
 */
public static Promise<Rpc> createClient(Map<String, String> config, final NioEventLoopGroup eloop, String host,
        int port, final String clientId, final String secret, final RpcDispatcher dispatcher) throws Exception {
    final RpcConfiguration rpcConf = new RpcConfiguration(config);
    int connectTimeoutMs = (int) rpcConf.getConnectTimeoutMs();

    final ChannelFuture cf = new Bootstrap().group(eloop).handler(new ChannelInboundHandlerAdapter() {
    }).channel(NioSocketChannel.class).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMs).connect(host, port);

    final Promise<Rpc> promise = eloop.next().newPromise();
    final AtomicReference<Rpc> rpc = new AtomicReference<Rpc>();

    // Set up a timeout to undo everything.
    final Runnable timeoutTask = new Runnable() {
        @Override
        public void run() {
            promise.setFailure(new TimeoutException("Timed out waiting for RPC server connection."));
        }
    };
    final ScheduledFuture<?> timeoutFuture = eloop.schedule(timeoutTask, rpcConf.getServerConnectTimeoutMs(),
            TimeUnit.MILLISECONDS);

    // The channel listener instantiates the Rpc instance when the connection is established,
    // and initiates the SASL handshake.
    cf.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture cf) throws Exception {
            if (cf.isSuccess()) {
                SaslClientHandler saslHandler = new SaslClientHandler(rpcConf, clientId, promise, timeoutFuture,
                        secret, dispatcher);
                Rpc rpc = createRpc(rpcConf, saslHandler, (SocketChannel) cf.channel(), eloop);
                saslHandler.rpc = rpc;
                saslHandler.sendHello(cf.channel());
            } else {
                promise.setFailure(cf.cause());
            }
        }
    });

    // Handle cancellation of the promise.
    promise.addListener(new GenericFutureListener<Promise<Rpc>>() {
        @Override
        public void operationComplete(Promise<Rpc> p) {
            if (p.isCancelled()) {
                cf.cancel(true);
            }
        }
    });

    return promise;
}

From source file: org.apache.hive.spark.client.rpc.Rpc.java

License: Apache License

/**
 * Sends an RPC call to the remote endpoint and returns a future that can be used to monitor the
 * operation.
 *
 * @param msg RPC call to send.
 * @param retType Type of expected reply.
 * @return A future used to monitor the operation.
 */
public <T> Future<T> call(Object msg, Class<T> retType) {
    Preconditions.checkArgument(msg != null);
    Preconditions.checkState(channel.isActive(), "RPC channel is closed.");
    try {
        final long id = rpcId.getAndIncrement();
        final Promise<T> promise = createPromise();
        ChannelFutureListener listener = new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture cf) {
                if (!cf.isSuccess() && !promise.isDone()) {
                    LOG.warn("Failed to send RPC, closing connection.", cf.cause());
                    promise.setFailure(cf.cause());
                    dispatcher.discardRpc(id);
                    close();
                }
            }
        };

        dispatcher.registerRpc(id, promise, msg.getClass().getName());
        synchronized (channelLock) {
            channel.write(new MessageHeader(id, Rpc.MessageType.CALL)).addListener(listener);
            channel.writeAndFlush(msg).addListener(listener);
        }
        return promise;
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}

From source file: org.apache.jackrabbit.oak.plugins.segment.standby.client.FailedRequestListener.java

License: Apache License

@Override
public void operationComplete(ChannelFuture future) throws Exception {
    if (!future.isSuccess()) {
        promise.setFailure(future.cause());
        future.channel().close();
    } else {
        future.channel().read();
    }
}

From source file: org.apache.jackrabbit.oak.plugins.segment.standby.server.StandbyServer.java

License: Apache License

private void start(boolean wait) {
    if (running)
        return;

    this.handler.state = STATUS_STARTING;

    final Thread close = new Thread() {
        @Override
        public void run() {
            try {
                running = true;
                handler.state = STATUS_RUNNING;
                channelFuture.sync().channel().closeFuture().sync();
            } catch (InterruptedException e) {
                StandbyServer.this.stop();
            }
        }
    };
    final ChannelFutureListener bindListener = new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) {
            if (future.isSuccess()) {
                close.start();
            } else {
                log.error("Server failed to start on port " + port + ", will be canceled", future.cause());
                future.channel().close();
                new Thread() {
                    @Override
                    public void run() {
                        close();
                    }
                }.start();
            }
        }
    };
    Future<?> startup = bossGroup.submit(new Runnable() {
        @Override
        public void run() {
            //netty 4.0.20 has a race condition issue with
            //asynchronous channel registration. As a workaround
            //we bind asynchronously from the boss event group to make
            //the channel registration synchronous.
            //Note that now this method will return immediately.
            channelFuture = b.bind(port);
            channelFuture.addListener(bindListener);
        }
    });
    if (!startup.awaitUninterruptibly(10000)) {
        log.error("Server failed to start within 10 seconds and will be canceled");
        startup.cancel(true);
    } else if (wait) {
        try {
            close.join();
        } catch (InterruptedException ignored) {
        }
    }
}

From source file: org.apache.pulsar.client.impl.ConnectionPool.java

License: Apache License

private CompletableFuture<ClientCnx> createConnection(InetSocketAddress address, int connectionKey) {
    if (log.isDebugEnabled()) {
        log.debug("Connection for {} not found in cache", address);
    }

    final CompletableFuture<ClientCnx> cnxFuture = new CompletableFuture<ClientCnx>();

    // Trigger async connect to broker
    bootstrap.connect(address).addListener((ChannelFuture future) -> {
        if (!future.isSuccess()) {
            log.warn("Failed to open connection to {} : {}", address,
                    future.cause().getClass().getSimpleName());
            cnxFuture.completeExceptionally(new PulsarClientException(future.cause()));
            cleanupConnection(address, connectionKey, cnxFuture);
            return;
        }

        log.info("[{}] Connected to server", future.channel());

        future.channel().closeFuture().addListener(v -> {
            // Remove connection from pool when it gets closed
            if (log.isDebugEnabled()) {
                log.debug("Removing closed connection from pool: {}", v);
            }
            cleanupConnection(address, connectionKey, cnxFuture);
        });

        // We are connected to broker, but need to wait until the connect/connected handshake is
        // complete
        final ClientCnx cnx = (ClientCnx) future.channel().pipeline().get("handler");
        if (!future.channel().isActive() || cnx == null) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Connection was already closed by the time we got notified", future.channel());
            }
            cnxFuture.completeExceptionally(new ChannelException("Connection already closed"));
            return;
        }

        cnx.connectionFuture().thenRun(() -> {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Connection handshake completed", cnx.channel());
            }
            cnxFuture.complete(cnx);
        }).exceptionally(exception -> {
            log.warn("[{}] Connection handshake failed: {}", cnx.channel(), exception.getMessage());
            cnxFuture.completeExceptionally(exception);
            cleanupConnection(address, connectionKey, cnxFuture);
            cnx.ctx().close();
            return null;
        });
    });

    return cnxFuture;
}

From source file: org.apache.qpid.jms.transports.netty.NettyTcpTransport.java

License: Apache License

@Override
public void connect(SSLContext sslContextOverride) throws IOException {

    if (listener == null) {
        throw new IllegalStateException("A transport listener must be set before connection attempts.");
    }

    final SslHandler sslHandler;
    if (isSecure()) {
        try {
            TransportSslOptions sslOptions = getSslOptions();
            sslOptions.setSslContextOverride(sslContextOverride);

            sslHandler = TransportSupport.createSslHandler(getRemoteLocation(), sslOptions);
        } catch (Exception ex) {
            // TODO: can we stop it throwing Exception?
            throw IOExceptionSupport.create(ex);
        }
    } else {
        sslHandler = null;
    }

    group = new NioEventLoopGroup(1);

    bootstrap = new Bootstrap();
    bootstrap.group(group);
    bootstrap.channel(NioSocketChannel.class);
    bootstrap.handler(new ChannelInitializer<Channel>() {
        @Override
        public void initChannel(Channel connectedChannel) throws Exception {
            configureChannel(connectedChannel, sslHandler);
        }
    });

    configureNetty(bootstrap, getTransportOptions());

    ChannelFuture future = bootstrap.connect(getRemoteHost(), getRemotePort());
    future.addListener(new ChannelFutureListener() {

        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                handleException(future.channel(), IOExceptionSupport.create(future.cause()));
            }
        }
    });

    try {
        connectLatch.await();
    } catch (InterruptedException ex) {
        LOG.debug("Transport connection was interrupted.");
        Thread.interrupted();
        failureCause = IOExceptionSupport.create(ex);
    }

    if (failureCause != null) {
        // Close out any Netty resources now as they are no longer needed.
        if (channel != null) {
            channel.close().syncUninterruptibly();
            channel = null;
        }
        if (group != null) {
            Future<?> fut = group.shutdownGracefully(0, SHUTDOWN_TIMEOUT, TimeUnit.MILLISECONDS);
            if (!fut.awaitUninterruptibly(2 * SHUTDOWN_TIMEOUT)) {
                LOG.trace("Channel group shutdown failed to complete in allotted time");
            }
            group = null;
        }

        throw failureCause;
    } else {
        // Connected, allow any held async error to fire now and close the transport.
        channel.eventLoop().execute(new Runnable() {

            @Override
            public void run() {
                if (failureCause != null) {
                    channel.pipeline().fireExceptionCaught(failureCause);
                }
            }
        });
    }
}

From source file: org.apache.qpid.proton.netty.ProtonNettyHandler.java

License: Apache License

private void write(final ChannelHandlerContext ctx) {
    synchronized (lock) {
        while (true) {
            int pending = transport.pending();
            if (pending > 0) {
                final int size = pending - offset;
                if (size > 0) {
                    ByteBuf buffer = Unpooled.buffer(size);
                    ByteBuffer head = transport.head();
                    head.position(offset);
                    buffer.writeBytes(head);
                    ChannelFuture chf = ctx.writeAndFlush(buffer);
                    offset += size;
                    chf.addListener(new ChannelFutureListener() {
                        @Override
                        public void operationComplete(ChannelFuture chf) {
                            if (chf.isSuccess()) {
                                synchronized (lock) {
                                    transport.pop(size);
                                    offset -= size;
                                }
                                write(ctx);
                                dispatch();
                            } else {
                                // ???
                            }
                        }
                    });
                } else {
                    return;
                }
            } else {
                if (pending < 0) {
                    closeOnFlush(ctx.channel());
                }
                return;
            }
        }
    }
}

From source file: org.apache.rocketmq.broker.client.net.Broker2Client.java

License: Apache License

public void checkProducerTransactionState(final Channel channel,
        final CheckTransactionStateRequestHeader requestHeader,
        final SelectMappedBufferResult selectMappedBufferResult) {
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.CHECK_TRANSACTION_STATE,
            requestHeader);
    request.markOnewayRPC();

    try {
        FileRegion fileRegion = new OneMessageTransfer(request.encodeHeader(selectMappedBufferResult.getSize()),
                selectMappedBufferResult);
        channel.writeAndFlush(fileRegion).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                selectMappedBufferResult.release();
                if (!future.isSuccess()) {
                    log.error("invokeProducer failed,", future.cause());
                }
            }
        });
    } catch (Throwable e) {
        log.error("invokeProducer exception", e);
        selectMappedBufferResult.release();
    }
}