Example usage for io.netty.channel ChannelFuture cause

Introduction

This page collects example usages of the io.netty.channel.ChannelFuture.cause() method.

Prototype

Throwable cause();

Document

Returns the cause of the failed I/O operation if the I/O operation has failed, or null if the operation succeeded or has not yet completed.
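
All of the examples below follow the same pattern: register a listener on the ChannelFuture, check isSuccess() inside operationComplete, and read cause() only on failure. A minimal sketch of that pattern (not taken from the sources below; writeWithFailureLogging, channel, and message are illustrative names):

private void writeWithFailureLogging(Channel channel, Object message) {
    channel.writeAndFlush(message).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) {
            if (!future.isSuccess()) {
                // cause() is non-null only once the operation has failed.
                future.cause().printStackTrace();
                future.channel().close();
            }
        }
    });
}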

Usage

From source file: org.apache.hadoop.hbase.ipc.AsyncRpcChannel.java

License: Apache License

/**
 * Connect to channel.
 *
 * @param bootstrap to connect to
 * @return future of connection
 */
private ChannelFuture connect(final Bootstrap bootstrap) {
    return bootstrap.remoteAddress(address).connect().addListener(new GenericFutureListener<ChannelFuture>() {
        @Override
        public void operationComplete(final ChannelFuture f) throws Exception {
            if (!f.isSuccess()) {
                if (f.cause() instanceof SocketException) {
                    retryOrClose(bootstrap, connectFailureCounter++, f.cause());
                } else {
                    retryOrClose(bootstrap, ioFailureCounter++, f.cause());
                }
                return;
            }
            channel = f.channel();

            setupAuthorization();

            // The 6-byte preamble carries the "HBas" magic, RPC version and auth code.
            ByteBuf b = channel.alloc().directBuffer(6);
            createPreamble(b, authMethod);
            channel.writeAndFlush(b).addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
            if (useSasl) {
                UserGroupInformation ticket = AsyncRpcChannel.this.ticket.getUGI();
                if (authMethod == AuthMethod.KERBEROS) {
                    if (ticket != null && ticket.getRealUser() != null) {
                        ticket = ticket.getRealUser();
                    }
                }
                SaslClientHandler saslHandler;
                if (ticket == null) {
                    throw new FatalConnectionException("ticket/user is null");
                }
                final UserGroupInformation realTicket = ticket;
                saslHandler = ticket.doAs(new PrivilegedExceptionAction<SaslClientHandler>() {
                    @Override
                    public SaslClientHandler run() throws IOException {
                        return getSaslHandler(realTicket, bootstrap);
                    }
                });
                if (saslHandler != null) {
                    // Sasl connect is successful. Let's set up Sasl channel handler
                    channel.pipeline().addFirst(saslHandler);
                } else {
                    // fall back to simple auth because server told us so.
                    authMethod = AuthMethod.SIMPLE;
                    useSasl = false;
                }
            } else {
                startHBaseConnection(f.channel());
            }
        }
    });
}

From source file: org.apache.hadoop.hbase.ipc.AsyncRpcChannel.java

License: Apache License

/**
 * Start HBase connection.
 *
 * @param ch channel to start connection on
 */
private void startHBaseConnection(Channel ch) {
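    // Responses are framed by a 4-byte length prefix, which is stripped before decoding.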
    ch.pipeline().addLast("frameDecoder", new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4));
    ch.pipeline().addLast(new AsyncServerResponseHandler(this));
    try {
        writeChannelHeader(ch).addListener(new GenericFutureListener<ChannelFuture>() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (!future.isSuccess()) {
                    close(future.cause());
                    return;
                }
                List<AsyncCall> callsToWrite;
                synchronized (pendingCalls) {
                    connected = true;
                    callsToWrite = new ArrayList<AsyncCall>(pendingCalls.values());
                }
                for (AsyncCall call : callsToWrite) {
                    writeRequest(call);
                }
            }
        });
    } catch (IOException e) {
        close(e);
    }
}

From source file: org.apache.hadoop.hbase.ipc.AsyncRpcChannelImpl.java

License: Apache License

/**
 * Connect to channel.
 * @param bootstrap to connect to
 * @return future of connection
 */
private ChannelFuture connect(final Bootstrap bootstrap) {
    return bootstrap.remoteAddress(address).connect().addListener(new GenericFutureListener<ChannelFuture>() {
        @Override
        public void operationComplete(final ChannelFuture f) throws Exception {
            if (!f.isSuccess()) {
                retryOrClose(bootstrap, failureCounter++, client.failureSleep, f.cause());
                return;
            }
            channel = f.channel();

            setupAuthorization();

            ByteBuf b = channel.alloc().directBuffer(6);
            createPreamble(b, authMethod);
            channel.writeAndFlush(b).addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
            if (useSasl) {
                UserGroupInformation ticket = AsyncRpcChannelImpl.this.ticket.getUGI();
                if (authMethod == AuthMethod.KERBEROS) {
                    if (ticket != null && ticket.getRealUser() != null) {
                        ticket = ticket.getRealUser();
                    }
                }
                SaslClientHandler saslHandler;
                if (ticket == null) {
                    throw new FatalConnectionException("ticket/user is null");
                }
                final UserGroupInformation realTicket = ticket;
                saslHandler = ticket.doAs(new PrivilegedExceptionAction<SaslClientHandler>() {
                    @Override
                    public SaslClientHandler run() throws IOException {
                        return getSaslHandler(realTicket, bootstrap);
                    }
                });
                if (saslHandler != null) {
                    // Sasl connect is successful. Let's set up Sasl channel handler
                    channel.pipeline().addFirst(saslHandler);
                } else {
                    // fall back to simple auth because server told us so.
                    authMethod = AuthMethod.SIMPLE;
                    useSasl = false;
                }
            } else {
                startHBaseConnection(f.channel());
            }
        }
    });
}

From source file: org.apache.hadoop.hbase.ipc.NettyRpcConnection.java

License: Apache License

private void connect() {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Connecting to " + remoteId.address);
    }

    this.channel = new Bootstrap().group(rpcClient.group).channel(rpcClient.channelClass)
            .option(ChannelOption.TCP_NODELAY, rpcClient.isTcpNoDelay())
            .option(ChannelOption.SO_KEEPALIVE, rpcClient.tcpKeepAlive)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, rpcClient.connectTO)
            .handler(new BufferCallBeforeInitHandler()).localAddress(rpcClient.localAddr)
            .remoteAddress(remoteId.address).connect().addListener(new ChannelFutureListener() {

                @Override
                public void operationComplete(ChannelFuture future) throws Exception {
                    Channel ch = future.channel();
                    if (!future.isSuccess()) {
                        failInit(ch, toIOE(future.cause()));
                        rpcClient.failedServers.addToFailedServers(remoteId.address);
                        return;
                    }
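                    // Send the connection preamble; retainedDuplicate() leaves the shared buffer intact for reuse.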
                    ch.writeAndFlush(connectionHeaderPreamble.retainedDuplicate());
                    if (useSasl) {
                        saslNegotiate(ch);
                    } else {
                        // send the connection header to server
                        ch.write(connectionHeaderWithLength.retainedDuplicate());
                        established(ch);
                    }
                }
            }).channel();
}

From source file: org.apache.hadoop.hbase.ipc.NettyRpcConnection.java

License: Apache License

@Override
public synchronized void sendRequest(final Call call, HBaseRpcController hrc) throws IOException {
    if (reloginInProgress) {
        throw new IOException("Can not send request because relogin is in progress.");
    }
    hrc.notifyOnCancel(new RpcCallback<Object>() {

        @Override
        public void run(Object parameter) {
            setCancelled(call);
            synchronized (this) {
                if (channel != null) {
                    channel.pipeline().fireUserEventTriggered(new CallEvent(CANCELLED, call));
                }
            }
        }
    }, new CancellationCallback() {

        @Override
        public void run(boolean cancelled) throws IOException {
            if (cancelled) {
                setCancelled(call);
            } else {
                if (channel == null) {
                    connect();
                }
                scheduleTimeoutTask(call);
                channel.writeAndFlush(call).addListener(new ChannelFutureListener() {

                    @Override
                    public void operationComplete(ChannelFuture future) throws Exception {
                        // Fail the call if we failed to write it out. This is usually because the
                        // channel is closed. This is needed because we may shut down the channel
                        // inside the event loop and there may still be pending calls in the event
                        // loop queue after us.
                        if (!future.isSuccess()) {
                            call.setException(toIOE(future.cause()));
                        }
                    }
                });
            }
        }
    });
}

From source file: org.apache.hadoop.hbase.security.SaslClientHandler.java

License: Apache License

/**
 * Write SASL token.
 * @param ctx to write to
 * @param saslToken to write
 */
private void writeSaslToken(final ChannelHandlerContext ctx, byte[] saslToken) {
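    // The token is sent length-prefixed: a 4-byte length followed by the raw token bytes.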
    ByteBuf b = ctx.alloc().buffer(4 + saslToken.length);
    b.writeInt(saslToken.length);
    b.writeBytes(saslToken, 0, saslToken.length);
    ctx.writeAndFlush(b).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                exceptionCaught(ctx, future.cause());
            }
        }
    });
}

From source file: org.apache.hadoop.hbase.security.SaslClientHandler.java

License: Apache License

@Override
public void write(final ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
    // If not complete, try to negotiate
    if (!saslClient.isComplete()) {
        super.write(ctx, msg, promise);
    } else {
        ByteBuf in = (ByteBuf) msg;

        try {
            saslToken = saslClient.wrap(in.array(), in.readerIndex(), in.readableBytes());
        } catch (SaslException se) {
            try {
                saslClient.dispose();
            } catch (SaslException ignored) {
                LOG.debug("Ignoring SASL exception", ignored);
            }
            promise.setFailure(se);
        }
        if (saslToken != null) {
            ByteBuf out = ctx.channel().alloc().buffer(4 + saslToken.length);
            out.writeInt(saslToken.length);
            out.writeBytes(saslToken, 0, saslToken.length);

            ctx.write(out).addListener(new ChannelFutureListener() {
                @Override
                public void operationComplete(ChannelFuture future) throws Exception {
                    if (!future.isSuccess()) {
                        exceptionCaught(ctx, future.cause());
                    }
                }
            });

            saslToken = null;
        }
    }
}

From source file: org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.java

License: Apache License

private static List<Future<Channel>> connectToDataNodes(Configuration conf, String clientName,
        LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS, BlockConstructionStage stage,
        DataChecksum summer, EventLoop eventLoop) {
    Enum<?>[] storageTypes = locatedBlock.getStorageTypes();
    DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
    boolean connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
            DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
    final int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, HdfsServerConstants.READ_TIMEOUT);
    ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
    blockCopy.setNumBytes(locatedBlock.getBlockSize());
    ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
            .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PBHelper.convert(blockCopy))
                    .setToken(PBHelper.convert(locatedBlock.getBlockToken())))
            .setClientName(clientName).build();
    ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
    final OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder().setHeader(header)
            .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())).setPipelineSize(1)
            .setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()).setMaxBytesRcvd(maxBytesRcvd)
            .setLatestGenerationStamp(latestGS).setRequestedChecksum(checksumProto)
            .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
    List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
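    // One connection attempt, and one promise, per datanode in the write pipeline.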
    for (int i = 0; i < datanodeInfos.length; i++) {
        final DatanodeInfo dnInfo = datanodeInfos[i];
        // Use Enum here because StorageType was moved to another package in hadoop 2.6. Using
        // StorageType directly would cause a compilation error for hadoop 2.5 or earlier.
        final Enum<?> storageType = storageTypes[i];
        final Promise<Channel> promise = eventLoop.newPromise();
        futureList.add(promise);
        String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
        new Bootstrap().group(eventLoop).channel(NioSocketChannel.class)
                .option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {

                    @Override
                    protected void initChannel(Channel ch) throws Exception {
                        processWriteBlockResponse(ch, dnInfo, promise, timeoutMs);
                    }
                }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {

                    @Override
                    public void operationComplete(ChannelFuture future) throws Exception {
                        if (future.isSuccess()) {
                            requestWriteBlock(future.channel(), storageType, writeBlockProtoBuilder);
                        } else {
                            promise.tryFailure(future.cause());
                        }
                    }
                });
    }
    return futureList;
}

From source file: org.apache.hadoop.hdfs.server.datanode.web.SimpleHttpProxyHandler.java

License: Apache License

@Override
public void channelRead0(final ChannelHandlerContext ctx, final HttpRequest req) {
    uri = req.getUri();
    final Channel client = ctx.channel();
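    // Connect to the backend on the client's event loop so both channels share one thread.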
    Bootstrap proxiedServer = new Bootstrap().group(client.eventLoop()).channel(NioSocketChannel.class)
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ChannelPipeline p = ch.pipeline();
                    p.addLast(new HttpRequestEncoder(), new Forwarder(uri, client));
                }
            });
    ChannelFuture f = proxiedServer.connect(host);
    proxiedChannel = f.channel();
    f.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
                ctx.channel().pipeline().remove(HttpResponseEncoder.class);
                HttpRequest newReq = new DefaultFullHttpRequest(HTTP_1_1, req.getMethod(), req.getUri());
                newReq.headers().add(req.headers());
                newReq.headers().set(CONNECTION, Values.CLOSE);
                future.channel().writeAndFlush(newReq);
            } else {
                DefaultHttpResponse resp = new DefaultHttpResponse(HTTP_1_1, INTERNAL_SERVER_ERROR);
                resp.headers().set(CONNECTION, Values.CLOSE);
                LOG.info("Proxy " + uri + " failed. Cause: ", future.cause());
                ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
                client.close();
            }
        }
    });
}

From source file: org.apache.hive.spark.client.rpc.Rpc.java

License: Apache License

/**
 * Creates an RPC client for a server running on the given remote host and port.
 *
 * @param config RPC configuration data.
 * @param eloop Event loop for managing the connection.
 * @param host Host name or IP address to connect to.
 * @param port Port where server is listening.
 * @param clientId The client ID that identifies the connection.
 * @param secret Secret for authenticating the client with the server.
 * @param dispatcher Dispatcher used to handle RPC calls.
 * @return A future that can be used to monitor the creation of the RPC object.
 */
public static Promise<Rpc> createClient(Map<String, String> config, final NioEventLoopGroup eloop, String host,
        int port, final String clientId, final String secret, final RpcDispatcher dispatcher) throws Exception {
    final RpcConfiguration rpcConf = new RpcConfiguration(config);
    int connectTimeoutMs = (int) rpcConf.getConnectTimeoutMs();

    final ChannelFuture cf = new Bootstrap().group(eloop).handler(new ChannelInboundHandlerAdapter() {
    }).channel(NioSocketChannel.class).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMs).connect(host, port);

    final Promise<Rpc> promise = eloop.next().newPromise();
    final AtomicReference<Rpc> rpc = new AtomicReference<Rpc>();

    // Set up a timeout to undo everything.
    final Runnable timeoutTask = new Runnable() {
        @Override
        public void run() {
            promise.setFailure(new TimeoutException("Timed out waiting for RPC server connection."));
        }
    };
    final ScheduledFuture<?> timeoutFuture = eloop.schedule(timeoutTask, rpcConf.getServerConnectTimeoutMs(),
            TimeUnit.MILLISECONDS);

    // The channel listener instantiates the Rpc instance when the connection is established,
    // and initiates the SASL handshake.
    cf.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture cf) throws Exception {
            if (cf.isSuccess()) {
                SaslClientHandler saslHandler = new SaslClientHandler(rpcConf, clientId, promise, timeoutFuture,
                        secret, dispatcher);
                Rpc rpc = createRpc(rpcConf, saslHandler, (SocketChannel) cf.channel(), eloop);
                saslHandler.rpc = rpc;
                saslHandler.sendHello(cf.channel());
            } else {
                promise.setFailure(cf.cause());
            }
        }
    });

    // Handle cancellation of the promise.
    promise.addListener(new GenericFutureListener<Promise<Rpc>>() {
        @Override
        public void operationComplete(Promise<Rpc> p) {
            if (p.isCancelled()) {
                cf.cancel(true);
            }
        }
    });

    return promise;
}