Example usage for io.netty.channel ChannelFuture isSuccess

Introduction

On this page you can find usage examples for io.netty.channel.ChannelFuture.isSuccess().

Prototype

boolean isSuccess();

Document

Returns true if and only if the I/O operation was completed successfully.
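
The usage examples that follow all apply the same pattern: an asynchronous I/O operation (connect, write, or writeAndFlush) returns a ChannelFuture, a listener is attached to it, and the listener branches on isSuccess() once the operation completes. A minimal sketch of that pattern (ctx and msg are hypothetical placeholders, not taken from any of the examples below):

import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;

// Minimal sketch of checking isSuccess() from a listener; ctx and msg are
// assumed to be supplied by the enclosing handler.
void writeAndCheck(ChannelHandlerContext ctx, Object msg) {
    ChannelFuture writeFuture = ctx.writeAndFlush(msg);
    writeFuture.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) {
            if (future.isSuccess()) {
                // The I/O operation completed successfully.
            } else {
                // The operation failed or was cancelled; cause() carries the
                // failure reason (it may be null if the future was cancelled).
                future.channel().close();
            }
        }
    });
}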

Usage

From source file: org.apache.giraph.comm.netty.NettyClient.java

License: Apache License

/**
 * Check if there are some open requests which have been sent a long time
 * ago, and if so resend them.
 */
private void checkRequestsForProblems() {
    long lastTimeChecked = lastTimeCheckedRequestsForProblems.get();
    // If not enough time passed from the previous check, return
    if (System.currentTimeMillis() < lastTimeChecked + waitingRequestMsecs) {
        return;
    }
    // If another thread did the check already, return
    if (!lastTimeCheckedRequestsForProblems.compareAndSet(lastTimeChecked, System.currentTimeMillis())) {
        return;
    }
    List<ClientRequestId> addedRequestIds = Lists.newArrayList();
    List<RequestInfo> addedRequestInfos = Lists.newArrayList();
    // Check all the requests for problems
    for (Map.Entry<ClientRequestId, RequestInfo> entry : clientRequestIdRequestInfoMap.entrySet()) {
        RequestInfo requestInfo = entry.getValue();
        ChannelFuture writeFuture = requestInfo.getWriteFuture();
        // Request wasn't sent yet
        if (writeFuture == null) {
            continue;
        }
        // If not connected anymore, request failed, or the request is taking
        // too long, re-establish and resend
        if (!writeFuture.channel().isActive() || (writeFuture.isDone() && !writeFuture.isSuccess())
                || (requestInfo.getElapsedMsecs() > maxRequestMilliseconds)) {
            LOG.warn("checkRequestsForProblems: Problem with request id " + entry.getKey() + " connected = "
                    + writeFuture.channel().isActive() + ", future done = " + writeFuture.isDone() + ", "
                    + "success = " + writeFuture.isSuccess() + ", " + "cause = " + writeFuture.cause() + ", "
                    + "elapsed time = " + requestInfo.getElapsedMsecs() + ", " + "destination = "
                    + writeFuture.channel().remoteAddress() + " " + requestInfo);
            addedRequestIds.add(entry.getKey());
            addedRequestInfos
                    .add(new RequestInfo(requestInfo.getDestinationAddress(), requestInfo.getRequest()));
        }
    }

    // Add any new requests to the system, connect if necessary, and re-send
    for (int i = 0; i < addedRequestIds.size(); ++i) {
        ClientRequestId requestId = addedRequestIds.get(i);
        RequestInfo requestInfo = addedRequestInfos.get(i);

        if (clientRequestIdRequestInfoMap.put(requestId, requestInfo) == null) {
            LOG.warn("checkRequestsForProblems: Request " + requestId
                    + " completed prior to sending the next request");
            clientRequestIdRequestInfoMap.remove(requestId);
        }
        InetSocketAddress remoteServer = requestInfo.getDestinationAddress();
        Channel channel = getNextChannel(remoteServer);
        if (LOG.isInfoEnabled()) {
            LOG.info("checkRequestsForProblems: Re-issuing request " + requestInfo);
        }
        ChannelFuture writeFuture = channel.write(requestInfo.getRequest());
        requestInfo.setWriteFuture(writeFuture);
        writeFuture.addListener(logErrorListener);
    }
    addedRequestIds.clear();
    addedRequestInfos.clear();
}

From source file: org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.java

License: Apache License

private static List<Future<Channel>> connectToDataNodes(final Configuration conf, final DFSClient client,
        String clientName, final LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS,
        BlockConstructionStage stage, DataChecksum summer, EventLoop eventLoop) {
    Enum<?>[] storageTypes = locatedBlock.getStorageTypes();
    DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
    boolean connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
            DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
    final int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
    ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
    blockCopy.setNumBytes(locatedBlock.getBlockSize());
    ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
            .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PB_HELPER.convert(blockCopy))
                    .setToken(PB_HELPER.convert(locatedBlock.getBlockToken())))
            .setClientName(clientName).build();
    ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
    final OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder().setHeader(header)
            .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())).setPipelineSize(1)
            .setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()).setMaxBytesRcvd(maxBytesRcvd)
            .setLatestGenerationStamp(latestGS).setRequestedChecksum(checksumProto)
            .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
    List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
    for (int i = 0; i < datanodeInfos.length; i++) {
        final DatanodeInfo dnInfo = datanodeInfos[i];
        // Use Enum here because StorageType was moved to another package in hadoop 2.6. Using StorageType
        // directly would cause a compilation error for hadoop 2.5 or earlier.
        final Enum<?> storageType = storageTypes[i];
        final Promise<Channel> promise = eventLoop.newPromise();
        futureList.add(promise);
        String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
        new Bootstrap().group(eventLoop).channel(NioSocketChannel.class)
                .option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {

                    @Override
                    protected void initChannel(Channel ch) throws Exception {
                        // We need the remote address of the channel, so we can only move on after the
                        // channel has connected. Leave an empty implementation here because netty does not
                        // allow a null handler.
                    }
                }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {

                    @Override
                    public void operationComplete(ChannelFuture future) throws Exception {
                        if (future.isSuccess()) {
                            initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder,
                                    timeoutMs, client, locatedBlock.getBlockToken(), promise);
                        } else {
                            promise.tryFailure(future.cause());
                        }
                    }
                });
    }
    return futureList;
}

From source file: org.apache.hadoop.hbase.ipc.AsyncRpcChannel.java

License: Apache License

/**
 * Connect to channel
 *
 * @param bootstrap to connect to
 * @return future of connection
 */
private ChannelFuture connect(final Bootstrap bootstrap) {
    return bootstrap.remoteAddress(address).connect().addListener(new GenericFutureListener<ChannelFuture>() {
        @Override
        public void operationComplete(final ChannelFuture f) throws Exception {
            if (!f.isSuccess()) {
                if (f.cause() instanceof SocketException) {
                    retryOrClose(bootstrap, connectFailureCounter++, f.cause());
                } else {
                    retryOrClose(bootstrap, ioFailureCounter++, f.cause());
                }
                return;
            }
            channel = f.channel();

            setupAuthorization();

            ByteBuf b = channel.alloc().directBuffer(6);
            createPreamble(b, authMethod);
            channel.writeAndFlush(b).addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
            if (useSasl) {
                UserGroupInformation ticket = AsyncRpcChannel.this.ticket.getUGI();
                if (authMethod == AuthMethod.KERBEROS) {
                    if (ticket != null && ticket.getRealUser() != null) {
                        ticket = ticket.getRealUser();
                    }
                }
                SaslClientHandler saslHandler;
                if (ticket == null) {
                    throw new FatalConnectionException("ticket/user is null");
                }
                final UserGroupInformation realTicket = ticket;
                saslHandler = ticket.doAs(new PrivilegedExceptionAction<SaslClientHandler>() {
                    @Override
                    public SaslClientHandler run() throws IOException {
                        return getSaslHandler(realTicket, bootstrap);
                    }
                });
                if (saslHandler != null) {
                    // Sasl connect is successful. Let's set up Sasl channel handler
                    channel.pipeline().addFirst(saslHandler);
                } else {
                    // fall back to simple auth because server told us so.
                    authMethod = AuthMethod.SIMPLE;
                    useSasl = false;
                }
            } else {
                startHBaseConnection(f.channel());
            }
        }
    });
}

From source file: org.apache.hadoop.hbase.ipc.AsyncRpcChannel.java

License: Apache License

/**
 * Start HBase connection
 *
 * @param ch channel to start connection on
 */
private void startHBaseConnection(Channel ch) {
    ch.pipeline().addLast("frameDecoder", new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4));
    ch.pipeline().addLast(new AsyncServerResponseHandler(this));
    try {
        writeChannelHeader(ch).addListener(new GenericFutureListener<ChannelFuture>() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (!future.isSuccess()) {
                    close(future.cause());
                    return;
                }
                List<AsyncCall> callsToWrite;
                synchronized (pendingCalls) {
                    connected = true;
                    callsToWrite = new ArrayList<AsyncCall>(pendingCalls.values());
                }
                for (AsyncCall call : callsToWrite) {
                    writeRequest(call);
                }
            }
        });
    } catch (IOException e) {
        close(e);
    }
}

From source file: org.apache.hadoop.hbase.ipc.AsyncRpcChannelImpl.java

License: Apache License

/**
 * Connect to channel
 * @param bootstrap to connect to
 * @return future of connection
 */
private ChannelFuture connect(final Bootstrap bootstrap) {
    return bootstrap.remoteAddress(address).connect().addListener(new GenericFutureListener<ChannelFuture>() {
        @Override
        public void operationComplete(final ChannelFuture f) throws Exception {
            if (!f.isSuccess()) {
                retryOrClose(bootstrap, failureCounter++, client.failureSleep, f.cause());
                return;
            }
            channel = f.channel();

            setupAuthorization();

            ByteBuf b = channel.alloc().directBuffer(6);
            createPreamble(b, authMethod);
            channel.writeAndFlush(b).addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
            if (useSasl) {
                UserGroupInformation ticket = AsyncRpcChannelImpl.this.ticket.getUGI();
                if (authMethod == AuthMethod.KERBEROS) {
                    if (ticket != null && ticket.getRealUser() != null) {
                        ticket = ticket.getRealUser();
                    }
                }
                SaslClientHandler saslHandler;
                if (ticket == null) {
                    throw new FatalConnectionException("ticket/user is null");
                }
                final UserGroupInformation realTicket = ticket;
                saslHandler = ticket.doAs(new PrivilegedExceptionAction<SaslClientHandler>() {
                    @Override
                    public SaslClientHandler run() throws IOException {
                        return getSaslHandler(realTicket, bootstrap);
                    }
                });
                if (saslHandler != null) {
                    // Sasl connect is successful. Let's set up Sasl channel handler
                    channel.pipeline().addFirst(saslHandler);
                } else {
                    // fall back to simple auth because server told us so.
                    authMethod = AuthMethod.SIMPLE;
                    useSasl = false;
                }
            } else {
                startHBaseConnection(f.channel());
            }
        }
    });
}

From source file: org.apache.hadoop.hbase.ipc.NettyRpcConnection.java

License: Apache License

private void connect() {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Connecting to " + remoteId.address);
    }

    this.channel = new Bootstrap().group(rpcClient.group).channel(rpcClient.channelClass)
            .option(ChannelOption.TCP_NODELAY, rpcClient.isTcpNoDelay())
            .option(ChannelOption.SO_KEEPALIVE, rpcClient.tcpKeepAlive)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, rpcClient.connectTO)
            .handler(new BufferCallBeforeInitHandler()).localAddress(rpcClient.localAddr)
            .remoteAddress(remoteId.address).connect().addListener(new ChannelFutureListener() {

                @Override
                public void operationComplete(ChannelFuture future) throws Exception {
                    Channel ch = future.channel();
                    if (!future.isSuccess()) {
                        failInit(ch, toIOE(future.cause()));
                        rpcClient.failedServers.addToFailedServers(remoteId.address);
                        return;
                    }
                    ch.writeAndFlush(connectionHeaderPreamble.retainedDuplicate());
                    if (useSasl) {
                        saslNegotiate(ch);
                    } else {
                        // send the connection header to server
                        ch.write(connectionHeaderWithLength.retainedDuplicate());
                        established(ch);
                    }
                }
            }).channel();
}

From source file: org.apache.hadoop.hbase.ipc.NettyRpcConnection.java

License: Apache License

@Override
public synchronized void sendRequest(final Call call, HBaseRpcController hrc) throws IOException {
    if (reloginInProgress) {
        throw new IOException("Can not send request because relogin is in progress.");
    }
    hrc.notifyOnCancel(new RpcCallback<Object>() {

        @Override
        public void run(Object parameter) {
            setCancelled(call);
            synchronized (this) {
                if (channel != null) {
                    channel.pipeline().fireUserEventTriggered(new CallEvent(CANCELLED, call));
                }
            }
        }
    }, new CancellationCallback() {

        @Override
        public void run(boolean cancelled) throws IOException {
            if (cancelled) {
                setCancelled(call);
            } else {
                if (channel == null) {
                    connect();
                }
                scheduleTimeoutTask(call);
                channel.writeAndFlush(call).addListener(new ChannelFutureListener() {

                    @Override
                    public void operationComplete(ChannelFuture future) throws Exception {
                        // Fail the call if we failed to write it out, usually because the channel is
                        // closed. This is needed because we may shut down the channel inside the event loop
                        // and there may still be some pending calls left in the event loop queue after us.
                        if (!future.isSuccess()) {
                            call.setException(toIOE(future.cause()));
                        }
                    }
                });
            }
        }
    });
}

From source file: org.apache.hadoop.hbase.security.SaslClientHandler.java

License: Apache License

/**
 * Write SASL token
 * @param ctx to write to
 * @param saslToken to write
 */
private void writeSaslToken(final ChannelHandlerContext ctx, byte[] saslToken) {
    ByteBuf b = ctx.alloc().buffer(4 + saslToken.length);
    b.writeInt(saslToken.length);
    b.writeBytes(saslToken, 0, saslToken.length);
    ctx.writeAndFlush(b).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                exceptionCaught(ctx, future.cause());
            }
        }
    });
}

From source file: org.apache.hadoop.hbase.security.SaslClientHandler.java

License: Apache License

@Override
public void write(final ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
    // If not complete, try to negotiate
    if (!saslClient.isComplete()) {
        super.write(ctx, msg, promise);
    } else {
        ByteBuf in = (ByteBuf) msg;

        try {
            saslToken = saslClient.wrap(in.array(), in.readerIndex(), in.readableBytes());
        } catch (SaslException se) {
            try {
                saslClient.dispose();
            } catch (SaslException ignored) {
                LOG.debug("Ignoring SASL exception", ignored);
            }
            promise.setFailure(se);
        }
        if (saslToken != null) {
            ByteBuf out = ctx.channel().alloc().buffer(4 + saslToken.length);
            out.writeInt(saslToken.length);
            out.writeBytes(saslToken, 0, saslToken.length);

            ctx.write(out).addListener(new ChannelFutureListener() {
                @Override
                public void operationComplete(ChannelFuture future) throws Exception {
                    if (!future.isSuccess()) {
                        exceptionCaught(ctx, future.cause());
                    }
                }
            });

            saslToken = null;
        }
    }
}

From source file: org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.java

License: Apache License

private static List<Future<Channel>> connectToDataNodes(Configuration conf, String clientName,
        LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS, BlockConstructionStage stage,
        DataChecksum summer, EventLoop eventLoop) {
    Enum<?>[] storageTypes = locatedBlock.getStorageTypes();
    DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
    boolean connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
            DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
    final int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, HdfsServerConstants.READ_TIMEOUT);
    ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
    blockCopy.setNumBytes(locatedBlock.getBlockSize());
    ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
            .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PBHelper.convert(blockCopy))
                    .setToken(PBHelper.convert(locatedBlock.getBlockToken())))
            .setClientName(clientName).build();
    ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
    final OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder().setHeader(header)
            .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())).setPipelineSize(1)
            .setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()).setMaxBytesRcvd(maxBytesRcvd)
            .setLatestGenerationStamp(latestGS).setRequestedChecksum(checksumProto)
            .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
    List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
    for (int i = 0; i < datanodeInfos.length; i++) {
        final DatanodeInfo dnInfo = datanodeInfos[i];
        // Use Enum here because StorageType was moved to another package in hadoop 2.6. Using StorageType
        // directly would cause a compilation error for hadoop 2.5 or earlier.
        final Enum<?> storageType = storageTypes[i];
        final Promise<Channel> promise = eventLoop.newPromise();
        futureList.add(promise);
        String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
        new Bootstrap().group(eventLoop).channel(NioSocketChannel.class)
                .option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {

                    @Override
                    protected void initChannel(Channel ch) throws Exception {
                        processWriteBlockResponse(ch, dnInfo, promise, timeoutMs);
                    }
                }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {

                    @Override
                    public void operationComplete(ChannelFuture future) throws Exception {
                        if (future.isSuccess()) {
                            requestWriteBlock(future.channel(), storageType, writeBlockProtoBuilder);
                        } else {
                            promise.tryFailure(future.cause());
                        }
                    }
                });
    }
    return futureList;
}