List of usage examples for io.netty.util.concurrent.Future.isSuccess()
boolean isSuccess();
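isSuccess() returns true if and only if the operation behind the future completed successfully; on failure or cancellation, cause() carries the Throwable. Before the project excerpts below, here is a minimal, self-contained sketch of the common listener pattern (not taken from any of the listed projects; the class name and the DefaultEventExecutor/Promise scaffolding are illustrative):

import io.netty.util.concurrent.DefaultEventExecutor;
import io.netty.util.concurrent.EventExecutor;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;
import io.netty.util.concurrent.Promise;

public class FutureIsSuccessExample {
    public static void main(String[] args) {
        EventExecutor executor = new DefaultEventExecutor();
        Promise<String> promise = executor.newPromise();

        // The listener fires once the promise completes, successfully or not.
        promise.addListener(new FutureListener<String>() {
            @Override
            public void operationComplete(Future<String> future) {
                if (future.isSuccess()) {
                    // Completed without error: getNow() returns the result.
                    System.out.println("result: " + future.getNow());
                } else {
                    // Failed (or cancelled): cause() carries the Throwable.
                    System.err.println("failed: " + future.cause());
                }
            }
        });

        promise.setSuccess("hello");
        executor.shutdownGracefully();
    }
}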
From source file:org.apache.activemq.artemis.core.remoting.impl.netty.SharedEventLoopGroup.java
License:Apache License
@Override
public Future<?> shutdownGracefully(final long l, final long l2, final TimeUnit timeUnit) {
    if (channelFactoryCount.decrementAndGet() == 0) {
        shutdown.compareAndSet(null, next().scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                synchronized (SharedEventLoopGroup.class) {
                    if (shutdown.get() != null) {
                        Future<?> future = SharedEventLoopGroup.super.shutdownGracefully(l, l2, timeUnit);
                        future.addListener(new FutureListener<Object>() {
                            @Override
                            public void operationComplete(Future<Object> future) throws Exception {
                                if (future.isSuccess()) {
                                    terminationPromise.setSuccess(null);
                                } else {
                                    terminationPromise.setFailure(future.cause());
                                }
                            }
                        });
                        instance = null;
                    }
                }
            }
        }, 10, 10, TimeUnit.SECONDS));
    }
    return terminationPromise;
}
From source file:org.apache.activemq.artemis.core.remoting.impl.netty.SharedNioEventLoopGroup.java
License:Apache License
@Override
public Future<?> shutdownGracefully(final long l, final long l2, final TimeUnit timeUnit) {
    if (nioChannelFactoryCount.decrementAndGet() == 0) {
        shutdown.compareAndSet(null, next().scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                synchronized (SharedNioEventLoopGroup.class) {
                    if (shutdown.get() != null) {
                        Future<?> future = SharedNioEventLoopGroup.super.shutdownGracefully(l, l2, timeUnit);
                        future.addListener(new FutureListener<Object>() {
                            @Override
                            public void operationComplete(Future future) throws Exception {
                                if (future.isSuccess()) {
                                    terminationPromise.setSuccess(null);
                                } else {
                                    terminationPromise.setFailure(future.cause());
                                }
                            }
                        });
                        instance = null;
                    }
                }
            }
        }, 10, 10, TimeUnit.SECONDS));
    }
    return terminationPromise;
}
From source file:org.apache.bookkeeper.proto.BookieRequestProcessor.java
License:Apache License
private void processStartTLSRequestV3(final BookkeeperProtocol.Request r, final Channel c) {
    BookkeeperProtocol.Response.Builder response = BookkeeperProtocol.Response.newBuilder();
    BookkeeperProtocol.BKPacketHeader.Builder header = BookkeeperProtocol.BKPacketHeader.newBuilder();
    header.setVersion(BookkeeperProtocol.ProtocolVersion.VERSION_THREE);
    header.setOperation(r.getHeader().getOperation());
    header.setTxnId(r.getHeader().getTxnId());
    response.setHeader(header.build());
    if (shFactory == null) {
        LOG.error("Got StartTLS request but TLS not configured");
        response.setStatus(BookkeeperProtocol.StatusCode.EBADREQ);
        c.writeAndFlush(response.build());
    } else {
        // there is no need to execute in a different thread as this operation is light
        SslHandler sslHandler = shFactory.newTLSHandler();
        c.pipeline().addFirst("tls", sslHandler);
        response.setStatus(BookkeeperProtocol.StatusCode.EOK);
        BookkeeperProtocol.StartTLSResponse.Builder builder = BookkeeperProtocol.StartTLSResponse.newBuilder();
        response.setStartTLSResponse(builder.build());
        sslHandler.handshakeFuture().addListener(new GenericFutureListener<Future<Channel>>() {
            @Override
            public void operationComplete(Future<Channel> future) throws Exception {
                // notify the AuthPlugin the completion of the handshake, even in case of failure
                AuthHandler.ServerSideHandler authHandler = c.pipeline()
                        .get(AuthHandler.ServerSideHandler.class);
                authHandler.authProvider.onProtocolUpgrade();
                if (future.isSuccess()) {
                    LOG.info("Session is protected by: {}",
                            sslHandler.engine().getSession().getCipherSuite());
                } else {
                    LOG.error("TLS Handshake failure: {}", future.cause());
                    BookkeeperProtocol.Response.Builder errResponse = BookkeeperProtocol.Response.newBuilder()
                            .setHeader(r.getHeader()).setStatus(BookkeeperProtocol.StatusCode.EIO);
                    c.writeAndFlush(errResponse.build());
                    if (statsEnabled) {
                        bkStats.getOpStats(BKStats.STATS_UNKNOWN).incrementFailedOps();
                    }
                }
            }
        });
        c.writeAndFlush(response.build());
    }
}
From source file:org.apache.bookkeeper.proto.PerChannelBookieClient.java
License:Apache License
void initTLSHandshake() {
    // create TLS handler
    PerChannelBookieClient parentObj = PerChannelBookieClient.this;
    SslHandler handler = parentObj.shFactory.newTLSHandler();
    channel.pipeline().addFirst(parentObj.shFactory.getHandlerName(), handler);
    handler.handshakeFuture().addListener(new GenericFutureListener<Future<Channel>>() {
        @Override
        public void operationComplete(Future<Channel> future) throws Exception {
            int rc;
            Queue<GenericCallback<PerChannelBookieClient>> oldPendingOps;
            synchronized (PerChannelBookieClient.this) {
                if (future.isSuccess() && state == ConnectionState.CONNECTING) {
                    LOG.error("Connection state changed before TLS handshake completed {}/{}", addr, state);
                    rc = BKException.Code.BookieHandleNotAvailableException;
                    closeChannel(channel);
                    channel = null;
                    if (state != ConnectionState.CLOSED) {
                        state = ConnectionState.DISCONNECTED;
                    }
                } else if (future.isSuccess() && state == ConnectionState.START_TLS) {
                    rc = BKException.Code.OK;
                    LOG.info("Successfully connected to bookie using TLS: " + addr);
                    state = ConnectionState.CONNECTED;
                    AuthHandler.ClientSideHandler authHandler = future.get().pipeline()
                            .get(AuthHandler.ClientSideHandler.class);
                    authHandler.authProvider.onProtocolUpgrade();
                    activeTlsChannelCounter.inc();
                } else if (future.isSuccess()
                        && (state == ConnectionState.CLOSED || state == ConnectionState.DISCONNECTED)) {
                    LOG.warn("Closed before TLS handshake completed, clean up: {}, current state {}",
                            channel, state);
                    closeChannel(channel);
                    rc = BKException.Code.BookieHandleNotAvailableException;
                    channel = null;
                } else if (future.isSuccess() && state == ConnectionState.CONNECTED) {
                    LOG.debug("Already connected with another channel({}), so close the new channel({})",
                            channel, channel);
                    closeChannel(channel);
                    return; // pendingOps should have been completed when other channel connected
                } else {
                    LOG.error("TLS handshake failed with bookie: {}/{}, current state {} : ",
                            channel, addr, state, future.cause());
                    rc = BKException.Code.SecurityException;
                    closeChannel(channel);
                    channel = null;
                    if (state != ConnectionState.CLOSED) {
                        state = ConnectionState.DISCONNECTED;
                    }
                    failedTlsHandshakeCounter.inc();
                }
                // trick to not do operations under the lock, take the list
                // of pending ops and assign it to a new variable, while
                // emptying the pending ops by just assigning it to a new
                // list
                oldPendingOps = pendingOps;
                pendingOps = new ArrayDeque<>();
            }
            makeWritable();
            for (GenericCallback<PerChannelBookieClient> pendingOp : oldPendingOps) {
                pendingOp.operationComplete(rc, PerChannelBookieClient.this);
            }
        }
    });
}
From source file:org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.java
License:Apache License
private <A> void flush0(final A attachment, final CompletionHandler<Long, ? super A> handler,
        boolean syncBlock) {
    if (state != State.STREAMING) {
        handler.failed(new IOException("stream already broken"), attachment);
        return;
    }
    int dataLen = buf.readableBytes();
    final long ackedLength = nextPacketOffsetInBlock + dataLen;
    if (ackedLength == locatedBlock.getBlock().getNumBytes()) {
        // no new data, just return
        handler.completed(locatedBlock.getBlock().getNumBytes(), attachment);
        return;
    }
    Promise<Void> promise = eventLoop.newPromise();
    promise.addListener(new FutureListener<Void>() {
        @Override
        public void operationComplete(Future<Void> future) throws Exception {
            if (future.isSuccess()) {
                locatedBlock.getBlock().setNumBytes(ackedLength);
                handler.completed(ackedLength, attachment);
            } else {
                handler.failed(future.cause(), attachment);
            }
        }
    });
    Callback c = waitingAckQueue.peekLast();
    if (c != null && ackedLength == c.ackedLength) {
        // just append it to the tail of the waiting ack queue, do not issue a new hflush request.
        waitingAckQueue.addLast(new Callback(promise, ackedLength, Collections.<Channel>emptyList()));
        return;
    }
    int chunkLen = summer.getBytesPerChecksum();
    int trailingPartialChunkLen = dataLen % chunkLen;
    int numChecks = dataLen / chunkLen + (trailingPartialChunkLen != 0 ? 1 : 0);
    int checksumLen = numChecks * summer.getChecksumSize();
    ByteBuf checksumBuf = alloc.directBuffer(checksumLen);
    summer.calculateChunkedSums(buf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen));
    checksumBuf.writerIndex(checksumLen);
    PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock,
            nextPacketSeqno, false, dataLen, syncBlock);
    int headerLen = header.getSerializedSize();
    ByteBuf headerBuf = alloc.buffer(headerLen);
    header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
    headerBuf.writerIndex(headerLen);
    waitingAckQueue.addLast(new Callback(promise, ackedLength, datanodeList));
    for (Channel ch : datanodeList) {
        ch.write(headerBuf.duplicate().retain());
        ch.write(checksumBuf.duplicate().retain());
        ch.writeAndFlush(buf.duplicate().retain());
    }
    checksumBuf.release();
    headerBuf.release();
    ByteBuf newBuf = alloc.directBuffer().ensureWritable(trailingPartialChunkLen);
    if (trailingPartialChunkLen != 0) {
        buf.readerIndex(dataLen - trailingPartialChunkLen).readBytes(newBuf, trailingPartialChunkLen);
    }
    buf.release();
    this.buf = newBuf;
    nextPacketOffsetInBlock += dataLen - trailingPartialChunkLen;
    nextPacketSeqno++;
}
From source file:org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.java
License:Apache License
private static void initialize(Configuration conf, final Channel channel, final DatanodeInfo dnInfo,
        final Enum<?> storageType, final OpWriteBlockProto.Builder writeBlockProtoBuilder,
        final int timeoutMs, DFSClient client, Token<BlockTokenIdentifier> accessToken,
        final Promise<Channel> promise) {
    Promise<Void> saslPromise = channel.eventLoop().newPromise();
    trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise);
    saslPromise.addListener(new FutureListener<Void>() {
        @Override
        public void operationComplete(Future<Void> future) throws Exception {
            if (future.isSuccess()) {
                // setup response processing pipeline first, then send request.
                processWriteBlockResponse(channel, dnInfo, promise, timeoutMs);
                requestWriteBlock(channel, storageType, writeBlockProtoBuilder);
            } else {
                promise.tryFailure(future.cause());
            }
        }
    });
}
From source file:org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.java
License:Apache License
private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src,
        boolean overwrite, boolean createParent, short replication, long blockSize,
        EventLoop eventLoop) throws IOException {
    Configuration conf = dfs.getConf();
    FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
    DFSClient client = dfs.getClient();
    String clientName = client.getClientName();
    ClientProtocol namenode = client.getNamenode();
    HdfsFileStatus stat;
    try {
        stat = FILE_CREATER.create(namenode, src,
                FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), clientName,
                new EnumSetWritable<CreateFlag>(
                        overwrite ? EnumSet.of(CREATE, OVERWRITE) : EnumSet.of(CREATE)),
                createParent, replication, blockSize);
    } catch (Exception e) {
        if (e instanceof RemoteException) {
            throw (RemoteException) e;
        } else {
            throw new NameNodeException(e);
        }
    }
    beginFileLease(client, src, stat.getFileId());
    boolean succ = false;
    LocatedBlock locatedBlock = null;
    List<Future<Channel>> futureList = null;
    try {
        DataChecksum summer = createChecksum(client);
        locatedBlock = BLOCK_ADDER.addBlock(namenode, src, client.getClientName(), null, null,
                stat.getFileId(), null);
        List<Channel> datanodeList = new ArrayList<>();
        futureList = connectToDataNodes(conf, client, clientName, locatedBlock, 0L, 0L,
                PIPELINE_SETUP_CREATE, summer, eventLoop);
        for (Future<Channel> future : futureList) {
            // fail the creation if there are connection failures since we are fail-fast. The upper
            // layer should retry itself if needed.
            datanodeList.add(future.syncUninterruptibly().getNow());
        }
        CryptoCodec cryptocodec = createCryptoCodec(conf, stat, client);
        FanOutOneBlockAsyncDFSOutput output = new FanOutOneBlockAsyncDFSOutput(conf, fsUtils, dfs,
                client, namenode, clientName, src, stat.getFileId(), locatedBlock, cryptocodec,
                eventLoop, datanodeList, summer, ALLOC);
        succ = true;
        return output;
    } finally {
        if (!succ) {
            if (futureList != null) {
                for (Future<Channel> f : futureList) {
                    f.addListener(new FutureListener<Channel>() {
                        @Override
                        public void operationComplete(Future<Channel> future) throws Exception {
                            if (future.isSuccess()) {
                                future.getNow().close();
                            }
                        }
                    });
                }
            }
            endFileLease(client, src, stat.getFileId());
            fsUtils.recoverFileLease(dfs, new Path(src), conf, new CancelOnClose(client));
        }
    }
}
From source file:org.apache.hadoop.hbase.ipc.AsyncRpcClient.java
License:Apache License
/**
 * Call method async
 */
private void callMethod(Descriptors.MethodDescriptor md, final PayloadCarryingRpcController pcrc,
        Message param, Message returnType, User ticket, InetSocketAddress addr,
        final RpcCallback<Message> done) {
    final AsyncRpcChannel connection;
    try {
        connection = createRpcChannel(md.getService().getName(), addr, ticket);
        connection.callMethod(md, pcrc, param, returnType)
                .addListener(new GenericFutureListener<Future<Message>>() {
                    @Override
                    public void operationComplete(Future<Message> future) throws Exception {
                        if (!future.isSuccess()) {
                            Throwable cause = future.cause();
                            if (cause instanceof IOException) {
                                pcrc.setFailed((IOException) cause);
                            } else {
                                pcrc.setFailed(new IOException(cause));
                            }
                        } else {
                            try {
                                done.run(future.get());
                            } catch (ExecutionException e) {
                                Throwable cause = e.getCause();
                                if (cause instanceof IOException) {
                                    pcrc.setFailed((IOException) cause);
                                } else {
                                    pcrc.setFailed(new IOException(cause));
                                }
                            } catch (InterruptedException e) {
                                pcrc.setFailed(new IOException(e));
                            }
                        }
                    }
                });
    } catch (StoppedRpcClientException | FailedServerException e) {
        pcrc.setFailed(e);
    }
}
From source file:org.apache.hadoop.hbase.ipc.NettyRpcConnection.java
License:Apache License
private void saslNegotiate(final Channel ch) {
    UserGroupInformation ticket = getUGI();
    if (ticket == null) {
        failInit(ch, new FatalConnectionException("ticket/user is null"));
        return;
    }
    Promise<Boolean> saslPromise = ch.eventLoop().newPromise();
    final NettyHBaseSaslRpcClientHandler saslHandler;
    try {
        saslHandler = new NettyHBaseSaslRpcClientHandler(saslPromise, ticket, authMethod, token,
                serverPrincipal, rpcClient.fallbackAllowed, this.rpcClient.conf);
    } catch (IOException e) {
        failInit(ch, e);
        return;
    }
    ch.pipeline().addFirst(new SaslChallengeDecoder(), saslHandler);
    saslPromise.addListener(new FutureListener<Boolean>() {
        @Override
        public void operationComplete(Future<Boolean> future) throws Exception {
            if (future.isSuccess()) {
                ChannelPipeline p = ch.pipeline();
                p.remove(SaslChallengeDecoder.class);
                p.remove(NettyHBaseSaslRpcClientHandler.class);
                // check whether negotiating the connection header with the server is necessary
                if (saslHandler.isNeedProcessConnectionHeader()) {
                    Promise<Boolean> connectionHeaderPromise = ch.eventLoop().newPromise();
                    // create the handler to handle the connection header
                    ChannelHandler chHandler = new NettyHBaseRpcConnectionHeaderHandler(
                            connectionHeaderPromise, conf, connectionHeaderWithLength);
                    // add a ReadTimeoutHandler in case the server never responds with the connection
                    // header due to mismatched configuration between client and server
                    p.addFirst(new ReadTimeoutHandler(RpcClient.DEFAULT_SOCKET_TIMEOUT_READ,
                            TimeUnit.MILLISECONDS));
                    p.addLast(chHandler);
                    connectionHeaderPromise.addListener(new FutureListener<Boolean>() {
                        @Override
                        public void operationComplete(Future<Boolean> future) throws Exception {
                            if (future.isSuccess()) {
                                ChannelPipeline p = ch.pipeline();
                                p.remove(ReadTimeoutHandler.class);
                                p.remove(NettyHBaseRpcConnectionHeaderHandler.class);
                                // no need to send the connection header here;
                                // NettyHBaseRpcConnectionHeaderHandler already sent it
                                established(ch);
                            } else {
                                final Throwable error = future.cause();
                                scheduleRelogin(error);
                                failInit(ch, toIOE(error));
                            }
                        }
                    });
                } else {
                    // send the connection header to the server
                    ch.write(connectionHeaderWithLength.retainedDuplicate());
                    established(ch);
                }
            } else {
                final Throwable error = future.cause();
                scheduleRelogin(error);
                failInit(ch, toIOE(error));
            }
        }
    });
}
From source file:org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.java
License:Apache License
private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src,
        boolean overwrite, boolean createParent, short replication, long blockSize,
        EventLoop eventLoop) throws IOException {
    Configuration conf = dfs.getConf();
    FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
    DFSClient client = dfs.getClient();
    String clientName = client.getClientName();
    ClientProtocol namenode = client.getNamenode();
    HdfsFileStatus stat;
    try {
        stat = FILE_CREATER.create(namenode, src,
                FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), clientName,
                new EnumSetWritable<CreateFlag>(
                        overwrite ? EnumSet.of(CREATE, OVERWRITE) : EnumSet.of(CREATE)),
                createParent, replication, blockSize);
    } catch (Exception e) {
        if (e instanceof RemoteException) {
            throw (RemoteException) e;
        } else {
            throw new NameNodeException(e);
        }
    }
    beginFileLease(client, src, stat.getFileId());
    boolean succ = false;
    LocatedBlock locatedBlock = null;
    List<Future<Channel>> futureList = null;
    try {
        DataChecksum summer = createChecksum(client);
        locatedBlock = namenode.addBlock(src, client.getClientName(), null, null, stat.getFileId(),
                null);
        List<Channel> datanodeList = new ArrayList<>();
        futureList = connectToDataNodes(conf, clientName, locatedBlock, 0L, 0L,
                PIPELINE_SETUP_CREATE, summer, eventLoop);
        for (Future<Channel> future : futureList) {
            // fail the creation if there are connection failures since we are fail-fast. The upper
            // layer should retry itself if needed.
            datanodeList.add(future.syncUninterruptibly().getNow());
        }
        succ = true;
        return new FanOutOneBlockAsyncDFSOutput(conf, fsUtils, dfs, client, namenode, clientName,
                src, stat.getFileId(), locatedBlock, eventLoop, datanodeList, summer, ALLOC);
    } finally {
        if (!succ) {
            if (futureList != null) {
                for (Future<Channel> f : futureList) {
                    f.addListener(new FutureListener<Channel>() {
                        @Override
                        public void operationComplete(Future<Channel> future) throws Exception {
                            if (future.isSuccess()) {
                                future.getNow().close();
                            }
                        }
                    });
                }
            }
            endFileLease(client, src, stat.getFileId());
            fsUtils.recoverFileLease(dfs, new Path(src), conf, new CancelOnClose(client));
        }
    }
}