List of usage examples for io.netty.channel.ChannelFutureListener
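Every example on this page follows the same basic shape: obtain a ChannelFuture from an asynchronous call such as connect() or writeAndFlush(), register a ChannelFutureListener on it, and inspect the completed future inside operationComplete(). A minimal sketch of that shape, using only the standard Netty 4 API (the class name and the write payload are illustrative):

import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;

public final class ChannelFutureListenerSketch {

    // Attach a listener to an asynchronous write and react once it completes.
    static void writeWithListener(Channel channel, Object message) {
        ChannelFuture future = channel.writeAndFlush(message);
        future.addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture f) throws Exception {
                if (!f.isSuccess()) {
                    // f.cause() carries the failure; this sketch simply closes the channel
                    f.channel().close();
                }
            }
        });

        // Netty also ships predefined listeners for the common reactions, for example:
        // future.addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
        // future.addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE);
    }
}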
From source file: org.apache.activemq.transport.netty.NettyTcpTransport.java
License: Apache License

@Override
public void connect() throws IOException {
    if (listener == null) {
        throw new IllegalStateException("A transport listener must be set before connection attempts.");
    }

    final SslHandler sslHandler;
    if (isSSL()) {
        try {
            sslHandler = NettyTransportSupport.createSslHandler(getRemoteLocation(), getSslOptions());
        } catch (Exception ex) {
            // TODO: can we stop it throwing Exception?
            throw IOExceptionSupport.create(ex);
        }
    } else {
        sslHandler = null;
    }

    group = new NioEventLoopGroup(1);

    bootstrap = new Bootstrap();
    bootstrap.group(group);
    bootstrap.channel(NioSocketChannel.class);
    bootstrap.handler(new ChannelInitializer<Channel>() {
        @Override
        public void initChannel(Channel connectedChannel) throws Exception {
            configureChannel(connectedChannel, sslHandler);
        }
    });

    configureNetty(bootstrap, getTransportOptions());

    ChannelFuture future = bootstrap.connect(getRemoteHost(), getRemotePort());
    future.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                handleException(future.channel(), IOExceptionSupport.create(future.cause()));
            }
        }
    });

    try {
        connectLatch.await();
    } catch (InterruptedException ex) {
        LOG.debug("Transport connection was interrupted.");
        Thread.interrupted();
        failureCause = IOExceptionSupport.create(ex);
    }

    if (failureCause != null) {
        // Close out any Netty resources now as they are no longer needed.
        if (channel != null) {
            channel.close().syncUninterruptibly();
            channel = null;
        }
        if (group != null) {
            Future<?> fut = group.shutdownGracefully(0, SHUTDOWN_TIMEOUT, TimeUnit.MILLISECONDS);
            if (!fut.awaitUninterruptibly(2 * SHUTDOWN_TIMEOUT)) {
                LOG.trace("Channel group shutdown failed to complete in allotted time");
            }
            group = null;
        }

        throw failureCause;
    } else {
        // Connected, allow any held async error to fire now and close the transport.
        channel.eventLoop().execute(new Runnable() {
            @Override
            public void run() {
                if (failureCause != null) {
                    channel.pipeline().fireExceptionCaught(failureCause);
                }
            }
        });
    }
}
From source file: org.apache.bookkeeper.proto.PacketProcessorBaseV3.java
License: Apache License

protected void sendResponse(StatusCode code, Object response, OpStatsLogger statsLogger) {
    final long writeNanos = MathUtils.nowInNano();

    final long timeOut = requestProcessor.getWaitTimeoutOnBackpressureMillis();
    if (timeOut >= 0 && !channel.isWritable()) {
        if (!requestProcessor.isBlacklisted(channel)) {
            synchronized (channel) {
                if (!channel.isWritable() && !requestProcessor.isBlacklisted(channel)) {
                    final long waitUntilNanos = writeNanos + TimeUnit.MILLISECONDS.toNanos(timeOut);
                    while (!channel.isWritable() && MathUtils.nowInNano() < waitUntilNanos) {
                        try {
                            TimeUnit.MILLISECONDS.sleep(1);
                        } catch (InterruptedException e) {
                            break;
                        }
                    }
                    if (!channel.isWritable()) {
                        requestProcessor.blacklistChannel(channel);
                        requestProcessor.handleNonWritableChannel(channel);
                    }
                }
            }
        }

        if (!channel.isWritable()) {
            LOGGER.warn("cannot write response to non-writable channel {} for request {}", channel,
                    StringUtils.requestToString(request));
            requestProcessor.getRequestStats().getChannelWriteStats()
                    .registerFailedEvent(MathUtils.elapsedNanos(writeNanos), TimeUnit.NANOSECONDS);
            statsLogger.registerFailedEvent(MathUtils.elapsedNanos(enqueueNanos), TimeUnit.NANOSECONDS);
            return;
        } else {
            requestProcessor.invalidateBlacklist(channel);
        }
    }

    channel.writeAndFlush(response).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            long writeElapsedNanos = MathUtils.elapsedNanos(writeNanos);
            if (!future.isSuccess()) {
                requestProcessor.getRequestStats().getChannelWriteStats()
                        .registerFailedEvent(writeElapsedNanos, TimeUnit.NANOSECONDS);
            } else {
                requestProcessor.getRequestStats().getChannelWriteStats()
                        .registerSuccessfulEvent(writeElapsedNanos, TimeUnit.NANOSECONDS);
            }

            if (StatusCode.EOK == code) {
                statsLogger.registerSuccessfulEvent(MathUtils.elapsedNanos(enqueueNanos), TimeUnit.NANOSECONDS);
            } else {
                statsLogger.registerFailedEvent(MathUtils.elapsedNanos(enqueueNanos), TimeUnit.NANOSECONDS);
            }
        }
    });
}
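The BookKeeper listener above times the write by capturing a timestamp before writeAndFlush() and computing the elapsed time when the future completes. A stripped-down sketch of the same timing pattern, using plain System.nanoTime() and a placeholder metrics hook (recordWriteLatency is not a real API):

import java.util.concurrent.TimeUnit;

import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;

public final class TimedWriteSketch {

    // Capture a start timestamp, then compute the elapsed time inside the listener.
    static void timedWrite(Channel channel, Object response) {
        final long startNanos = System.nanoTime();
        channel.writeAndFlush(response).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                long elapsed = System.nanoTime() - startNanos;
                recordWriteLatency(elapsed, TimeUnit.NANOSECONDS, future.isSuccess());
            }
        });
    }

    // Placeholder metrics hook; plug in whatever stats library is actually in use.
    static void recordWriteLatency(long elapsed, TimeUnit unit, boolean success) {
    }
}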
From source file: org.apache.camel.component.netty4.ClientModeTCPNettyServerBootstrapFactory.java
License: Apache License

protected Channel openChannel(ChannelFuture channelFuture) throws Exception {
    // blocking for channel to be done
    if (LOG.isTraceEnabled()) {
        LOG.trace("Waiting for operation to complete {} for {} millis", channelFuture,
                configuration.getConnectTimeout());
    }
    // here we need to wait it in other thread
    final CountDownLatch channelLatch = new CountDownLatch(1);
    channelFuture.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture cf) throws Exception {
            channelLatch.countDown();
        }
    });

    try {
        channelLatch.await(configuration.getConnectTimeout(), TimeUnit.MILLISECONDS);
    } catch (InterruptedException ex) {
        throw new CamelException("Interrupted while waiting for connection to " + configuration.getAddress());
    }

    if (!channelFuture.isDone() || !channelFuture.isSuccess()) {
        ConnectException cause = new ConnectException("Cannot connect to " + configuration.getAddress());
        if (channelFuture.cause() != null) {
            cause.initCause(channelFuture.cause());
        }
        throw cause;
    }
    Channel answer = channelFuture.channel();

    if (LOG.isDebugEnabled()) {
        LOG.debug("Creating connector to address: {}", configuration.getAddress());
    }
    return answer;
}
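The listener plus CountDownLatch above is one way to bound the wait for a connect; when no framework constraint requires that indirection, ChannelFuture.await(timeout, unit) expresses the same bounded wait directly. An illustrative sketch, not taken from the Camel source:

import java.net.ConnectException;
import java.util.concurrent.TimeUnit;

import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;

public final class AwaitConnectSketch {

    // Wait for the connect to complete, but no longer than the given timeout.
    static Channel openChannel(ChannelFuture channelFuture, long connectTimeoutMillis) throws Exception {
        // await() returns false if the timeout elapsed before the future completed
        boolean completed = channelFuture.await(connectTimeoutMillis, TimeUnit.MILLISECONDS);
        if (!completed || !channelFuture.isSuccess()) {
            ConnectException cause = new ConnectException("Cannot connect within " + connectTimeoutMillis + " ms");
            if (channelFuture.cause() != null) {
                cause.initCause(channelFuture.cause());
            }
            throw cause;
        }
        return channelFuture.channel();
    }
}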
From source file: org.apache.camel.component.netty4.NettyProducer.java
License: Apache License

public boolean process(final Exchange exchange, AsyncCallback callback) {
    if (!isRunAllowed()) {
        if (exchange.getException() == null) {
            exchange.setException(new RejectedExecutionException());
        }
        callback.done(true);
        return true;
    }

    Object body;
    try {
        body = getRequestBody(exchange);
        if (body == null) {
            noReplyLogger.log("No payload to send for exchange: " + exchange);
            callback.done(true);
            return true;
        }
    } catch (Exception e) {
        exchange.setException(e);
        callback.done(true);
        return true;
    }

    // set the exchange encoding property
    if (getConfiguration().getCharsetName() != null) {
        exchange.setProperty(Exchange.CHARSET_NAME, IOHelper.normalizeCharset(getConfiguration().getCharsetName()));
    }

    if (LOG.isTraceEnabled()) {
        LOG.trace("Pool[active={}, idle={}]", pool.getNumActive(), pool.getNumIdle());
    }

    // get a channel from the pool
    Channel existing;
    try {
        existing = pool.borrowObject();
        if (existing != null) {
            LOG.trace("Got channel from pool {}", existing);
        }
    } catch (Exception e) {
        exchange.setException(e);
        callback.done(true);
        return true;
    }

    // we must have a channel
    if (existing == null) {
        exchange.setException(new CamelExchangeException("Cannot get channel from pool", exchange));
        callback.done(true);
        return true;
    }

    // need to declare as final
    final Channel channel = existing;
    final AsyncCallback producerCallback = new NettyProducerCallback(channel, callback);

    // setup state as attachment on the channel, so we can access the state later when needed
    putState(channel, new NettyCamelState(producerCallback, exchange));

    // here we need to setup the remote address information here
    InetSocketAddress remoteAddress = null;
    if (!isTcp()) {
        remoteAddress = new InetSocketAddress(configuration.getHost(), configuration.getPort());
    }

    // write body
    NettyHelper.writeBodyAsync(LOG, channel, remoteAddress, body, exchange, new ChannelFutureListener() {
        public void operationComplete(ChannelFuture channelFuture) throws Exception {
            LOG.trace("Operation complete {}", channelFuture);
            if (!channelFuture.isSuccess()) {
                // no success the set the caused exception and signal callback and break
                exchange.setException(channelFuture.cause());
                producerCallback.done(false);
                return;
            }

            // if we do not expect any reply then signal callback to continue routing
            if (!configuration.isSync()) {
                try {
                    // should channel be closed after complete?
                    Boolean close;
                    if (ExchangeHelper.isOutCapable(exchange)) {
                        close = exchange.getOut().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
                    } else {
                        close = exchange.getIn().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
                    }

                    // should we disconnect, the header can override the configuration
                    boolean disconnect = getConfiguration().isDisconnect();
                    if (close != null) {
                        disconnect = close;
                    }
                    if (disconnect) {
                        if (LOG.isTraceEnabled()) {
                            LOG.trace("Closing channel when complete at address: {}",
                                    getEndpoint().getConfiguration().getAddress());
                        }
                        NettyHelper.close(channel);
                    }
                } finally {
                    // signal callback to continue routing
                    producerCallback.done(false);
                }
            }
        }
    });

    // continue routing asynchronously
    return false;
}
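The conditional close at the end of the listener above is driven by Camel headers and configuration. When the channel should always be closed once the write completes, Netty's predefined listener covers the same need in one line (a sketch, not Camel's code):

import io.netty.channel.Channel;
import io.netty.channel.ChannelFutureListener;

public final class WriteThenCloseSketch {

    // The predefined CLOSE listener closes the channel as soon as the write future completes.
    static void writeThenClose(Channel channel, Object body) {
        channel.writeAndFlush(body).addListener(ChannelFutureListener.CLOSE);
    }
}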
From source file: org.apache.camel.component.netty4.NettyProducer.java
License: Apache License

protected Channel openChannel(ChannelFuture channelFuture) throws Exception {
    // blocking for channel to be done
    if (LOG.isTraceEnabled()) {
        LOG.trace("Waiting for operation to complete {} for {} millis", channelFuture,
                configuration.getConnectTimeout());
    }
    // here we need to wait it in other thread
    final CountDownLatch channelLatch = new CountDownLatch(1);
    channelFuture.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture cf) throws Exception {
            channelLatch.countDown();
        }
    });

    try {
        channelLatch.await(configuration.getConnectTimeout(), TimeUnit.MILLISECONDS);
    } catch (InterruptedException ex) {
        throw new CamelException("Interrupted while waiting for connection to " + configuration.getAddress());
    }

    if (!channelFuture.isDone() || !channelFuture.isSuccess()) {
        ConnectException cause = new ConnectException("Cannot connect to " + configuration.getAddress());
        if (channelFuture.cause() != null) {
            cause.initCause(channelFuture.cause());
        }
        throw cause;
    }
    Channel answer = channelFuture.channel();
    // to keep track of all channels in use
    allChannels.add(answer);

    if (LOG.isDebugEnabled()) {
        LOG.debug("Creating connector to address: {}", configuration.getAddress());
    }
    return answer;
}
From source file: org.apache.flink.runtime.io.network.netty.PartitionRequestClient.java
License: Apache License

/**
 * Requests a remote intermediate result partition queue.
 * <p>
 * The request goes to the remote producer, for which this partition
 * request client instance has been created.
 */
public ChannelFuture requestSubpartition(final ResultPartitionID partitionId, final int subpartitionIndex,
        final RemoteInputChannel inputChannel, int delayMs) throws IOException {
    checkNotClosed();

    LOG.debug("Requesting subpartition {} of partition {} with {} ms delay.", subpartitionIndex, partitionId,
            delayMs);

    partitionRequestHandler.addInputChannel(inputChannel);

    final PartitionRequest request = new PartitionRequest(partitionId, subpartitionIndex,
            inputChannel.getInputChannelId());

    final ChannelFutureListener listener = new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                partitionRequestHandler.removeInputChannel(inputChannel);
                inputChannel.onError(new LocalTransportException("Sending the partition request failed.",
                        future.channel().localAddress(), future.cause()));
            }
        }
    };

    if (delayMs == 0) {
        ChannelFuture f = tcpChannel.writeAndFlush(request);
        f.addListener(listener);
        return f;
    } else {
        final ChannelFuture[] f = new ChannelFuture[1];
        tcpChannel.eventLoop().schedule(new Runnable() {
            @Override
            public void run() {
                f[0] = tcpChannel.writeAndFlush(request);
                f[0].addListener(listener);
            }
        }, delayMs, TimeUnit.MILLISECONDS);

        return f[0];
    }
}
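In the delayed branch above, the scheduled task only runs after the delay, so the f[0] slot is still null when the method returns. One way to hand callers a future that is valid in both branches is to allocate a ChannelPromise up front and pass it to writeAndFlush() inside the scheduled task; an illustrative variation, not the Flink code:

import java.util.concurrent.TimeUnit;

import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelPromise;

public final class DelayedWriteSketch {

    // Allocate the promise up front so the returned future is usable even when
    // the actual write is scheduled with a delay.
    static ChannelFuture writeWithDelay(final Channel channel, final Object request,
            ChannelFutureListener listener, int delayMs) {
        final ChannelPromise promise = channel.newPromise();
        promise.addListener(listener);
        if (delayMs == 0) {
            channel.writeAndFlush(request, promise);
        } else {
            channel.eventLoop().schedule(new Runnable() {
                @Override
                public void run() {
                    channel.writeAndFlush(request, promise);
                }
            }, delayMs, TimeUnit.MILLISECONDS);
        }
        return promise;
    }
}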
From source file: org.apache.flink.runtime.io.network.netty.PartitionRequestClient.java
License: Apache License

/**
 * Sends a task event backwards to an intermediate result partition producer.
 * <p>
 * Backwards task events flow between readers and writers and therefore
 * will only work when both are running at the same time, which is only
 * guaranteed to be the case when both the respective producer and
 * consumer task run pipelined.
 */
public void sendTaskEvent(ResultPartitionID partitionId, TaskEvent event, final RemoteInputChannel inputChannel)
        throws IOException {
    checkNotClosed();

    tcpChannel.writeAndFlush(new TaskEventRequest(event, partitionId, inputChannel.getInputChannelId()))
            .addListener(new ChannelFutureListener() {
                @Override
                public void operationComplete(ChannelFuture future) throws Exception {
                    if (!future.isSuccess()) {
                        inputChannel.onError(new LocalTransportException("Sending the task event failed.",
                                future.channel().localAddress(), future.cause()));
                    }
                }
            });
}
From source file: org.apache.giraph.comm.netty.NettyClient.java
License: Apache License

/**
 * Stop the client.
 */
public void stop() {
    if (LOG.isInfoEnabled()) {
        LOG.info("stop: Halting netty client");
    }

    // Close connections asynchronously, in a Netty-approved
    // way, without cleaning up thread pools until all channels
    // in addressChannelMap are closed (success or failure)
    int channelCount = 0;
    for (ChannelRotater channelRotater : addressChannelMap.values()) {
        channelCount += channelRotater.size();
    }
    final int done = channelCount;
    final AtomicInteger count = new AtomicInteger(0);
    for (ChannelRotater channelRotater : addressChannelMap.values()) {
        channelRotater.closeChannels(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture cf) {
                context.progress();
                if (count.incrementAndGet() == done) {
                    if (LOG.isInfoEnabled()) {
                        LOG.info("stop: reached wait threshold, " + done + " connections closed, releasing "
                                + "resources now.");
                    }
                    workerGroup.shutdownGracefully();
                    if (executionGroup != null) {
                        executionGroup.shutdownGracefully();
                    }
                }
            }
        });
    }

    ProgressableUtils.awaitTerminationFuture(workerGroup, context);
    if (executionGroup != null) {
        ProgressableUtils.awaitTerminationFuture(executionGroup, context);
    }

    if (LOG.isInfoEnabled()) {
        LOG.info("stop: Netty client halted");
    }
}
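The AtomicInteger counting above waits for every channel close to complete before shutting down the event loop groups. Netty's ChannelGroup offers the same aggregation out of the box: close() on the group returns a ChannelGroupFuture that completes once all member channels have closed. A sketch under that assumption (the class and method names are illustrative):

import io.netty.channel.Channel;
import io.netty.channel.group.ChannelGroup;
import io.netty.channel.group.ChannelGroupFuture;
import io.netty.channel.group.ChannelGroupFutureListener;
import io.netty.channel.group.DefaultChannelGroup;
import io.netty.util.concurrent.GlobalEventExecutor;

public final class ChannelGroupCloseSketch {

    // Channels are added to the group as they are created elsewhere in the client.
    private final ChannelGroup allChannels = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE);

    void register(Channel channel) {
        allChannels.add(channel);
    }

    // Close every registered channel and run the cleanup once all closes have completed.
    void closeAll(final Runnable cleanup) {
        allChannels.close().addListener(new ChannelGroupFutureListener() {
            @Override
            public void operationComplete(ChannelGroupFuture future) throws Exception {
                cleanup.run();
            }
        });
    }
}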
From source file: org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.java
License: Apache License

private static List<Future<Channel>> connectToDataNodes(final Configuration conf, final DFSClient client,
        String clientName, final LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS,
        BlockConstructionStage stage, DataChecksum summer, EventLoop eventLoop) {
    Enum<?>[] storageTypes = locatedBlock.getStorageTypes();
    DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
    boolean connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
            DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
    final int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
    ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
    blockCopy.setNumBytes(locatedBlock.getBlockSize());
    ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
            .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PB_HELPER.convert(blockCopy))
                    .setToken(PB_HELPER.convert(locatedBlock.getBlockToken())))
            .setClientName(clientName).build();
    ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
    final OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder().setHeader(header)
            .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())).setPipelineSize(1)
            .setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()).setMaxBytesRcvd(maxBytesRcvd)
            .setLatestGenerationStamp(latestGS).setRequestedChecksum(checksumProto)
            .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
    List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
    for (int i = 0; i < datanodeInfos.length; i++) {
        final DatanodeInfo dnInfo = datanodeInfos[i];
        // Use Enum here because StoregType is moved to another package in hadoop 2.6. Use StorageType
        // will cause compilation error for hadoop 2.5 or before.
        final Enum<?> storageType = storageTypes[i];
        final Promise<Channel> promise = eventLoop.newPromise();
        futureList.add(promise);
        String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
        new Bootstrap().group(eventLoop).channel(NioSocketChannel.class)
                .option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {

                    @Override
                    protected void initChannel(Channel ch) throws Exception {
                        // we need to get the remote address of the channel so we can only move on after
                        // channel connected. Leave an empty implementation here because netty does not allow
                        // a null handler.
                    }
                }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {

                    @Override
                    public void operationComplete(ChannelFuture future) throws Exception {
                        if (future.isSuccess()) {
                            initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder,
                                    timeoutMs, client, locatedBlock.getBlockToken(), promise);
                        } else {
                            promise.tryFailure(future.cause());
                        }
                    }
                });
    }
    return futureList;
}
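The listener above does not expose the raw ChannelFuture to callers; instead it completes a Promise<Channel> so the rest of the pipeline setup can compose on the promise. A distilled sketch of that bridge, with illustrative names and without the HBase-specific initialization:

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.EventLoop;
import io.netty.util.concurrent.Promise;

public final class ConnectAsPromiseSketch {

    // Bridge a connect ChannelFuture into a Promise<Channel> owned by the event loop.
    static Promise<Channel> connectAsPromise(Bootstrap bootstrap, EventLoop eventLoop, String host, int port) {
        final Promise<Channel> promise = eventLoop.newPromise();
        bootstrap.connect(host, port).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    promise.trySuccess(future.channel());
                } else {
                    promise.tryFailure(future.cause());
                }
            }
        });
        return promise;
    }
}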
From source file: org.apache.hadoop.hbase.ipc.NettyRpcConnection.java
License: Apache License

private void connect() {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Connecting to " + remoteId.address);
    }

    this.channel = new Bootstrap().group(rpcClient.group).channel(rpcClient.channelClass)
            .option(ChannelOption.TCP_NODELAY, rpcClient.isTcpNoDelay())
            .option(ChannelOption.SO_KEEPALIVE, rpcClient.tcpKeepAlive)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, rpcClient.connectTO)
            .handler(new BufferCallBeforeInitHandler()).localAddress(rpcClient.localAddr)
            .remoteAddress(remoteId.address).connect().addListener(new ChannelFutureListener() {

                @Override
                public void operationComplete(ChannelFuture future) throws Exception {
                    Channel ch = future.channel();
                    if (!future.isSuccess()) {
                        failInit(ch, toIOE(future.cause()));
                        rpcClient.failedServers.addToFailedServers(remoteId.address);
                        return;
                    }
                    ch.writeAndFlush(connectionHeaderPreamble.retainedDuplicate());
                    if (useSasl) {
                        saslNegotiate(ch);
                    } else {
                        // send the connection header to server
                        ch.write(connectionHeaderWithLength.retainedDuplicate());
                        established(ch);
                    }
                }
            }).channel();
}