List of usage examples for io.netty.channel.ChannelFutureListener
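Every example below follows the same core pattern: take the ChannelFuture returned by an asynchronous operation such as writeAndFlush(), connect(), or bind(), attach a ChannelFutureListener, and branch on future.isSuccess() inside operationComplete(), consulting future.cause() on the failure path. A minimal sketch of that pattern, wrapped in a hypothetical helper class (the Channel and message are placeholders, not drawn from any of the examples below):

import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;

final class ListenerSketch {
    // Write a message asynchronously and react to the outcome when it completes.
    static ChannelFuture write(Channel channel, Object message) {
        return channel.writeAndFlush(message).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    // The write was handed off to the transport successfully.
                } else {
                    // The write failed: inspect future.cause(), then clean up.
                    future.channel().close();
                }
            }
        });
    }
}

Netty also ships predefined listeners for the most common reactions, notably ChannelFutureListener.CLOSE and ChannelFutureListener.CLOSE_ON_FAILURE, and since the interface has a single abstract method it can be written as a lambda: addListener((ChannelFutureListener) f -> ...).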
From source file:org.apache.spark.network.netty.NettyTransportClient.java
License:Apache License
/**
 * Requests a single chunk from the remote side, from the pre-negotiated streamId.
 *
 * Chunk indices go from 0 onwards. It is valid to request the same chunk multiple times, though
 * some streams may not support this.
 *
 * Multiple fetchChunk requests may be outstanding simultaneously, and the chunks are guaranteed
 * to be returned in the same order that they were requested, assuming only a single
 * TransportClient is used to fetch the chunks.
 *
 * @param streamId Identifier that refers to a stream in the remote StreamManager. This should
 *                 be agreed upon by client and server beforehand.
 * @param chunkIndex 0-based index of the chunk to fetch
 * @param callback Callback invoked upon successful receipt of chunk, or upon any failure.
 */
@Override
public void fetchChunk(long streamId, final int chunkIndex, final ChunkReceivedCallback callback) {
    final String serverAddr = NettyUtils.getRemoteAddress(channel);
    final long startTime = System.currentTimeMillis();
    logger.debug("Sending fetch chunk request {} to {}", chunkIndex, serverAddr);

    final StreamChunkId streamChunkId = new StreamChunkId(streamId, chunkIndex);
    handler.addFetchRequest(streamChunkId, callback);

    channel.writeAndFlush(new ChunkFetchRequest(streamChunkId)).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
                long timeTaken = System.currentTimeMillis() - startTime;
                logger.trace("Sending request {} to {} took {} ms", streamChunkId, serverAddr, timeTaken);
            } else {
                String errorMsg = String.format("Failed to send request %s to %s: %s",
                        streamChunkId, serverAddr, future.cause());
                logger.error(errorMsg, future.cause());
                handler.removeFetchRequest(streamChunkId);
                channel.close();
                try {
                    callback.onFailure(chunkIndex, new IOException(errorMsg, future.cause()));
                } catch (Exception e) {
                    logger.error("Uncaught exception in RPC response callback handler!", e);
                }
            }
        }
    });
}
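Two details of this example are worth noting. The callback is registered with the handler before writeAndFlush() is called, so a response arriving immediately cannot miss its registration; and on a failed write the pending fetch is unregistered and the channel closed before the user callback is invoked inside its own try/catch, so a throwing callback cannot break the listener itself.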
From source file:org.apache.spark.network.netty.NettyTransportClient.java
License:Apache License
/**
 * Request to stream the data with the given stream ID from the remote end.
 *
 * @param streamId The stream to fetch.
 * @param callback Object to call with the stream data.
 */
@Override
public void stream(final String streamId, final StreamCallback callback) {
    final String serverAddr = NettyUtils.getRemoteAddress(channel);
    final long startTime = System.currentTimeMillis();
    logger.debug("Sending stream request for {} to {}", streamId, serverAddr);

    // Need to synchronize here so that the callback is added to the queue and the RPC is
    // written to the socket atomically, so that callbacks are called in the right order
    // when responses arrive.
    synchronized (this) {
        handler.addStreamCallback(callback);
        channel.writeAndFlush(new StreamRequest(streamId)).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    long timeTaken = System.currentTimeMillis() - startTime;
                    logger.trace("Sending request for {} to {} took {} ms", streamId, serverAddr, timeTaken);
                } else {
                    String errorMsg = String.format("Failed to send request for %s to %s: %s",
                            streamId, serverAddr, future.cause());
                    logger.error(errorMsg, future.cause());
                    channel.close();
                    try {
                        callback.onFailure(streamId, new IOException(errorMsg, future.cause()));
                    } catch (Exception e) {
                        logger.error("Uncaught exception in RPC response callback handler!", e);
                    }
                }
            }
        });
    }
}
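Unlike the chunk and RPC requests, which carry an id that pairs responses with callbacks, stream callbacks here are evidently matched to responses by arrival order; that is why the comment insists that enqueuing the callback and writing the request must happen atomically inside the synchronized block.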
From source file:org.apache.spark.network.netty.NettyTransportClient.java
License:Apache License
/**
 * Sends an opaque message to the RpcHandler on the server-side. The callback will be invoked
 * with the server's response or upon any failure.
 *
 * @param message The message to send.
 * @param callback Callback to handle the RPC's reply.
 * @return The RPC's id.
 */
@Override
public long sendRpc(ByteBuffer message, final RpcResponseCallback callback) {
    final String serverAddr = NettyUtils.getRemoteAddress(channel);
    final long startTime = System.currentTimeMillis();
    logger.trace("Sending RPC to {}", serverAddr);

    final long requestId = Math.abs(UUID.randomUUID().getLeastSignificantBits());
    handler.addRpcRequest(requestId, callback);

    channel.writeAndFlush(new RpcRequest(requestId, new NioManagedBuffer(message)))
            .addListener(new ChannelFutureListener() {
                @Override
                public void operationComplete(ChannelFuture future) throws Exception {
                    if (future.isSuccess()) {
                        long timeTaken = System.currentTimeMillis() - startTime;
                        logger.trace("Sending request {} to {} took {} ms", requestId, serverAddr, timeTaken);
                    } else {
                        String errorMsg = String.format("Failed to send RPC %s to %s: %s",
                                requestId, serverAddr, future.cause());
                        logger.error(errorMsg, future.cause());
                        handler.removeRpcRequest(requestId);
                        channel.close();
                        try {
                            callback.onFailure(new IOException(errorMsg, future.cause()));
                        } catch (Exception e) {
                            logger.error("Uncaught exception in RPC response callback handler!", e);
                        }
                    }
                }
            });

    return requestId;
}
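One subtlety in this example: Math.abs(UUID.randomUUID().getLeastSignificantBits()) is the usual quick way to get a random non-negative request id, but Math.abs(Long.MIN_VALUE) is still negative, so in the astronomically unlikely case that the UUID's low bits are exactly Long.MIN_VALUE the id would be negative. Masking with & Long.MAX_VALUE avoids the edge case entirely.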
From source file:org.apache.spark.network.netty.NettyTransportRequestHandler.java
License:Apache License
/**
 * Responds to a single message with some Encodable object. If a failure occurs while sending,
 * it will be logged and the channel closed.
 */
public void respond(final Encodable result) {
    final String remoteAddress = channel.remoteAddress().toString();
    channel.writeAndFlush(result).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
                logger.trace(String.format("Sent result %s to client %s", result, remoteAddress));
            } else {
                logger.error(String.format("Error sending result %s to %s; closing connection",
                        result, remoteAddress), future.cause());
                channel.close();
            }
        }
    });
}
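Because the failure branch here does nothing but log and close the channel, Netty's predefined listeners can express most of it. A hypothetical compact variant (not the actual Spark code, and it drops the trace/error logging):

import io.netty.channel.Channel;
import io.netty.channel.ChannelFutureListener;

final class CompactRespond {
    // Hypothetical variant: let Netty's predefined CLOSE_ON_FAILURE listener close
    // the channel on a failed write instead of a hand-written ChannelFutureListener.
    // Unlike the original, this performs no logging on either path.
    static void respond(Channel channel, Object result) {
        channel.writeAndFlush(result).addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
    }
}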
From source file:org.apache.spark.sql.hive.thriftserver.rsc.Rpc.java
License:Apache License
/**
 * Creates an RPC client for a server running on the given remote host and port.
 *
 * @param config RPC configuration data.
 * @param eloop Event loop for managing the connection.
 * @param host Host name or IP address to connect to.
 * @param port Port where server is listening.
 * @param clientId The client ID that identifies the connection.
 * @param secret Secret for authenticating the client with the server.
 * @param dispatcher Dispatcher used to handle RPC calls.
 * @return A future that can be used to monitor the creation of the RPC object.
 */
public static Promise<Rpc> createClient(final RSCConf config, final EventLoopGroup eloop,
        String host, int port, final String clientId, final String secret,
        final RpcDispatcher dispatcher) throws Exception {
    int connectTimeoutMs = (int) config.getTimeAsMs(RSCConf.Entry.RPC_CLIENT_CONNECT_TIMEOUT);

    final ChannelFuture cf = new Bootstrap()
            .group(eloop)
            .handler(new ChannelInboundHandlerAdapter() { })
            .channel(NioSocketChannel.class)
            .option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMs)
            .connect(host, port);

    final Promise<Rpc> promise = eloop.next().newPromise();
    final AtomicReference<Rpc> rpc = new AtomicReference<Rpc>();

    // Set up a timeout to undo everything.
    final Runnable timeoutTask = new Runnable() {
        @Override
        public void run() {
            promise.setFailure(new TimeoutException("Timed out waiting for RPC server connection."));
        }
    };
    final ScheduledFuture<?> timeoutFuture = eloop.schedule(timeoutTask,
            config.getTimeAsMs(RSCConf.Entry.RPC_CLIENT_HANDSHAKE_TIMEOUT), TimeUnit.MILLISECONDS);

    // The channel listener instantiates the Rpc instance when the connection is established,
    // and initiates the SASL handshake.
    cf.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture cf) throws Exception {
            if (cf.isSuccess()) {
                SaslClientHandler saslHandler = new SaslClientHandler(config, clientId, promise,
                        timeoutFuture, secret, dispatcher);
                Rpc rpc = createRpc(config, saslHandler, (SocketChannel) cf.channel(), eloop);
                saslHandler.rpc = rpc;
                saslHandler.sendHello(cf.channel());
            } else {
                promise.setFailure(cf.cause());
            }
        }
    });

    // Handle cancellation of the promise.
    promise.addListener(new GenericFutureListener<Promise<Rpc>>() {
        @Override
        public void operationComplete(Promise<Rpc> p) {
            if (p.isCancelled()) {
                cf.cancel(true);
            }
        }
    });

    return promise;
}
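This example attaches listeners to two different futures: a ChannelFutureListener on the connect future either kicks off the SASL handshake or fails the promise, while a GenericFutureListener on the promise propagates a caller's cancellation back to the still-pending connect. Together with the scheduled timeout task, every outcome (connect failure, handshake timeout, cancellation) resolves the returned promise exactly once. Incidentally, the AtomicReference<Rpc> local is never read in this snippet; the listener uses its own local rpc variable instead.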
From source file:org.apache.spark.sql.hive.thriftserver.rsc.Rpc.java
License:Apache License
/**
 * Sends an RPC call to the remote endpoint and returns a future that can be used to monitor the
 * operation.
 *
 * @param msg RPC call to send.
 * @param retType Type of expected reply.
 * @return A future used to monitor the operation.
 */
public <T> Future<T> call(Object msg, Class<T> retType) {
    Utils.checkArgument(msg != null);
    Utils.checkState(channel.isOpen(), "RPC channel is closed.");
    try {
        final long id = rpcId.getAndIncrement();
        final Promise<T> promise = egroup.next().newPromise();
        ChannelFutureListener listener = new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture cf) {
                if (!cf.isSuccess() && !promise.isDone()) {
                    LOG.warn("Failed to send RPC, closing connection.", cf.cause());
                    promise.setFailure(cf.cause());
                    dispatcher.discardRpc(id);
                    close();
                }
            }
        };

        dispatcher.registerRpc(id, promise, msg.getClass().getName());
        synchronized (channelLock) {
            channel.write(new MessageHeader(id, Rpc.MessageType.CALL)).addListener(listener);
            channel.writeAndFlush(msg).addListener(listener);
        }
        return promise;
    } catch (Exception e) {
        throw Utils.propagate(e);
    }
}
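The same listener instance is deliberately attached to both the header write and the payload write; the !promise.isDone() guard ensures the promise is failed only once even if both writes fail. The two writes happen under channelLock so that a concurrent call() cannot interleave its header between this call's header and payload.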
From source file:org.apache.tajo.pullserver.HttpDataServerHandler.java
License:Apache License
private ChannelFuture sendFile(ChannelHandlerContext ctx, FileChunk file) throws IOException {
    RandomAccessFile raf;
    try {
        raf = new RandomAccessFile(file.getFile(), "r");
    } catch (FileNotFoundException fnfe) {
        return null;
    }

    ChannelFuture writeFuture;
    ChannelFuture lastContentFuture;
    if (ctx.pipeline().get(SslHandler.class) != null) {
        // Cannot use zero-copy with HTTPS.
        lastContentFuture = ctx.write(
                new HttpChunkedInput(new ChunkedFile(raf, file.startOffset(), file.length(), 8192)));
    } else {
        // No encryption - use zero-copy.
        final FileRegion region = new DefaultFileRegion(raf.getChannel(), file.startOffset(), file.length());
        writeFuture = ctx.write(region);
        lastContentFuture = ctx.write(LastHttpContent.EMPTY_LAST_CONTENT);
        writeFuture.addListener(new ChannelFutureListener() {
            public void operationComplete(ChannelFuture future) {
                if (region.refCnt() > 0) {
                    region.release();
                }
            }
        });
    }
    return lastContentFuture;
}
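Here the listener does resource management rather than error handling: DefaultFileRegion is reference-counted and a successful write normally consumes its reference, so the refCnt() > 0 check releases the region only when the write completed without consuming it (for example, on failure), avoiding both a leak and a double-release.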
From source file:org.apache.tinkerpop.gremlin.server.GremlinServer.java
License:Apache License
/**
 * Start Gremlin Server with {@link Settings} provided to the constructor.
 */
public synchronized CompletableFuture<ServerGremlinExecutor<EventLoopGroup>> start() throws Exception {
    if (serverStarted != null) {
        // server already started - don't get it rolling again
        return serverStarted;
    }

    serverStarted = new CompletableFuture<>();
    final CompletableFuture<ServerGremlinExecutor<EventLoopGroup>> serverReadyFuture = serverStarted;
    try {
        final ServerBootstrap b = new ServerBootstrap();

        // when the high water mark is reached the channel becomes non-writable and stays that
        // way until the low water mark is reached, so that there is time to recover
        b.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, settings.writeBufferHighWaterMark);
        b.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, settings.writeBufferLowWaterMark);
        b.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

        // fire off any lifecycle scripts that were provided by the user. hooks get initialized during
        // ServerGremlinExecutor initialization
        serverGremlinExecutor.getHooks().forEach(hook -> {
            logger.info("Executing start up {}", LifeCycleHook.class.getSimpleName());
            try {
                hook.onStartUp(new LifeCycleHook.Context(logger));
            } catch (UnsupportedOperationException uoe) {
                // if the user doesn't implement onStartUp the scriptengine will throw
                // this exception. it can safely be ignored.
            }
        });

        final Channelizer channelizer = createChannelizer(settings);
        channelizer.init(serverGremlinExecutor);
        b.group(bossGroup, workerGroup).childHandler(channelizer);
        if (isEpollEnabled) {
            b.channel(EpollServerSocketChannel.class);
        } else {
            b.channel(NioServerSocketChannel.class);
        }

        // bind to host/port and wait for channel to be ready
        b.bind(settings.host, settings.port).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(final ChannelFuture channelFuture) throws Exception {
                if (channelFuture.isSuccess()) {
                    ch = channelFuture.channel();

                    logger.info(
                            "Gremlin Server configured with worker thread pool of {}, gremlin pool of {} and boss thread pool of {}.",
                            settings.threadPoolWorker, settings.gremlinPool, settings.threadPoolBoss);
                    logger.info("Channel started at port {}.", settings.port);

                    serverReadyFuture.complete(serverGremlinExecutor);
                } else {
                    serverReadyFuture.completeExceptionally(new IOException(String.format(
                            "Could not bind to %s and %s - perhaps something else is bound to that address.",
                            settings.host, settings.port)));
                }
            }
        });
    } catch (Exception ex) {
        logger.error("Gremlin Server Error", ex);
        serverReadyFuture.completeExceptionally(ex);
    }

    return serverStarted;
}
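This listener is a small bridge between two future models: the Netty ChannelFuture produced by bind() completes a java.util.concurrent.CompletableFuture that the rest of Gremlin Server consumes, translating a failed bind into an IOException with a human-readable hint about the likely cause.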
From source file:org.apache.zookeeper.ClientCnxnSocketNetty.java
License:Apache License
@Override
void connect(InetSocketAddress addr) throws IOException {
    firstConnect = new CountDownLatch(1);

    Bootstrap bootstrap = new Bootstrap()
            .group(eventLoopGroup)
            .channel(NettyUtils.nioOrEpollSocketChannel())
            .option(ChannelOption.SO_LINGER, -1)
            .option(ChannelOption.TCP_NODELAY, true)
            .handler(new ZKClientPipelineFactory(addr.getHostString(), addr.getPort()));
    bootstrap = configureBootstrapAllocator(bootstrap);
    bootstrap.validate();

    connectLock.lock();
    try {
        connectFuture = bootstrap.connect(addr);
        connectFuture.addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture channelFuture) throws Exception {
                // this lock guarantees that channel won't be assigned after cleanup().
                connectLock.lock();
                try {
                    if (!channelFuture.isSuccess()) {
                        LOG.info("future isn't success, cause:", channelFuture.cause());
                        return;
                    } else if (connectFuture == null) {
                        LOG.info("connect attempt cancelled");
                        // If the connect attempt was cancelled but succeeded
                        // anyway, make sure to close the channel, otherwise
                        // we may leak a file descriptor.
                        channelFuture.channel().close();
                        return;
                    }
                    // setup channel, variables, connection, etc.
                    channel = channelFuture.channel();
                    disconnected.set(false);
                    initialized = false;
                    lenBuffer.clear();
                    incomingBuffer = lenBuffer;

                    sendThread.primeConnection();
                    updateNow();
                    updateLastSendAndHeard();

                    if (sendThread.tunnelAuthInProgress()) {
                        waitSasl.drainPermits();
                        needSasl.set(true);
                        sendPrimePacket();
                    } else {
                        needSasl.set(false);
                    }
                    LOG.info("channel is connected: {}", channelFuture.channel());
                } finally {
                    connectFuture = null;
                    connectLock.unlock();
                    // need to wake on connect success or failure to avoid
                    // timing out ClientCnxn.SendThread which may be
                    // blocked waiting for first connect in doTransport().
                    wakeupCnxn();
                    firstConnect.countDown();
                }
            }
        });
    } finally {
        connectLock.unlock();
    }
}
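Note that the listener body runs on the channel's event loop thread, not on the thread that called connect(), which is exactly why connectLock is re-acquired inside operationComplete; the finally block then guarantees that wakeupCnxn() and the firstConnect latch fire on every path, including the early returns.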
From source file:org.apache.zookeeper.server.NettyServerCnxn.java
License:Apache License
@Override
public void close() {
    closingChannel = true;

    if (LOG.isDebugEnabled()) {
        LOG.debug("close called for sessionid:0x{}", Long.toHexString(sessionId));
    }

    setStale();

    // ZOOKEEPER-2743:
    // Always unregister connection upon close to prevent
    // connection bean leak under certain race conditions.
    factory.unregisterConnection(this);

    // if this is not in cnxns then it's already closed
    if (!factory.cnxns.remove(this)) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("cnxns size:{}", factory.cnxns.size());
        }
        return;
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("close in progress for sessionid:0x{}", Long.toHexString(sessionId));
    }

    factory.removeCnxnFromSessionMap(this);
    factory.removeCnxnFromIpMap(this, ((InetSocketAddress) channel.remoteAddress()).getAddress());

    if (zkServer != null) {
        zkServer.removeCnxn(this);
    }

    if (channel.isOpen()) {
        // Since we don't check that the futures created by write calls to the channel have
        // completed, we need to make sure that all writes have been completed before closing
        // the channel or we risk data loss.
        // See: http://lists.jboss.org/pipermail/netty-users/2009-August/001122.html
        channel.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) {
                future.channel().close().addListener(f -> releaseQueuedBuffer());
            }
        });
    } else {
        channel.eventLoop().execute(this::releaseQueuedBuffer);
    }
}
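Two things make this a useful closing example: writing Unpooled.EMPTY_BUFFER with a listener is the standard trick for deferring close() until every previously queued write has been flushed, and the nested listener on the close future (f -> releaseQueuedBuffer()) shows the lambda form of a listener, which works because GenericFutureListener has a single abstract method.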