List of usage examples for io.netty.channel.ChannelFuture.cause()
Throwable cause();
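For orientation, the snippet below is a minimal, self-contained sketch of the pattern most of the examples on this page share: attach a ChannelFutureListener to a connect future and, when the operation did not succeed, read the failure from cause(). The bootstrap settings and the example.com endpoint are placeholders chosen for illustration, not taken from any of the projects listed below.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelOption;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;

public class ChannelFutureCauseExample {
    public static void main(String[] args) {
        NioEventLoopGroup group = new NioEventLoopGroup();
        try {
            // Connect asynchronously; the returned ChannelFuture completes when the attempt finishes.
            ChannelFuture connectFuture = new Bootstrap()
                    .group(group)
                    .channel(NioSocketChannel.class)
                    .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 5000)
                    .handler(new ChannelInboundHandlerAdapter())
                    .connect("example.com", 80); // placeholder endpoint

            connectFuture.addListener(new ChannelFutureListener() {
                @Override
                public void operationComplete(ChannelFuture future) {
                    if (future.isSuccess()) {
                        System.out.println("Connected: " + future.channel());
                    } else {
                        // cause() returns the Throwable that failed the operation;
                        // it is null while the future is pending or after success.
                        System.err.println("Connect failed: " + future.cause());
                    }
                }
            });

            // Block until the connect attempt has completed, for the sake of the demo.
            connectFuture.awaitUninterruptibly();
        } finally {
            group.shutdownGracefully();
        }
    }
}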
From source file:org.apache.spark.sql.hive.thriftserver.rsc.Rpc.java
License:Apache License
/**
 * Creates an RPC client for a server running on the given remote host and port.
 *
 * @param config RPC configuration data.
 * @param eloop Event loop for managing the connection.
 * @param host Host name or IP address to connect to.
 * @param port Port where server is listening.
 * @param clientId The client ID that identifies the connection.
 * @param secret Secret for authenticating the client with the server.
 * @param dispatcher Dispatcher used to handle RPC calls.
 * @return A future that can be used to monitor the creation of the RPC object.
 */
public static Promise<Rpc> createClient(final RSCConf config, final EventLoopGroup eloop, String host, int port,
        final String clientId, final String secret, final RpcDispatcher dispatcher) throws Exception {
    int connectTimeoutMs = (int) config.getTimeAsMs(RSCConf.Entry.RPC_CLIENT_CONNECT_TIMEOUT);
    final ChannelFuture cf = new Bootstrap().group(eloop).handler(new ChannelInboundHandlerAdapter() {
    }).channel(NioSocketChannel.class).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMs).connect(host, port);

    final Promise<Rpc> promise = eloop.next().newPromise();
    final AtomicReference<Rpc> rpc = new AtomicReference<Rpc>();

    // Set up a timeout to undo everything.
    final Runnable timeoutTask = new Runnable() {
        @Override
        public void run() {
            promise.setFailure(new TimeoutException("Timed out waiting for RPC server connection."));
        }
    };
    final ScheduledFuture<?> timeoutFuture = eloop.schedule(timeoutTask,
            config.getTimeAsMs(RSCConf.Entry.RPC_CLIENT_HANDSHAKE_TIMEOUT), TimeUnit.MILLISECONDS);

    // The channel listener instantiates the Rpc instance when the connection is established,
    // and initiates the SASL handshake.
    cf.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture cf) throws Exception {
            if (cf.isSuccess()) {
                SaslClientHandler saslHandler = new SaslClientHandler(config, clientId, promise, timeoutFuture,
                        secret, dispatcher);
                Rpc rpc = createRpc(config, saslHandler, (SocketChannel) cf.channel(), eloop);
                saslHandler.rpc = rpc;
                saslHandler.sendHello(cf.channel());
            } else {
                promise.setFailure(cf.cause());
            }
        }
    });

    // Handle cancellation of the promise.
    promise.addListener(new GenericFutureListener<Promise<Rpc>>() {
        @Override
        public void operationComplete(Promise<Rpc> p) {
            if (p.isCancelled()) {
                cf.cancel(true);
            }
        }
    });

    return promise;
}
From source file:org.apache.spark.sql.hive.thriftserver.rsc.Rpc.java
License:Apache License
/**
 * Sends an RPC call to the remote endpoint and returns a future that can be used to monitor the
 * operation.
 *
 * @param msg RPC call to send.
 * @param retType Type of expected reply.
 * @return A future used to monitor the operation.
 */
public <T> Future<T> call(Object msg, Class<T> retType) {
    LOG.info("tlitest retType: " + retType);
    Utils.checkArgument(msg != null);
    Utils.checkState(channel.isOpen(), "RPC channel is closed.");
    try {
        final long id = rpcId.getAndIncrement();
        final Promise<T> promise = egroup.next().newPromise();

        ChannelFutureListener listener = new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture cf) {
                if (!cf.isSuccess() && !promise.isDone()) {
                    LOG.warn("Failed to send RPC, closing connection.", cf.cause());
                    promise.setFailure(cf.cause());
                    dispatcher.discardRpc(id);
                    close();
                }
            }
        };

        dispatcher.registerRpc(id, promise, msg.getClass().getName());
        synchronized (channelLock) {
            channel.write(new MessageHeader(id, Rpc.MessageType.CALL)).addListener(listener);
            channel.writeAndFlush(msg).addListener(listener);
        }
        return promise;
    } catch (Exception e) {
        throw Utils.propagate(e);
    }
}
From source file:org.apache.tajo.worker.Fetcher.java
License:Apache License
public FileChunk get() throws IOException {
    if (useLocalFile) {
        LOG.info("Get pseudo fetch from local host");
        startTime = System.currentTimeMillis();
        finishTime = System.currentTimeMillis();
        state = TajoProtos.FetcherState.FETCH_FINISHED;
        return fileChunk;
    }

    LOG.info("Get real fetch from remote host");
    this.startTime = System.currentTimeMillis();
    this.state = TajoProtos.FetcherState.FETCH_FETCHING;
    ChannelFuture future = null;
    try {
        future = bootstrap.clone().connect(new InetSocketAddress(host, port))
                .addListener(ChannelFutureListener.CLOSE_ON_FAILURE);

        // Wait until the connection attempt succeeds or fails.
        Channel channel = future.awaitUninterruptibly().channel();
        if (!future.isSuccess()) {
            state = TajoProtos.FetcherState.FETCH_FAILED;
            throw new IOException(future.cause());
        }

        String query = uri.getPath() + (uri.getRawQuery() != null ? "?" + uri.getRawQuery() : "");
        // Prepare the HTTP request.
        HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, query);
        request.headers().set(HttpHeaders.Names.HOST, host);
        request.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.CLOSE);
        request.headers().set(HttpHeaders.Names.ACCEPT_ENCODING, HttpHeaders.Values.GZIP);

        LOG.info("Status: " + getState() + ", URI:" + uri);
        // Send the HTTP request.
        ChannelFuture channelFuture = channel.writeAndFlush(request);

        // Wait for the server to close the connection.
        channel.closeFuture().awaitUninterruptibly();

        channelFuture.addListener(ChannelFutureListener.CLOSE);

        fileChunk.setLength(fileChunk.getFile().length());
        return fileChunk;
    } finally {
        if (future != null) {
            // Close the channel to exit.
            future.channel().close();
        }

        this.finishTime = System.currentTimeMillis();
        LOG.info("Fetcher finished:" + (finishTime - startTime) + " ms, " + getState() + ", URI:" + uri);
    }
}
From source file:org.apache.tajo.worker.LocalFetcher.java
License:Apache License
private List<FileChunk> getChunksForRangeShuffle(final PullServerParams params, final Path queryBaseDir)
        throws IOException {
    final List<FileChunk> fileChunks = new ArrayList<>();

    if (state == FetcherState.FETCH_INIT) {
        final ChannelInitializer<Channel> initializer = new HttpClientChannelInitializer();
        bootstrap.handler(initializer);
    }

    this.state = FetcherState.FETCH_META_FETCHING;
    ChannelFuture future = null;
    try {
        future = bootstrap.clone().connect(new InetSocketAddress(host, port))
                .addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE);

        // Wait until the connection attempt succeeds or fails.
        Channel channel = future.awaitUninterruptibly().channel();
        if (!future.isSuccess()) {
            endFetch(FetcherState.FETCH_FAILED);
            throw new IOException(future.cause());
        }

        for (URI eachURI : createChunkMetaRequestURIs(host, port, params)) {
            String query = eachURI.getPath() + (eachURI.getRawQuery() != null ? "?" + eachURI.getRawQuery() : "");
            HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, query);
            request.headers().set(HttpHeaders.Names.HOST, host);
            request.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.CLOSE);
            request.headers().set(HttpHeaders.Names.ACCEPT_ENCODING, HttpHeaders.Values.GZIP);

            if (LOG.isDebugEnabled()) {
                LOG.debug("Status: " + getState() + ", URI:" + eachURI);
            }
            // Send the HTTP request.
            channel.writeAndFlush(request);
        }

        // Wait for the server to close the connection. throw exception if failed
        channel.closeFuture().syncUninterruptibly();

        if (!state.equals(FetcherState.FETCH_META_FINISHED)) {
            endFetch(FetcherState.FETCH_FAILED);
        } else {
            state = FetcherState.FETCH_DATA_FETCHING;
            fileLen = fileNum = 0;
            for (FileChunkMeta eachMeta : chunkMetas) {
                Path outputPath = StorageUtil.concatPath(queryBaseDir, eachMeta.getTaskId(), "output");
                if (!localDirAllocator.ifExists(outputPath.toString(), conf)) {
                    LOG.warn("Range shuffle - file not exist. " + outputPath);
                    continue;
                }
                Path path = localFileSystem
                        .makeQualified(localDirAllocator.getLocalPathToRead(outputPath.toString(), conf));
                File file = new File(URI.create(path.toUri() + "/output"));
                FileChunk chunk = new FileChunk(file, eachMeta.getStartOffset(), eachMeta.getLength());
                chunk.setEbId(tableName);
                fileChunks.add(chunk);
                fileLen += chunk.length();
                fileNum++;
            }
            endFetch(FetcherState.FETCH_DATA_FINISHED);
        }

        return fileChunks;
    } finally {
        if (future != null && future.channel().isOpen()) {
            // Close the channel to exit.
            future.channel().close().awaitUninterruptibly();
        }
    }
}
From source file:org.apache.tajo.worker.RemoteFetcher.java
License:Apache License
@Override
public List<FileChunk> get() throws IOException {
    List<FileChunk> fileChunks = new ArrayList<>();
    if (state == FetcherState.FETCH_INIT) {
        ChannelInitializer<Channel> initializer = new HttpClientChannelInitializer(fileChunk.getFile());
        bootstrap.handler(initializer);
    }

    this.startTime = System.currentTimeMillis();
    this.state = FetcherState.FETCH_DATA_FETCHING;
    ChannelFuture future = null;
    try {
        future = bootstrap.clone().connect(new InetSocketAddress(host, port))
                .addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE);

        // Wait until the connection attempt succeeds or fails.
        Channel channel = future.awaitUninterruptibly().channel();
        if (!future.isSuccess()) {
            state = TajoProtos.FetcherState.FETCH_FAILED;
            throw new IOException(future.cause());
        }

        String query = uri.getPath() + (uri.getRawQuery() != null ? "?" + uri.getRawQuery() : "");
        // Prepare the HTTP request.
        HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, query);
        request.headers().set(HttpHeaders.Names.HOST, host);
        request.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.CLOSE);
        request.headers().set(HttpHeaders.Names.ACCEPT_ENCODING, HttpHeaders.Values.GZIP);

        if (LOG.isDebugEnabled()) {
            LOG.debug("Status: " + getState() + ", URI:" + uri);
        }
        // Send the HTTP request.
        channel.writeAndFlush(request);

        // Wait for the server to close the connection. throw exception if failed
        channel.closeFuture().syncUninterruptibly();

        fileChunk.setLength(fileChunk.getFile().length());

        long start = 0;
        for (Long eachChunkLength : chunkLengths) {
            if (eachChunkLength == 0)
                continue;
            FileChunk chunk = new FileChunk(fileChunk.getFile(), start, eachChunkLength);
            chunk.setEbId(fileChunk.getEbId());
            chunk.setFromRemote(true);
            fileChunks.add(chunk);
            start += eachChunkLength;
        }
        return fileChunks;

    } finally {
        if (future != null && future.channel().isOpen()) {
            // Close the channel to exit.
            future.channel().close().awaitUninterruptibly();
        }

        this.finishTime = System.currentTimeMillis();
        long elapsedMills = finishTime - startTime;
        String transferSpeed;
        if (elapsedMills > 1000) {
            long bytePerSec = (fileChunk.length() * 1000) / elapsedMills;
            transferSpeed = FileUtils.byteCountToDisplaySize(bytePerSec);
        } else {
            transferSpeed = FileUtils.byteCountToDisplaySize(Math.max(fileChunk.length(), 0));
        }

        LOG.info(String.format("Fetcher :%d ms elapsed. %s/sec, len:%d, state:%s, URL:%s", elapsedMills,
                transferSpeed, fileChunk.length(), getState(), uri));
    }
}
From source file:org.apache.zookeeper.ClientCnxnSocketNetty.java
License:Apache License
@Override
void connect(InetSocketAddress addr) throws IOException {
    firstConnect = new CountDownLatch(1);

    Bootstrap bootstrap = new Bootstrap().group(eventLoopGroup).channel(NettyUtils.nioOrEpollSocketChannel())
            .option(ChannelOption.SO_LINGER, -1).option(ChannelOption.TCP_NODELAY, true)
            .handler(new ZKClientPipelineFactory(addr.getHostString(), addr.getPort()));
    bootstrap = configureBootstrapAllocator(bootstrap);
    bootstrap.validate();

    connectLock.lock();
    try {
        connectFuture = bootstrap.connect(addr);
        connectFuture.addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture channelFuture) throws Exception {
                // this lock guarantees that channel won't be assigned after cleanup().
                connectLock.lock();
                try {
                    if (!channelFuture.isSuccess()) {
                        LOG.info("future isn't success, cause:", channelFuture.cause());
                        return;
                    } else if (connectFuture == null) {
                        LOG.info("connect attempt cancelled");
                        // If the connect attempt was cancelled but succeeded
                        // anyway, make sure to close the channel, otherwise
                        // we may leak a file descriptor.
                        channelFuture.channel().close();
                        return;
                    }
                    // setup channel, variables, connection, etc.
                    channel = channelFuture.channel();

                    disconnected.set(false);
                    initialized = false;
                    lenBuffer.clear();
                    incomingBuffer = lenBuffer;

                    sendThread.primeConnection();
                    updateNow();
                    updateLastSendAndHeard();

                    if (sendThread.tunnelAuthInProgress()) {
                        waitSasl.drainPermits();
                        needSasl.set(true);
                        sendPrimePacket();
                    } else {
                        needSasl.set(false);
                    }
                    LOG.info("channel is connected: {}", channelFuture.channel());
                } finally {
                    connectFuture = null;
                    connectLock.unlock();
                    // need to wake on connect success or failure to avoid
                    // timing out ClientCnxn.SendThread which may be
                    // blocked waiting for first connect in doTransport().
                    wakeupCnxn();
                    firstConnect.countDown();
                }
            }
        });
    } finally {
        connectLock.unlock();
    }
}
From source file:org.asynchttpclient.netty.request.WriteCompleteListener.java
License:Open Source License
@Override
public void operationComplete(ChannelFuture future) throws Exception {
    operationComplete(future.channel(), future.cause());
}
From source file:org.asynchttpclient.netty.SimpleChannelFutureListener.java
License:Open Source License
@Override
public final void operationComplete(ChannelFuture future) throws Exception {
    Channel channel = future.channel();
    if (future.isSuccess()) {
        onSuccess(channel);
    } else {
        onFailure(channel, future.cause());
    }
}
From source file:org.asynchttpclient.providers.netty.request.NettyConnectListener.java
License:Apache License
public final void operationComplete(ChannelFuture f) throws Exception {
    if (f.isSuccess()) {
        onFutureSuccess(f.channel());
    } else {
        onFutureFailure(f.channel(), f.cause());
    }
}
From source file:org.asynchttpclient.providers.netty4.request.NettyConnectListener.java
License:Open Source License
public final void operationComplete(ChannelFuture f) throws Exception {
    if (f.isSuccess())
        onFutureSuccess(f.channel());
    else
        onFutureFailure(f.channel(), f.cause());
}