Example usage for io.netty.util.concurrent Future cause

List of usage examples for io.netty.util.concurrent Future cause

Introduction

On this page you can find example usages of io.netty.util.concurrent Future#cause().

Prototype

Throwable cause();

Document

Returns the cause of the failed I/O operation if the I/O operation has failed.
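Before the project examples below, here is a minimal, self-contained sketch of the typical pattern (not taken from any of the sources listed; the class and method names are hypothetical placeholders): a listener checks isSuccess() first and, only on failure, reads the Throwable returned by cause() and propagates it to a promise.

import io.netty.channel.Channel;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;
import io.netty.util.concurrent.Promise;

// Hypothetical helper class, for illustration only.
public final class CauseExample {

    // Relays the outcome of a Netty Future onto a Promise.
    static void relay(final Future<Channel> connectFuture, final Promise<Channel> resultPromise) {
        connectFuture.addListener(new FutureListener<Channel>() {
            @Override
            public void operationComplete(final Future<Channel> future) throws Exception {
                if (!future.isSuccess()) {
                    // cause() returns the failure Throwable; it is null if the
                    // operation succeeded or has not completed yet.
                    resultPromise.setFailure(future.cause());
                } else {
                    resultPromise.setSuccess(future.getNow());
                }
            }
        });
    }
}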

Usage

From source file:org.ow2.petals.bc.gateway.commons.handlers.AuthenticatorSSLHandler.java

License:Open Source License

private void setUpSslHandlers(final ChannelHandlerContext ctx, final AbstractDomain domain,
        final @Nullable String certificate, final @Nullable String key, final @Nullable String passphrase,
        final @Nullable String remoteCertificate) throws SSLException {

    // TODO could we use certificate only for auth and not encryption?
    // TODO support openssl
    final SslHandler sslHandler;
    if (pdOrAuth.isB() && certificate != null && key != null) {
        // server side ssl, do not forget startTls so that our accept can be sent after the handler is added

        final ServiceUnitDataHandler handler = domain.getSUHandler();

        final SslContextBuilder builder = SslContextBuilder
                .forServer(ServiceUnitUtil.getFile(handler.getInstallRoot(), certificate),
                        ServiceUnitUtil.getFile(handler.getInstallRoot(), key), passphrase)
                .sslProvider(SslProvider.JDK).ciphers(null, IdentityCipherSuiteFilter.INSTANCE)
                .sessionCacheSize(0).sessionTimeout(0);

        if (remoteCertificate != null) {
            builder.trustManager(ServiceUnitUtil.getFile(handler.getInstallRoot(), remoteCertificate))
                    .clientAuth(ClientAuth.REQUIRE);
        }

        // until https://github.com/netty/netty/issues/5170 is accepted
        // we need to create the handler by hand
        sslHandler = new SslHandler(builder.build().newEngine(ctx.alloc()), true);
    } else if (pdOrAuth.isA() && remoteCertificate != null) {
        // client side

        final String installRoot = domain.getSUHandler().getInstallRoot();
        final SslContextBuilder builder = SslContextBuilder.forClient().sslProvider(SslProvider.JDK)
                .trustManager(ServiceUnitUtil.getFile(installRoot, remoteCertificate))
                .ciphers(null, IdentityCipherSuiteFilter.INSTANCE).sessionCacheSize(0).sessionTimeout(0);

        if (certificate != null && key != null) {
            builder.keyManager(ServiceUnitUtil.getFile(installRoot, certificate),
                    ServiceUnitUtil.getFile(installRoot, key), passphrase);
        }

        sslHandler = builder.build().newHandler(ctx.alloc());
    } else {
        sslHandler = null;
    }

    // For a server, it contains the transporter name and the consumer domain name (it was updated in channelRead0)
    // For a client, it contains the provider domain name (it was set by the component)
    final String logName = logger.getName();

    // let's replace the debug logger with something specific to this consumer
    ctx.pipeline().replace(HandlerConstants.LOG_DEBUG_HANDLER, HandlerConstants.LOG_DEBUG_HANDLER,
            new LoggingHandler(logName, LogLevel.TRACE));

    ctx.pipeline().replace(HandlerConstants.LOG_ERRORS_HANDLER, HandlerConstants.LOG_ERRORS_HANDLER,
            new LastLoggingHandler(logName + ".errors"));

    if (sslHandler != null) {
        // if there is a sslHandler, then we can only add the domain handler after the handshake is finished
        // if not we risk sending things too early in it

        sslHandler.handshakeFuture().addListener(new FutureListener<Channel>() {
            @Override
            public void operationComplete(final @Nullable Future<Channel> future) throws Exception {
                assert future != null;
                if (!future.isSuccess()) {
                    authenticationFuture.setFailure(future.cause());
                } else {
                    // I must keep the handler here until now in case there is an exception so that I can log it
                    ctx.pipeline().replace(HandlerConstants.DOMAIN_HANDLER, HandlerConstants.DOMAIN_HANDLER,
                            dhb.build(domain));
                    authenticationFuture.setSuccess(ctx.channel());
                }
            }
        });

        ctx.pipeline().addAfter(HandlerConstants.LOG_DEBUG_HANDLER, HandlerConstants.SSL_HANDLER, sslHandler);
    }

    if (pdOrAuth.isB()) {
        if (logger.isLoggable(Level.FINE)) {
            logger.fine("Sending an Accept (" + ctx.channel().remoteAddress() + ")");
        }

        // this must be sent after the ssl handler is replaced (when using ssl) so that we are ready to receive ssl data right away
        // but this must be sent before the domain handler is replaced (when not using ssl), because it will send
        // data and it must arrive AFTER our Accept
        ctx.writeAndFlush(new AuthAccept());
    }

    // else it is done in the FutureListener
    if (sslHandler == null) {
        ctx.pipeline().replace(HandlerConstants.DOMAIN_HANDLER, HandlerConstants.DOMAIN_HANDLER,
                dhb.build(domain));
        authenticationFuture.setSuccess(ctx.channel());
    }
}

From source file:org.ow2.petals.bc.gateway.inbound.ConsumerDomain.java

License:Open Source License

private void scheduleNextPolling(final long currentDelay, final double accel, final long maxDelay) {
    final Runnable command = new Runnable() {
        @Override
        public void run() {
            final long nextDelay;
            if (accel > 1) {
                nextDelay = Math.min((long) (currentDelay * accel), maxDelay);
            } else {
                nextDelay = maxDelay;
            }

            try {
                if (logger.isLoggable(Level.FINE)) {
                    logger.fine("Propagation refresh polling (next in " + nextDelay + "ms)");
                }

                // TODO catch exceptions?!
                if (sendPropagations(false)) {
                    logger.info("Changes in propagations detected: refreshed!");
                }

            } finally {
                try {
                    // in case it was interrupted during the propagation sending
                    // this will also reset the interrupted flag
                    pollingLock.lockInterruptibly();
                    try {
                        // polling corresponds to the current task
                        // if it's null, it was cancelled (thus the test for
                        // isCancelled is not really needed but well...)
                        if (polling != null && !polling.isCancelled()) {
                            scheduleNextPolling(nextDelay, accel, maxDelay);
                        }
                    } finally {
                        pollingLock.unlock();
                    }
                } catch (final InterruptedException e) {
                    // we were interrupted, it's ok, we stop there
                }
            }
        }
    };

    final long delay;
    if (accel > 1) {
        delay = Math.min(currentDelay, maxDelay);
    } else {
        delay = maxDelay;
    }

    polling = (ScheduledFuture<?>) GlobalEventExecutor.INSTANCE.schedule(command, delay, TimeUnit.MILLISECONDS)
            .addListener(new FutureListener<Object>() {
                @Override
                public void operationComplete(final @Nullable Future<Object> future) throws Exception {
                    assert future != null;
                    if (!future.isSuccess() && !future.isCancelled()) {
                        logger.log(Level.WARNING, "Error during propagation refresh polling", future.cause());
                    }
                }
            });

}

From source file:org.redisson.async.OperationListener.java

License:Apache License

protected boolean isBreak(RedisAsyncConnection<Object, V> async, Promise<P> promise, Future<F> future) {
    if (!future.isSuccess()) {
        if (future.cause() instanceof RedisTimeoutException) {
            timeoutCallback.execute(promise, async);
            return false;
        } else {
            promise.setFailure(future.cause());
            return true;
        }
    }

    if (promise.isCancelled()) {
        if (async.isMultiMode()) {
            async.discard();
        }
        return true;
    }

    return false;
}

From source file:org.redisson.BaseRemoteService.java

License:Apache License

private void awaitResultAsync(final RemoteInvocationOptions optionsCopy, final RemotePromise<Object> result,
        final RemoteServiceRequest request, final String responseName, final String ackName) {
    RFuture<Boolean> deleteFuture = redisson.getBucket(ackName).deleteAsync();
    deleteFuture.addListener(new FutureListener<Boolean>() {
        @Override
        public void operationComplete(Future<Boolean> future) throws Exception {
            if (!future.isSuccess()) {
                result.tryFailure(future.cause());
                return;
            }

            awaitResultAsync(optionsCopy, result, request, responseName);
        }
    });
}

From source file:org.redisson.BaseRemoteService.java

License:Apache License

protected void awaitResultAsync(final RemoteInvocationOptions optionsCopy, final RemotePromise<Object> result,
        final RemoteServiceRequest request, final String responseName) {
    // poll for the response only if expected
    if (!optionsCopy.isResultExpected()) {
        return;
    }

    RBlockingQueue<RRemoteServiceResponse> responseQueue = redisson.getBlockingQueue(responseName, getCodec());
    RFuture<RRemoteServiceResponse> responseFuture = responseQueue
            .pollAsync(optionsCopy.getExecutionTimeoutInMillis(), TimeUnit.MILLISECONDS);
    responseFuture.addListener(new FutureListener<RRemoteServiceResponse>() {

        @Override
        public void operationComplete(Future<RRemoteServiceResponse> future) throws Exception {
            if (!future.isSuccess()) {
                result.tryFailure(future.cause());
                return;
            }

            if (future.getNow() == null) {
                RemoteServiceTimeoutException e = new RemoteServiceTimeoutException("No response after "
                        + optionsCopy.getExecutionTimeoutInMillis() + "ms for request: " + request);
                result.tryFailure(e);
                return;
            }

            if (future.getNow() instanceof RemoteServiceCancelResponse) {
                result.doCancel();
                return;
            }

            RemoteServiceResponse response = (RemoteServiceResponse) future.getNow();
            if (response.getError() != null) {
                result.tryFailure(response.getError());
                return;
            }

            result.trySuccess(response.getResult());
        }
    });
}

From source file:org.redisson.BaseRemoteService.java

License:Apache License

private RFuture<RemoteServiceAck> tryPollAckAgainAsync(RemoteInvocationOptions optionsCopy,
        final RBlockingQueue<RemoteServiceAck> responseQueue, String ackName) throws InterruptedException {
    final RPromise<RemoteServiceAck> promise = commandExecutor.getConnectionManager().newPromise();
    RFuture<Boolean> ackClientsFuture = commandExecutor.evalWriteAsync(ackName, LongCodec.INSTANCE,
            RedisCommands.EVAL_BOOLEAN,
            "if redis.call('setnx', KEYS[1], 1) == 1 then " + "redis.call('pexpire', KEYS[1], ARGV[1]);"
                    + "return 0;" + "end;" + "redis.call('del', KEYS[1]);" + "return 1;",
            Arrays.<Object>asList(ackName), optionsCopy.getAckTimeoutInMillis());
    ackClientsFuture.addListener(new FutureListener<Boolean>() {
        @Override
        public void operationComplete(Future<Boolean> future) throws Exception {
            if (!future.isSuccess()) {
                promise.tryFailure(future.cause());
                return;
            }

            if (future.getNow()) {
                RFuture<RemoteServiceAck> pollFuture = responseQueue.pollAsync();
                pollFuture.addListener(new FutureListener<RemoteServiceAck>() {
                    @Override
                    public void operationComplete(Future<RemoteServiceAck> future) throws Exception {
                        if (!future.isSuccess()) {
                            promise.tryFailure(future.cause());
                            return;
                        }

                        promise.trySuccess(future.getNow());
                    }
                });
            } else {
                promise.trySuccess(null);
            }
        }
    });
    return promise;
}

From source file:org.redisson.cluster.ClusterConnectionManager.java

License:Apache License

private RFuture<Collection<RFuture<Void>>> addMasterEntry(final ClusterPartition partition,
        final ClusterServersConfig cfg) {
    if (partition.isMasterFail()) {
        RedisException e = new RedisException("Failed to add master: " + partition.getMasterAddress()
                + " for slot ranges: " + partition.getSlotRanges() + ". Reason - server has FAIL flag");

        if (partition.getSlotRanges().isEmpty()) {
            e = new RedisException("Failed to add master: " + partition.getMasterAddress()
                    + ". Reason - server has FAIL flag");
        }
        return newFailedFuture(e);
    }

    final RPromise<Collection<RFuture<Void>>> result = newPromise();
    RFuture<RedisConnection> connectionFuture = connect(cfg, partition.getMasterAddress());
    connectionFuture.addListener(new FutureListener<RedisConnection>() {
        @Override
        public void operationComplete(Future<RedisConnection> future) throws Exception {
            if (!future.isSuccess()) {
                log.error("Can't connect to master: {} with slot ranges: {}", partition.getMasterAddress(),
                        partition.getSlotRanges());
                result.tryFailure(future.cause());
                return;
            }

            final RedisConnection connection = future.getNow();
            RFuture<Map<String, String>> clusterFuture = connection.async(RedisCommands.CLUSTER_INFO);
            clusterFuture.addListener(new FutureListener<Map<String, String>>() {

                @Override
                public void operationComplete(Future<Map<String, String>> future) throws Exception {
                    if (!future.isSuccess()) {
                        log.error("Can't execute CLUSTER_INFO for " + connection.getRedisClient().getAddr(),
                                future.cause());
                        result.tryFailure(future.cause());
                        return;
                    }

                    Map<String, String> params = future.getNow();
                    if ("fail".equals(params.get("cluster_state"))) {
                        RedisException e = new RedisException(
                                "Failed to add master: " + partition.getMasterAddress() + " for slot ranges: "
                                        + partition.getSlotRanges() + ". Reason - cluster_state:fail");
                        log.error("cluster_state:fail for " + connection.getRedisClient().getAddr());
                        result.tryFailure(e);
                        return;
                    }

                    MasterSlaveServersConfig config = create(cfg);
                    config.setMasterAddress(partition.getMasterAddress());

                    final MasterSlaveEntry e;
                    List<RFuture<Void>> futures = new ArrayList<RFuture<Void>>();
                    if (config.getReadMode() == ReadMode.MASTER) {
                        e = new SingleEntry(partition.getSlotRanges(), ClusterConnectionManager.this, config);
                    } else {
                        config.setSlaveAddresses(partition.getSlaveAddresses());

                        e = new MasterSlaveEntry(partition.getSlotRanges(), ClusterConnectionManager.this,
                                config);

                        List<RFuture<Void>> fs = e.initSlaveBalancer(partition.getFailedSlaveAddresses());
                        futures.addAll(fs);
                        if (!partition.getSlaveAddresses().isEmpty()) {
                            log.info("slaves: {} added for slot ranges: {}", partition.getSlaveAddresses(),
                                    partition.getSlotRanges());
                            if (!partition.getFailedSlaveAddresses().isEmpty()) {
                                log.warn("slaves: {} is down for slot ranges: {}",
                                        partition.getFailedSlaveAddresses(), partition.getSlotRanges());
                            }
                        }
                    }

                    RFuture<Void> f = e.setupMasterEntry(config.getMasterAddress().getHost(),
                            config.getMasterAddress().getPort());
                    final RPromise<Void> initFuture = newPromise();
                    futures.add(initFuture);
                    f.addListener(new FutureListener<Void>() {
                        @Override
                        public void operationComplete(Future<Void> future) throws Exception {
                            if (!future.isSuccess()) {
                                log.error("Can't add master: {} for slot ranges: {}",
                                        partition.getMasterAddress(), partition.getSlotRanges());
                                initFuture.tryFailure(future.cause());
                                return;
                            }
                            for (Integer slot : partition.getSlots()) {
                                addEntry(slot, e);
                                lastPartitions.put(slot, partition);
                            }

                            log.info("master: {} added for slot ranges: {}", partition.getMasterAddress(),
                                    partition.getSlotRanges());
                            if (!initFuture.trySuccess(null)) {
                                throw new IllegalStateException();
                            }
                        }
                    });
                    if (!result.trySuccess(futures)) {
                        throw new IllegalStateException();
                    }
                }
            });

        }
    });

    return result;
}

From source file:org.redisson.cluster.ClusterConnectionManager.java

License:Apache License

private void checkClusterState(final ClusterServersConfig cfg, final Iterator<URL> iterator,
        final AtomicReference<Throwable> lastException) {
    if (!iterator.hasNext()) {
        log.error("Can't update cluster state", lastException.get());
        scheduleClusterChangeCheck(cfg, null);
        return;
    }
    if (!getShutdownLatch().acquire()) {
        return;
    }
    final URL uri = iterator.next();
    RFuture<RedisConnection> connectionFuture = connect(cfg, uri);
    connectionFuture.addListener(new FutureListener<RedisConnection>() {
        @Override
        public void operationComplete(Future<RedisConnection> future) throws Exception {
            if (!future.isSuccess()) {
                lastException.set(future.cause());
                getShutdownLatch().release();
                checkClusterState(cfg, iterator, lastException);
                return;
            }

            RedisConnection connection = future.getNow();
            updateClusterState(cfg, connection, iterator, uri);
        }
    });
}

From source file:org.redisson.command.CommandAsyncService.java

License:Apache License

@Override
public <T, R> RFuture<Collection<R>> readAllAsync(RedisCommand<T> command, Object... params) {
    final RPromise<Collection<R>> mainPromise = connectionManager.newPromise();
    final Set<MasterSlaveEntry> nodes = connectionManager.getEntrySet();
    final List<R> results = new ArrayList<R>();
    final AtomicInteger counter = new AtomicInteger(nodes.size());
    FutureListener<R> listener = new FutureListener<R>() {
        @Override
        public void operationComplete(Future<R> future) throws Exception {
            if (!future.isSuccess()) {
                mainPromise.tryFailure(future.cause());
                return;
            }

            R result = future.getNow();
            if (result instanceof Collection) {
                synchronized (results) {
                    results.addAll((Collection) result);
                }
            } else {
                synchronized (results) {
                    results.add(result);
                }
            }

            if (counter.decrementAndGet() == 0 && !mainPromise.isDone()) {
                mainPromise.trySuccess(results);
            }
        }
    };

    for (MasterSlaveEntry entry : nodes) {
        RPromise<R> promise = connectionManager.newPromise();
        promise.addListener(listener);
        async(true, new NodeSource(entry), connectionManager.getCodec(), command, params, promise, 0);
    }
    return mainPromise;
}

From source file:org.redisson.command.CommandAsyncService.java

License:Apache License

private <R, T> void retryReadRandomAsync(final RedisCommand<T> command, final RPromise<R> mainPromise,
        final List<MasterSlaveEntry> nodes, final Object... params) {
    final RPromise<R> attemptPromise = connectionManager.newPromise();
    attemptPromise.addListener(new FutureListener<R>() {
        @Override
        public void operationComplete(Future<R> future) throws Exception {
            if (future.isSuccess()) {
                if (future.getNow() == null) {
                    if (nodes.isEmpty()) {
                        mainPromise.trySuccess(null);
                    } else {
                        retryReadRandomAsync(command, mainPromise, nodes, params);
                    }
                } else {
                    mainPromise.trySuccess(future.getNow());
                }
            } else {
                mainPromise.tryFailure(future.cause());
            }
        }
    });

    MasterSlaveEntry entry = nodes.remove(0);
    async(true, new NodeSource(entry), connectionManager.getCodec(), command, params, attemptPromise, 0);
}