Example usage for java.util Optional ifPresent

List of usage examples for java.util Optional ifPresent

Introduction

On this page you can find example usage of java.util Optional ifPresent.

Prototype

public void ifPresent(Consumer<? super T> action) 

Document

If a value is present, performs the given action with the value, otherwise does nothing.
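
A minimal, self-contained sketch of this behavior (the values and class name below are illustrative assumptions, not taken from the projects listed under Usage):

import java.util.Optional;

public class IfPresentSketch {
    public static void main(String[] args) {
        Optional<String> present = Optional.of("value");
        Optional<String> empty = Optional.empty();

        // The Consumer runs only when a value is present...
        present.ifPresent(v -> System.out.println("got: " + v));
        // ...while an empty Optional is silently skipped.
        empty.ifPresent(v -> System.out.println("never printed: " + v));

        // A method reference also works as the Consumer action.
        present.ifPresent(System.out::println);
    }
}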

Usage

From source file: org.apache.hadoop.hbase.util.compaction.MajorCompactor.java

private void addNewRegions() {
    try {
        List<HRegionLocation> locations = connection.getRegionLocator(tableName).getAllRegionLocations();
        for (HRegionLocation location : locations) {
            if (location.getRegion().getRegionId() > timestamp) {
                Optional<MajorCompactionRequest> compactionRequest = MajorCompactionRequest.newRequest(
                        connection.getConfiguration(), location.getRegion(), storesToCompact, timestamp);
                compactionRequest.ifPresent(request -> clusterCompactionQueues
                        .addToCompactionQueue(location.getServerName(), request));
            }
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file: org.apache.james.backends.cassandra.init.configuration.ClusterConfiguration.java

private static Optional<PoolingOptions> readPoolingOptions(Configuration configuration) {
    Optional<Integer> maxConnections = Optional
            .ofNullable(configuration.getInteger("cassandra.pooling.local.max.connections", null));
    Optional<Integer> maxRequests = Optional
            .ofNullable(configuration.getInteger("cassandra.pooling.local.max.requests", null));
    Optional<Integer> poolingTimeout = Optional
            .ofNullable(configuration.getInteger("cassandra.pooling.timeout", null));
    Optional<Integer> heartbeatTimeout = Optional
            .ofNullable(configuration.getInteger("cassandra.pooling.heartbeat.timeout", null));
    Optional<Integer> maxQueueSize = Optional
            .ofNullable(configuration.getInteger("cassandra.pooling.max.queue.size", null));

    if (!maxConnections.isPresent() && !maxRequests.isPresent() && !poolingTimeout.isPresent()
            && !heartbeatTimeout.isPresent() && !maxQueueSize.isPresent()) {
        return Optional.empty();
    }
    PoolingOptions result = new PoolingOptions();

    maxConnections.ifPresent(value -> {
        result.setMaxConnectionsPerHost(HostDistance.LOCAL, value);
        result.setMaxConnectionsPerHost(HostDistance.REMOTE, value);
    });
    maxRequests.ifPresent(value -> {
        result.setMaxRequestsPerConnection(HostDistance.LOCAL, value);
        result.setMaxRequestsPerConnection(HostDistance.REMOTE, value);
    });
    poolingTimeout.ifPresent(result::setPoolTimeoutMillis);
    heartbeatTimeout.ifPresent(result::setHeartbeatIntervalSeconds);
    maxQueueSize.ifPresent(result::setMaxQueueSize);

    return Optional.of(result);
}

From source file: org.apache.james.backends.cassandra.init.configuration.QueryLoggerConfiguration.java

public static QueryLoggerConfiguration from(Configuration configuration) {
    QueryLoggerConfiguration.Builder builder = QueryLoggerConfiguration.builder();

    Optional<Long> constantThreshold = getOptionalIntegerFromConf(configuration,
            "cassandra.query.logger.constant.threshold").map(Long::valueOf);

    constantThreshold.ifPresent(builder::withConstantThreshold);

    getOptionalIntegerFromConf(configuration, "cassandra.query.logger.max.logged.parameters")
            .ifPresent(builder::withMaxLoggedParameters);

    getOptionalIntegerFromConf(configuration, "cassandra.query.logger.max.query.string.length")
            .ifPresent(builder::withMaxQueryStringLength);

    getOptionalIntegerFromConf(configuration, "cassandra.query.logger.max.parameter.value.length")
            .ifPresent(builder::withMaxParameterValueLength);

    Optional<Double> percentileLatencyConf = getOptionalDoubleFromConf(configuration,
            "cassandra.query.slow.query.latency.threshold.percentile");

    if (!percentileLatencyConf.isPresent() && !constantThreshold.isPresent()) {
        percentileLatencyConf = Optional.of(QueryLogger.DEFAULT_SLOW_QUERY_THRESHOLD_PERCENTILE);
    }

    percentileLatencyConf.ifPresent(slowQueryLatencyThresholdPercentile -> {
        PerHostPercentileTracker tracker = PerHostPercentileTracker
                .builder(CASSANDRA_HIGHEST_TRACKABLE_LATENCY_MILLIS).build();

        builder.withDynamicThreshold(tracker, slowQueryLatencyThresholdPercentile);
    });

    return builder.build();
}

From source file: org.apache.james.jmap.DownloadServlet.java

private void addContentDispositionHeader(Optional<String> optionalName, HttpServletResponse resp) {
    optionalName.ifPresent(name -> addContentDispositionHeaderRegardingEncoding(name, resp));
}

From source file: org.apache.james.server.core.MailImpl.java

public MailImpl(String name, Optional<MailAddress> sender, Collection<MailAddress> recipients) {
    this();
    setName(name);
    sender.ifPresent(this::setSender);

    // Copy the recipient list
    if (recipients != null) {
        setRecipients(recipients);
    }
}

From source file: org.apache.james.sieve.jpa.JPASieveRepository.java

private void switchOffActiveScript(User user, EntityManager entityManager) throws StorageException {
    Optional<JPASieveScript> activeSieveScript = findActiveSieveScript(user, entityManager);
    activeSieveScript.ifPresent(JPASieveScript::deactivate);
}

From source file: org.apache.james.sieve.jpa.JPASieveRepository.java

private void removeQuotaForUser(String username) throws StorageException {
    transactionRunner.runAndHandleException(Throwing.consumer(entityManager -> {
        Optional<JPASieveQuota> quotaForUser = findQuotaForUser(username, entityManager);
        quotaForUser.ifPresent(entityManager::remove);
    }), throwStorageException("Unable to remove quota for user " + username));
}

From source file: org.apache.metron.profiler.DefaultMessageDistributor.java

/**
 * Flush all of the profiles maintained in a cache.
 *
 * @param cache The cache to flush.
 * @return The measurements captured when flushing the profiles.
 */
private List<ProfileMeasurement> flushCache(Cache<Integer, ProfileBuilder> cache) {

    List<ProfileMeasurement> measurements = new ArrayList<>();
    for (ProfileBuilder profileBuilder : cache.asMap().values()) {

        // only need to flush if the profile has been initialized
        if (profileBuilder.isInitialized()) {

            // flush the profiler and save the measurement, if one exists
            Optional<ProfileMeasurement> measurement = profileBuilder.flush();
            measurement.ifPresent(m -> measurements.add(m));
        }
    }

    return measurements;
}

From source file: org.apache.pulsar.compaction.TwoPhaseCompactor.java

private void phaseTwoLoop(RawReader reader, MessageId to, Map<String, MessageId> latestForKey, LedgerHandle lh,
        Semaphore outstanding, CompletableFuture<Void> promise) {
    reader.readNextAsync().whenCompleteAsync((m, exception) -> {
        if (exception != null) {
            promise.completeExceptionally(exception);
            return;
        } else if (promise.isDone()) {
            return;
        }
        MessageId id = m.getMessageId();
        Optional<RawMessage> messageToAdd = Optional.empty();
        if (RawBatchConverter.isReadableBatch(m)) {
            try {
                messageToAdd = RawBatchConverter.rebatchMessage(m,
                        (key, subid) -> latestForKey.get(key).equals(subid));
            } catch (IOException ioe) {
                log.info("Error decoding batch for message {}. Whole batch will be included in output", id,
                        ioe);
                messageToAdd = Optional.of(m);
            }
        } else {
            Pair<String, Integer> keyAndSize = extractKeyAndSize(m);
            MessageId msg;
            if (keyAndSize == null) { // pass through messages without a key
                messageToAdd = Optional.of(m);
            } else if ((msg = latestForKey.get(keyAndSize.getLeft())) != null && msg.equals(id)) { // only consider the message if it is present in the latestForKey map
                if (keyAndSize.getRight() <= 0) {
                    promise.completeExceptionally(new IllegalArgumentException(
                            "Compaction phase found empty record from sorted key-map"));
                }
                messageToAdd = Optional.of(m);
            } else {
                m.close();
                // Reached the last id, and phase one found this message deleted while iterating over the
                // ledger, so it is not present in latestForKey. Complete the compaction.
                if (to.equals(id)) {
                    promise.complete(null);
                }
            }
        }

        messageToAdd.ifPresent((toAdd) -> {
            try {
                outstanding.acquire();
                CompletableFuture<Void> addFuture = addToCompactedLedger(lh, toAdd)
                        .whenComplete((res, exception2) -> {
                            outstanding.release();
                            if (exception2 != null) {
                                promise.completeExceptionally(exception2);
                            }
                        });
                if (to.equals(id)) {
                    addFuture.whenComplete((res, exception2) -> {
                        if (exception2 == null) {
                            promise.complete(null);
                        }
                    });
                }
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                promise.completeExceptionally(ie);
            }
        });
        phaseTwoLoop(reader, to, latestForKey, lh, outstanding, promise);
    }, scheduler);
}

From source file: org.apache.tinkerpop.gremlin.structure.io.gryo.GryoPool.java

/**
 * Create a pool of readers, writers, or both of the specified size with an optional {@link IoRegistry} object
 * which would allow custom serializers to be registered to the pool.
 *
 * @param poolSize initial size of the pool.
 * @param type the type of pool.
 * @param ioRegistry the registry to assign to each {@link GryoReader} and {@link GryoWriter} instance.
 */
public GryoPool(final int poolSize, final Type type, final Optional<IoRegistry> ioRegistry) {
    final GryoMapper.Builder mapperBuilder = GryoMapper.build();
    ioRegistry.ifPresent(mapperBuilder::addRegistry);

    // should be able to re-use the GryoMapper - it creates fresh kryo instances from its createMapper method
    mapper = mapperBuilder.create();
    if (type.equals(Type.READER) || type.equals(Type.READER_WRITER)) {
        this.gryoReaders = new LinkedBlockingQueue<>(poolSize);
        for (int i = 0; i < poolSize; i++) {
            this.gryoReaders.add(GryoReader.build().mapper(mapper).create());
        }
    }
    if (type.equals(Type.WRITER) || type.equals(Type.READER_WRITER)) {
        this.gryoWriters = new LinkedBlockingQueue<>(poolSize);
        for (int i = 0; i < poolSize; i++) {
            this.gryoWriters.add(GryoWriter.build().mapper(mapper).create());
        }
    }
}