Example usage for java.util Map computeIfAbsent

List of usage examples for java.util Map computeIfAbsent

Introduction

This page collects usage examples for java.util Map computeIfAbsent.

Prototype

default V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) 

Document

If the specified key is not already associated with a value (or is mapped to null), attempts to compute its value using the given mapping function and enters it into this map, unless the computed value is null.
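
As a quick, self-contained illustration of that contract (plain JDK only; the class and variable names below are made up for this example), the following snippet uses computeIfAbsent to group words by their first letter. The list for a given letter is created once, the first time that key is seen, and the same list is returned on every later call so the caller can append to it directly.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ComputeIfAbsentDemo {
    public static void main(String[] args) {
        Map<Character, List<String>> byFirstLetter = new HashMap<>();
        for (String word : Arrays.asList("apple", "avocado", "banana", "cherry")) {
            // The mapping function runs only when the key is absent (or mapped to null);
            // the newly created list is stored in the map and returned, so we can add to it.
            byFirstLetter.computeIfAbsent(word.charAt(0), k -> new ArrayList<>()).add(word);
        }
        // Prints something like {a=[apple, avocado], b=[banana], c=[cherry]} (HashMap order is unspecified)
        System.out.println(byFirstLetter);
    }
}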

Usage

From source file:at.gridtec.lambda4j.operator.binary.IntBinaryOperator2.java

/**
 * Returns a memoized (caching) version of this {@link IntBinaryOperator2}. Whenever it is called, the mapping
 * between the input parameters and the return value is preserved in a cache, making subsequent calls return the
 * memoized value instead of computing the return value again.
 * <p>
 * Unless the operator, and therefore the cache it uses, is garbage-collected, it will keep all memoized values
 * forever.
 *
 * @return A memoized (caching) version of this {@code IntBinaryOperator2}.
 * @implSpec This implementation does not allow the input parameters or return value to be {@code null} for the
 * resulting memoized operator, as the cache used internally does not permit {@code null} keys or values.
 * @implNote The returned memoized operator can be safely used concurrently from multiple threads which makes it
 * thread-safe.
 */
@Nonnull
default IntBinaryOperator2 memoized() {
    if (isMemoized()) {
        return this;
    } else {
        final Map<Pair<Integer, Integer>, Integer> cache = new ConcurrentHashMap<>();
        final Object lock = new Object();
        return (IntBinaryOperator2 & Memoized) (value1, value2) -> {
            final int returnValue;
            synchronized (lock) {
                returnValue = cache.computeIfAbsent(Pair.of(value1, value2),
                        key -> applyAsInt(key.getLeft(), key.getRight()));
            }
            return returnValue;
        };
    }
}
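
All of the lambda4j snippets on this page follow the same recipe: the two inputs are boxed into an Apache Commons Lang Pair, and computeIfAbsent on a ConcurrentHashMap either returns the cached result or invokes the wrapped operation once per distinct key (ConcurrentHashMap rejects null keys and values, which is why the memoized variants disallow null, as the @implSpec notes). A minimal sketch of the same idea using only JDK types is shown below; the Memoizer class is hypothetical, a record stands in for Pair (so it needs Java 16 or newer), and a plain BiFunction replaces the lambda4j interfaces.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.BiFunction;

public final class Memoizer {

    // Simple immutable composite key; the record's equals/hashCode make it usable as a map key.
    private record Key<A, B>(A left, B right) { }

    // Wraps a BiFunction so each distinct pair of arguments is computed at most once.
    // ConcurrentHashMap.computeIfAbsent is atomic per key, so the mapping function is not
    // invoked twice for the same key even under concurrent calls; the lambda4j code adds an
    // extra synchronized block on top of this.
    public static <A, B, R> BiFunction<A, B, R> memoize(BiFunction<A, B, R> fn) {
        Map<Key<A, B>, R> cache = new ConcurrentHashMap<>();
        return (a, b) -> cache.computeIfAbsent(new Key<>(a, b),
                key -> fn.apply(key.left(), key.right()));
    }

    public static void main(String[] args) {
        BiFunction<Integer, Integer, Integer> slowAdd = (x, y) -> {
            System.out.println("computing " + x + " + " + y);
            return x + y;
        };
        BiFunction<Integer, Integer, Integer> cachedAdd = memoize(slowAdd);
        System.out.println(cachedAdd.apply(2, 3)); // computes: prints "computing 2 + 3", then 5
        System.out.println(cachedAdd.apply(2, 3)); // cache hit: prints only 5
    }
}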

From source file:at.gridtec.lambda4j.function.bi.to.ThrowableToDoubleBiFunction.java

/**
 * Returns a memoized (caching) version of this {@link ThrowableToDoubleBiFunction}. Whenever it is called, the
 * mapping between the input parameters and the return value is preserved in a cache, making subsequent calls
 * return the memoized value instead of computing the return value again.
 * <p>
 * Unless the function, and therefore the cache it uses, is garbage-collected, it will keep all memoized values
 * forever.
 *
 * @return A memoized (caching) version of this {@code ThrowableToDoubleBiFunction}.
 * @implSpec This implementation does not allow the input parameters or return value to be {@code null} for the
 * resulting memoized function, as the cache used internally does not permit {@code null} keys or values.
 * @implNote The returned memoized function can be safely used concurrently from multiple threads which makes it
 * thread-safe.
 */
@Nonnull
default ThrowableToDoubleBiFunction<T, U, X> memoized() {
    if (isMemoized()) {
        return this;
    } else {
        final Map<Pair<T, U>, Double> cache = new ConcurrentHashMap<>();
        final Object lock = new Object();
        return (ThrowableToDoubleBiFunction<T, U, X> & Memoized) (t, u) -> {
            final double returnValue;
            synchronized (lock) {
                returnValue = cache.computeIfAbsent(Pair.of(t, u),
                        ThrowableFunction.of(key -> applyAsDoubleThrows(key.getLeft(), key.getRight())));
            }
            return returnValue;
        };
    }
}

From source file:com.thinkbiganalytics.metadata.jobrepo.nifi.provenance.NifiStatsJmsReceiver.java

/**
 * Save the running totals for the feed
 */
private Map<String, JpaNifiFeedStats> saveFeedStats(AggregatedFeedProcessorStatisticsHolderV2 holder,
        List<NifiFeedProcessorStats> summaryStats) {
    Map<String, JpaNifiFeedStats> feedStatsMap = new HashMap<>();

    if (summaryStats != null) {
        Map<String, Long> feedLatestTimestamp = summaryStats.stream().collect(Collectors.toMap(
                NifiFeedProcessorStats::getFeedName, stats -> stats.getMinEventTime().getMillis(), Long::max));
        feedLatestTimestamp.entrySet().stream().forEach(e -> {
            String feedName = e.getKey();
            Long timestamp = e.getValue();
            JpaNifiFeedStats stats = feedStatsMap.computeIfAbsent(feedName,
                    name -> new JpaNifiFeedStats(feedName));
            stats.setLastActivityTimestamp(timestamp);
        });
    }
    if (holder.getProcessorIdRunningFlows() != null) {
        holder.getProcessorIdRunningFlows().entrySet().stream().forEach(e -> {
            String feedProcessorId = e.getKey();
            Long runningCount = e.getValue();
            String feedName = provenanceEventFeedUtil.getFeedName(feedProcessorId); //ensure not null
            if (StringUtils.isNotBlank(feedName)) {
                JpaNifiFeedStats stats = feedStatsMap.computeIfAbsent(feedName,
                        name -> new JpaNifiFeedStats(feedName));
                OpsManagerFeed opsManagerFeed = provenanceEventFeedUtil.getFeed(feedName);
                if (opsManagerFeed != null) {
                    stats.setFeedId(new JpaNifiFeedStats.OpsManagerFeedId(opsManagerFeed.getId().toString()));
                }
                stats.addRunningFeedFlows(runningCount);
                stats.setTime(DateTime.now().getMillis());
            }
        });
    }
    //group stats to save together by feed name
    if (!feedStatsMap.isEmpty()) {
        nifiFeedStatisticsProvider.saveLatestFeedStats(new ArrayList<>(feedStatsMap.values()));
    }
    return feedStatsMap;
}
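
The saveFeedStats method above shows a second common computeIfAbsent idiom: a get-or-create lookup for a mutable per-key accumulator that several passes then update. Below is a stripped-down sketch of that idiom; FeedStats is a hypothetical stand-in for JpaNifiFeedStats and the feed names are invented.

import java.util.HashMap;
import java.util.Map;

public class FeedStatsDemo {

    // Hypothetical accumulator, loosely modeled on JpaNifiFeedStats.
    static final class FeedStats {
        final String feedName;
        long runningFlows;
        long lastActivityTimestamp;
        FeedStats(String feedName) { this.feedName = feedName; }
    }

    public static void main(String[] args) {
        Map<String, FeedStats> statsByFeed = new HashMap<>();

        // First pass: record the latest activity timestamp per feed. The constructor
        // reference acts as the mapping function and receives the feed name as the key.
        statsByFeed.computeIfAbsent("orders", FeedStats::new).lastActivityTimestamp = 1_700_000_000_000L;

        // Second pass: add running-flow counts. "orders" is already present, so
        // computeIfAbsent simply returns the existing FeedStats instance.
        statsByFeed.computeIfAbsent("orders", FeedStats::new).runningFlows += 3;
        statsByFeed.computeIfAbsent("payments", FeedStats::new).runningFlows += 1;

        statsByFeed.forEach((name, stats) -> System.out.println(
                name + ": flows=" + stats.runningFlows + ", lastActivity=" + stats.lastActivityTimestamp));
    }
}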

From source file:at.gridtec.lambda4j.function.bi.ThrowableBiDoubleFunction.java

/**
 * Returns a memoized (caching) version of this {@link ThrowableBiDoubleFunction}. Whenever it is called, the
 * mapping between the input parameters and the return value is preserved in a cache, making subsequent calls
 * return the memoized value instead of computing the return value again.
 * <p>
 * Unless the function, and therefore the cache it uses, is garbage-collected, it will keep all memoized values
 * forever.
 *
 * @return A memoized (caching) version of this {@code ThrowableBiDoubleFunction}.
 * @implSpec This implementation does not allow the input parameters or return value to be {@code null} for the
 * resulting memoized function, as the cache used internally does not permit {@code null} keys or values.
 * @implNote The returned memoized function can be safely used concurrently from multiple threads which makes it
 * thread-safe.
 */
@Nonnull
default ThrowableBiDoubleFunction<R, X> memoized() {
    if (isMemoized()) {
        return this;
    } else {
        final Map<Pair<Double, Double>, R> cache = new ConcurrentHashMap<>();
        final Object lock = new Object();
        return (ThrowableBiDoubleFunction<R, X> & Memoized) (value1, value2) -> {
            final R returnValue;
            synchronized (lock) {
                returnValue = cache.computeIfAbsent(Pair.of(value1, value2),
                        ThrowableFunction.of(key -> applyThrows(key.getLeft(), key.getRight())));
            }
            return returnValue;
        };
    }
}

From source file:at.gridtec.lambda4j.operator.binary.CharBinaryOperator.java

/**
 * Returns a memoized (caching) version of this {@link CharBinaryOperator}. Whenever it is called, the mapping
 * between the input parameters and the return value is preserved in a cache, making subsequent calls return the
 * memoized value instead of computing the return value again.
 * <p>
 * Unless the operator, and therefore the cache it uses, is garbage-collected, it will keep all memoized values
 * forever.
 *
 * @return A memoized (caching) version of this {@code CharBinaryOperator}.
 * @implSpec This implementation does not allow the input parameters or return value to be {@code null} for the
 * resulting memoized operator, as the cache used internally does not permit {@code null} keys or values.
 * @implNote The returned memoized operator can be safely used concurrently from multiple threads which makes it
 * thread-safe.
 */
@Nonnull
default CharBinaryOperator memoized() {
    if (isMemoized()) {
        return this;
    } else {
        final Map<Pair<Character, Character>, Character> cache = new ConcurrentHashMap<>();
        final Object lock = new Object();
        return (CharBinaryOperator & Memoized) (value1, value2) -> {
            final char returnValue;
            synchronized (lock) {
                returnValue = cache.computeIfAbsent(Pair.of(value1, value2),
                        key -> applyAsChar(key.getLeft(), key.getRight()));
            }
            return returnValue;
        };
    }
}

From source file:at.gridtec.lambda4j.operator.binary.DoubleBinaryOperator2.java

/**
 * Returns a memoized (caching) version of this {@link DoubleBinaryOperator2}. Whenever it is called, the mapping
 * between the input parameters and the return value is preserved in a cache, making subsequent calls return the
 * memoized value instead of computing the return value again.
 * <p>
 * Unless the operator, and therefore the cache it uses, is garbage-collected, it will keep all memoized values
 * forever.
 *
 * @return A memoized (caching) version of this {@code DoubleBinaryOperator2}.
 * @implSpec This implementation does not allow the input parameters or return value to be {@code null} for the
 * resulting memoized operator, as the cache used internally does not permit {@code null} keys or values.
 * @implNote The returned memoized operator can be safely used concurrently from multiple threads which makes it
 * thread-safe.
 */
@Nonnull
default DoubleBinaryOperator2 memoized() {
    if (isMemoized()) {
        return this;
    } else {
        final Map<Pair<Double, Double>, Double> cache = new ConcurrentHashMap<>();
        final Object lock = new Object();
        return (DoubleBinaryOperator2 & Memoized) (value1, value2) -> {
            final double returnValue;
            synchronized (lock) {
                returnValue = cache.computeIfAbsent(Pair.of(value1, value2),
                        key -> applyAsDouble(key.getLeft(), key.getRight()));
            }
            return returnValue;
        };
    }
}

From source file:at.gridtec.lambda4j.operator.binary.BooleanBinaryOperator.java

/**
 * Returns a memoized (caching) version of this {@link BooleanBinaryOperator}. Whenever it is called, the mapping
 * between the input parameters and the return value is preserved in a cache, making subsequent calls return the
 * memoized value instead of computing the return value again.
 * <p>
 * Unless the operator, and therefore the cache it uses, is garbage-collected, it will keep all memoized values
 * forever.
 *
 * @return A memoized (caching) version of this {@code BooleanBinaryOperator}.
 * @implSpec This implementation does not allow the input parameters or return value to be {@code null} for the
 * resulting memoized operator, as the cache used internally does not permit {@code null} keys or values.
 * @implNote The returned memoized operator can be safely used concurrently from multiple threads which makes it
 * thread-safe.
 */
@Nonnull
default BooleanBinaryOperator memoized() {
    if (isMemoized()) {
        return this;
    } else {
        final Map<Pair<Boolean, Boolean>, Boolean> cache = new ConcurrentHashMap<>();
        final Object lock = new Object();
        return (BooleanBinaryOperator & Memoized) (value1, value2) -> {
            final boolean returnValue;
            synchronized (lock) {
                returnValue = cache.computeIfAbsent(Pair.of(value1, value2),
                        key -> applyAsBoolean(key.getLeft(), key.getRight()));
            }
            return returnValue;
        };
    }
}

From source file:at.gridtec.lambda4j.function.bi.ThrowableBiCharFunction.java

/**
 * Returns a memoized (caching) version of this {@link ThrowableBiCharFunction}. Whenever it is called, the mapping
 * between the input parameters and the return value is preserved in a cache, making subsequent calls return the
 * memoized value instead of computing the return value again.
 * <p>
 * Unless the function, and therefore the cache it uses, is garbage-collected, it will keep all memoized values
 * forever.
 *
 * @return A memoized (caching) version of this {@code ThrowableBiCharFunction}.
 * @implSpec This implementation does not allow the input parameters or return value to be {@code null} for the
 * resulting memoized function, as the cache used internally does not permit {@code null} keys or values.
 * @implNote The returned memoized function can be safely used concurrently from multiple threads which makes it
 * thread-safe.
 */
@Nonnull
default ThrowableBiCharFunction<R, X> memoized() {
    if (isMemoized()) {
        return this;
    } else {
        final Map<Pair<Character, Character>, R> cache = new ConcurrentHashMap<>();
        final Object lock = new Object();
        return (ThrowableBiCharFunction<R, X> & Memoized) (value1, value2) -> {
            final R returnValue;
            synchronized (lock) {
                returnValue = cache.computeIfAbsent(Pair.of(value1, value2),
                        ThrowableFunction.of(key -> applyThrows(key.getLeft(), key.getRight())));
            }
            return returnValue;
        };
    }
}

From source file:at.gridtec.lambda4j.function.bi.obj.ThrowableObjByteFunction.java

/**
 * Returns a memoized (caching) version of this {@link ThrowableObjByteFunction}. Whenever it is called, the mapping
 * between the input parameters and the return value is preserved in a cache, making subsequent calls return the
 * memoized value instead of computing the return value again.
 * <p>
 * Unless the function, and therefore the cache it uses, is garbage-collected, it will keep all memoized values
 * forever.
 *
 * @return A memoized (caching) version of this {@code ThrowableObjByteFunction}.
 * @implSpec This implementation does not allow the input parameters or return value to be {@code null} for the
 * resulting memoized function, as the cache used internally does not permit {@code null} keys or values.
 * @implNote The returned memoized function can be safely used concurrently from multiple threads which makes it
 * thread-safe.
 */
@Nonnull
default ThrowableObjByteFunction<T, R, X> memoized() {
    if (isMemoized()) {
        return this;
    } else {
        final Map<Pair<T, Byte>, R> cache = new ConcurrentHashMap<>();
        final Object lock = new Object();
        return (ThrowableObjByteFunction<T, R, X> & Memoized) (t, value) -> {
            final R returnValue;
            synchronized (lock) {
                returnValue = cache.computeIfAbsent(Pair.of(t, value),
                        ThrowableFunction.of(key -> applyThrows(key.getLeft(), key.getRight())));
            }
            return returnValue;
        };
    }
}

From source file:at.gridtec.lambda4j.function.bi.obj.ThrowableObjLongFunction.java

/**
 * Returns a memoized (caching) version of this {@link ThrowableObjLongFunction}. Whenever it is called, the mapping
 * between the input parameters and the return value is preserved in a cache, making subsequent calls return the
 * memoized value instead of computing the return value again.
 * <p>
 * Unless the function, and therefore the cache it uses, is garbage-collected, it will keep all memoized values
 * forever.
 *
 * @return A memoized (caching) version of this {@code ThrowableObjLongFunction}.
 * @implSpec This implementation does not allow the input parameters or return value to be {@code null} for the
 * resulting memoized function, as the cache used internally does not permit {@code null} keys or values.
 * @implNote The returned memoized function can be safely used concurrently from multiple threads which makes it
 * thread-safe.
 */
@Nonnull
default ThrowableObjLongFunction<T, R, X> memoized() {
    if (isMemoized()) {
        return this;
    } else {
        final Map<Pair<T, Long>, R> cache = new ConcurrentHashMap<>();
        final Object lock = new Object();
        return (ThrowableObjLongFunction<T, R, X> & Memoized) (t, value) -> {
            final R returnValue;
            synchronized (lock) {
                returnValue = cache.computeIfAbsent(Pair.of(t, value),
                        ThrowableFunction.of(key -> applyThrows(key.getLeft(), key.getRight())));
            }
            return returnValue;
        };
    }
}