Example usage for java.util.Map.merge

List of usage examples for java.util.Map.merge

Introduction

On this page you can find usage examples for java.util.Map.merge.

Prototype

default V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) 

Document

If the specified key is not already associated with a value or is associated with null, associates it with the given non-null value. Otherwise, replaces the associated value with the result of the given remapping function, or removes the entry if the result is null.
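
To summarize, here is a minimal sketch of the three behaviors (the counts map and key "a" are made up for illustration): merge inserts when the key is absent, applies the remapping function when it is present, and removes the entry when that function returns null.

Map<String, Integer> counts = new HashMap<>();
counts.merge("a", 1, Integer::sum);              // key absent: associates "a" with 1
counts.merge("a", 1, Integer::sum);              // key present: remaps to 1 + 1 = 2
counts.merge("a", 1, (old, given) -> null);      // remapping returns null: entry is removed
System.out.println(counts.containsKey("a"));     // false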

Usage

From source file:Main.java

public static void main(String[] args) {
    Map<Integer, String> map = new HashMap<>();

    for (int i = 0; i < 10; i++) {
        map.putIfAbsent(i, "val" + i);
    }

    map.forEach((id, val) -> System.out.println(val));

    // key 9 is already present, so merge applies the remapping function to the old and new values
    map.merge(9, "val9", (value, newValue) -> value.concat(newValue));
    System.out.println(map.get(9)); // val9val9

    map.merge(9, "concat", (value, newValue) -> value.concat(newValue));
    System.out.println(map.get(9)); // val9val9concat
}

From source file:com.querydsl.webhooks.GithubReviewWindow.java

private static void replaceCompletionTask(Map<String, ScheduledFuture<?>> tasks,
        ScheduledFuture<?> completionTask, Ref head) {

    boolean interrupt = false;
    tasks.merge(head.getSha(), completionTask, (oldTask, newTask) -> {
        oldTask.cancel(interrupt);
        return newTask;
    });
}

From source file:org.apache.fluo.recipes.core.combine.it.CombineQueueTreeIT.java

private static Map<String, Long> merge(Map<String, Long> m1, Map<String, Long> m2) {
    Map<String, Long> ret = new HashMap<>(m1);
    m2.forEach((k, v) -> ret.merge(k, v, Long::sum));
    return ret;
}
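
This is the standard idiom for combining two numeric maps: merge with Long::sum inserts the value for keys present in only one map and adds the values together for keys present in both.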

From source file:org.apache.fluo.recipes.core.combine.it.CombineQueueTreeIT.java

private static Map<String, Long> rollup(Map<String, Long> m, String rollupFields) {
    boolean useX = rollupFields.contains("x");
    boolean useY = rollupFields.contains("y");
    boolean useTime = rollupFields.contains("t");

    Map<String, Long> ret = new HashMap<>();
    m.forEach((k, v) -> {
        // input keys have the form "x:y:t"; rebuild the key from the requested fields only
        String[] fields = k.split(":");
        String nk = (useX ? fields[0] : "") + (useY ? ((useX ? ":" : "") + fields[1]) : "")
                + (useTime ? ((useX || useY ? ":" : "") + fields[2]) : "");

        ret.merge(nk, v, Long::sum);
    });
    return ret;
}

From source file:com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter.java

public static CacheData mergeCacheData(CacheData current, CacheData added) {
    String id = current.getId();
    Map<String, Object> attributes = new HashMap<>();
    attributes.putAll(added.getAttributes());
    attributes.putAll(current.getAttributes()); // on conflicts, current's attributes win
    // Behavior is: if no ttl is set on either, the merged key won't expire
    int ttl = Math.min(current.getTtlSeconds(), added.getTtlSeconds());

    Map<String, Collection<String>> relationships = new HashMap<>();
    relationships.putAll(current.getRelationships());
    added.getRelationships().entrySet()
            .forEach(entry -> relationships.merge(entry.getKey(), entry.getValue(), (a, b) -> {
                Set<String> result = new HashSet<>();
                result.addAll(a);
                result.addAll(b);
                return result;
            }));

    return new DefaultCacheData(id, ttl, attributes, relationships);
}

From source file:com.netflix.spinnaker.halyard.core.problem.v1.ProblemSet.java

public Map<String, List<Problem>> groupByLocation() {
    Map<String, List<Problem>> result = new HashMap<>();
    for (Problem problem : problems) {
        result.merge(problem.getLocation(), new ArrayList<Problem>() {
            {
                add(problem);
            }
        }, (List a, List b) -> {
            a.addAll(b);
            return a;
        });
    }

    return result;
}
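
The anonymous ArrayList subclass works, but the same grouping is more commonly written as result.computeIfAbsent(problem.getLocation(), k -> new ArrayList<>()).add(problem).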

From source file:com.ejisto.util.collector.MockedFieldCollector.java

@Override
public BinaryOperator<Map<String, List<MockedField>>> combiner() {
    return (m1, m2) -> {
        Map<String, List<MockedField>> result = new TreeMap<>();
        result.putAll(m1);
        m2.entrySet().stream().forEach(e -> result.merge(e.getKey(), e.getValue(), ListUtils::union));
        return result;
    };
}

From source file:io.github.alechenninger.monarch.Main.java

private Map<String, Map<String, Object>> readDataForHierarchy(Path dataDir, Hierarchy hierarchy) {
    Map<String, Map<String, Object>> data = new HashMap<>();
    Map<String, List<String>> sourcesByExtension = new HashMap<>();

    for (String source : hierarchy.descendants()) {
        sourcesByExtension.merge(MonarchParsers.getExtensionForFileName(source), asGrowableList(source),
                (l1, l2) -> {
                    l1.addAll(l2);
                    return l1;
                });
    }

    for (Map.Entry<String, List<String>> extensionSources : sourcesByExtension.entrySet()) {
        String extension = extensionSources.getKey();
        List<String> sources = extensionSources.getValue();
        Map<String, Map<String, Object>> dataForExtension = parsers.forExtension(extension).readData(sources,
                dataDir);
        data.putAll(dataForExtension);
    }

    return data;
}

From source file:com.coveo.spillway.storage.AsyncLimitUsageStorage.java

public void sendAndCacheRequests(Collection<AddAndGetRequest> requests) {
    try {
        Map<LimitKey, Integer> responses = wrappedLimitUsageStorage.addAndGet(requests);

        // Flatten all requests into a single list of overrides.
        Map<Pair<LimitKey, Instant>, Integer> rawOverrides = new HashMap<>();
        for (AddAndGetRequest request : requests) {
            LimitKey limitEntry = LimitKey.fromRequest(request);
            Instant expirationDate = request.getBucket().plus(request.getExpiration());

            rawOverrides.merge(Pair.of(limitEntry, expirationDate), responses.get(limitEntry), Integer::sum);
        }
        List<OverrideKeyRequest> overrides = rawOverrides.entrySet().stream().map(
                kvp -> new OverrideKeyRequest(kvp.getKey().getLeft(), kvp.getKey().getRight(), kvp.getValue()))
                .collect(Collectors.toList());
        cache.overrideKeys(overrides);
    } catch (RuntimeException ex) {
        logger.warn("Failed to send and cache requests.", ex);
    }
}

From source file:org.sleuthkit.autopsy.timeline.db.EventDB.java

/**
 * Merge the events in the given list if they are within the same period.
 * The general algorithm is as follows:
 *
 * 1) sort them into a map from (type, description) -> List<EventCluster>
 * 2) for each key in the map, merge the events and accumulate them in a
 * list to return
 *
 * @param timeUnitLength  the period used to decide whether adjacent
 *                        clusters are close enough to merge
 * @param preMergedEvents the clusters to merge
 *
 * @return the merged clusters as EventStripes, sorted by start time
 */
static private List<EventStripe> mergeClustersToStripes(Period timeUnitLength,
        List<EventCluster> preMergedEvents) {

    //effectively map from type to (map from description to events)
    Map<EventType, SetMultimap<String, EventCluster>> typeMap = new HashMap<>();

    for (EventCluster aggregateEvent : preMergedEvents) {
        typeMap.computeIfAbsent(aggregateEvent.getEventType(), eventType -> HashMultimap.create())
                .put(aggregateEvent.getDescription(), aggregateEvent);
    }
    //result list to return
    ArrayList<EventCluster> aggEvents = new ArrayList<>();

    //For each (type, description) key, merge agg events
    for (SetMultimap<String, EventCluster> descrMap : typeMap.values()) {
        //for each description ...
        for (String descr : descrMap.keySet()) {
            //run through the sorted events, merging together adjacent events
            Iterator<EventCluster> iterator = descrMap.get(descr).stream()
                    .sorted(Comparator.comparing(event -> event.getSpan().getStartMillis())).iterator();
            EventCluster current = iterator.next();
            while (iterator.hasNext()) {
                EventCluster next = iterator.next();
                Interval gap = current.getSpan().gap(next.getSpan());

                //if they overlap or the gap is less than one quarter of timeUnitLength
                //TODO: the 1/4 factor is arbitrary. review! -jm
                if (gap == null || gap.toDuration()
                        .getMillis() <= timeUnitLength.toDurationFrom(gap.getStart()).getMillis() / 4) {
                    //merge them
                    current = EventCluster.merge(current, next);
                } else {
                    //done merging into current, set next as new current
                    aggEvents.add(current);
                    current = next;
                }
            }
            aggEvents.add(current);
        }
    }

    //merge clusters to stripes
    Map<ImmutablePair<EventType, String>, EventStripe> stripeDescMap = new HashMap<>();

    for (EventCluster eventCluster : aggEvents) {
        stripeDescMap.merge(ImmutablePair.of(eventCluster.getEventType(), eventCluster.getDescription()),
                new EventStripe(eventCluster), EventStripe::merge);
    }

    return stripeDescMap.values().stream().sorted(Comparator.comparing(EventStripe::getStartMillis))
            .collect(Collectors.toList());
}