Example usage for java.util Map merge

List of usage examples for java.util Map merge

Introduction

On this page you can find usage examples for java.util Map merge.

Prototype

default V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) 

Document

If the specified key is not already associated with a value or is associated with null, associates it with the given non-null value. Otherwise, replaces the associated value with the result of the given remapping function, or removes the mapping if the result is null.
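
The examples under Usage all pass a remapping function. Here is a minimal, self-contained sketch of the behaviour described above; the class and variable names are illustrative and do not come from any of the projects below.

import java.util.HashMap;
import java.util.Map;

public class MergeSketch {
    public static void main(String[] args) {
        Map<String, Integer> counts = new HashMap<>();
        counts.merge("apple", 1, Integer::sum);  // key absent: associates "apple" with 1
        counts.merge("apple", 1, Integer::sum);  // key present: remapping function yields 1 + 1 = 2
        counts.merge("apple", 0, (oldValue, newValue) -> null); // null result: the mapping is removed
        System.out.println(counts);              // prints {}
    }
}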

Usage

From source file:org.deeplearning4j.examples.tictactoe.TicTacToeData.java
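Demonstrates merge with a custom remapping function that keeps the larger of the existing and newly computed reward for each board state.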

/**
 * Function to calculate Temporal Difference. Refer ReadMe.txt for detailed explanation.
 */
private void calculateReward(List<INDArray> arrayList, Map<INDArray, Double> valueMap) {

    double probabilityValue = 0;
    for (int p = (arrayList.size() - 1); p >= 0; p--) {
        if (p == (arrayList.size() - 1)) {
            probabilityValue = 1.0;
        } else {
            probabilityValue = 0.5 + 0.1 * (probabilityValue - 0.5);
        }
        INDArray stateAsINDArray = arrayList.get(p);
        valueMap.merge(stateAsINDArray, probabilityValue,
                (oldValue, newValue) -> oldValue > newValue ? oldValue : newValue);
    }
}

From source file:ddf.catalog.metacard.validation.MetacardValidityMarkerPlugin.java
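Demonstrates merge with Integer::sum to count validation errors and warnings across validators.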

private void getValidationProblems(String validatorName, ValidationException e, Set<Serializable> errors,
        Set<Serializable> warnings, Set<Serializable> errorValidators, Set<Serializable> warningValidators,
        Map<String, Integer> counter) {
    boolean validationErrorsExist = CollectionUtils.isNotEmpty(e.getErrors());
    boolean validationWarningsExist = CollectionUtils.isNotEmpty(e.getWarnings());
    if (validationErrorsExist || validationWarningsExist) {
        if (validationErrorsExist) {
            errors.addAll(e.getErrors());
            errorValidators.add(validatorName);
            counter.merge(Validation.VALIDATION_ERRORS, 1, Integer::sum);
        }
        if (validationWarningsExist) {
            warnings.addAll(e.getWarnings());
            warningValidators.add(validatorName);
            counter.merge(Validation.VALIDATION_WARNINGS, 1, Integer::sum);
        }
    } else {
        LOGGER.debug(
                "Metacard validator {} did not have any warnings or errors but it threw a validation exception."
                        + " There is likely something wrong with your implementation. This will result in the metacard not"
                        + " being properly marked as invalid.",
                validatorName);
    }
}

From source file:org.apache.storm.kafka.spout.KafkaSpoutRetryExponentialBackoff.java
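Demonstrates merge with Math::min to track the earliest retriable offset for each topic partition.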

@Override
public Map<TopicPartition, Long> earliestRetriableOffsets() {
    final Map<TopicPartition, Long> tpToEarliestRetriableOffset = new HashMap<>();
    final long currentTimeNanos = Time.nanoTime();
    for (RetrySchedule retrySchedule : retrySchedules) {
        if (retrySchedule.retry(currentTimeNanos)) {
            final KafkaSpoutMessageId msgId = retrySchedule.msgId;
            final TopicPartition tpForMessage = new TopicPartition(msgId.topic(), msgId.partition());
            tpToEarliestRetriableOffset.merge(tpForMessage, msgId.offset(), Math::min);
        } else {
            break; // Stop searching as soon as passed current time
        }
    }
    LOG.debug("Topic partitions with entries ready to be retried [{}] ", tpToEarliestRetriableOffset);
    return tpToEarliestRetriableOffset;
}

From source file:com.homeadvisor.kafdrop.service.CuratorKafkaMonitor.java
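Demonstrates merge with Math::max to combine Kafka-based and ZooKeeper-based consumer offsets, keeping the larger offset per partition.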

private Map<Integer, Long> getConsumerOffsets(String groupId, TopicVO topic) {
    try {
        // Kafka doesn't really give us an indication of whether a consumer is
        // using Kafka or Zookeeper based offset tracking. So look up the offsets
        // for both and assume that the largest offset is the correct one.

        ForkJoinTask<Map<Integer, Long>> kafkaTask = threadPool
                .submit(() -> getConsumerOffsets(groupId, topic, false));

        ForkJoinTask<Map<Integer, Long>> zookeeperTask = threadPool
                .submit(() -> getConsumerOffsets(groupId, topic, true));

        Map<Integer, Long> zookeeperOffsets = zookeeperTask.get();
        Map<Integer, Long> kafkaOffsets = kafkaTask.get();
        zookeeperOffsets.entrySet()
                .forEach(entry -> kafkaOffsets.merge(entry.getKey(), entry.getValue(), Math::max));
        return kafkaOffsets;
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        throw Throwables.propagate(ex);
    } catch (ExecutionException ex) {
        throw Throwables.propagate(ex.getCause());
    }
}

From source file:org.apache.fluo.recipes.core.map.it.CollisionFreeMapIT.java
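Demonstrates merge with Long::sum to tally word counts while scanning a Fluo snapshot.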

private Map<String, Long> computeWordCounts(FluoClient fc) {
    Map<String, Long> counts = new HashMap<>();

    try (Snapshot snap = fc.newSnapshot()) {

        CellScanner scanner = snap.scanner().over(Span.prefix("d:")).fetch(new Column("content", "current"))
                .build();

        for (RowColumnValue rcv : scanner) {
            String[] words = rcv.getsValue().split("\\s+");
            for (String word : words) {
                if (word.isEmpty()) {
                    continue;
                }

                counts.merge(word, 1L, Long::sum);
            }
        }
    }

    return counts;
}

From source file:it.polimi.diceH2020.plugin.control.FileManager.java
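Demonstrates merge with a remapping function that folds a new per-provider profile map into the existing one via putAll.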

private static void setMapJobProfile(InstanceDataMultiProvider data, Configuration conf) {
    // Set MapJobProfile
    JobProfilesMap classdesc = JobProfilesMapGenerator.build();

    Map<String, Map<String, Map<String, JobProfile>>> classMap = new HashMap<>();

    for (ClassDesc c : conf.getClasses()) {
        Map<String, Map<String, JobProfile>> alternative = new HashMap<>();

        for (String alt : c.getAltDtsm().keySet()) {
            TreeMap<String, Double> profile = new TreeMap<>();
            String split[] = alt.split("-");

            JobProfile jp;
            if (conf.getTechnology().equals("Hadoop Map-reduce") || conf.getTechnology().equals("Spark")) {
                jp = JobProfileGenerator.build(c.getAltDtsmHadoop().get(alt).keySet().size() - 1);

                for (String par : c.getAltDtsmHadoop().get(alt).keySet()) {
                    if (!par.equals("file")) {
                        profile.put(par, Double.parseDouble(c.getAltDtsmHadoop().get(alt).get(par)));
                    }
                }
            } else {
                jp = JobProfileGenerator.build(3); // TODO: how many parameters do we need?
                profile.put("datasize", 66.6);
                profile.put("mavg", 666.6);
                profile.put("mmax", 666.6);
            }

            jp.setProfileMap(profile);

            final String provider = Configuration.getCurrent().getIsPrivate() ? "inHouse" : split[0];
            final String vmType = Configuration.getCurrent().getIsPrivate() ? split[0] : split[1];

            Map<String, JobProfile> profilemap = new HashMap<>();
            profilemap.put(vmType, jp);

            alternative.merge(provider, profilemap, (oldValue, newValue) -> {
                oldValue.putAll(newValue);
                return oldValue;
            });
        }

        classMap.put(String.valueOf(c.getId()), alternative);
    }

    classdesc.setMapJobProfile(classMap);
    data.setMapJobProfiles(classdesc);
}

From source file:org.apache.fluo.recipes.map.CollisionFreeMapIT.java
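Demonstrates the same word-count pattern as the previous example, written against an older Fluo scanner API.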

private Map<String, Long> computeWordCounts(FluoClient fc) {
    Map<String, Long> counts = new HashMap<>();

    try (Snapshot snap = fc.newSnapshot()) {
        RowIterator scanner = snap.get(new ScannerConfiguration().setSpan(Span.prefix("d:"))
                .fetchColumn(Bytes.of("content"), Bytes.of("current")));
        while (scanner.hasNext()) {
            Entry<Bytes, ColumnIterator> row = scanner.next();

            ColumnIterator colIter = row.getValue();

            while (colIter.hasNext()) {
                Entry<Column, Bytes> entry = colIter.next();

                String[] words = entry.getValue().toString().split("\\s+");
                for (String word : words) {
                    if (word.isEmpty()) {
                        continue;
                    }

                    counts.merge(word, 1L, Long::sum);
                }
            }
        }
    }

    return counts;
}

From source file:com.github.totyumengr.minicubes.cluster.TimeSeriesMiniCubeManagerHzImpl.java
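Demonstrates merge with Long::sum to aggregate per-group counts returned from distributed cube tasks.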

@Override
public Map<Integer, Long> count(String indName, String groupByDimName, Map<String, List<Integer>> filterDims) {

    try {
        Set<String> cubeIds = cubeIds();

        // Do execute
        List<Map<Integer, Long>> results = execute(new Count2(indName, groupByDimName, filterDims), cubeIds,
                hzExecutorTimeout);
        LOGGER.debug("Group counting {} on {} with filter {} results is {}", indName, cubeIds, filterDims,
                results);

        Map<Integer, Long> result = new HashMap<Integer, Long>();
        results.stream().forEach(new Consumer<Map<Integer, Long>>() {

            @Override
            public void accept(Map<Integer, Long> t) {
                t.forEach((k, v) -> result.merge(k, v, Long::sum));
            }
        });
        LOGGER.debug("Count {} on {} with filter {} results is {}", indName, cubeIds, filterDims, result);

        return result;
    } finally {
        AGG_CONTEXT.remove();
    }
}

From source file:com.github.totyumengr.minicubes.cluster.TimeSeriesMiniCubeManagerHzImpl.java
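Demonstrates merge with BigDecimal::add to aggregate per-group sums returned from distributed cube tasks.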

@Override
public Map<Integer, BigDecimal> sum(String indName, String groupByDimName,
        Map<String, List<Integer>> filterDims) {

    try {
        Set<String> cubeIds = cubeIds();

        // Do execute
        List<Map<Integer, BigDecimal>> results = execute(new Sum2(indName, groupByDimName, filterDims), cubeIds,
                hzExecutorTimeout);
        LOGGER.debug("Group {} on {} with filter {} results is {}", indName, cubeIds, filterDims, results);

        Map<Integer, BigDecimal> result = new HashMap<Integer, BigDecimal>();
        results.stream().forEach(new Consumer<Map<Integer, BigDecimal>>() {

            @Override
            public void accept(Map<Integer, BigDecimal> t) {
                t.forEach((k, v) -> result.merge(k, v, BigDecimal::add));
            }
        });
        LOGGER.debug("Sum {} on {} with filter {} results is {}", indName, cubeIds, filterDims, result);

        return result;
    } finally {
        AGG_CONTEXT.remove();
    }
}

From source file:com.github.totyumengr.minicubes.cluster.TimeSeriesMiniCubeManagerHzImpl.java
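Demonstrates merge with RoaringBitmap.or to union per-group bitmaps returned from distributed cube tasks.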

@Override
public Map<Integer, RoaringBitmap> distinct(String distinctName, boolean isDim, String groupByDimName,
        Map<String, List<Integer>> filterDims) {

    try {
        Set<String> cubeIds = cubeIds();

        // Do execute
        List<Map<Integer, RoaringBitmap>> results = execute(
                new Distinct(distinctName, isDim, groupByDimName, filterDims), cubeIds, hzExecutorTimeout);
        LOGGER.debug("Distinct {} on {} with filter {} results is {}", distinctName, cubeIds, filterDims,
                results);

        Map<Integer, RoaringBitmap> result = new HashMap<Integer, RoaringBitmap>(results.size());
        results.stream().forEach(new Consumer<Map<Integer, RoaringBitmap>>() {

            @Override
            public void accept(Map<Integer, RoaringBitmap> t) {
                t.forEach((k, v) -> result.merge(k, v,
                        new BiFunction<RoaringBitmap, RoaringBitmap, RoaringBitmap>() {

                            @Override
                            public RoaringBitmap apply(RoaringBitmap t, RoaringBitmap u) {
                                return RoaringBitmap.or(t, u);
                            }
                        }));
            }
        });
        LOGGER.debug("Distinct {} on {} with filter {} results is {}", distinctName, cubeIds, filterDims,
                result);
        return result;
    } finally {
        AGG_CONTEXT.remove();
    }
}