List of usage examples for java.util.Map.forEach
default void forEach(BiConsumer<? super K, ? super V> action)
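Performs the given action for each entry in the map until all entries have been processed or the action throws an exception. Before the collected examples, here is a minimal, self-contained sketch of the contract; the map contents are made up purely for illustration:

import java.util.HashMap;
import java.util.Map;

public class Main {
    public static void main(String[] args) {
        // Hypothetical sample data, for illustration only.
        Map<String, Integer> wordCounts = new HashMap<>();
        wordCounts.put("apple", 3);
        wordCounts.put("banana", 5);

        // The BiConsumer receives each entry's key and value in turn.
        wordCounts.forEach((word, count) ->
                System.out.println(word + " occurs " + count + " times"));
    }
}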
From source file:Main.java
public static void main(String[] args) throws Exception {
    List<Person> persons = Arrays.asList(
            new Person("Max", 18),
            new Person("Peter", 23),
            new Person("Pamela", 23),
            new Person("David", 12));

    // Group the persons by age, then print each group.
    Map<Integer, List<Person>> personsByAge = persons.stream()
            .collect(Collectors.groupingBy(p -> p.age));

    personsByAge.forEach((age, p) -> System.out.format("age %s: %s\n", age, p));
}
From source file:Main.java
public static void main(String[] args) {
    Map<Integer, String> map = new HashMap<>();
    for (int i = 0; i < 10; i++) {
        map.putIfAbsent(i, "val" + i);
    }
    map.forEach((id, val) -> System.out.println(val));
}
From source file:Main.java
public static void main(String[] args) {
    Map<Integer, String> map = new HashMap<>();
    for (int i = 0; i < 10; i++) {
        map.putIfAbsent(i, "val" + i);
    }
    map.forEach((id, val) -> System.out.println(val));

    System.out.println(map.getOrDefault(42, "not found")); // not found
}
From source file:Main.java
public static void main(String[] args) {
    Map<Integer, String> map = new HashMap<>();
    for (int i = 0; i < 10; i++) {
        map.putIfAbsent(i, "val" + i);
    }
    map.forEach((id, val) -> System.out.println(val));

    // remove(key, value) only removes the entry if the current value matches.
    map.remove(3, "val33");
    System.out.println(map.get(3)); // val3 (no match, nothing removed)

    map.remove(3, "val3");
    System.out.println(map.get(3)); // null (value matched, entry removed)
}
From source file:Main.java
public static void main(String[] args) {
    Map<Integer, String> map = new HashMap<>();
    for (int i = 0; i < 10; i++) {
        map.putIfAbsent(i, "val" + i);
    }
    map.forEach((id, val) -> System.out.println(val));

    map.remove(9); // remove key 9 so the first merge inserts instead of merging

    // With no existing mapping, merge simply puts the given value.
    map.merge(9, "val9", (value, newValue) -> value.concat(newValue));
    System.out.println(map.get(9)); // val9

    // With an existing mapping, merge applies the remapping function.
    map.merge(9, "concat", (value, newValue) -> value.concat(newValue));
    System.out.println(map.get(9)); // val9concat
}
From source file:Main.java
public static void main(String[] args) {
    Map<Integer, String> map = new HashMap<>();
    for (int i = 0; i < 10; i++) {
        map.putIfAbsent(i, "val" + i);
    }
    map.forEach((id, val) -> System.out.println(val));

    // computeIfPresent remaps the value when the key exists.
    map.computeIfPresent(3, (num, val) -> val + num);
    System.out.println(map.get(3)); // val33

    // Returning null from the remapping function removes the entry.
    map.computeIfPresent(9, (num, val) -> null);
    System.out.println(map.containsKey(9)); // false

    // computeIfAbsent inserts a computed value for a missing key...
    map.computeIfAbsent(23, num -> "val" + num);
    System.out.println(map.containsKey(23)); // true

    // ...but leaves an existing mapping untouched.
    map.computeIfAbsent(3, num -> "bam");
    System.out.println(map.get(3)); // val33
}
From source file:com.ryft.spark.connector.examples.SimplePairRDDExampleJ.java
public static void main(String[] args) {
    final SparkConf sparkConf = new SparkConf()
            .setAppName("SimplePairRDDExampleJ")
            .setMaster("local[2]");
    final SparkContext sc = new SparkContext(sparkConf);
    final SparkContextJavaFunctions javaFunctions = RyftJavaUtil.javaFunctions(sc);

    final byte fuzziness = 0;
    final int surrounding = 10;

    final List queries = toScalaList(RyftQueryUtil.toSimpleQueries("Jones", "Thomas"));

    final RyftPairJavaRDD rdd = javaFunctions.ryftPairJavaRDD(queries,
            RyftQueryOptions.apply("passengers.txt", surrounding, fuzziness),
            RyftJavaUtil.ryftQueryToEmptyList,
            RyftJavaUtil.stringToEmptySet);

    final Map<String, Long> countByKey = rdd.countByKey();
    final StrBuilder sb = new StrBuilder();
    countByKey.forEach((key, value) -> sb.append(key + " -> " + value + "\n"));
    logger.info("RDD count by key: \n{}", sb.toString());
}
From source file:Main.java
public static void main(String[] args) {
    Map<String, String> map = new HashMap<>();
    map.put("CSS", "style");
    map.put("HTML", "mark up");
    map.put("Oracle", "database");
    map.put("XML", "data");

    map.forEach((String key, String value) -> {
        System.out.println("key=" + key + ", value=" + value);
    });
}
From source file:pl.prutkowski.java.playground.java8.TestCollectors.java
/**
 * @param args the command line arguments
 */
public static void main(String[] args) {
    // Map each month (upper-cased) to the number of 'e' characters it contains.
    Map<String, Integer> eCountByMonth = months.stream()
            .collect(Collectors.toMap(String::toUpperCase, m -> StringUtils.countMatches(m, "e")));
    eCountByMonth.forEach((month, eCount) -> System.out.println(month + " -> " + eCount));
    System.out.println("---------------------------------");

    // Group months by their 'e' count.
    Map<Integer, List<String>> monthsByECount = months.stream()
            .collect(Collectors.groupingBy(m -> StringUtils.countMatches(m, "e")));
    monthsByECount.forEach((count, groupedMonths) -> System.out.println(count + " -> " + groupedMonths));
    System.out.println("---------------------------------");

    Double averageLength = months.stream().collect(Collectors.averagingDouble(String::length));
    System.out.println("Average length: " + averageLength);
    System.out.println("---------------------------------");

    Double max = months.stream().collect(Collectors.summarizingDouble(String::length)).getMax();
    System.out.println("Max length: " + max);
    System.out.println("---------------------------------");

    String reduced = months.stream().collect(Collectors.reducing((m1, m2) -> m1 + ", " + m2)).get();
    System.out.println("Reduced: " + reduced);
    System.out.println("---------------------------------");

    System.out.println(String.join(", ", months));
    System.out.println("---------------------------------");

    List<String> monthsWithZ = months.stream().filter(m -> m.contains("z")).collect(new ListCollector<>());
    System.out.println(monthsWithZ);
}
From source file:examples.cnn.ImagesClassification.java
public static void main(String[] args) {
    SparkConf conf = new SparkConf();
    conf.setAppName("Images CNN Classification");
    conf.setMaster(String.format("local[%d]", NUM_CORES));
    conf.set(SparkDl4jMultiLayer.AVERAGE_EACH_ITERATION, String.valueOf(true));

    try (JavaSparkContext sc = new JavaSparkContext(conf)) {
        JavaRDD<String> raw = sc.textFile("data/images-data-rgb.csv");
        String first = raw.first();

        // Skip the header line, then split each record into (label, pixel data).
        JavaPairRDD<String, String> labelData = raw.filter(f -> !f.equals(first)).mapToPair(r -> {
            String[] tab = r.split(";");
            return new Tuple2<>(tab[0], tab[1]);
        });

        // Assign a distinct numeric index to each label.
        Map<String, Long> labels = labelData.map(t -> t._1).distinct().zipWithIndex()
                .mapToPair(t -> new Tuple2<>(t._1, t._2)).collectAsMap();
        log.info("Number of labels {}", labels.size());
        labels.forEach((a, b) -> log.info("{}: {}", a, b));

        NetworkTrainer trainer = new NetworkTrainer.Builder().model(ModelLibrary.net1)
                .networkToSparkNetwork(net -> new SparkDl4jMultiLayer(sc, net)).numLabels(labels.size())
                .cores(NUM_CORES).build();

        // Convert each record into a (one-hot label vector, normalized pixel array) pair.
        JavaRDD<Tuple2<INDArray, double[]>> labelsWithData = labelData.map(t -> {
            INDArray label = FeatureUtil.toOutcomeVector(labels.get(t._1).intValue(), labels.size());
            double[] arr = Arrays.stream(t._2.split(" ")).map(normalize1).mapToDouble(Double::doubleValue)
                    .toArray();
            return new Tuple2<>(label, arr);
        });

        JavaRDD<Tuple2<INDArray, double[]>>[] splited = labelsWithData.randomSplit(new double[] { .8, .2 },
                seed);

        JavaRDD<DataSet> testDataset = splited[1].map(t -> {
            INDArray features = Nd4j.create(t._2, new int[] { 1, t._2.length });
            return new DataSet(features, t._1);
        }).cache();
        log.info("Number of test images {}", testDataset.count());

        JavaRDD<DataSet> plain = splited[0].map(t -> {
            INDArray features = Nd4j.create(t._2, new int[] { 1, t._2.length });
            return new DataSet(features, t._1);
        });

        // Augment the training set with horizontally flipped copies of each image:
        // reverse every row of width pixels, keeping the same label.
        JavaRDD<DataSet> flipped = splited[0].map(t -> {
            double[] arr = t._2;
            int idx = 0;
            double[] farr = new double[arr.length];
            for (int i = 0; i < arr.length; i += trainer.width) {
                double[] temp = Arrays.copyOfRange(arr, i, i + trainer.width);
                ArrayUtils.reverse(temp);
                for (int j = 0; j < trainer.height; ++j) {
                    farr[idx++] = temp[j];
                }
            }
            INDArray features = Nd4j.create(farr, new int[] { 1, farr.length });
            return new DataSet(features, t._1);
        });

        JavaRDD<DataSet> trainDataset = plain.union(flipped).cache();
        log.info("Number of train images {}", trainDataset.count());

        trainer.train(trainDataset, testDataset);
    }
}