Example usage for java.util.stream Collectors toCollection

List of usage examples for java.util.stream Collectors toCollection

Introduction

On this page you can find usage examples for java.util.stream Collectors.toCollection.

Prototype

public static <T, C extends Collection<T>> Collector<T, ?, C> toCollection(Supplier<C> collectionFactory) 

Document

Returns a Collector that accumulates the input elements into a new Collection, in encounter order.
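
Unlike Collectors.toList() or Collectors.toSet(), toCollection lets the caller pick the concrete collection implementation via the supplied factory. A minimal, self-contained sketch (class and variable names are illustrative):

import java.util.LinkedList;
import java.util.TreeSet;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class ToCollectionDemo {
    public static void main(String[] args) {
        // Collect into a TreeSet: sorted, duplicates removed
        TreeSet<String> sorted = Stream.of("pear", "apple", "apple", "banana")
                .collect(Collectors.toCollection(TreeSet::new));
        System.out.println(sorted); // [apple, banana, pear]

        // Collect into a LinkedList, preserving encounter order
        LinkedList<Integer> ordered = Stream.of(3, 1, 2)
                .collect(Collectors.toCollection(LinkedList::new));
        System.out.println(ordered); // [3, 1, 2]
    }
}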

Usage

From source file:com.evolveum.midpoint.task.quartzimpl.work.WorkStateManager.java

private List<WorkBucketType> cloneNoId(List<WorkBucketType> buckets) {
    return buckets.stream().map(this::cloneNoId)
            .collect(Collectors.toCollection(() -> new ArrayList<>(buckets.size())));
}
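
Note the supplier here: because toCollection accepts an arbitrary Supplier, the target ArrayList can be pre-sized with the known element count, sparing the collector intermediate resizing as it accumulates. The same idea in isolation (variable names are illustrative):

List<String> input = List.of("a", "b", "c");
List<String> upper = input.stream().map(String::toUpperCase)
        .collect(Collectors.toCollection(() -> new ArrayList<>(input.size())));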

From source file:Executable.LinkImputeR.java

private static List<SingleGenotypeCall> getCorrectCalls(double[][][] called,
        List<? extends SingleGenotypePosition> list) {
    ProbToCall p2c = new ProbToCall();
    return list.stream()
            .map(sgp -> new SingleGenotypeCall(sgp.getSample(), sgp.getSNP(),
                    p2c.callSingle(called[sgp.getSample()][sgp.getSNP()])))
            .collect(Collectors.toCollection(ArrayList::new));
}

From source file:Executable.LinkImputeR.java

private static List<SingleGenotypeReads> getMaskedReads(List<SingleGenotypeMasked> masked) {
    return masked.stream().map(sgm -> new SingleGenotypeReads(sgm.getSample(), sgm.getSNP(), sgm.getMasked()))
            .collect(Collectors.toCollection(ArrayList::new));
}

From source file:Executable.LinkImputeR.java

private static List<SingleGenotypeReads> getOriginalReads(List<SingleGenotypeMasked> masked) {
    return masked.stream().map(sgm -> new SingleGenotypeReads(sgm.getSample(), sgm.getSNP(), sgm.getOriginal()))
            .collect(Collectors.toCollection(ArrayList::new));
}

From source file:com.ikanow.aleph2.analytics.services.TestDeduplicationService.java

@SuppressWarnings("unchecked")
@Test
public void test_handleDuplicateRecord() {

    final IEnrichmentModuleContext enrich_context = Mockito.mock(IEnrichmentModuleContext.class);

    Mockito.when(enrich_context.emitImmutableObject(Mockito.any(Long.class), Mockito.any(JsonNode.class),
            Mockito.any(Optional.class), Mockito.any(Optional.class), Mockito.any(Optional.class)))
            .thenReturn(Validation.success(_mapper.createObjectNode()));

    TestDedupEnrichmentModule test_module = new TestDedupEnrichmentModule();

    final String ts_field = "@timestamp";

    final ObjectNode old_json = _mapper.createObjectNode();
    old_json.put("_id", "old_record");
    old_json.put("@timestamp", 0L);
    old_json.put("url", "test");

    final ObjectNode new_json = _mapper.createObjectNode();
    new_json.put("@timestamp", 1L);
    new_json.put("url", "test");

    final ObjectNode new_json_but_same_time = _mapper.createObjectNode();
    new_json_but_same_time.put("@timestamp", 0L);
    new_json_but_same_time.put("url", "test");

    Tuple3<Long, IBatchRecord, ObjectNode> new_record = Tuples._3T(0L,
            new BatchRecordUtils.JsonBatchRecord(new_json), _mapper.createObjectNode());
    Tuple3<Long, IBatchRecord, ObjectNode> new_record_but_same_time = Tuples._3T(0L,
            new BatchRecordUtils.JsonBatchRecord(new_json_but_same_time), _mapper.createObjectNode());

    new_record._2().getContent(); //(code coverage!)

    final TextNode key = new TextNode("url");

    LinkedHashMap<JsonNode, LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> mutable_obj_map = new LinkedHashMap<>();

    final LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>> new_records = Stream.of(new_record)
            .collect(Collectors.toCollection(LinkedList::new));
    final LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>> new_records_but_same_time = Stream
            .of(new_record_but_same_time).collect(Collectors.toCollection(LinkedList::new));

    // Simple case Leave policy
    {
        //(reset)
        mutable_obj_map.clear();
        mutable_obj_map.put(new TextNode("never_changed"), new_records);
        mutable_obj_map.put(new TextNode("url"), new_records);
        assertEquals(2, mutable_obj_map.size());
        new_record._3().removeAll();
        new_record_but_same_time._3().removeAll();
        _called_batch.set(0);

        DocumentSchemaBean config = BeanTemplateUtils.build(DocumentSchemaBean.class)
                .with(DocumentSchemaBean::deduplication_policy, DeduplicationPolicy.leave).done().get();
        DeduplicationEnrichmentContext test_context = new DeduplicationEnrichmentContext(enrich_context, config,
                j -> Optional.empty());

        final Stream<JsonNode> ret_val = DeduplicationService.handleDuplicateRecord(config,
                Optional.of(Tuples._2T(test_module, test_context)), ts_field, new_records,
                Arrays.asList(old_json), key, mutable_obj_map);
        assertEquals(0L, ret_val.count());

        // Nothing emitted
        Mockito.verify(enrich_context, Mockito.times(0)).emitImmutableObject(Mockito.any(Long.class),
                Mockito.any(JsonNode.class), Mockito.any(Optional.class), Mockito.any(Optional.class),
                Mockito.any(Optional.class));
        // No custom processing performed
        assertEquals(0, _called_batch.get());
        // No annotations/mutations
        assertEquals("{}", new_record._3().toString());
        // Object removed from mutable map
        assertEquals(1, mutable_obj_map.size());
    }
    // Simple case update policy - time updates
    final Consumer<Boolean> test_time_updates = delete_unhandled -> {
        //(reset)
        mutable_obj_map.clear();
        mutable_obj_map.put(new TextNode("never_changed"), new_records);
        mutable_obj_map.put(new TextNode("url"), new_records);
        assertEquals(2, mutable_obj_map.size());
        new_record._3().removeAll();
        new_record_but_same_time._3().removeAll();
        _called_batch.set(0);

        DocumentSchemaBean config = BeanTemplateUtils.build(DocumentSchemaBean.class)
                .with(DocumentSchemaBean::deduplication_policy, DeduplicationPolicy.update)
                .with(DocumentSchemaBean::delete_unhandled_duplicates, delete_unhandled).done().get();
        DeduplicationEnrichmentContext test_context = new DeduplicationEnrichmentContext(enrich_context, config,
                j -> Optional.empty());

        // (add the same object twice to test the "return ids to delete" functionality)
        final Stream<JsonNode> ret_val = DeduplicationService.handleDuplicateRecord(config,
                Optional.of(Tuples._2T(test_module, test_context)), ts_field, new_records,
                Arrays.asList(old_json, old_json), key, mutable_obj_map);
        if (delete_unhandled) {
            assertEquals(Arrays.asList("old_record"), ret_val.sorted()
                    .map(j -> DeduplicationService.jsonToObject(j)).collect(Collectors.toList()));
        } else {
            assertEquals(0L, ret_val.count());
        }

        // Nothing emitted
        Mockito.verify(enrich_context, Mockito.times(0)).emitImmutableObject(Mockito.any(Long.class),
                Mockito.any(JsonNode.class), Mockito.any(Optional.class), Mockito.any(Optional.class),
                Mockito.any(Optional.class));
        // No custom processing performed
        assertEquals(0, _called_batch.get());
        // _id annotation set to the old record
        assertEquals("{\"_id\":\"old_record\"}", new_record._3().toString());
        // Object not removed from mutable map
        assertEquals(2, mutable_obj_map.size());
    };
    test_time_updates.accept(true);
    test_time_updates.accept(false);

    // Simple case update policy - times the same
    {
        //(reset)
        mutable_obj_map.clear();
        mutable_obj_map.put(new TextNode("never_changed"), new_records);
        mutable_obj_map.put(new TextNode("url"), new_records);
        new_record._3().removeAll();
        new_record_but_same_time._3().removeAll();
        _called_batch.set(0);

        DocumentSchemaBean config = BeanTemplateUtils.build(DocumentSchemaBean.class)
                .with(DocumentSchemaBean::deduplication_policy, DeduplicationPolicy.update)
                .with(DocumentSchemaBean::delete_unhandled_duplicates, false).done().get();
        DeduplicationEnrichmentContext test_context = new DeduplicationEnrichmentContext(enrich_context, config,
                j -> Optional.empty());

        final Stream<JsonNode> ret_val = DeduplicationService.handleDuplicateRecord(config,
                Optional.of(Tuples._2T(test_module, test_context)), ts_field, new_records_but_same_time,
                Arrays.asList(old_json), key, mutable_obj_map);
        assertEquals(0L, ret_val.count());

        // Nothing emitted
        Mockito.verify(enrich_context, Mockito.times(0)).emitImmutableObject(Mockito.any(Long.class),
                Mockito.any(JsonNode.class), Mockito.any(Optional.class), Mockito.any(Optional.class),
                Mockito.any(Optional.class));
        // No custom processing performed
        assertEquals(0, _called_batch.get());
        // No annotations/mutations
        assertEquals("{}", new_record_but_same_time._3().toString());
        // Object removed from mutable map
        assertEquals(1, mutable_obj_map.size());
    }
    // overwrite
    final Consumer<Boolean> test_overwrites = delete_unhandled -> {
        //(reset)
        mutable_obj_map.clear();
        mutable_obj_map.put(new TextNode("never_changed"), new_records);
        mutable_obj_map.put(new TextNode("url"), new_records);
        assertEquals(2, mutable_obj_map.size());
        new_record._3().removeAll();
        new_record_but_same_time._3().removeAll();
        _called_batch.set(0);

        DocumentSchemaBean config = BeanTemplateUtils.build(DocumentSchemaBean.class)
                .with(DocumentSchemaBean::deduplication_policy, DeduplicationPolicy.overwrite)
                .with(DocumentSchemaBean::delete_unhandled_duplicates, delete_unhandled).done().get();
        DeduplicationEnrichmentContext test_context = new DeduplicationEnrichmentContext(enrich_context, config,
                j -> Optional.empty());

        final Stream<JsonNode> ret_val = DeduplicationService.handleDuplicateRecord(config,
                Optional.of(Tuples._2T(test_module, test_context)), ts_field, new_records,
                Arrays.asList(old_json, old_json), key, mutable_obj_map);
        if (delete_unhandled) {
            assertEquals(Arrays.asList("old_record"), ret_val.sorted()
                    .map(j -> DeduplicationService.jsonToObject(j)).collect(Collectors.toList()));
        } else {
            assertEquals(0L, ret_val.count());
        }

        // Nothing emitted
        Mockito.verify(enrich_context, Mockito.times(0)).emitImmutableObject(Mockito.any(Long.class),
                Mockito.any(JsonNode.class), Mockito.any(Optional.class), Mockito.any(Optional.class),
                Mockito.any(Optional.class));
        // No custom processing performed
        assertEquals(0, _called_batch.get());
        // _id annotation set to the old record
        assertEquals("{\"_id\":\"old_record\"}", new_record._3().toString());
        // Object not removed from mutable map
        assertEquals(2, mutable_obj_map.size());
    };
    test_overwrites.accept(true);
    test_overwrites.accept(false);

    //(check ignores times)
    {
        //(reset)
        mutable_obj_map.clear();
        mutable_obj_map.put(new TextNode("never_changed"), new_records);
        mutable_obj_map.put(new TextNode("url"), new_records);
        assertEquals(2, mutable_obj_map.size());
        new_record._3().removeAll();
        new_record_but_same_time._3().removeAll();
        _called_batch.set(0);

        DocumentSchemaBean config = BeanTemplateUtils.build(DocumentSchemaBean.class)
                .with(DocumentSchemaBean::deduplication_policy, DeduplicationPolicy.overwrite)
                .with(DocumentSchemaBean::delete_unhandled_duplicates, false).done().get();
        DeduplicationEnrichmentContext test_context = new DeduplicationEnrichmentContext(enrich_context, config,
                j -> Optional.empty());

        final Stream<JsonNode> ret_val = DeduplicationService.handleDuplicateRecord(config,
                Optional.of(Tuples._2T(test_module, test_context)), ts_field, new_records_but_same_time,
                Arrays.asList(old_json), key, mutable_obj_map);
        assertEquals(0L, ret_val.count());

        // Nothing emitted
        Mockito.verify(enrich_context, Mockito.times(0)).emitImmutableObject(Mockito.any(Long.class),
                Mockito.any(JsonNode.class), Mockito.any(Optional.class), Mockito.any(Optional.class),
                Mockito.any(Optional.class));
        // No custom processing performed
        assertEquals(0, _called_batch.get());
        // _id annotation set to the old record
        assertEquals("{\"_id\":\"old_record\"}", new_record_but_same_time._3().toString());
        // Object not removed from mutable map
        assertEquals(2, mutable_obj_map.size());
    }
    // custom
    {
        //(reset)
        mutable_obj_map.clear();
        mutable_obj_map.put(new TextNode("never_changed"), new_records);
        mutable_obj_map.put(new TextNode("url"), new_records);
        assertEquals(2, mutable_obj_map.size());
        new_record._3().removeAll();
        new_record_but_same_time._3().removeAll();
        _called_batch.set(0);

        DocumentSchemaBean config = BeanTemplateUtils.build(DocumentSchemaBean.class)
                .with(DocumentSchemaBean::deduplication_policy, DeduplicationPolicy.custom)
                .with(DocumentSchemaBean::delete_unhandled_duplicates, false).done().get();
        DeduplicationEnrichmentContext test_context = new DeduplicationEnrichmentContext(enrich_context, config,
                j -> Optional.empty());

        final Stream<JsonNode> ret_val = DeduplicationService.handleDuplicateRecord(config,
                Optional.of(Tuples._2T(test_module, test_context)), ts_field, new_records,
                Arrays.asList(old_json), key, mutable_obj_map);
        assertEquals(0L, ret_val.count());

        // Nothing emitted
        Mockito.verify(enrich_context, Mockito.times(0)).emitImmutableObject(Mockito.any(Long.class),
                Mockito.any(JsonNode.class), Mockito.any(Optional.class), Mockito.any(Optional.class),
                Mockito.any(Optional.class));
        // Custom processing performed (old + new records)
        assertEquals(2, _called_batch.get());
        // No _id annotation
        assertEquals("{}", new_record._3().toString()); // up to the custom code to do this
        // Object removed from mutable map
        assertEquals(1, mutable_obj_map.size()); //(remove since it's the responsibility of the custom code to emit)
    }
    //(check ignores times)
    {
        //(reset)
        mutable_obj_map.clear();
        mutable_obj_map.put(new TextNode("never_changed"), new_records);
        mutable_obj_map.put(new TextNode("url"), new_records);
        assertEquals(2, mutable_obj_map.size());
        new_record._3().removeAll();
        new_record_but_same_time._3().removeAll();
        _called_batch.set(0);

        DocumentSchemaBean config = BeanTemplateUtils.build(DocumentSchemaBean.class)
                .with(DocumentSchemaBean::deduplication_policy, DeduplicationPolicy.custom)
                .with(DocumentSchemaBean::delete_unhandled_duplicates, false).done().get();
        DeduplicationEnrichmentContext test_context = new DeduplicationEnrichmentContext(enrich_context, config,
                j -> Optional.empty());

        final Stream<JsonNode> ret_val = DeduplicationService.handleDuplicateRecord(config,
                Optional.of(Tuples._2T(test_module, test_context)), ts_field, new_records_but_same_time,
                Arrays.asList(old_json), key, mutable_obj_map);
        assertEquals(0L, ret_val.count());

        // Nothing emitted
        Mockito.verify(enrich_context, Mockito.times(0)).emitImmutableObject(Mockito.any(Long.class),
                Mockito.any(JsonNode.class), Mockito.any(Optional.class), Mockito.any(Optional.class),
                Mockito.any(Optional.class));
        // Custom processing performed (old + new records)
        assertEquals(2, _called_batch.get());
        // No _id annotation
        assertEquals("{}", new_record_but_same_time._3().toString()); // up to the custom code to do this
        // Object removed from mutable map
        assertEquals(1, mutable_obj_map.size()); //(remove since it's the responsibility of the custom code to emit)
    }
    // Simple case *custom* update policy - time updates
    {
        //(reset)
        mutable_obj_map.clear();
        mutable_obj_map.put(new TextNode("never_changed"), new_records);
        mutable_obj_map.put(new TextNode("url"), new_records);
        assertEquals(2, mutable_obj_map.size());
        new_record._3().removeAll();
        new_record_but_same_time._3().removeAll();
        _called_batch.set(0);

        DocumentSchemaBean config = BeanTemplateUtils.build(DocumentSchemaBean.class)
                .with(DocumentSchemaBean::deduplication_policy, DeduplicationPolicy.custom_update)
                .with(DocumentSchemaBean::delete_unhandled_duplicates, false).done().get();
        DeduplicationEnrichmentContext test_context = new DeduplicationEnrichmentContext(enrich_context, config,
                j -> Optional.empty());

        final Stream<JsonNode> ret_val = DeduplicationService.handleDuplicateRecord(config,
                Optional.of(Tuples._2T(test_module, test_context)), ts_field, new_records,
                Arrays.asList(old_json), key, mutable_obj_map);
        assertEquals(0L, ret_val.count());

        // Nothing emitted
        Mockito.verify(enrich_context, Mockito.times(0)).emitImmutableObject(Mockito.any(Long.class),
                Mockito.any(JsonNode.class), Mockito.any(Optional.class), Mockito.any(Optional.class),
                Mockito.any(Optional.class));
        // Custom processing performed (old + new records)
        assertEquals(2, _called_batch.get());
        // No _id annotation
        assertEquals("{}", new_record._3().toString()); // up to the custom code to do this
        // Object removed from mutable map
        assertEquals(1, mutable_obj_map.size()); //(remove since it's the responsibility of the custom code to emit)
    }
    // Simple case *custom* update policy - times the same
    {
        //(reset)
        mutable_obj_map.clear();
        mutable_obj_map.put(new TextNode("never_changed"), new_records);
        mutable_obj_map.put(new TextNode("url"), new_records);
        assertEquals(2, mutable_obj_map.size());
        new_record._3().removeAll();
        new_record_but_same_time._3().removeAll();
        _called_batch.set(0);

        DocumentSchemaBean config = BeanTemplateUtils.build(DocumentSchemaBean.class)
                .with(DocumentSchemaBean::deduplication_policy, DeduplicationPolicy.custom_update)
                .with(DocumentSchemaBean::delete_unhandled_duplicates, false).done().get();
        DeduplicationEnrichmentContext test_context = new DeduplicationEnrichmentContext(enrich_context, config,
                j -> Optional.empty());

        final Stream<JsonNode> ret_val = DeduplicationService.handleDuplicateRecord(config,
                Optional.of(Tuples._2T(test_module, test_context)), ts_field, new_records_but_same_time,
                Arrays.asList(old_json), key, mutable_obj_map);
        assertEquals(0L, ret_val.count());

        // Nothing emitted
        Mockito.verify(enrich_context, Mockito.times(0)).emitImmutableObject(Mockito.any(Long.class),
                Mockito.any(JsonNode.class), Mockito.any(Optional.class), Mockito.any(Optional.class),
                Mockito.any(Optional.class));
        // No custom processing performed
        assertEquals(0, _called_batch.get());
        // No annotations/mutations
        assertEquals("{}", new_record_but_same_time._3().toString());
        // Object removed from mutable map
        assertEquals(1, mutable_obj_map.size());
    }

}
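
Note how the test builds its bucket values: the map is declared with LinkedList values, so plain Collectors.toList() (which only promises a List) would not compile there; toCollection(LinkedList::new) types the result as a LinkedList. The pattern in isolation (names are illustrative):

LinkedList<String> bucket = Stream.of("single-element")
        .collect(Collectors.toCollection(LinkedList::new));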

From source file:UI.MainStageController.java

/**
 * Prompts an alert that the selected file is already part of the current project.
 */
private void showFileAlreadyLoadedAlert(ArrayList<String> fileNames) {
    if (fileNames.size() > 1) {
        fileNames = fileNames.stream().map(string -> "'" + string + "'")
                .collect(Collectors.toCollection(ArrayList::new));
    }
    String name = String.join(",\n", fileNames);

    String oneFileAlreadyLoaded = "The file \n'" + name + "'\nis already loaded in your project.";
    String multipleFilesAlreadyLoaded = "The files\n" + name + "\nare already loaded in your project.";
    fileAlreadyLoadedAlert = new Alert(Alert.AlertType.ERROR);
    fileAlreadyLoadedAlert.setTitle("File not loaded.");
    fileAlreadyLoadedAlert
            .setContentText(fileNames.size() == 1 ? oneFileAlreadyLoaded : multipleFilesAlreadyLoaded);
    fileAlreadyLoadedAlert.show();
}

From source file:com.net2plan.libraries.GraphUtils.java

/** Returns the K minimum cost service chains between two nodes (summing the costs of the links and resources traversed), traversing a given sequence of resource types and satisfying some user-defined constraints.
 * If only <i>n</i> shortest paths are found (n&lt;K), those are returned. If none is found, an empty list is returned.
 * The subpaths (the sets of links between two resources, or between the first (last) resource and the origin (destination) node) are constrained to be loopless
 * (the algorithm uses Yen's scheme for subpath enumeration).
 * @param links The set of links which can be used for the chain
 * @param originNode The origin node of the chain
 * @param destinationNode The destination node of the chain (may be the same as the origin node)
 * @param sequenceOfResourceTypesToTraverse the types of the sequence of resources to traverse
 * @param linkCost the cost of each link (if null, all links have cost one); all numbers must be strictly positive
 * @param resourceCost a map with the cost of each resource (if null, all resources have cost zero). A resource with Double.MAX_VALUE cost cannot be traversed (as if it were not there). All costs must be non-negative. If a resource is not present in the map, its cost is zero.
 * @param K The maximum number of service chains to return (fewer than K may be returned if there are not that many different chains)
 * @param maxCostServiceChain Service chains with a cost higher than this are not enumerated
 * @param maxLengthInKmPerSubpath The maximum length in km of each subpath. Service chains not satisfying this are not enumerated
 * @param maxNumHopsPerSubpath The maximum number of traversed links in each subpath. Service chains not satisfying this are not enumerated
 * @param maxPropDelayInMsPerSubpath The maximum propagation delay in ms, summing the links in each subpath. Service chains not satisfying this are not enumerated
 * @param cacheSubpathLists A map which associates to node pairs the k-shortest paths (considering links only) already computed, to be reused.
 * The algorithm adds new entries here for those pairs of nodes for which no pre-computed values exist and that are needed by the algorithm
 * (e.g. from the origin node to all nodes hosting the first resource type, from nodes of the first resource type to the second...). If null, then no entries are
 * pre-computed and no new entries are returned.
 * @return the (at most) K minimum cost service chains.
 */
public static List<Pair<List<NetworkElement>, Double>> getKMinimumCostServiceChains(List<Link> links,
        Node originNode, Node destinationNode, List<String> sequenceOfResourceTypesToTraverse,
        DoubleMatrix1D linkCost, Map<Resource, Double> resourceCost, int K, double maxCostServiceChain,
        double maxLengthInKmPerSubpath, int maxNumHopsPerSubpath, double maxPropDelayInMsPerSubpath,
        Map<Pair<Node, Node>, List<Pair<List<Link>, Double>>> cacheSubpathLists) {
    if (maxLengthInKmPerSubpath <= 0)
        maxLengthInKmPerSubpath = Double.MAX_VALUE;
    if (maxNumHopsPerSubpath <= 0)
        maxNumHopsPerSubpath = Integer.MAX_VALUE;
    if (maxPropDelayInMsPerSubpath <= 0)
        maxPropDelayInMsPerSubpath = Double.MAX_VALUE;
    if (maxCostServiceChain < 0)
        maxCostServiceChain = Double.MAX_VALUE;
    final int E = links.size();
    if (E == 0)
        return new LinkedList<Pair<List<NetworkElement>, Double>>();
    final NetPlan netPlan = links.get(0).getNetPlan();
    if (linkCost == null)
        linkCost = DoubleFactory1D.dense.make(E, 1.0);
    if (linkCost.size() != E)
        throw new Net2PlanException("Wrong size of cost array");
    if (linkCost.getMinLocation()[0] <= 0)
        throw new Net2PlanException("All link costs must be strictly positive");
    if (resourceCost != null)
        for (Double val : resourceCost.values())
            if (val < 0)
                throw new Net2PlanException("All resource costs must be non-negative");

    /* initialize the link cost map */
    Map<Link, Double> linkCostMap = new HashMap<Link, Double>();
    for (int cont = 0; cont < E; cont++)
        linkCostMap.put(links.get(cont), linkCost.get(cont));

    /* initialize the nodes per phase. One element per resource type to traverse, plus one for the last node  */
    List<Set<Node>> nodesPerPhase = new ArrayList<Set<Node>>();
    for (String resourceType : sequenceOfResourceTypesToTraverse) {
        Set<Resource> resourcesNotInfiniteCostThisType = netPlan.getResources(resourceType);
        if (resourceCost != null)
            resourcesNotInfiniteCostThisType.removeIf(e -> resourceCost.get(e) == Double.MAX_VALUE);
        if (resourcesNotInfiniteCostThisType.isEmpty())
            return new LinkedList<Pair<List<NetworkElement>, Double>>();
        final Set<Node> nodesWithResourcesNotInfiniteCostThisType = resourcesNotInfiniteCostThisType.stream()
                .map(e -> e.getHostNode()).collect(Collectors.toCollection(HashSet::new));
        nodesPerPhase.add(nodesWithResourcesNotInfiniteCostThisType);
    }
    nodesPerPhase.add(Collections.singleton(destinationNode));

    /* initialize the path lists. This includes (n,n) pairs with a single path with an empty link sequence and zero cost */
    if (cacheSubpathLists == null)
        cacheSubpathLists = new HashMap<Pair<Node, Node>, List<Pair<List<Link>, Double>>>();
    for (int contPhase = 0; contPhase < nodesPerPhase.size(); contPhase++) {
        final Set<Node> outputNodes = nodesPerPhase.get(contPhase);
        final Set<Node> inputNodes = contPhase == 0 ? Collections.singleton(originNode)
                : nodesPerPhase.get(contPhase - 1);
        for (Node nIn : inputNodes)
            for (Node nOut : outputNodes)
                if (!cacheSubpathLists.containsKey(Pair.of(nIn, nOut)))
                    if (nIn != nOut) {
                        List<List<Link>> kPaths = getKLooplessShortestPaths(netPlan.getNodes(), links, nIn,
                                nOut, linkCostMap, K, maxLengthInKmPerSubpath, maxNumHopsPerSubpath,
                                maxPropDelayInMsPerSubpath, -1, -1, -1);
                        List<Pair<List<Link>, Double>> pathsInfo = new ArrayList<Pair<List<Link>, Double>>();
                        double previousCost = 0;
                        for (List<Link> path : kPaths) {
                            final double thisCost = path.stream().mapToDouble(e -> linkCostMap.get(e)).sum();
                            if (previousCost > thisCost + 0.001)
                                throw new RuntimeException(
                                        "thisCost: " + thisCost + ", previousCost: " + previousCost + ", Bad");
                            if (thisCost > maxCostServiceChain)
                                break; // the maximum cost is exceeded, do not add this as subpath
                            pathsInfo.add(Pair.of(path, thisCost));
                            previousCost = thisCost;
                        }
                        cacheSubpathLists.put(Pair.of(nIn, nOut), pathsInfo);
                    } else
                        cacheSubpathLists.put(Pair.of(nIn, nIn),
                                Collections.singletonList(Pair.of(new LinkedList<Link>(), 0.0)));
    }

    /* Start the main loop */

    /* Initialize the SCs per out node, with those from the origin node to each node with resources of the first type (or the end node if this is not a SC) */
    Map<Node, List<Pair<List<NetworkElement>, Double>>> outNodeToKSCsMap = new HashMap<Node, List<Pair<List<NetworkElement>, Double>>>();
    for (Node outNode : nodesPerPhase.get(0)) {
        List<Pair<List<NetworkElement>, Double>> thisFirstStageNodeSCs = new ArrayList<Pair<List<NetworkElement>, Double>>();
        for (Pair<List<Link>, Double> path : cacheSubpathLists.get(Pair.of(originNode, outNode)))
            if (path.getSecond() <= maxCostServiceChain)
                thisFirstStageNodeSCs
                        .add(Pair.of(new LinkedList<NetworkElement>(path.getFirst()), path.getSecond()));
        outNodeToKSCsMap.put(outNode, thisFirstStageNodeSCs);
    }

    final Comparator<Pair<List<NetworkElement>, Double>> scComparator = new Comparator<Pair<List<NetworkElement>, Double>>() {
        public int compare(Pair<List<NetworkElement>, Double> t1, Pair<List<NetworkElement>, Double> t2) {
            return Double.compare(t1.getSecond(), t2.getSecond());
        }
    };

    for (int nextPhase = 1; nextPhase < nodesPerPhase.size(); nextPhase++) {
        final Set<Node> thisPhaseNodes = nodesPerPhase.get(nextPhase - 1);
        final Set<Node> nextPhaseNodes = nodesPerPhase.get(nextPhase);
        final String intermediateNodeResourceType = sequenceOfResourceTypesToTraverse.get(nextPhase - 1);
        Map<Node, List<Pair<List<NetworkElement>, Double>>> new_outNodeToKSCsMap = new HashMap<Node, List<Pair<List<NetworkElement>, Double>>>();
        for (Node newOutNode : nextPhaseNodes) {
            List<Pair<List<NetworkElement>, Double>> kSCsToThisOutNode = new ArrayList<Pair<List<NetworkElement>, Double>>();
            for (Node intermediateNode : thisPhaseNodes) {
                for (Pair<List<NetworkElement>, Double> scOriginToIntermediateInfo : outNodeToKSCsMap
                        .get(intermediateNode)) {
                    final List<NetworkElement> scOriginToIntermediate = scOriginToIntermediateInfo.getFirst();
                    final double scOriginToIntermediateCost = scOriginToIntermediateInfo.getSecond();
                    for (Pair<List<Link>, Double> scIntermediateToOutInfo : cacheSubpathLists
                            .get(Pair.of(intermediateNode, newOutNode))) {
                        final List<NetworkElement> scIntermediateToOut = (List<NetworkElement>) (List<?>) scIntermediateToOutInfo
                                .getFirst();
                        final double scIntermediateToOutCost = scIntermediateToOutInfo.getSecond();
                        if (scOriginToIntermediateCost + scIntermediateToOutCost > maxCostServiceChain)
                            break; // do not add this SC, and no more interm->out paths: all are worse
                        if (kSCsToThisOutNode.size() == K)
                            if (kSCsToThisOutNode.get(K - 1).getSecond() <= scOriginToIntermediateCost
                                    + scIntermediateToOutCost)
                                break; // do not add this SC (already full), and no more interm->out paths: all are worse
                        /* Add as many concatenated SCs as resources here, but do not exceed maximum size k of total list. Resource costs may not be ordered  */
                        for (Resource intermediateResource : intermediateNode
                                .getResources(intermediateNodeResourceType)) {
                            final Double intermediateResourceCost = resourceCost == null ? 0.0
                                    : resourceCost.get(intermediateResource);
                            if (intermediateResourceCost == Double.MAX_VALUE)
                                continue; // resources with infinite cost cannot be used
                            final double totalSCCost = scOriginToIntermediateCost + scIntermediateToOutCost
                                    + ((intermediateResourceCost == null) ? 0.0 : intermediateResourceCost);
                            if (totalSCCost > maxCostServiceChain)
                                continue; // do not add this, but maybe other resources later are cheaper
                            if ((kSCsToThisOutNode.size() == K)
                                    && (totalSCCost > kSCsToThisOutNode.get(K - 1).getSecond()))
                                continue; // do not add this, but maybe other resources later are cheaper 
                            /* Add this SC */
                            List<NetworkElement> newSC = new LinkedList<NetworkElement>(scOriginToIntermediate);
                            newSC.add(intermediateResource);
                            newSC.addAll(scIntermediateToOut);
                            /* store the total cost, including the traversed resource, consistent with the pruning above */
                            kSCsToThisOutNode.add(Pair.of(newSC, totalSCCost));
                            /* One SC was added, sort again, and remove the last SCs (higher cost), keep up to K */
                            Collections.sort(kSCsToThisOutNode, scComparator);
                            if (kSCsToThisOutNode.size() > K)
                                kSCsToThisOutNode = kSCsToThisOutNode.subList(0, K);
                        }
                    }
                }
            }
            new_outNodeToKSCsMap.put(newOutNode, kSCsToThisOutNode);
        }
        outNodeToKSCsMap = new_outNodeToKSCsMap;
    }
    if (!outNodeToKSCsMap.keySet().equals(Collections.singleton(destinationNode)))
        throw new RuntimeException("Bad");
    return outNodeToKSCsMap.get(destinationNode);
}
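
In the method above, Collectors.toCollection(HashSet::new) collects the host nodes of the usable resources into a guaranteed-mutable HashSet; plain Collectors.toSet() would also deduplicate, but it makes no guarantee about the concrete type or mutability of the returned Set. The pattern with plain JDK types (names are illustrative):

Set<Integer> hostIds = Stream.of(1, 2, 2, 3)
        .collect(Collectors.toCollection(HashSet::new)); // a mutable HashSet, duplicates dropped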