Example usage for java.util.List.forEach

A list of usage examples for java.util.List.forEach

Introduction

On this page you can find example usage for java.util.List.forEach.

Prototype

default void forEach(Consumer<? super T> action) 

Document

Performs the given action for each element of the Iterable until all elements have been processed or the action throws an exception.
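
A minimal, self-contained sketch of the call (class and variable names here are illustrative, not taken from the projects below):

import java.util.Arrays;
import java.util.List;

public class ForEachSketch {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("alpha", "beta", "gamma");
        // forEach applies the given Consumer to every element in encounter order
        names.forEach(name -> System.out.println(name.toUpperCase()));
    }
}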

Usage

From source file:com.hurence.logisland.connect.opc.CommonUtils.java

/**
 * Validate the tag configuration.
 *
 * @param tags  the list of tag ids.
 * @param freqs the list of tag sampling frequencies.
 * @param modes the list of tag streaming modes.
 * @throws IllegalArgumentException if the configuration is invalid.
 */
public static final void validateTagConfig(List<String> tags, List<String> freqs, List<String> modes) {
    //validate
    if (tags == null || tags.isEmpty()) {
        throw new IllegalArgumentException("Tag id list should not be empty");
    }
    if (freqs == null || freqs.size() != tags.size()) {
        throw new IllegalArgumentException("You should provide exactly one sampling rate per tag id");
    }
    if (modes == null || modes.size() != tags.size()) {
        throw new IllegalArgumentException("You should provide exactly one streaming mode per tag id");
    }
    freqs.forEach(freq -> {
        try {
            if (StringUtils.isNotBlank(freq)) {
                Duration.parse(freq);
            }
        } catch (Exception e) {
            throw new IllegalArgumentException("Unrecognized sampling rate: " + freq);
        }
    });
    modes.forEach(mode -> {
        try {
            if (StringUtils.isNotBlank(mode)) {
                StreamingMode.valueOf(mode);
            }
        } catch (Exception e) {
            throw new IllegalArgumentException("Unrecognized streaming mode: " + mode);
        }
    });
}
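
As the documentation above notes, an exception thrown by the action stops the iteration and propagates to the caller, which is what makes the validation pattern in this example work. A minimal sketch of the same idea (names are illustrative only):

import java.time.Duration;
import java.util.Arrays;
import java.util.List;

public class ForEachValidationSketch {
    public static void main(String[] args) {
        List<String> freqs = Arrays.asList("PT1S", "not-a-duration");
        try {
            freqs.forEach(freq -> {
                try {
                    Duration.parse(freq); // throws DateTimeParseException for the second element
                } catch (Exception e) {
                    throw new IllegalArgumentException("Unrecognized sampling rate: " + freq);
                }
            });
        } catch (IllegalArgumentException e) {
            // the exception raised inside the action reaches the caller and ends the iteration
            System.out.println(e.getMessage());
        }
    }
}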

From source file:io.dropwizard.revolver.RevolverBundle.java

private static Map<String, RevolverHttpApiConfig> generateApiConfigMap(
        final RevolverHttpServiceConfig serviceConfiguration) {
    val tokenMatch = Pattern.compile("\\{(([^/])+\\})");
    List<RevolverHttpApiConfig> apis = serviceConfiguration.getApis().stream().collect(Collectors.toList());
    Collections.sort(apis, (o1, o2) -> {
        String o1Expr = generatePathExpression(o1.getPath());
        String o2Expr = generatePathExpression(o2.getPath());
        return tokenMatch.matcher(o2Expr).groupCount() - tokenMatch.matcher(o1Expr).groupCount();
    });
    Collections.sort(apis, (o1, o2) -> o1.getPath().compareTo(o2.getPath()));
    apis.forEach(apiConfig -> serviceToPathMap.add(serviceConfiguration.getService(),
            ApiPathMap.builder().api(apiConfig).path(generatePathExpression(apiConfig.getPath())).build()));
    final ImmutableMap.Builder<String, RevolverHttpApiConfig> configMapBuilder = ImmutableMap.builder();
    apis.forEach(apiConfig -> configMapBuilder.put(apiConfig.getApi(), apiConfig));
    return configMapBuilder.build();
}

From source file:jp.co.opentone.bsol.linkbinder.util.elasticsearch.CorresponDocumentConverter.java

/**
 * Converts a correspon and its attachments into Elasticsearch documents.
 * @param correspon the correspon to convert
 * @param attachments the attachments of the correspon
 * @return the documents to be indexed
 */
public static List<ElasticsearchDocument> convert(Correspon correspon, List<Attachment> attachments) {
    List<ElasticsearchDocument> result = Lists.newArrayList();
    CorresponElasticsearchDocument doc = new CorresponElasticsearchDocument();
    doc.id = String.valueOf(correspon.getId());
    doc.type = SystemConfig.getValue(Constants.KEY_ELASTICSEARCH_TYPE_NAME);
    doc.title = correspon.getSubject();
    doc.body = Jsoup.parse(correspon.getBody()).text();
    doc.lastModified = DateUtil.convertDateToString(correspon.getUpdatedAt());
    doc.workflowStatus = String.valueOf(correspon.getWorkflowStatus().getValue());
    doc.forLearning = String.valueOf(correspon.getForLearning().getValue());

    result.add(doc);

    attachments.forEach(a -> {
        CorresponElasticsearchDocument attachmentDoc = new CorresponElasticsearchDocument();
        attachmentDoc.id = ObjectUtils.toString(a.getId()) + "@" + correspon.getId();
        attachmentDoc.type = doc.type;
        attachmentDoc.title = StringUtils.EMPTY;
        attachmentDoc.body = StringUtils.EMPTY;
        attachmentDoc.lastModified = doc.lastModified;
        attachmentDoc.workflowStatus = doc.workflowStatus;
        attachmentDoc.forLearning = doc.forLearning;

        attachmentDoc.attachments = Lists
                .newArrayList(new CorresponElasticsearchDocument.Attachment(ObjectUtils.toString(a.getId()),
                        a.getFileName(), ConvertUtil.toBase64String(a.getContent()), a.getExtractedText()));

        result.add(attachmentDoc);
    });

    return result;
}

From source file:de.fosd.jdime.Main.java

/**
 * Perform a merge operation on the input files or directories.
 *
 * @param args
 *         command line arguments
 */
public static void run(String[] args) {
    MergeContext context = new MergeContext();

    if (!parseCommandLineArgs(context, args)) {
        return;
    }

    List<FileArtifact> inputFiles = context.getInputFiles();

    if (context.isInspect()) {
        inspectElement(inputFiles.get(0), context.getInspectArtifact(), context.getInspectionScope());
        return;
    }

    if (context.getDumpMode() != DumpMode.NONE) {
        inputFiles.forEach(artifact -> dump(artifact, context.getDumpMode()));
        return;
    }

    try {
        merge(context);
        output(context);
    } finally {
        outputStatistics(context);
    }

    if (LOG.isLoggable(Level.FINE)) {
        Map<MergeScenario<?>, Throwable> crashes = context.getCrashes();

        if (crashes.isEmpty()) {
            LOG.fine("No crashes occurred while merging.");
        } else {
            String ls = System.lineSeparator();
            StringBuilder sb = new StringBuilder();

            sb.append(String.format("%d crashes occurred while merging:%n", crashes.size()));

            for (Map.Entry<MergeScenario<?>, Throwable> entry : crashes.entrySet()) {
                sb.append("* ").append(entry.getValue().toString()).append(ls);
                sb.append("    ").append(entry.getKey().toString().replace(" ", ls + "    ")).append(ls);
            }

            LOG.fine(sb.toString());
        }
    }
}

From source file:com.vmware.photon.controller.api.frontend.utils.SecurityGroupUtils.java

/**
 * Merge the 'self' security groups to existing security groups.
 *
 * @param existingSecurityGroups Existing security groups including both inherited and self ones.
 * @param selfSecurityGroups     'self' security groups to be merged.
 * @return The merging result and security groups not being merged.
 */
public static Pair<List<SecurityGroup>, List<String>> mergeSelfSecurityGroups(
        List<SecurityGroup> existingSecurityGroups, List<String> selfSecurityGroups) {

    checkNotNull(existingSecurityGroups, "Provided value for existingSecurityGroups is unacceptably null");
    checkNotNull(selfSecurityGroups, "Provided value for selfSecurityGroups is unacceptably null");

    List<SecurityGroup> mergedSecurityGroups = new ArrayList<>();
    List<String> securityGroupsNotMerged = new ArrayList<>();
    Set<String> inheritedSecurityGroupNames = new HashSet<>();

    existingSecurityGroups.stream().filter(g -> g.isInherited()).forEach(g -> {
        mergedSecurityGroups.add(g);
        inheritedSecurityGroupNames.add(g.getName());
    });

    selfSecurityGroups.forEach(g -> {
        if (!inheritedSecurityGroupNames.contains(g)) {
            mergedSecurityGroups.add(new SecurityGroup(g, false));
        } else {
            securityGroupsNotMerged.add(g);
        }
    });

    return Pair.of(mergedSecurityGroups, securityGroupsNotMerged);
}

From source file:ai.grakn.graph.internal.computer.GraknSparkExecutor.java

public static <M> JavaPairRDD<Object, ViewIncomingPayload<M>> executeVertexProgramIteration(
        final JavaPairRDD<Object, VertexWritable> graphRDD,
        final JavaPairRDD<Object, ViewIncomingPayload<M>> viewIncomingRDD, final GraknSparkMemory memory,
        final Configuration apacheConfiguration) {

    // the graphRDD and the viewRDD must have the same partitioner
    if (null != viewIncomingRDD)
        assert graphRDD.partitioner().get().equals(viewIncomingRDD.partitioner().get());
    final JavaPairRDD<Object, ViewOutgoingPayload<M>> viewOutgoingRDD = (((null == viewIncomingRDD)
            ? graphRDD.mapValues(
                    vertexWritable -> new Tuple2<>(vertexWritable, Optional.<ViewIncomingPayload<M>>absent()))
            : // first iteration will not have any views or messages
            graphRDD.leftOuterJoin(viewIncomingRDD)) // every other iteration may have views and messages
                    // for each partition of vertices emit a view and their outgoing messages
                    .mapPartitionsToPair(partitionIterator -> {
                        HadoopPools.initialize(apacheConfiguration);
                        final VertexProgram<M> workerVertexProgram = VertexProgram
                                .<VertexProgram<M>>createVertexProgram(HadoopGraph.open(apacheConfiguration),
                                        apacheConfiguration); // each partition(Spark)/worker(TP3) has a local copy of the vertex program (a worker's task)
                        final Set<String> elementComputeKeys = workerVertexProgram.getElementComputeKeys(); // the compute keys as a set
                        final String[] elementComputeKeysArray = elementComputeKeys.size() == 0 ? EMPTY_ARRAY
                                : elementComputeKeys.toArray(new String[elementComputeKeys.size()]); // the compute keys as an array
                        final SparkMessenger<M> messenger = new SparkMessenger<>();
                        workerVertexProgram.workerIterationStart(memory.asImmutable()); // start the worker
                        return () -> IteratorUtils.map(partitionIterator, vertexViewIncoming -> {
                            final StarGraph.StarVertex vertex = vertexViewIncoming._2()._1().get(); // get the vertex from the vertex writable
                            synchronized (vertex) {
                                // drop any computed properties that are cached in memory
                                if (elementComputeKeysArray.length > 0) {
                                    vertex.dropVertexProperties(elementComputeKeysArray);
                                }
                                final boolean hasViewAndMessages = vertexViewIncoming._2()._2().isPresent(); // if this is the first iteration, then there are no views or messages
                                final List<DetachedVertexProperty<Object>> previousView = hasViewAndMessages
                                        ? vertexViewIncoming._2()._2().get().getView()
                                        : Collections.emptyList();
                                final List<M> incomingMessages = hasViewAndMessages
                                        ? vertexViewIncoming._2()._2().get().getIncomingMessages()
                                        : Collections.emptyList();
                                previousView
                                        .forEach(property -> property.attach(Attachable.Method.create(vertex))); // attach the view to the vertex
                                // previousView.clear(); // no longer needed so kill it from memory
                                ///
                                messenger.setVertexAndIncomingMessages(vertex, incomingMessages); // set the messenger with the incoming messages
                                workerVertexProgram.execute(
                                        ComputerGraph.vertexProgram(vertex, workerVertexProgram), messenger,
                                        memory); // execute the vertex program on this vertex for this iteration
                                // incomingMessages.clear(); // no longer needed so kill it from memory
                                ///
                                final List<DetachedVertexProperty<Object>> nextView = elementComputeKeysArray.length == 0
                                        ? Collections.emptyList() // not all vertex programs have compute keys
                                        : IteratorUtils.list(
                                                IteratorUtils.map(vertex.properties(elementComputeKeysArray),
                                                        property -> DetachedFactory.detach(property, true)));
                                final List<Tuple2<Object, M>> outgoingMessages = messenger
                                        .getOutgoingMessages(); // get the outgoing messages

                                // if no more vertices in the partition, end the worker's iteration
                                if (!partitionIterator.hasNext()) {
                                    workerVertexProgram.workerIterationEnd(memory.asImmutable());
                                }
                                return new Tuple2<>(vertex.id(),
                                        new ViewOutgoingPayload<>(nextView, outgoingMessages));
                            }
                        });
                    }, true)); // true means that the partition is preserved
    // the graphRDD and the viewRDD must have the same partitioner
    assert graphRDD.partitioner().get().equals(viewOutgoingRDD.partitioner().get());
    // "message pass" by reducing on the vertex object id of the view and message payloads
    final MessageCombiner<M> messageCombiner = VertexProgram
            .<VertexProgram<M>>createVertexProgram(HadoopGraph.open(apacheConfiguration), apacheConfiguration)
            .getMessageCombiner().orElse(null);
    final JavaPairRDD<Object, ViewIncomingPayload<M>> newViewIncomingRDD = viewOutgoingRDD
            .flatMapToPair(tuple -> () -> IteratorUtils.<Tuple2<Object, Payload>>concat(
                    IteratorUtils.of(new Tuple2<>(tuple._1(), tuple._2().getView())), // emit the view payload
                    IteratorUtils.map(tuple._2().getOutgoingMessages().iterator(),
                            message -> new Tuple2<>(message._1(), new MessagePayload<>(message._2()))))) // emit the outgoing message payloads one by one
            .reduceByKey(graphRDD.partitioner().get(), (a, b) -> { // reduce the view and outgoing messages into a single payload object representing the new view and incoming messages for a vertex
                if (a instanceof ViewIncomingPayload) {
                    ((ViewIncomingPayload<M>) a).mergePayload(b, messageCombiner);
                    return a;
                } else if (b instanceof ViewIncomingPayload) {
                    ((ViewIncomingPayload<M>) b).mergePayload(a, messageCombiner);
                    return b;
                } else {
                    final ViewIncomingPayload<M> c = new ViewIncomingPayload<>(messageCombiner);
                    c.mergePayload(a, messageCombiner);
                    c.mergePayload(b, messageCombiner);
                    return c;
                }
            }).filter(payload -> !(payload._2() instanceof MessagePayload)) // this happens if there is a message to a vertex that does not exist
            .filter(payload -> !((payload._2() instanceof ViewIncomingPayload)
                    && !((ViewIncomingPayload<M>) payload._2()).hasView())) // this happens if there are many messages to a vertex that does not exist
            .mapValues(payload -> payload instanceof ViewIncomingPayload ? (ViewIncomingPayload<M>) payload : // this happens if there is a vertex with incoming messages
                    new ViewIncomingPayload<>((ViewPayload) payload)); // this happens if there is a vertex with no incoming messages
    // the graphRDD and the viewRDD must have the same partitioner
    assert graphRDD.partitioner().get().equals(newViewIncomingRDD.partitioner().get());
    newViewIncomingRDD.foreachPartition(partitionIterator -> {
        HadoopPools.initialize(apacheConfiguration);
    }); // need to complete a task so its BSP and the memory for this iteration is updated
    return newViewIncomingRDD;
}

From source file:edu.usu.sdl.openstorefront.doc.JaxrsProcessor.java

private static void mapComplexTypes(List<APITypeModel> typeModels, Field fields[], boolean onlyConsumeField) {
    //Should strip duplicate types
    Set<String> typesInList = new HashSet<>();
    typeModels.forEach(type -> {
        typesInList.add(type.getName());
    });

    ObjectMapper objectMapper = new ObjectMapper();
    objectMapper.enable(SerializationFeature.INDENT_OUTPUT);
    objectMapper.disable(SerializationFeature.FAIL_ON_EMPTY_BEANS);
    for (Field field : fields) {
        boolean capture = true;
        if (onlyConsumeField) {
            ConsumeField consumeField = (ConsumeField) field.getAnnotation(ConsumeField.class);
            if (consumeField == null) {
                capture = false;
            }
        }

        if (capture) {

            Class fieldClass = field.getType();
            DataType dataType = (DataType) field.getAnnotation(DataType.class);
            if (dataType != null) {
                fieldClass = dataType.value();
            }

            if (ReflectionUtil.isComplexClass(fieldClass)) {

                APITypeModel typeModel = new APITypeModel();
                typeModel.setName(fieldClass.getSimpleName());

                APIDescription aPIDescription = (APIDescription) fieldClass.getAnnotation(APIDescription.class);
                if (aPIDescription != null) {
                    typeModel.setDescription(aPIDescription.value());
                }

                Set<String> fieldList = mapValueField(typeModel.getFields(), fieldClass.getDeclaredFields(),
                        onlyConsumeField);
                if (fieldClass.isEnum()) {
                    typeModel.setObject(Arrays.toString(fieldClass.getEnumConstants()));
                } else {
                    if (fieldClass.isInterface() == false) {
                        try {
                            typeModel.setObject(objectMapper.writeValueAsString(fieldClass.newInstance()));

                            String cleanUpJson = StringProcessor.stripeFieldJSON(typeModel.getObject(),
                                    fieldList);
                            typeModel.setObject(cleanUpJson);

                        } catch (InstantiationException | IllegalAccessException | JsonProcessingException ex) {
                            log.log(Level.WARNING,
                                    "Unable to process/map complex field: " + fieldClass.getSimpleName(), ex);
                            typeModel.setObject("{ Unable to view }");
                        }
                        mapComplexTypes(typeModels, fieldClass.getDeclaredFields(), onlyConsumeField);
                    }
                }
                typeModels.add(typeModel);
                typesInList.add(typeModel.getName());
            }

        }
    }
}

From source file:com.evolveum.midpoint.schema.util.WfContextUtil.java

private static void collectNotifyBefore(List<Pair<Duration, AbstractWorkItemActionType>> rv,
        List<Duration> beforeTimes, AbstractWorkItemActionType action) {
    beforeTimes.forEach(beforeTime -> rv.add(new ImmutablePair<>(beforeTime, action)));
}

From source file:delfos.rs.trustbased.WeightedGraph.java

private static void validateWeightsGraph(AdjMatrixEdgeWeightedDigraph adjMatrixEdgeWeightedDigraph) {

    List<DirectedEdge> allEdges = IntStream.range(0, adjMatrixEdgeWeightedDigraph.V()).boxed().parallel()
            .map(vertex -> {
                Iterable<DirectedEdge> iterator = adjMatrixEdgeWeightedDigraph.adj(vertex);
                ArrayList<DirectedEdge> listOfEdges = new ArrayList<>();
                for (DirectedEdge edge : iterator) {
                    listOfEdges.add(edge);
                }
                return listOfEdges;
            }).flatMap(listOfEdges -> listOfEdges.parallelStream()).collect(Collectors.toList());

    List<DirectedEdge> badEdges = allEdges.parallelStream()
            .filter(edge -> (edge.weight() < 0) || (edge.weight() > 1)).collect(Collectors.toList());

    if (!badEdges.isEmpty()) {
        System.out.println("List of bad edges:");
        badEdges.forEach(edge -> System.out.println("\t" + edge));
        throw new IllegalStateException("arg");
    }

}

From source file:com.wrmsr.search.dsl.util.DerivedSuppliers.java

public static <T> Class<? extends Supplier<T>> compile(java.lang.reflect.Method target,
        ClassLoader parentClassLoader) throws ReflectiveOperationException {
    checkArgument((target.getModifiers() & STATIC.getModifier()) > 0);
    List<TargetParameter> targetParameters = IntStream.range(0, target.getParameterCount()).boxed()
            .map(i -> new TargetParameter(target, i)).collect(toImmutableList());

    java.lang.reflect.Type targetReturnType = target.getGenericReturnType();
    java.lang.reflect.Type suppliedType = boxType(targetReturnType);
    checkArgument(suppliedType instanceof Class);

    ClassDefinition classDefinition = new ClassDefinition(a(PUBLIC, FINAL),
            CompilerUtils.makeClassName(
                    "DerivedSupplier__" + target.getDeclaringClass().getName() + "__" + target.getName()),
            type(Object.class), type(Supplier.class, fromReflectType(suppliedType)));

    targetParameters.forEach(p -> classDefinition.addField(a(PRIVATE, FINAL), p.name,
            type(Supplier.class, p.parameterizedBoxedType)));
    Map<String, FieldDefinition> classFieldDefinitionMap = classDefinition.getFields().stream()
            .collect(toImmutableMap(f -> f.getName(), f -> f));

    compileConstructor(classDefinition, classFieldDefinitionMap, targetParameters);
    compileGetter(classDefinition, classFieldDefinitionMap, target, targetParameters);

    Class clazz = defineClass(classDefinition, Object.class, ImmutableMap.of(),
            new DynamicClassLoader(parentClassLoader));
    return clazz;
}