Example usage for java.util Collection forEach

List of usage examples for java.util Collection forEach

Introduction

On this page you can find example usage for java.util Collection forEach.

Prototype

default void forEach(Consumer<? super T> action) 

Document

Performs the given action for each element of the Iterable until all elements have been processed or the action throws an exception.
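
Before the project examples below, here is a minimal, self-contained sketch of the method in action (the class and variable names are illustrative only, not taken from any of the cited projects):

import java.util.Arrays;
import java.util.Collection;

public class ForEachDemo {
    public static void main(String[] args) {
        Collection<String> names = Arrays.asList("ada", "grace", "alan");

        // The Consumer is applied to each element in iteration order
        // (for collections that define one); an exception thrown by the
        // action stops the iteration.
        names.forEach(name -> System.out.println("Hello, " + name));

        // A method reference works anywhere a Consumer<? super T> is expected.
        names.forEach(System.out::println);
    }
}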

Usage

From source file: com.vmware.photon.controller.deployer.xenon.workflow.BatchCreateManagementWorkflowService.java

private void failTask(Collection<Throwable> failures) {
    failures.forEach(e -> ServiceUtils.logSevere(this, e));
    TaskUtils.sendSelfPatch(this, buildPatch(TaskState.TaskStage.FAILED, null, failures.iterator().next()));
}

From source file: com.orange.ngsi2.server.Ngsi2BaseController.java

private void validateSyntax(Collection<String> strings) {
    if (strings != null) {
        strings.forEach(this::validateSyntax);
    }
}

From source file: com.github.viktornar.task.PrintTask.java

private void executeCommand(Atlas atlas) {
    createAtlasFolder(atlas.getAtlasFolder());

    Collection<Future<?>> futures = new LinkedList<>();
    IntStream.range(1, atlas.getRows() + 1).forEachOrdered(row -> {
        IntStream.range(1, atlas.getColumns() + 1).forEachOrdered(column -> {
            futures.add(executorService.submit(() -> {
                final Process process;
                try {
                    logger.info(format(" [x] Start printing job [row:'%s', column: '%s'] : '%s'", row, column,
                            atlas.toString()));
                    Extent pageExtent = getExtentOfPage(atlas, column, row);
                    Atlas atlasPage = new Atlas();
                    atlasPage.copyBean(atlas);
                    atlasPage.setExtent(pageExtent);
                    logger.info(format(" [x] Printing job command: '%s'", getCommand(atlasPage, row, column)));
                    process = Runtime.getRuntime().exec(getCommand(atlasPage, row, column));
                    process.waitFor();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }));
        });
    });

    futures.forEach(future -> {
        try {
            future.get();
            Atlas _atlas = repository.getAtlasById(atlas.getId());
            _atlas.setProgress(_atlas.getProgress() + 1);
            repository.updateAtlas(_atlas);
            logger.info(format(" [x] Finished printing job: '%s'", atlas.toString()));
            Thread.sleep(1000); // Sleep for 1 s so the printing progress update is applied
        } catch (InterruptedException | ExecutionException e) {
            logger.error(format(" [x] Error on printing job: '%s'", atlas.toString()));
            throw new RuntimeException(e);
        }
    });

    mergePages(atlas.getAtlasFolder(), format("%s.pdf", atlas.getAtlasName()));
}

From source file: org.eclipse.hawkbit.mgmt.rest.resource.MgmtDistributionSetResourceTest.java

@Test
@Description("Ensures that a DS assigned target search with controllerId==1 parameter returns only the target with the given ID.")
public void searchDistributionSetAssignedTargetsRsql() throws Exception {
    // prepare distribution set
    final Set<DistributionSet> createDistributionSetsAlphabetical = createDistributionSetsAlphabetical(1);
    final DistributionSet createdDs = createDistributionSetsAlphabetical.iterator().next();
    // prepare targets
    final Collection<String> knownTargetIds = Arrays.asList("1", "2", "3", "4", "5");

    knownTargetIds.forEach(controllerId -> targetManagement
            .create(entityFactory.target().create().controllerId(controllerId)));

    // assign already one target to DS
    assignDistributionSet(createdDs.getId(), knownTargetIds.iterator().next());

    final String rsqlFindTargetId1 = "controllerId==1";

    mvc.perform(get(MgmtRestConstants.DISTRIBUTIONSET_V1_REQUEST_MAPPING + "/" + createdDs.getId()
            + "/assignedTargets?q=" + rsqlFindTargetId1).contentType(MediaType.APPLICATION_JSON))
            .andExpect(status().isOk()).andExpect(jsonPath("total", equalTo(1)))
            .andExpect(jsonPath("size", equalTo(1)))
            .andExpect(jsonPath("content[0].controllerId", equalTo("1")));
}

From source file: org.apache.bookkeeper.mledger.impl.EntryCacheImpl.java

@SuppressWarnings({ "unchecked", "rawtypes" })
private void asyncReadEntry0(ReadHandle lh, long firstEntry, long lastEntry, boolean isSlowestReader,
        final ReadEntriesCallback callback, Object ctx) {
    final long ledgerId = lh.getId();
    final int entriesToRead = (int) (lastEntry - firstEntry) + 1;
    final PositionImpl firstPosition = PositionImpl.get(lh.getId(), firstEntry);
    final PositionImpl lastPosition = PositionImpl.get(lh.getId(), lastEntry);

    if (log.isDebugEnabled()) {
        log.debug("[{}] Reading entries range ledger {}: {} to {}", ml.getName(), ledgerId, firstEntry,
                lastEntry);
    }

    Collection<EntryImpl> cachedEntries = entries.getRange(firstPosition, lastPosition);

    if (cachedEntries.size() == entriesToRead) {
        long totalCachedSize = 0;
        final List<EntryImpl> entriesToReturn = Lists.newArrayListWithExpectedSize(entriesToRead);

        // All entries found in cache
        for (EntryImpl entry : cachedEntries) {
            entriesToReturn.add(EntryImpl.create(entry));
            totalCachedSize += entry.getLength();
            entry.release();
        }

        manager.mlFactoryMBean.recordCacheHits(entriesToReturn.size(), totalCachedSize);
        if (log.isDebugEnabled()) {
            log.debug("[{}] Ledger {} -- Found in cache entries: {}-{}", ml.getName(), ledgerId, firstEntry,
                    lastEntry);
        }

        callback.readEntriesComplete((List) entriesToReturn, ctx);

    } else {
        if (!cachedEntries.isEmpty()) {
            cachedEntries.forEach(entry -> entry.release());
        }

        // Read all the entries from bookkeeper
        lh.readAsync(firstEntry, lastEntry).whenCompleteAsync((ledgerEntries, exception) -> {
            if (exception != null) {
                if (exception instanceof BKException
                        && ((BKException) exception).getCode() == BKException.Code.TooManyRequestsException) {
                    callback.readEntriesFailed(createManagedLedgerException(exception), ctx);
                } else {
                    ml.invalidateLedgerHandle(lh, exception);
                    ManagedLedgerException mlException = createManagedLedgerException(exception);
                    callback.readEntriesFailed(mlException, ctx);
                }
                return;
            }

            checkNotNull(ml.getName());
            checkNotNull(ml.getExecutor());

            try {
                // We got the entries, we need to transform them to a List<> type
                long totalSize = 0;
                final List<EntryImpl> entriesToReturn = Lists.newArrayListWithExpectedSize(entriesToRead);
                for (LedgerEntry e : ledgerEntries) {
                    EntryImpl entry = EntryImpl.create(e);

                    entriesToReturn.add(entry);
                    totalSize += entry.getLength();
                }

                manager.mlFactoryMBean.recordCacheMiss(entriesToReturn.size(), totalSize);
                ml.getMBean().addReadEntriesSample(entriesToReturn.size(), totalSize);

                callback.readEntriesComplete((List) entriesToReturn, ctx);
            } finally {
                ledgerEntries.close();
            }
        }, ml.getExecutor().chooseThread(ml.getName()));
    }
}

From source file: org.onosproject.segmentrouting.grouphandler.DefaultGroupHandler.java

/**
 * Creates a single broadcast group from a given vlan id and list of ports.
 *
 * @param vlanId vlan id
 * @param ports list of ports in the subnet
 */
public void createBcastGroupFromVlan(VlanId vlanId, Collection<PortNumber> ports) {
    VlanNextObjectiveStoreKey key = new VlanNextObjectiveStoreKey(deviceId, vlanId);

    if (vlanNextObjStore.containsKey(key)) {
        log.debug("Broadcast group for device {} and subnet {} exists", deviceId, vlanId);
        return;
    }

    TrafficSelector metadata = DefaultTrafficSelector.builder().matchVlanId(vlanId).build();

    int nextId = flowObjectiveService.allocateNextId();

    NextObjective.Builder nextObjBuilder = DefaultNextObjective.builder().withId(nextId)
            .withType(NextObjective.Type.BROADCAST).fromApp(appId).withMeta(metadata);

    ports.forEach(port -> {
        TrafficTreatment.Builder tBuilder = DefaultTrafficTreatment.builder();
        if (toPopVlan(port, vlanId)) {
            tBuilder.popVlan();
        }
        tBuilder.setOutput(port);
        nextObjBuilder.addTreatment(tBuilder.build());
    });

    ObjectiveContext context = new DefaultObjectiveContext(
            (objective) -> log.debug("createBroadcastGroupFromVlan installed " + "NextObj {} on {}", nextId,
                    deviceId),
            (objective, error) -> log.warn(
                    "createBroadcastGroupFromVlan failed to install" + " NextObj {} on {}: {}", nextId,
                    deviceId, error));
    NextObjective nextObj = nextObjBuilder.add(context);
    flowObjectiveService.next(deviceId, nextObj);
    log.debug("createBcastGroupFromVlan: Submited next objective {} in device {}", nextId, deviceId);

    vlanNextObjStore.put(key, nextId);
}

From source file: com.evolveum.midpoint.testing.story.TestStrings.java

private Map<String, Message> sortByRecipientsSingle(Collection<Message> messages) {
    Map<String, Message> rv = new HashMap<>();
    messages.forEach(m -> m.getTo().forEach(to -> rv.put(to, m)));
    return rv;
}

From source file: org.apache.samza.execution.JobNodeConfigurationGenerator.java

/**
 * Serializes the {@link Serde} instances for operators, adds them to the provided config, and
 * sets the serde configuration for the input/output/intermediate streams appropriately.
 *
 * We try to preserve the number of Serde instances before and after serialization. However, we don't
 * guarantee that references shared between these serde instances (e.g. a Jackson ObjectMapper shared
 * between two JSON serdes) are still shared after deserialization.
 *
 * Ideally, all the user-defined objects in the application should be serialized and de-serialized in one pass
 * from the same output/input stream so that we can maintain reference-sharing relationships.
 *
 * @param configs the configs to add serialized serde instances and stream serde configs to
 */
private void configureSerdes(Map<String, String> configs, Map<String, StreamEdge> inEdges,
        Map<String, StreamEdge> outEdges, List<StoreDescriptor> stores, Collection<String> tables,
        JobNode jobNode) {
    // collect all key and msg serde instances for streams
    Map<String, Serde> streamKeySerdes = new HashMap<>();
    Map<String, Serde> streamMsgSerdes = new HashMap<>();
    inEdges.keySet().forEach(streamId -> addSerdes(jobNode.getInputSerdes(streamId), streamId, streamKeySerdes,
            streamMsgSerdes));
    outEdges.keySet().forEach(streamId -> addSerdes(jobNode.getOutputSerde(streamId), streamId, streamKeySerdes,
            streamMsgSerdes));

    Map<String, Serde> storeKeySerdes = new HashMap<>();
    Map<String, Serde> storeMsgSerdes = new HashMap<>();
    stores.forEach(storeDescriptor -> {
        storeKeySerdes.put(storeDescriptor.getStoreName(), storeDescriptor.getKeySerde());
        storeMsgSerdes.put(storeDescriptor.getStoreName(), storeDescriptor.getMsgSerde());
    });

    Map<String, Serde> tableKeySerdes = new HashMap<>();
    Map<String, Serde> tableMsgSerdes = new HashMap<>();
    tables.forEach(tableId -> {
        addSerdes(jobNode.getTableSerdes(tableId), tableId, tableKeySerdes, tableMsgSerdes);
    });

    // for each unique stream or store serde instance, generate a unique name and serialize to config
    HashSet<Serde> serdes = new HashSet<>(streamKeySerdes.values());
    serdes.addAll(streamMsgSerdes.values());
    serdes.addAll(storeKeySerdes.values());
    serdes.addAll(storeMsgSerdes.values());
    serdes.addAll(tableKeySerdes.values());
    serdes.addAll(tableMsgSerdes.values());
    SerializableSerde<Serde> serializableSerde = new SerializableSerde<>();
    Base64.Encoder base64Encoder = Base64.getEncoder();
    Map<Serde, String> serdeUUIDs = new HashMap<>();
    serdes.forEach(serde -> {
        String serdeName = serdeUUIDs.computeIfAbsent(serde,
                s -> serde.getClass().getSimpleName() + "-" + UUID.randomUUID().toString());
        configs.putIfAbsent(String.format(SerializerConfig.SERDE_SERIALIZED_INSTANCE(), serdeName),
                base64Encoder.encodeToString(serializableSerde.toBytes(serde)));
    });

    // set key and msg serdes for streams to the serde names generated above
    streamKeySerdes.forEach((streamId, serde) -> {
        String streamIdPrefix = String.format(StreamConfig.STREAM_ID_PREFIX(), streamId);
        String keySerdeConfigKey = streamIdPrefix + StreamConfig.KEY_SERDE();
        configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
    });

    streamMsgSerdes.forEach((streamId, serde) -> {
        String streamIdPrefix = String.format(StreamConfig.STREAM_ID_PREFIX(), streamId);
        String valueSerdeConfigKey = streamIdPrefix + StreamConfig.MSG_SERDE();
        configs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
    });

    // set key and msg serdes for stores to the serde names generated above
    storeKeySerdes.forEach((storeName, serde) -> {
        String keySerdeConfigKey = String.format(StorageConfig.KEY_SERDE, storeName);
        configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
    });

    storeMsgSerdes.forEach((storeName, serde) -> {
        String msgSerdeConfigKey = String.format(StorageConfig.MSG_SERDE, storeName);
        configs.put(msgSerdeConfigKey, serdeUUIDs.get(serde));
    });

    // set key and msg serdes for tables to the serde names generated above
    tableKeySerdes.forEach((tableId, serde) -> {
        String keySerdeConfigKey = String.format(JavaTableConfig.STORE_KEY_SERDE, tableId);
        configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
    });

    tableMsgSerdes.forEach((tableId, serde) -> {
        String valueSerdeConfigKey = String.format(JavaTableConfig.STORE_MSG_SERDE, tableId);
        configs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
    });
}

From source file: com.streamsets.pipeline.stage.cloudstorage.destination.GoogleCloudStorageTarget.java

@Override
public void write(Batch batch) throws StageException {
    String pathExpression = GcsUtil.normalizePrefix(gcsTargetConfig.commonPrefix)
            + gcsTargetConfig.partitionTemplate;
    if (gcsTargetConfig.dataFormat == DataFormat.WHOLE_FILE) {
        handleWholeFileFormat(batch, elVars);
    } else {
        Multimap<String, Record> pathToRecordMap = ELUtils.partitionBatchByExpression(partitionEval, elVars,
                pathExpression, timeDriverElEval, elVars, gcsTargetConfig.timeDriverTemplate,
                Calendar.getInstance(TimeZone.getTimeZone(ZoneId.of(gcsTargetConfig.timeZoneID))), batch);

        pathToRecordMap.keySet().forEach(path -> {
            Collection<Record> records = pathToRecordMap.get(path);
            String fileName = GcsUtil.normalizePrefix(path) + gcsTargetConfig.fileNamePrefix + '_'
                    + UUID.randomUUID();
            if (StringUtils.isNotEmpty(gcsTargetConfig.fileNameSuffix)) {
                fileName = fileName + "." + gcsTargetConfig.fileNameSuffix;
            }
            try {
                ByteArrayOutputStream bOut = new ByteArrayOutputStream();
                OutputStream os = bOut;
                if (gcsTargetConfig.compress) {
                    fileName = fileName + ".gz";
                    os = new GZIPOutputStream(bOut);
                }
                BlobId blobId = BlobId.of(gcsTargetConfig.bucketTemplate, fileName);
                BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType(getContentType()).build();
                final AtomicInteger recordsWithoutErrors = new AtomicInteger(0);
                try (DataGenerator dg = gcsTargetConfig.dataGeneratorFormatConfig.getDataGeneratorFactory()
                        .getGenerator(os)) {
                    records.forEach(record -> {
                        try {
                            dg.write(record);
                            recordsWithoutErrors.incrementAndGet();
                        } catch (DataGeneratorException | IOException e) {
                            LOG.error("Error writing record {}. Reason {}", record.getHeader().getSourceId(),
                                    e);
                            getContext().toError(record, Errors.GCS_02, record.getHeader().getSourceId(), e);
                        }
                    });
                } catch (IOException e) {
                    LOG.error("Error happened when creating Output stream. Reason {}", e);
                    records.forEach(record -> getContext().toError(record, e));
                }

                try {
                    if (recordsWithoutErrors.get() > 0) {
                        Blob blob = storage.create(blobInfo, bOut.toByteArray());
                        GCSEvents.GCS_OBJECT_WRITTEN.create(getContext())
                                .with(GCSEvents.BUCKET, blob.getBucket())
                                .with(GCSEvents.OBJECT_KEY, blob.getName())
                                .with(GCSEvents.RECORD_COUNT, recordsWithoutErrors.longValue()).createAndSend();
                    }
                } catch (StorageException e) {
                    LOG.error("Error happened when writing to Output stream. Reason {}", e);
                    records.forEach(record -> getContext().toError(record, e));
                }
            } catch (IOException e) {
                LOG.error("Error happened when creating Output stream. Reason {}", e);
                records.forEach(record -> getContext().toError(record, e));
            }
        });
    }
}

From source file: com.evolveum.midpoint.testing.story.TestStrings.java

private ArrayListValuedHashMap<String, Message> sortByRecipients(Collection<Message> messages) {
    ArrayListValuedHashMap<String, Message> rv = new ArrayListValuedHashMap<>();
    messages.forEach(m -> m.getTo().forEach(to -> rv.put(to, m)));
    return rv;
}