Example usage for java.util.Map.forEach

List of usage examples for java.util.Map.forEach

Introduction

On this page you can find usage examples for java.util.Map.forEach.

Prototype

default void forEach(BiConsumer<? super K, ? super V> action) 

Document

Performs the given action for each entry in this map until all entries have been processed or the action throws an exception.
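
Before the project-specific examples below, here is a minimal, self-contained sketch of the method itself; the class name MapForEachDemo and the sample data are illustrative only:

import java.util.LinkedHashMap;
import java.util.Map;

public class MapForEachDemo {
    public static void main(String[] args) {
        // LinkedHashMap preserves insertion order, so forEach visits entries predictably.
        Map<String, Integer> wordCounts = new LinkedHashMap<>();
        wordCounts.put("alpha", 3);
        wordCounts.put("beta", 5);

        // The BiConsumer receives each key and value in turn; if it throws,
        // iteration stops and the exception propagates to the caller.
        StringBuilder sb = new StringBuilder();
        wordCounts.forEach((word, count) -> sb.append(word).append('=').append(count).append('\n'));
        System.out.print(sb);
    }
}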

Usage

From source file:org.commonjava.indy.filer.def.migrate.PackageTypedStorageMigrationAction.java

private boolean doMigrate() throws IndyLifecycleException {
    Set<ArtifactStore> stores;
    try {
        stores = storeDataManager.getAllArtifactStores();
    } catch (IndyDataException e) {
        throw new IndyLifecycleException(
                "Cannot retrieve list of repositories and groups in order to review storage locations. Reason: %s",
                e, e.getMessage());
    }

    File storageRoot = config.getStorageRootDirectory();
    File nfsStorageRoot = config.getNFSStorageRootDirectory();

    int migrations = 0;
    Map<File, File> unmigratedNfs = new HashMap<>();
    for (ArtifactStore store : stores) {
        File old = deprecatedStoragePath(storageRoot, store);
        File migrated = packageTypedStoragePath(storageRoot, store);

        if (old.exists()) {
            logger.info("Attempting to migrate existing storage from old directory structure: {} "
                    + "to package-typed structure: {}", old, migrated);

            try {
                if (migrated.exists()) {
                    FileUtils.copyDirectory(old, migrated);
                    FileUtils.forceDelete(old);
                } else {
                    FileUtils.moveDirectory(old, migrated);
                }

                migrations++;
            } catch (IOException e) {
                throw new IndyLifecycleException("Failed to migrate: %s to: %s. Reason: %s", e, old, migrated);
            }
        }

        if (nfsStorageRoot != null) {
            File oldNfs = deprecatedStoragePath(nfsStorageRoot, store);
            File migratedNfs = packageTypedStoragePath(nfsStorageRoot, store);
            if (oldNfs.exists() && !migratedNfs.exists()) {
                unmigratedNfs.put(oldNfs, migratedNfs);
            }
        }
    }

    if (!unmigratedNfs.isEmpty()) {
        StringBuilder sb = new StringBuilder();
        sb.append("ERROR: Un-migrated directories detected on NFS storage!!!!");
        sb.append("\n\nThese directories still use the old <type>/<name> directory format. Indy now supports");
        sb.append(
                "\nmultiple package types, and the storage format has changed accordingly. The new format is:");
        sb.append("\n\n    <package-type>/<type>/<name>");
        sb.append("\n\nPlease migrate these NFS directories manually. For Maven repositories:");
        sb.append("\n\n    maven/<type>/<name>");
        sb.append("\n\nFor HTTProx repositories (httprox_*):");
        sb.append("\n\n    generic-http/<type>/<name>");
        sb.append("\n\nThe following directories were detected:\n");
        unmigratedNfs.forEach((o, n) -> sb.append("\n    ").append(o).append("  =>  ").append(n));
        sb.append("\n\n");

        logger.error(sb.toString());

        throw new IndyLifecycleException(
                "Un-migrated NFS directories detected. Indy cannot start until this has been resolved.");
    }

    return migrations > 0;
}

From source file:org.nanoframework.orm.jedis.sharded.RedisClientImpl.java

@Override
public Map<String, Boolean> setByNX(final Map<String, Object> map) {
    Assert.notEmpty(map);

    ShardedJedis jedis = null;
    try {
        jedis = POOL.getJedis(config.getRedisType());
        final ShardedJedisPipeline pipelined = jedis.pipelined();
        final Map<String, Response<Long>> responses = Maps.newHashMap();
        for (Entry<String, Object> entry : map.entrySet()) {
            responses.put(entry.getKey(), pipelined.setnx(entry.getKey(), toJSONString(entry.getValue())));
        }

        pipelined.sync();
        final Map<String, Boolean> values = Maps.newHashMap();
        responses.forEach((key, response) -> values.put(key, isSuccess(response.get())));
        return values;
    } catch (final Throwable e) {
        throw new RedisClientException(e.getMessage(), e);
    } finally {
        POOL.close(jedis);
    }
}

From source file:org.nanoframework.orm.jedis.sharded.RedisClientImpl.java

@Override
public Map<String, Boolean> hsetByNX(final String key, final Map<String, Object> map) {
    Assert.hasText(key);
    Assert.notEmpty(map);

    ShardedJedis jedis = null;
    try {
        jedis = POOL.getJedis(config.getRedisType());
        final ShardedJedisPipeline pipeline = jedis.pipelined();
        final Map<String, Response<Long>> responses = Maps.newHashMap();
        for (Entry<String, Object> entry : map.entrySet()) {
            responses.put(entry.getKey(), pipeline.hsetnx(key, entry.getKey(), toJSONString(entry.getValue())));
        }

        pipeline.sync();
        final Map<String, Boolean> values = Maps.newHashMap();
        responses.forEach((field, response) -> values.put(field, isSuccess(response.get())));
        return values;
    } catch (final Throwable e) {
        throw new RedisClientException(e.getMessage(), e);
    } finally {
        POOL.close(jedis);
    }
}

From source file:org.openecomp.sdc.be.model.operations.impl.CapabilityOperation.java

public TitanOperationStatus getAllCapabilitiesRecusive(NodeTypeEnum nodeType, String resourceId,
        boolean recursively, Map<String, CapabilityDefinition> capabilities,
        Set<String> caseInsensitiveCapabilityNames, boolean inTransaction) {

    TitanOperationStatus findStatus;

    if (recursively) {
        findStatus = findAllCapabilitiesRecursively(resourceId, capabilities, caseInsensitiveCapabilityNames);

    } else {
        findStatus = getCapabilitisOfResourceOnly(resourceId, capabilities, caseInsensitiveCapabilityNames);
    }
    if (!findStatus.equals(TitanOperationStatus.OK)) {
        return findStatus;
    }

    List<String> derivedFromList = new ArrayList<>();
    TitanOperationStatus fillResourceDerivedListFromGraph = fillResourceDerivedListFromGraph(resourceId,
            derivedFromList);
    if (!fillResourceDerivedListFromGraph.equals(TitanOperationStatus.OK)) {
        log.debug("fail to find all valid sources of capability. status = {}",
                fillResourceDerivedListFromGraph.name());
        return fillResourceDerivedListFromGraph;
    }
    capabilities.forEach((name, capability) -> capability.setCapabilitySources(derivedFromList));
    return TitanOperationStatus.OK;
}

From source file:org.apache.samza.execution.JobNodeConfigurationGenerator.java

/**
 * Serializes the {@link Serde} instances for operators, adds them to the provided config, and
 * sets the serde configuration for the input/output/intermediate streams appropriately.
 *
 * We try to preserve the number of Serde instances before and after serialization. However, we don't
 * guarantee that references shared between these serde instances (e.g. a Jackson ObjectMapper shared
 * between two JSON serdes) are shared after deserialization too.
 *
 * Ideally all the user defined objects in the application should be serialized and de-serialized in one pass
 * from the same output/input stream so that we can maintain reference sharing relationships.
 *
 * @param configs the configs to add serialized serde instances and stream serde configs to
 */
private void configureSerdes(Map<String, String> configs, Map<String, StreamEdge> inEdges,
        Map<String, StreamEdge> outEdges, List<StoreDescriptor> stores, Collection<String> tables,
        JobNode jobNode) {
    // collect all key and msg serde instances for streams
    Map<String, Serde> streamKeySerdes = new HashMap<>();
    Map<String, Serde> streamMsgSerdes = new HashMap<>();
    inEdges.keySet().forEach(streamId -> addSerdes(jobNode.getInputSerdes(streamId), streamId, streamKeySerdes,
            streamMsgSerdes));
    outEdges.keySet().forEach(streamId -> addSerdes(jobNode.getOutputSerde(streamId), streamId, streamKeySerdes,
            streamMsgSerdes));

    Map<String, Serde> storeKeySerdes = new HashMap<>();
    Map<String, Serde> storeMsgSerdes = new HashMap<>();
    stores.forEach(storeDescriptor -> {
        storeKeySerdes.put(storeDescriptor.getStoreName(), storeDescriptor.getKeySerde());
        storeMsgSerdes.put(storeDescriptor.getStoreName(), storeDescriptor.getMsgSerde());
    });

    Map<String, Serde> tableKeySerdes = new HashMap<>();
    Map<String, Serde> tableMsgSerdes = new HashMap<>();
    tables.forEach(tableId -> {
        addSerdes(jobNode.getTableSerdes(tableId), tableId, tableKeySerdes, tableMsgSerdes);
    });

    // for each unique stream or store serde instance, generate a unique name and serialize to config
    HashSet<Serde> serdes = new HashSet<>(streamKeySerdes.values());
    serdes.addAll(streamMsgSerdes.values());
    serdes.addAll(storeKeySerdes.values());
    serdes.addAll(storeMsgSerdes.values());
    serdes.addAll(tableKeySerdes.values());
    serdes.addAll(tableMsgSerdes.values());
    SerializableSerde<Serde> serializableSerde = new SerializableSerde<>();
    Base64.Encoder base64Encoder = Base64.getEncoder();
    Map<Serde, String> serdeUUIDs = new HashMap<>();
    serdes.forEach(serde -> {
        String serdeName = serdeUUIDs.computeIfAbsent(serde,
                s -> serde.getClass().getSimpleName() + "-" + UUID.randomUUID().toString());
        configs.putIfAbsent(String.format(SerializerConfig.SERDE_SERIALIZED_INSTANCE(), serdeName),
                base64Encoder.encodeToString(serializableSerde.toBytes(serde)));
    });

    // set key and msg serdes for streams to the serde names generated above
    streamKeySerdes.forEach((streamId, serde) -> {
        String streamIdPrefix = String.format(StreamConfig.STREAM_ID_PREFIX(), streamId);
        String keySerdeConfigKey = streamIdPrefix + StreamConfig.KEY_SERDE();
        configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
    });

    streamMsgSerdes.forEach((streamId, serde) -> {
        String streamIdPrefix = String.format(StreamConfig.STREAM_ID_PREFIX(), streamId);
        String valueSerdeConfigKey = streamIdPrefix + StreamConfig.MSG_SERDE();
        configs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
    });

    // set key and msg serdes for stores to the serde names generated above
    storeKeySerdes.forEach((storeName, serde) -> {
        String keySerdeConfigKey = String.format(StorageConfig.KEY_SERDE, storeName);
        configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
    });

    storeMsgSerdes.forEach((storeName, serde) -> {
        String msgSerdeConfigKey = String.format(StorageConfig.MSG_SERDE, storeName);
        configs.put(msgSerdeConfigKey, serdeUUIDs.get(serde));
    });

    // set key and msg serdes for tables to the serde names generated above
    tableKeySerdes.forEach((tableId, serde) -> {
        String keySerdeConfigKey = String.format(JavaTableConfig.STORE_KEY_SERDE, tableId);
        configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
    });

    tableMsgSerdes.forEach((tableId, serde) -> {
        String valueSerdeConfigKey = String.format(JavaTableConfig.STORE_MSG_SERDE, tableId);
        configs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
    });
}

From source file:org.apache.bookkeeper.mledger.impl.ManagedCursorImpl.java

private List<LongProperty> buildPropertiesMap(Map<String, Long> properties) {
    if (properties.isEmpty()) {
        return Collections.emptyList();
    }

    List<LongProperty> longProperties = Lists.newArrayList();
    properties.forEach((name, value) -> {
        LongProperty lp = LongProperty.newBuilder().setName(name).setValue(value).build();
        longProperties.add(lp);
    });

    return longProperties;
}

From source file:org.apache.nifi.processors.standard.Notify.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {

    final ComponentLog logger = getLogger();
    final PropertyValue signalIdProperty = context.getProperty(RELEASE_SIGNAL_IDENTIFIER);
    final PropertyValue counterNameProperty = context.getProperty(SIGNAL_COUNTER_NAME);
    final PropertyValue deltaProperty = context.getProperty(SIGNAL_COUNTER_DELTA);
    final String attributeCacheRegex = context.getProperty(ATTRIBUTE_CACHE_REGEX).getValue();
    final Integer bufferCount = context.getProperty(SIGNAL_BUFFER_COUNT).asInteger();

    // the cache client used to interact with the distributed cache.
    final AtomicDistributedMapCacheClient cache = context.getProperty(DISTRIBUTED_CACHE_SERVICE)
            .asControllerService(AtomicDistributedMapCacheClient.class);
    final WaitNotifyProtocol protocol = new WaitNotifyProtocol(cache);

    final Map<String, SignalBuffer> signalBuffers = new HashMap<>();

    for (int i = 0; i < bufferCount; i++) {

        final FlowFile flowFile = session.get();
        if (flowFile == null) {
            break;
        }

        // Signal id is computed from attribute 'RELEASE_SIGNAL_IDENTIFIER' with expression language support
        final String signalId = signalIdProperty.evaluateAttributeExpressions(flowFile).getValue();

        // if the computed value is null, or empty, we transfer the flow file to failure relationship
        if (StringUtils.isBlank(signalId)) {
            logger.error("FlowFile {} has no attribute for given Release Signal Identifier",
                    new Object[] { flowFile });
            session.transfer(flowFile, REL_FAILURE);
            continue;
        }

        String counterName = counterNameProperty.evaluateAttributeExpressions(flowFile).getValue();
        if (StringUtils.isEmpty(counterName)) {
            counterName = WaitNotifyProtocol.DEFAULT_COUNT_NAME;
        }

        int delta = 1;
        if (deltaProperty.isSet()) {
            final String deltaStr = deltaProperty.evaluateAttributeExpressions(flowFile).getValue();
            try {
                delta = Integer.parseInt(deltaStr);
            } catch (final NumberFormatException e) {
                logger.error("Failed to calculate delta for FlowFile {} due to {}",
                        new Object[] { flowFile, e }, e);
                session.transfer(flowFile, REL_FAILURE);
                continue;
            }
        }

        if (!signalBuffers.containsKey(signalId)) {
            signalBuffers.put(signalId, new SignalBuffer());
        }
        final SignalBuffer signalBuffer = signalBuffers.get(signalId);

        if (StringUtils.isNotEmpty(attributeCacheRegex)) {
            flowFile.getAttributes().entrySet().stream()
                    .filter(e -> (!e.getKey().equals("uuid") && e.getKey().matches(attributeCacheRegex)))
                    .forEach(e -> signalBuffer.attributesToCache.put(e.getKey(), e.getValue()));
        }

        signalBuffer.incrementDelta(counterName, delta);
        signalBuffer.flowFiles.add(flowFile);

        if (logger.isDebugEnabled()) {
            logger.debug("Cached release signal identifier {} counterName {} from FlowFile {}",
                    new Object[] { signalId, counterName, flowFile });
        }

    }

    signalBuffers.forEach((signalId, signalBuffer) -> {
        // In case of Exception, just throw the exception so that processor can
        // retry after yielding for a while.
        try {
            protocol.notify(signalId, signalBuffer.deltas, signalBuffer.attributesToCache);
            session.transfer(signalBuffer.flowFiles, REL_SUCCESS);
        } catch (IOException e) {
            throw new RuntimeException(
                    String.format("Unable to communicate with cache when processing %s due to %s", signalId, e),
                    e);
        }
    });
}

From source file:com.streamsets.datacollector.cluster.ShellClusterProvider.java

private List<String> generateSparkArgs(String clusterManager, String slaveMemory, String javaOpts,
        Map<String, String> extraSparkConfigs, String numExecutors, String libsTarGz, String etcTarGz,
        String resourcesTarGz, String log4jProperties, String bootstrapJar, Set<String> jarsToShip,
        String pipelineTitle, String clusterBootstrapJar, boolean secureKafka) {
    List<String> args = new ArrayList<>();
    args.add(clusterManager);
    args.add("start");
    // we only support yarn-cluster mode
    args.add("--master");
    args.add("yarn");
    args.add("--deploy-mode");
    args.add("cluster");
    args.add("--executor-memory");
    args.add(slaveMemory + "m");
    // one single sdc per executor
    args.add("--executor-cores");
    args.add("1");

    // Number of Executors based on the origin parallelism
    checkNumExecutors(numExecutors);
    args.add("--num-executors");
    args.add(numExecutors);

    // ship our stage libs and etc directory
    args.add("--archives");
    args.add(Joiner.on(",").join(libsTarGz, etcTarGz, resourcesTarGz));
    // required or else we won't be able to log on cluster
    args.add("--files");
    args.add(log4jProperties);
    args.add("--jars");
    StringBuilder libJarString = new StringBuilder(bootstrapJar);
    for (String jarToShip : jarsToShip) {
        libJarString.append(",").append(jarToShip);
    }
    args.add(libJarString.toString());

    // Add Security options
    if (getSecurityConfiguration() != null && getSecurityConfiguration().isKerberosEnabled()) {
        args.add("--keytab");
        args.add(getSecurityConfiguration().getKerberosKeytab());
        args.add("--principal");
        args.add(getSecurityConfiguration().getKerberosPrincipal());
    }

    if (secureKafka) {
        String jaasPath = System.getProperty(WebServerTask.JAVA_SECURITY_AUTH_LOGIN_CONFIG);
        String loginConf = "-Djava.security.auth.login.config";
        args.add("--conf");
        args.add(Joiner.on("=").join("spark.driver.extraJavaOptions", loginConf, jaasPath));
        javaOpts = Utils.format("{} {}={}", javaOpts, loginConf, jaasPath);
    }
    // use our javaagent and java opt configs
    args.add("--conf");
    args.add("spark.executor.extraJavaOptions="
            + Joiner.on(" ").join("-javaagent:./" + (new File(bootstrapJar)).getName(), javaOpts));
    extraSparkConfigs.forEach((k, v) -> {
        args.add("--conf");
        args.add(k + "=" + v);
    });
    // Job name in Resource Manager UI
    args.add("--name");
    args.add("StreamSets Data Collector: " + pipelineTitle);
    // main class
    args.add("--class");
    args.add("com.streamsets.pipeline.BootstrapClusterStreaming");
    args.add(clusterBootstrapJar);
    return args;
}

From source file:org.wso2.carbon.identity.application.authentication.framework.handler.request.impl.JITProvisioningPostAuthenticationHandler.java

/**
 * Call the relevant URL to add the new user.
 *
 * @param externalIdPConfig Relevant external IDP.
 * @param context           Authentication context.
 * @param localClaimValues  Local claim values.
 * @param response          HttpServlet response.
 * @param username          Relevant user name
 * @throws PostAuthenticationFailedException Post Authentication Failed Exception.
 */
private void redirectToAccountCreateUI(ExternalIdPConfig externalIdPConfig, AuthenticationContext context,
        Map<String, String> localClaimValues, HttpServletResponse response, String username,
        HttpServletRequest request) throws PostAuthenticationFailedException {

    try {
        URIBuilder uriBuilder;
        if (externalIdPConfig.isModifyUserNameAllowed()) {
            context.setProperty(FrameworkConstants.CHANGING_USERNAME_ALLOWED, true);
            uriBuilder = new URIBuilder(FrameworkUtils.getUserNameProvisioningUIUrl());
            uriBuilder.addParameter(FrameworkConstants.ALLOW_CHANGE_USER_NAME, String.valueOf(true));
            if (log.isDebugEnabled()) {
                log.debug(externalIdPConfig.getName() + " allow to change the username, redirecting to "
                        + "registration endpoint to provision the user: " + username);
            }
        } else {
            uriBuilder = new URIBuilder(FrameworkUtils.getPasswordProvisioningUIUrl());
            if (log.isDebugEnabled()) {
                if (externalIdPConfig.isPasswordProvisioningEnabled()) {
                    log.debug(externalIdPConfig.getName() + " supports password provisioning, redirecting to "
                            + "sign up endpoint to provision the user : " + username);
                }
            }
        }
        if (externalIdPConfig.isPasswordProvisioningEnabled()) {
            uriBuilder.addParameter(FrameworkConstants.PASSWORD_PROVISION_ENABLED, String.valueOf(true));
        }
        uriBuilder.addParameter(MultitenantConstants.TENANT_DOMAIN_HEADER_NAME, context.getTenantDomain());
        uriBuilder.addParameter(FrameworkConstants.SERVICE_PROVIDER,
                context.getSequenceConfig().getApplicationConfig().getApplicationName());
        uriBuilder.addParameter(FrameworkConstants.USERNAME, username);
        uriBuilder.addParameter(FrameworkConstants.SKIP_SIGN_UP_ENABLE_CHECK, String.valueOf(true));
        uriBuilder.addParameter(FrameworkConstants.SESSION_DATA_KEY, context.getContextIdentifier());
        addMissingClaims(uriBuilder, context);
        localClaimValues.forEach(uriBuilder::addParameter);
        response.sendRedirect(uriBuilder.build().toString());
    } catch (URISyntaxException | IOException e) {
        handleExceptions(
                String.format(ErrorMessages.ERROR_WHILE_TRYING_CALL_SIGN_UP_ENDPOINT_FOR_PASSWORD_PROVISIONING
                        .getMessage(), username, externalIdPConfig.getName()),
                ErrorMessages.ERROR_WHILE_TRYING_CALL_SIGN_UP_ENDPOINT_FOR_PASSWORD_PROVISIONING.getCode(), e);
    }
}

From source file:com.streamsets.pipeline.stage.processor.mapper.FieldMapperProcessor.java

private void transformFieldNames(Record record) throws StageException {
    final Map<Map<String, Field>, Map<String, String>> parentFieldToChildRenames = new HashMap<>();

    record.forEachField(fv -> {
        final String fieldPath = fv.getFieldPath();
        final String fieldName = fv.getFieldName();
        final Field field = fv.getField();
        if (checkSkipFieldAndSetContextVar(fieldPath, fieldName, field, false)) {
            return;
        }
        if (fieldMapperConfig.operateOn == OperateOn.FIELD_NAMES && fv.getParentField() != null
                && fv.getParentField().getType() == Field.Type.LIST) {
            // we are operating on field names, and the parent is a list, which means this field is an item in the list
            // don't attempt to rename this field, since it's nonsensical (the list field itself will be handled on its own
            // visit)
            return;
        }

        try {
            final String newName = mapperExpressionEval.eval(expressionVars,
                    fieldMapperConfig.mappingExpression, String.class);

            if (!StringUtils.equals(newName, fieldName)) {
                final Field parentField = fv.getParentField();
                if (parentField == null) {
                    throw new IllegalStateException(String.format(
                            "parentField is null in FieldVisitor when processing field path %s", fieldPath));
                } else {
                    Map<String, Field> parentFieldMapValue;
                    switch (parentField.getType()) {
                    case MAP:
                        parentFieldMapValue = parentField.getValueAsMap();
                        break;
                    case LIST_MAP:
                        parentFieldMapValue = parentField.getValueAsListMap();
                        break;
                    default:
                        throw new IllegalStateException(String.format(
                                "parentField is not a MAP or LIST_MAP in FieldVisitor when processing field path %s",
                                fieldPath));
                    }
                    if (!parentFieldToChildRenames.containsKey(parentFieldMapValue)) {
                        parentFieldToChildRenames.put(parentFieldMapValue, new LinkedHashMap<>());
                    }
                    parentFieldToChildRenames.get(parentFieldMapValue).put(fieldName, newName);
                }
            }
        } catch (ELEvalException e) {
            throw new RuntimeException(String.format("Failed to evaluate mapper expression %s: %s",
                    fieldMapperConfig.mappingExpression, e.getMessage()), e);
        }
    });

    parentFieldToChildRenames.forEach((fieldMap, nameMapping) -> {
        nameMapping.forEach((oldName, newName) -> {
            Field field;
            if (fieldMapperConfig.maintainOriginalPaths) {
                field = fieldMap.get(oldName);
            } else {
                field = fieldMap.remove(oldName);
            }
            fieldMap.put(newName, field);
        });
    });
}