List of usage examples for java.util.Map.forEach
default void forEach(BiConsumer<? super K, ? super V> action)
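Performs the given action for each entry of the map until all entries have been processed or the action throws an exception. The default implementation is equivalent to iterating entrySet() and calling action.accept(entry.getKey(), entry.getValue()) for each entry; most implementations fail fast with a ConcurrentModificationException if the map is structurally modified mid-iteration. A minimal self-contained sketch (the class name and map contents below are made up for illustration):

import java.util.HashMap;
import java.util.Map;

public class MapForEachDemo {
    public static void main(String[] args) {
        // Hypothetical contents, just to drive the example.
        Map<String, Integer> wordCounts = new HashMap<>();
        wordCounts.put("alpha", 3);
        wordCounts.put("beta", 1);

        // The BiConsumer receives each key and value in turn.
        wordCounts.forEach((word, count) ->
                System.out.println(word + "=" + count));
    }
}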
From source file:com.netflix.spinnaker.halyard.deploy.deployment.v1.DistributedDeployer.java
private <T extends Account> void reapOrcaServerGroups(AccountDeploymentDetails<T> details,
        SpinnakerRuntimeSettings runtimeSettings, DistributedService<Orca, T> orcaService) {
    Orca orca = orcaService.connectToPrimaryService(details, runtimeSettings);
    Map<String, ActiveExecutions> executions = orca.getActiveExecutions();
    ServiceSettings orcaSettings = runtimeSettings.getServiceSettings(orcaService.getService());
    RunningServiceDetails orcaDetails = orcaService.getRunningServiceDetails(details, runtimeSettings);
    Map<String, Integer> executionsByInstance = new HashMap<>();
    executions.forEach((s, e) -> {
        String instanceId = s.split("@")[1];
        executionsByInstance.put(instanceId, e.getCount());
    });
    Map<Integer, Integer> executionsByServerGroupVersion = new HashMap<>();
    orcaDetails.getInstances().forEach((s, is) -> {
        int count = is.stream().reduce(0, (c, i) -> c + executionsByInstance.getOrDefault(i.getId(), 0),
                (a, b) -> a + b);
        executionsByServerGroupVersion.put(s, count);
    });
    // Omit the last deployed orcas from being deleted, since they are kept around for rollbacks.
    List<Integer> allOrcas = new ArrayList<>(executionsByServerGroupVersion.keySet());
    allOrcas.sort(Integer::compareTo);
    int orcaCount = allOrcas.size();
    if (orcaCount <= MAX_REMAINING_SERVER_GROUPS) {
        return;
    }
    allOrcas = allOrcas.subList(0, orcaCount - MAX_REMAINING_SERVER_GROUPS);
    for (Integer orcaVersion : allOrcas) {
        // TODO(lwander) consult clouddriver to ensure this orca isn't enabled
        if (executionsByServerGroupVersion.get(orcaVersion) == 0) {
            DaemonTaskHandler.message("Reaping old orca instance " + orcaVersion);
            orcaService.deleteVersion(details, orcaSettings, orcaVersion);
        }
    }
}
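Both forEach calls here replace explicit entrySet() loops: the first splits each execution key of the form "name@instanceId" to tally counts per instance, the second sums those tallies per server-group version with a three-argument reduce, so only versions with zero active executions get reaped.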
From source file:com.hurence.logisland.connect.opc.CommonOpcSourceTask.java
@Override
public void start(Map<String, String> props) {
    setConfigurationProperties(props);
    transferQueue = new LinkedTransferQueue<>();
    opcOperations = new SmartOpcOperations<>(createOpcOperations());
    ConnectionProfile connectionProfile = createConnectionProfile();
    host = connectionProfile.getConnectionUri().getHost();
    tagInfoMap = CommonUtils.parseTagsFromProperties(props).stream()
            .collect(Collectors.toMap(TagInfo::getTagId, Function.identity()));
    minWaitTime = Math.min(10, tagInfoMap.values().stream().map(TagInfo::getSamplingInterval)
            .mapToLong(Duration::toMillis).min().getAsLong());
    opcOperations.connect(connectionProfile);
    if (!opcOperations.awaitConnected()) {
        throw new ConnectException("Unable to connect");
    }
    // set up polling source emission
    pollingScheduler = Executors.newSingleThreadScheduledExecutor();
    streamingThread = Executors.newSingleThreadExecutor();
    Map<Duration, List<TagInfo>> pollingMap = tagInfoMap.values().stream()
            .filter(tagInfo -> StreamingMode.POLL.equals(tagInfo.getStreamingMode()))
            .collect(Collectors.groupingBy(TagInfo::getSamplingInterval));
    final Map<String, OpcData> lastValues = Collections.synchronizedMap(new HashMap<>());
    pollingMap.forEach((k, v) -> pollingScheduler.scheduleAtFixedRate(() -> {
        final Instant now = Instant.now();
        v.stream().map(TagInfo::getTagId).map(lastValues::get).filter(Functions.not(Objects::isNull))
                .map(data -> Pair.of(now, data)).forEach(transferQueue::add);
    }, 0, k.toNanos(), TimeUnit.NANOSECONDS));
    // then subscribe for all
    final SubscriptionConfiguration subscriptionConfiguration = new SubscriptionConfiguration()
            .withDefaultSamplingInterval(Duration.ofMillis(10_000));
    tagInfoMap.values().forEach(tagInfo -> subscriptionConfiguration
            .withTagSamplingIntervalForTag(tagInfo.getTagId(), tagInfo.getSamplingInterval()));
    running.set(true);
    streamingThread.submit(() -> {
        while (running.get()) {
            try {
                createSessionIfNeeded();
                if (session == null) {
                    return;
                }
                session.stream(subscriptionConfiguration,
                        tagInfoMap.keySet().toArray(new String[tagInfoMap.size()])).forEach(opcData -> {
                    if (tagInfoMap.get(opcData.getTag()).getStreamingMode()
                            .equals(StreamingMode.SUBSCRIBE)) {
                        transferQueue.add(Pair.of(
                                hasServerSideSampling() ? opcData.getTimestamp() : Instant.now(),
                                opcData));
                    } else {
                        lastValues.put(opcData.getTag(), opcData);
                    }
                });
            } catch (Exception e) {
                if (running.get()) {
                    logger.warn("Stream interrupted while reading from " + host, e);
                    safeCloseSession();
                    lastValues.clear();
                }
            }
        }
    });
}
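Here pollingMap.forEach schedules one fixed-rate task per distinct sampling interval (the Duration key), so all tags grouped under the same interval share a scheduler slot; subscription-mode tags bypass the lastValues cache and are pushed straight onto the transfer queue.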
From source file:org.talend.dataprep.schema.xls.XlsSchemaParser.java
/**
 * <p>
 * We try to be smart and user friendly, not like those nerd devs who don't care about users, so we
 * guess the header size (we assume those bloody users don't have complicated headers!!).
 * </p>
 * <p>
 * We scan all entries to find a common header size value (i.e. the row where the value type changes
 * from String to something else); more simply, all columns/lines with type String.
 * </p>
 *
 * @param cellsTypeMatrix key: column number, value: row where the type changes from String to something else
 * @return The guessed header size.
 */
private int guessHeaderSize(Map<Integer, SortedMap<Integer, String>> cellsTypeMatrix) {
    SortedMap<Integer, Integer> cellTypeChange = new TreeMap<>();
    cellsTypeMatrix.forEach((colId, typePerRow) -> {
        String firstType = null;
        int rowChange = 0;
        for (Map.Entry<Integer, String> typePerRowEntry : typePerRow.entrySet()) {
            if (firstType == null) {
                firstType = typePerRowEntry.getValue();
            } else if (!typePerRowEntry.getValue().equals(firstType)
                    && !typePerRowEntry.getValue().equals(STRING.getName())) {
                rowChange = typePerRowEntry.getKey();
                break;
            }
        }
        cellTypeChange.put(colId, rowChange);
    });
    // FIXME think more about header size calculation
    // currently can fail, so force a header of size 1
    int averageHeaderSize = 1;
    LOGGER.debug("averageHeaderSize (forced to): {}, cellTypeChange: {}", averageHeaderSize, cellTypeChange);
    return averageHeaderSize;
}
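forEach visits each column's row-to-type map and records, per column, the first row where the cell type stops being String; the header-size heuristic that cellTypeChange is meant to feed is currently short-circuited to a fixed value of 1.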
From source file:com.ejisto.core.classloading.scan.ScanAction.java
private void scanGroups(Map<String, List<MockedField>> groups) {
    try {
        ClassPool classPool = new ClassPool();
        Path webInf = baseDirectory.resolve("WEB-INF");
        classPool.appendClassPath(webInf.resolve("classes").toAbsolutePath().toString());
        classPool.appendClassPath(webInf.resolve("lib").toAbsolutePath().toString() + "/*");
        classPool.appendSystemPath();
        ClassTransformerImpl transformer = new ClassTransformerImpl(contextPath, mockedFieldsRepository);
        groups.forEach((k, v) -> scanClass(v, classPool, transformer,
                normalize(webInf + File.separator + "classes/", true)));
    } catch (Exception e) {
        log.error("got exception: " + e.toString());
        throw new ApplicationException(e);
    }
}
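Only the values matter here: the key k is ignored and each List<MockedField> group is handed to scanClass. When the key goes unused like this, iterating groups.values() instead would express the intent more directly.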
From source file:com.baidu.rigel.biplatform.tesseract.meta.impl.CallbackDimensionMemberServiceImpl.java
/**
 * treeCallbackService TODO
 */
// private static CallbackServiceImpl treeCallbackService = new CallbackServiceImpl();
@Override
public List<MiniCubeMember> getMembers(Cube cube, Level level, DataSourceInfo dataSourceInfo,
        Member parentMember, Map<String, String> params) throws MiniCubeQueryException, MetaException {
    // MetaDataService.checkCube(cube);
    // MetaDataService.checkDataSourceInfo(dataSourceInfo);
    CallbackLevel callbackLevel = (CallbackLevel) level;
    Map<String, String> callbackParams = Maps.newHashMap(callbackLevel.getCallbackParams());
    if (MapUtils.isNotEmpty(params)) {
        // Override only the keys the callback already declares.
        params.forEach((k, v) -> {
            if (callbackParams.containsKey(k)) {
                callbackParams.put(k, v);
            }
        });
        // callbackParams.putAll(params);
    }
    CallbackResponse response = CallbackServiceInvoker.invokeCallback(callbackLevel.getCallbackUrl(),
            callbackParams, CallbackType.DIM);
    if (response.getStatus() == ResponseStatus.SUCCESS) {
        @SuppressWarnings("unchecked")
        List<CallbackDimTreeNode> posTree = (List<CallbackDimTreeNode>) response.getData();
        List<MiniCubeMember> result = createMembersByPosTreeNode(posTree, level, null);
        if (parentMember == null) {
            return result;
        } else {
            return createMembersByPosTreeNode(posTree.get(0).getChildren(), level, null);
        }
    } else {
        log.error("[ERROR] --- --- " + response.getStatus() + "---" + response.getMessage());
        throw new RuntimeException(response.getMessage());
    }
}
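The containsKey guard makes this forEach a selective merge: caller-supplied params may override parameters the callback already declares, but cannot introduce new ones, unlike the commented-out putAll alternative, which would copy everything.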
From source file:com.coverity.report.analysis.ProtecodeSCToolProcessor.java
private List<ComponentCriticalities> makeRolledUpList(Map<String, Map<Criticality, Integer>> map,
        final int rollupIndex) {
    // Transfer the data to a list
    List<ComponentCriticalities> pairs = new ArrayList<>();
    map.forEach((k, v) -> pairs.add(new ComponentCriticalities(k, v)));
    // Sort by sum
    pairs.sort((a, b) -> b.sum() - a.sum());
    // roll up the last n - rollupIndex pairs into another pair
    ComponentCriticalities other = new ComponentCriticalities("Other Components", new HashMap<>());
    Arrays.stream(Criticality.values()).forEach(c -> other.map.put(c, 0));
    for (int i = pairs.size() - 1; i >= rollupIndex; --i) {
        pairs.get(i).map.forEach((k, v) -> other.map.put(k, other.map.get(k) + v));
        pairs.remove(i);
    }
    // If other is nonempty, add it.
    if (other.sum() > 0) {
        pairs.add(other);
    }
    return pairs;
}
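Three forEach calls cooperate here: one flattens the outer map into a list, one zeroes the accumulator, and one (inside the rollup loop) adds each removed component's counts into "Other Components". Since the accumulator is pre-seeded with every Criticality, other.map.merge(k, v, Integer::sum) would do the same accumulation in a single call.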
From source file:org.snt.inmemantlr.comp.StringCodeGenPipeline.java
@SuppressWarnings("unchecked")
public String getTokenVocabString() {
    Map<String, Integer> vocab = (Map<String, Integer>) tokenvocab.getAttribute("tokenvocab");
    Map<String, Integer> lit = (Map<String, Integer>) tokenvocab.getAttribute("literals");
    StringBuilder sb = new StringBuilder();
    vocab.forEach((s, i) -> sb.append(s).append("=").append(i.toString()).append("\n"));
    lit.forEach((s, i) -> sb.append(s).append("=").append(i.toString()).append("\n"));
    return sb.toString();
}
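A compact serialization idiom: each forEach appends one name=index line per entry into a shared StringBuilder, producing text in the shape of an ANTLR .tokens vocabulary file.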
From source file:org.springframework.boot.actuate.context.properties.ConfigurationPropertiesReportEndpoint.java
private ContextConfigurationProperties describeConfigurationProperties(ApplicationContext context,
        ObjectMapper mapper) {
    ConfigurationBeanFactoryMetadata beanFactoryMetadata = getBeanFactoryMetadata(context);
    Map<String, Object> beans = getConfigurationPropertiesBeans(context, beanFactoryMetadata);
    Map<String, ConfigurationPropertiesBeanDescriptor> beanDescriptors = new HashMap<>();
    beans.forEach((beanName, bean) -> {
        String prefix = extractPrefix(context, beanFactoryMetadata, beanName);
        beanDescriptors.put(beanName, new ConfigurationPropertiesBeanDescriptor(prefix,
                sanitize(prefix, safeSerialize(mapper, bean, prefix))));
    });
    return new ContextConfigurationProperties(beanDescriptors,
            (context.getParent() != null) ? context.getParent().getId() : null);
}
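forEach transforms one map into another keyed the same way: each configuration-properties bean is serialized, sanitized under its prefix, and stored as a descriptor under its original bean name.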
From source file:org.apache.gobblin.cluster.suite.IntegrationBasicSuite.java
private void copyJobConfFromResource() throws IOException {
    Map<String, Config> jobConfigs;
    try (InputStream resourceStream = this.jobConfResourceUrl.openStream()) {
        Reader reader = new InputStreamReader(resourceStream);
        Config rawJobConfig = ConfigFactory.parseReader(reader,
                ConfigParseOptions.defaults().setSyntax(ConfigSyntax.CONF));
        jobConfigs = overrideJobConfigs(rawJobConfig);
        jobConfigs.forEach((jobName, jobConfig) -> {
            try {
                writeJobConf(jobName, jobConfig);
            } catch (IOException e) {
                log.error("Job " + jobName + " config cannot be written.");
            }
        });
    }
}
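Because BiConsumer.accept declares no checked exceptions, the IOException from writeJobConf must be caught inside the lambda; here it is logged per job so one unwritable config does not abort the rest of the loop.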
From source file:org.springframework.boot.actuate.endpoint.annotation.AnnotationEndpointDiscoverer.java
private Collection<DiscoveredEndpoint> mergeExposed(Map<Class<?>, DiscoveredEndpoint> endpoints,
        Map<Class<?>, DiscoveredExtension> extensions) {
    List<DiscoveredEndpoint> result = new ArrayList<>();
    endpoints.forEach((endpointClass, endpoint) -> {
        if (endpoint.isExposed()) {
            DiscoveredExtension extension = extensions.remove(endpointClass);
            result.add(endpoint.merge(extension));
        }
    });
    return result;
}
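Note that the lambda mutates the other map (extensions.remove), never the one being iterated; structurally modifying the map you are iterating from inside its own forEach would typically throw a ConcurrentModificationException.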