List of usage examples for java.util.Set.forEach
default void forEach(Consumer<? super T> action)
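Set inherits forEach from java.lang.Iterable: the given action is performed once for each element, in the set's iteration order (unspecified for HashSet, insertion order for LinkedHashSet, sorted order for TreeSet). Before the real-world examples below, a minimal standalone sketch:

import java.util.Set;

public class SetForEachDemo {
    public static void main(String[] args) {
        Set<String> names = Set.of("ada", "grace", "alan"); // Java 9+ immutable set

        // forEach accepts a Consumer<? super String>; iteration order is unspecified for this set.
        names.forEach(name -> System.out.println(name.toUpperCase()));

        // A method reference works as well.
        names.forEach(System.out::println);
    }
}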
From source file:com.ggvaidya.scinames.model.Project.java
/**
 * Returns a list of dataset rows across all datasets for a particular name.
 *
 * Note that while the API signature allows for duplicate rows to be summarized, this hasn't
 * yet been implemented: we'll return a unique dataset row from multiple datasets even where
 * the data in those rows is identical.
 *
 * @param n The name to identify across all datasets.
 * @return A Map<DatasetRow, Set<Dataset>> indicating rows to be returned.
 */
public Map<DatasetRow, Set<Dataset>> getRowsForName(Name n) {
    Map<Dataset, Set<DatasetRow>> timepointsPerRow = getDatasets().stream()
            .collect(Collectors.toMap(tp -> tp, tp -> tp.getRowsByName(n)));

    final Map<DatasetRow, Set<Dataset>> results = new HashMap<>();
    for (Dataset tp : timepointsPerRow.keySet()) {
        Set<DatasetRow> rowsForTP = timepointsPerRow.get(tp);
        rowsForTP.forEach(r -> {
            if (!results.containsKey(r))
                results.put(r, new HashSet<>());
            results.get(r).add(tp);
        });
    }

    return results;
}
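The containsKey/put/get sequence inside the lambda can be collapsed with Map.computeIfAbsent, which creates and returns the bucket in a single call. A generic sketch of the same map inversion using that idiom (plain String/Integer types stand in for the project's Dataset and DatasetRow):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class InvertMapDemo {
    // Inverts a Map<K, Set<V>> into a Map<V, Set<K>> with forEach + computeIfAbsent.
    static <K, V> Map<V, Set<K>> invert(Map<K, Set<V>> source) {
        Map<V, Set<K>> inverted = new HashMap<>();
        source.forEach((key, values) ->
                values.forEach(value ->
                        inverted.computeIfAbsent(value, v -> new HashSet<>()).add(key)));
        return inverted;
    }

    public static void main(String[] args) {
        Map<String, Set<Integer>> m = Map.of("a", Set.of(1, 2), "b", Set.of(2));
        System.out.println(invert(m)); // e.g. {1=[a], 2=[a, b]} (order may vary)
    }
}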
From source file:com.hortonworks.streamline.streams.security.service.SecurityCatalogResource.java
@POST
@Path("/roles/{parentRoleName}/children")
@Timed
public Response addChildRoles(@PathParam("parentRoleName") String parentRoleName, Set<String> childRoleNames,
        @Context SecurityContext securityContext) throws Exception {
    SecurityUtil.checkRole(authorizer, securityContext, ROLE_SECURITY_ADMIN);
    Long parentId = getIdFromRoleName(parentRoleName);
    Set<Long> childIds = new HashSet<>();
    childRoleNames.forEach(childRoleName -> {
        if (childRoleName.equals(parentRoleName)) {
            throw new IllegalArgumentException("Child role(s) contain parent role");
        }
        childIds.add(getIdFromRoleName(childRoleName));
    });
    Set<RoleHierarchy> res = new HashSet<>();
    childIds.forEach(childId -> res.add(catalogService.addChildRole(parentId, childId)));
    return WSUtils.respondEntities(res, OK);
}
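An unchecked exception thrown inside the Consumer propagates straight out of forEach and aborts the iteration, which is exactly what the validation above relies on. A minimal standalone sketch of the same fail-fast pattern:

import java.util.Set;

public class ForEachValidationDemo {
    public static void main(String[] args) {
        String parent = "admin";
        Set<String> children = Set.of("reader", "admin", "writer");
        try {
            // The IllegalArgumentException thrown mid-iteration escapes forEach immediately.
            children.forEach(child -> {
                if (child.equals(parent)) {
                    throw new IllegalArgumentException("Child role(s) contain parent role");
                }
            });
        } catch (IllegalArgumentException e) {
            System.out.println("Rejected: " + e.getMessage());
        }
    }
}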
From source file:org.wso2.carbon.uuf.internal.deployment.AppCreator.java
private Component createComponent(DependencyNode componentNode, AppReference appReference,
        DependencyNode rootNode, String appContextPath, Map<String, Component> createdComponents,
        Bindings bindings, I18nResources i18nResources) {
    final String componentName = componentNode.getArtifactId();
    final String componentVersion = componentNode.getVersion();
    final String componentContextPath = (componentNode == rootNode) ? Component.ROOT_COMPONENT_CONTEXT_PATH
            : componentNode.getContextPath();
    ComponentReference componentReference = appReference.getComponentReference(componentContextPath);
    ClassLoader classLoader = classLoaderProvider.getClassLoader(componentName, componentVersion,
            componentReference);

    // Dependency components.
    final Set<Component> dependencies = componentNode.getDependencies().stream()
            .map(dependencyNode -> createdComponents.get(dependencyNode.getArtifactId())).collect(toSet());
    // Create layouts in the component.
    final Set<Layout> layouts = componentReference.getLayouts(supportedExtensions)
            .map(layoutReference -> createLayout(layoutReference, componentName)).collect(toSet());
    // Create fragments in the component.
    final Set<Fragment> fragments = componentReference.getFragments(supportedExtensions)
            .map(fragmentReference -> createFragment(fragmentReference, componentName, classLoader))
            .collect(toSet());
    // Create pages in the component.
    Map<String, Layout> availableLayouts = new HashMap<>();
    layouts.forEach(layout -> availableLayouts.put(layout.getName(), layout));
    dependencies.forEach(cmp -> cmp.getLayouts().forEach(l -> availableLayouts.put(l.getName(), l)));
    final SortedSet<Page> pages = componentReference.getPages(supportedExtensions)
            .map(pageReference -> createPage(pageReference, classLoader, availableLayouts, componentName))
            .collect(toCollection(TreeSet::new));

    // Handle component's configurations.
    ComponentConfig componentConfig = YamlFileParser.parse(componentReference.getConfiguration(),
            ComponentConfig.class);
    addBindings(componentConfig.getBindings(), bindings, componentName, fragments, dependencies);
    addAPIs(componentConfig.getApis(), appContextPath, componentContextPath, componentName, classLoader);
    addI18nResources(componentReference.getI18nFiles(), i18nResources);

    return new Component(componentName, componentVersion, componentContextPath, pages, fragments, layouts,
            dependencies, componentReference.getPath());
}
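The two forEach calls build a name-to-layout index; because the dependencies' forEach runs second, a dependency layout with the same name overwrites the component's own. A generic sketch of indexing a Set by key with forEach, next to the Collectors.toMap alternative (the Layout record here is a stand-in, not the project's type):

import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class IndexBySetDemo {
    record Layout(String name, String path) {}

    public static void main(String[] args) {
        Set<Layout> layouts = Set.of(new Layout("main", "/main"), new Layout("side", "/side"));

        // forEach-based indexing, as in the example above; later puts win on duplicate names.
        Map<String, Layout> byName = new HashMap<>();
        layouts.forEach(l -> byName.put(l.name(), l));

        // Stream alternative; toMap throws on duplicate keys unless a merge function is supplied.
        Map<String, Layout> byName2 = layouts.stream()
                .collect(Collectors.toMap(Layout::name, l -> l, (a, b) -> b));

        System.out.println(byName.keySet() + " " + byName2.keySet());
    }
}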
From source file:org.silverpeas.core.webapi.calendar.CalendarResource.java
/**
 * Converts the given participation occurrences into a list of
 * {@link ParticipantCalendarEventOccurrencesEntity} entities.
 * @param users the users.
 * @param occurrences the occurrences, mapped by user identifier.
 * @return a list of {@link ParticipantCalendarEventOccurrencesEntity} WEB entities.
 */
private List<ParticipantCalendarEventOccurrencesEntity> asParticipantOccurrencesEntities(final Set<User> users,
        final Map<String, List<CalendarEventOccurrence>> occurrences) {
    List<ParticipantCalendarEventOccurrencesEntity> webEntities = new ArrayList<>();
    users.forEach(user -> webEntities
            .add(ParticipantCalendarEventOccurrencesEntity.from(user).withOccurrences(asOccurrenceWebEntities(
                    Optional.ofNullable(occurrences.get(user.getId())).orElse(Collections.emptyList())))));
    return webEntities;
}
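The Optional.ofNullable(map.get(key)).orElse(default) chain inside the lambda is equivalent to Map.getOrDefault(key, default), which reads more directly. A minimal sketch of the equivalence:

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;

public class GetOrDefaultDemo {
    public static void main(String[] args) {
        Map<String, List<String>> byUser = Map.of("u1", List.of("event-a"));

        // The pattern used above:
        List<String> a = Optional.ofNullable(byUser.get("u2")).orElse(Collections.emptyList());
        // The equivalent, more direct form:
        List<String> b = byUser.getOrDefault("u2", Collections.emptyList());

        System.out.println(a.equals(b)); // true
    }
}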
From source file:org.nanoframework.concurrent.scheduler.SchedulerFactory.java
public void rebalance(final String groupName) {
    Assert.hasLength(groupName);
    final Set<BaseScheduler> groupScheduler = group.get(groupName);
    if (!CollectionUtils.isEmpty(groupScheduler)) {
        final AtomicInteger idx = new AtomicInteger(0);
        groupScheduler.forEach(scheduler -> {
            scheduler.getConfig().setNum(idx.getAndIncrement());
            scheduler.getConfig().setTotal(groupScheduler.size());
        });
    }
}
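A lambda passed to forEach can only capture effectively final locals, so a plain int counter will not compile; an AtomicInteger (or a one-element array) is the usual workaround, as above. A minimal sketch, assuming single-threaded iteration:

import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;

public class ForEachCounterDemo {
    public static void main(String[] args) {
        Set<String> workers = Set.of("w1", "w2", "w3");

        // int i = 0; workers.forEach(w -> i++);  // does not compile: i is not effectively final
        AtomicInteger idx = new AtomicInteger(0);
        workers.forEach(w -> System.out.println(w + " -> slot " + idx.getAndIncrement()));
    }
}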
From source file:com.yahoo.pulsar.broker.service.BrokerService.java
/**
 * It unloads all owned namespace bundles gracefully.
 * <ul>
 * <li>First it makes the current broker unavailable and isolates it from the cluster so it will not serve any
 * new requests.</li>
 * <li>Second it starts unloading namespace bundles one by one without closing the connection, in order to avoid
 * disruption for other namespace bundles which are sharing the same connection from the same client.</li>
 * </ul>
 */
public void unloadNamespaceBundlesGracefully() {
    try {
        // make broker-node unavailable from the cluster
        if (pulsar.getLoadManager() != null) {
            pulsar.getLoadManager().disableBroker();
        }
        // unload all namespace-bundles gracefully
        long closeTopicsStartTime = System.nanoTime();
        Set<NamespaceBundle> serviceUnits = pulsar.getNamespaceService().getOwnedServiceUnits();
        serviceUnits.forEach(su -> {
            if (su instanceof NamespaceBundle) {
                try {
                    pulsar.getNamespaceService().unloadNamespaceBundle((NamespaceBundle) su);
                } catch (Exception e) {
                    log.warn("Failed to unload namespace bundle {}", su, e);
                }
            }
        });
        double closeTopicsTimeSeconds = TimeUnit.NANOSECONDS
                .toMillis((System.nanoTime() - closeTopicsStartTime)) / 1000.0;
        log.info("Unloading {} namespace-bundles completed in {} seconds", serviceUnits.size(),
                closeTopicsTimeSeconds);
    } catch (Exception e) {
        log.error("Failed to disable broker from loadbalancer list {}", e.getMessage(), e);
    }
}
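In contrast to the fail-fast validation earlier, here each element's exception is caught inside the lambda, so one failing bundle does not stop the remaining unloads. A minimal standalone sketch of that per-element try/catch pattern (unload is a hypothetical helper):

import java.util.Set;

public class PerElementCatchDemo {
    static void unload(String bundle) {
        if (bundle.contains("bad")) {
            throw new IllegalStateException("cannot unload " + bundle);
        }
        System.out.println("unloaded " + bundle);
    }

    public static void main(String[] args) {
        Set<String> bundles = Set.of("ns/a", "ns/bad", "ns/c");
        bundles.forEach(b -> {
            try {
                unload(b);
            } catch (Exception e) {
                // Log and continue; one element's failure does not abort the iteration.
                System.err.println("skipping " + b + ": " + e.getMessage());
            }
        });
    }
}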
From source file:software.reinvent.dependency.parser.service.CsvWriter.java
/**
 * Creates the csv files
 * <ul>
 * <li>Internal_{date}.csv</li>
 * <li>External_{date}.csv</li>
 * <li>Artifacts_{date}.csv</li>
 * </ul>
 * with all important information about the {@link Artifact}s and their {@link ArtifactDependency}s.
 *
 * @param internalGroupId the internal maven group id
 * @param resultDir       the dir where the CSV files will be written
 * @param prefix          any optional prefix for the CSV files
 *
 * @throws IOException
 */
public void writeDependencies(final String internalGroupId, final File resultDir, final String prefix)
        throws IOException {
    final Set<ArtifactDependency> allDependencies = artifacts.stream().map(Artifact::getDependencies)
            .flatMap(Collection::stream).collect(Collectors.toSet());
    final Set<ArtifactDependency> internalDependencies = allDependencies.stream()
            .filter(isInternalPredicate(internalGroupId))
            .sorted(Comparator.comparing(ArtifactDependency::getGroupId)).collect(toSet());
    final Set<ArtifactDependency> externalDependencies = Sets
            .newHashSet(CollectionUtils.subtract(allDependencies, internalDependencies));
    final Multimap<ArtifactDependency, Artifact> dependencyToArtifact = HashMultimap.create();
    allDependencies.forEach(
            dependency -> artifacts.stream().filter(artifact -> artifact.getDependencies().contains(dependency))
                    .forEach(x -> dependencyToArtifact.put(dependency, x)));

    CSVWriter internalWriter = null;
    CSVWriter externalWriter = null;
    CSVWriter artifactWriter = null;
    try {
        resultDir.mkdirs();
        final File internalResultFile = new File(resultDir,
                prefix + "Internal_" + LocalDate.now().toString() + ".csv");
        final File externalResultFile = new File(resultDir,
                prefix + "External_" + LocalDate.now().toString() + ".csv");
        final File artifactResultFile = new File(resultDir,
                prefix + "Artifacts_" + LocalDate.now().toString() + ".csv");
        logger.info("Will write results to {} and {}.", internalResultFile, externalResultFile);
        internalWriter = new CSVWriter(new FileWriter(internalResultFile), separator);
        writeDependencyHeader(internalWriter);
        externalWriter = new CSVWriter(new FileWriter(externalResultFile), separator);
        writeDependencyHeader(externalWriter);
        artifactWriter = new CSVWriter(new FileWriter(artifactResultFile), separator);
        artifactWriter.writeNext(("groupId#artifactId#version#package#internalDependencies"
                + "#externalDependencies").split("#"));

        final CSVWriter finalInternalWriter = internalWriter;
        final CSVWriter finalExternalWriter = externalWriter;
        dependencyToArtifact.keySet().stream()
                .sorted(Comparator.comparing(ArtifactDependency::getGroupId)
                        .thenComparing(ArtifactDependency::getArtifactId))
                .forEach(dependency -> {
                    final List<String> dependentArtifacts = dependencyToArtifact.get(dependency).stream()
                            .map(Artifact::getArtifactId).sorted().collect(toList());
                    final String artifactLicenses = defaultIfBlank(
                            Joiner.on("\n").join(dependency.getArtifactLicenses()), "n/a in pom");
                    final ArrayList<String> newLine = Lists.newArrayList(dependency.getGroupId(),
                            dependency.getArtifactId(), Joiner.on("\n").join(dependency.getVersions()),
                            artifactLicenses, dependency.getDescription(),
                            Joiner.on("\n").join(dependentArtifacts));
                    final String[] csvLine = Iterables.toArray(newLine, String.class);
                    if (isInternal(internalGroupId, dependency)) {
                        finalInternalWriter.writeNext(csvLine);
                    } else {
                        finalExternalWriter.writeNext(csvLine);
                    }
                });

        final CSVWriter finalArtifactWriter = artifactWriter;
        artifacts.stream()
                .sorted(Comparator.comparing(Artifact::getGroupId).thenComparing(Artifact::getArtifactId))
                .forEachOrdered(artifact -> {
                    final String intDependencies = getDependencyColumn(artifact, internalDependencies,
                            ArtifactDependency::getArtifactId);
                    final String extDependencies = getDependencyColumn(artifact, externalDependencies,
                            ArtifactDependency::toString);
                    final ArrayList<String> newLine = Lists.newArrayList(artifact.getGroupId(),
                            artifact.getArtifactId(), Joiner.on(",").join(artifact.getVersions()),
                            defaultString(artifact.getPackaging()), intDependencies, extDependencies);
                    final String[] csvLine = Iterables.toArray(newLine, String.class);
                    finalArtifactWriter.writeNext(csvLine);
                });
    } catch (IOException e) {
        logger.error("Could not write csv.", e);
    } finally {
        if (internalWriter != null) {
            internalWriter.close();
        }
        if (externalWriter != null) {
            externalWriter.close();
        }
        if (artifactWriter != null) {
            artifactWriter.close();
        }
    }
    logger.info("Found {} dependencies. {} internal and {} external", allDependencies.size(),
            internalDependencies.size(), externalDependencies.size());
}
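The null-initialized writers, the finally-block cleanup, and the final*Writer copies (needed because lambdas can only capture effectively final locals) can all be avoided with try-with-resources. A minimal sketch, with java.io.PrintWriter standing in for the CSVWriter type used in the example:

import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.Set;

public class TryWithResourcesDemo {
    public static void main(String[] args) throws IOException {
        Set<String> rows = Set.of("a,1", "b,2");
        // Each writer is closed automatically, in reverse order, even if the body throws.
        try (PrintWriter internal = new PrintWriter(new FileWriter("internal.csv"));
             PrintWriter external = new PrintWriter(new FileWriter("external.csv"))) {
            // The writers are effectively final, so the lambda can capture them directly.
            rows.forEach(row -> (row.startsWith("a") ? internal : external).println(row));
        }
    }
}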
From source file:com.thinkbiganalytics.alerts.spi.defaults.DefaultAlertManager.java
protected void notifyReceivers(int count) {
    Set<AlertNotifyReceiver> receivers;
    synchronized (this.alertReceivers) {
        receivers = new HashSet<>(this.alertReceivers);
    }
    receivers.forEach(a -> a.alertsAvailable(count));
}
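Copying the set inside the synchronized block and iterating the copy outside it keeps the lock short and avoids a ConcurrentModificationException if a receiver registers or unregisters during notification. A self-contained sketch of this snapshot-then-iterate pattern, using a hypothetical Listener interface:

import java.util.HashSet;
import java.util.Set;

public class SnapshotNotifyDemo {
    interface Listener { void alertsAvailable(int count); }

    private final Set<Listener> listeners = new HashSet<>();

    void register(Listener l) {
        synchronized (listeners) { listeners.add(l); }
    }

    void notifyListeners(int count) {
        Set<Listener> snapshot;
        synchronized (listeners) {
            snapshot = new HashSet<>(listeners); // snapshot taken under the lock
        }
        // Iterate outside the lock; concurrent register/unregister cannot affect this copy.
        snapshot.forEach(l -> l.alertsAvailable(count));
    }

    public static void main(String[] args) {
        SnapshotNotifyDemo demo = new SnapshotNotifyDemo();
        demo.register(count -> System.out.println(count + " alerts available"));
        demo.notifyListeners(3);
    }
}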
From source file:io.atomix.protocols.gossip.map.AntiEntropyMapDelegate.java
private void handleUpdateRequests(UpdateRequest<String> request) {
    final Set<String> keys = request.keys();
    final MemberId sender = request.sender();
    final List<MemberId> peers = ImmutableList.of(sender);

    keys.forEach(key -> queueUpdate(new UpdateEntry(key, items.get(key)), peers));
}