List of usage examples for java.util Set forEach
default void forEach(Consumer<? super T> action)

Inherited from java.lang.Iterable: performs the given action for each element of the Set until all elements have been processed or the action throws an exception.
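Before the real-world excerpts below, here is a minimal, self-contained sketch of the method; the set contents and class name are made up purely for illustration:

import java.util.HashSet;
import java.util.Set;

public class SetForEachExample {
    public static void main(String[] args) {
        Set<String> names = new HashSet<>();
        names.add("ada");
        names.add("grace");
        names.add("linus");

        // Performs the given action once per element; iteration order of a HashSet is unspecified
        names.forEach(name -> System.out.println(name.toUpperCase()));

        // A method reference works wherever the lambda only forwards its argument
        names.forEach(System.out::println);
    }
}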
From source file:org.dataconservancy.packaging.tool.integration.PackageGenerationTest.java
@Test
public void custodialDomainObjectReferenceTest() throws Exception {
    PackageState initialState = initializer.initialize(DCS_PROFILE);
    OpenedPackage opened = packager.createPackage(initialState, folder.getRoot());
    Path baseDir = opened.getBaseDirectory().getParentFile().getCanonicalFile().toPath();

    /*
     * Opened packages re-map bag URIs to files. We need the original bag
     * URIs, so re-create them!
     */
    Set<String> fileLocations = new HashSet<>();
    opened.getPackageTree().walk(node -> {
        if (node.getFileInfo() != null && node.getFileInfo().isFile()) {
            try {
                URI bagURIForFile = UriUtility.makeBagUriString(
                        Paths.get(node.getFileInfo().getLocation()).toFile().getCanonicalFile(),
                        baseDir.toFile());
                fileLocations.add(bagURIForFile.toString());
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    });
    assertFalse(fileLocations.isEmpty());

    Model custodialDomainObjects = custodialDomainObjects(opened);

    Set<URI> bagURIs = new HashSet<>();
    custodialDomainObjects.listSubjects().mapWith(Resource::getURI).mapWith(URI::create)
            .filterKeep(UriUtility::isBagUri).forEachRemaining(bagURIs::add);
    custodialDomainObjects.listObjects().filterKeep(RDFNode::isURIResource).mapWith(RDFNode::asResource)
            .mapWith(Resource::getURI).mapWith(URI::create).filterKeep(UriUtility::isBagUri)
            .forEachRemaining(bagURIs::add);

    bagURIs.forEach(bagURI -> {
        fileLocations.remove(bagURI.toString());
        File linkedFile = UriUtility.resolveBagUri(baseDir, bagURI).toFile();
        assertTrue(linkedFile.exists());
        assertTrue(linkedFile.isFile());
    });

    assertTrue(
            "Expected no file locations but found: "
                    + fileLocations.stream().collect(Collectors.joining("\n", "\n", "")),
            fileLocations.isEmpty());
}
From source file:com.abixen.platform.service.businessintelligence.multivisualisation.service.impl.AbstractDatabaseService.java
private List<Map<String, DataValueWeb>> getData(Connection connection, DatabaseDataSource databaseDataSource,
        Set<String> chartColumnsSet, ChartConfigurationForm chartConfigurationForm, Integer limit) {
    ResultSet rs;
    List<Map<String, DataValueWeb>> data = new ArrayList<>();
    try {
        Statement statement = connection.createStatement();
        ResultSetMetaData resultSetMetaData = getDatabaseMetaData(connection, databaseDataSource.getTable());
        if (chartConfigurationForm != null) {
            rs = statement.executeQuery(buildQueryForChartData(databaseDataSource, chartColumnsSet,
                    resultSetMetaData, chartConfigurationForm, limit));
        } else {
            rs = statement.executeQuery(
                    buildQueryForDataSourceData(databaseDataSource, chartColumnsSet, resultSetMetaData, limit)
                            .toString());
        }
        while (rs.next()) {
            // Copy the ResultSet into an effectively-final local so the lambda below may capture it
            final ResultSet row = rs;
            Map<String, DataValueWeb> rowMap = new HashMap<>();
            chartColumnsSet.forEach(chartColumnsSetElement -> {
                rowMap.put(chartColumnsSetElement, getDataFromColumn(row, chartColumnsSetElement));
            });
            data.add(rowMap);
        }
    } catch (SQLException e) {
        throw new DataParsingException("Error when parsing data from db. " + e.getMessage());
    }
    return data;
}
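The final ResultSet row = rs line above is the detail worth copying: a lambda passed to forEach may only capture local variables that are effectively final, so the mutable rs is first aliased into a final local. A minimal sketch of that constraint, with hypothetical names:

import java.util.Set;

public class EffectivelyFinalExample {
    static void process(Set<String> columns) {
        StringBuilder sb = new StringBuilder(); // effectively final: the reference is never reassigned
        columns.forEach(col -> sb.append(col).append(',')); // OK: mutating the captured object is allowed

        String prefix = "col_";
        // prefix = "column_"; // reassigning would make the capture below a compile error
        columns.forEach(col -> System.out.println(prefix + col));
    }
}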
From source file:org.silverpeas.core.webapi.calendar.CalendarWebManager.java
/**
 * Gets the next event occurrences from now.
 * @param componentIds identifiers of the aimed component instances.
 * @param calendarIdsToExclude identifiers of calendars whose linked occurrences must be excluded
 * from the result.
 * @param usersToInclude users whose linked occurrences must be included into the result.
 * @param calendarIdsToInclude identifiers of calendars whose linked occurrences must be included
 * into the result.
 * @param zoneId the identifier of the zone.
 * @param limit the maximum number of occurrences the result must have (must be lower than 500)
 * @return a sorted stream of {@link CalendarEventOccurrence}.
 */
public Stream<CalendarEventOccurrence> getNextEventOccurrences(final List<String> componentIds,
        final Set<String> calendarIdsToExclude, final Set<User> usersToInclude,
        final Set<String> calendarIdsToInclude, final ZoneId zoneId, final Integer limit) {
    final User currentRequester = User.getCurrentRequester();
    // load calendars
    final List<Calendar> calendars = componentIds.stream().flatMap(i -> getCalendarsHandledBy(i).stream())
            .distinct().collect(Collectors.toList());
    // includes/excludes
    calendarIdsToInclude.removeAll(calendarIdsToExclude);
    calendars.removeIf(c -> calendarIdsToExclude.contains(c.getId()));
    if (!calendarIdsToInclude.isEmpty()) {
        calendars.forEach(c -> calendarIdsToInclude.remove(c.getId()));
        calendarIdsToInclude.forEach(i -> {
            Calendar calendarToInclude = Calendar.getById(i);
            if (calendarToInclude.canBeAccessedBy(currentRequester)) {
                calendars.add(calendarToInclude);
            }
        });
    }
    // loading occurrences
    final int nbOccLimit = (limit != null && limit > 0 && limit <= 500) ? limit : DEFAULT_NB_MAX_NEXT_OCC;
    final LocalDate startDate = zoneId != null ? LocalDateTime.now(zoneId).toLocalDate() : LocalDate.now();
    final Set<CalendarEventOccurrence> occurrences = new HashSet<>();
    for (int nbMonthsToAdd : getNextEventTimeWindows()) {
        occurrences.clear();
        LocalDate endDate = startDate.plusMonths(nbMonthsToAdd);
        occurrences.addAll(getEventOccurrencesOf(startDate, endDate, calendars));
        if (!usersToInclude.isEmpty()) {
            getAllEventOccurrencesByUserIds(Pair.of(componentIds, currentRequester), startDate, endDate,
                    usersToInclude).forEach((u, o) -> occurrences.addAll(o));
        }
        if (occurrences.size() >= nbOccLimit) {
            break;
        }
    }
    return occurrences.stream().sorted(COMPARATOR_BY_DATE_ASC).limit(nbOccLimit);
}
From source file:org.apache.nifi.cluster.coordination.http.replication.ThreadPoolRequestReplicator.java
private synchronized int purgeExpiredRequests() {
    final Set<String> expiredRequestIds = ThreadPoolRequestReplicator.this.responseMap.entrySet().stream()
            .filter(entry -> entry.getValue().isOlderThan(30, TimeUnit.SECONDS)) // older than 30 seconds
            .filter(entry -> entry.getValue().isComplete()) // is complete
            .map(entry -> entry.getKey()) // get the request id
            .collect(Collectors.toSet());

    expiredRequestIds.forEach(id -> onResponseConsumed(id));
    return responseMap.size();
}
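The pattern above, collecting the expired IDs into a Set first and only then calling forEach to act on them, avoids mutating responseMap while a stream is still iterating over it. A stripped-down sketch of the same collect-then-mutate idiom; the map contents and class name are hypothetical:

import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class CollectThenRemoveExample {
    public static void main(String[] args) {
        Map<String, Long> requestTimestamps = new HashMap<>();
        requestTimestamps.put("req-1", System.currentTimeMillis() - 60_000); // expired
        requestTimestamps.put("req-2", System.currentTimeMillis());          // fresh

        long cutoff = System.currentTimeMillis() - 30_000;
        // First materialize the expired keys into a separate Set...
        Set<String> expired = requestTimestamps.entrySet().stream()
                .filter(e -> e.getValue() < cutoff)
                .map(Map.Entry::getKey)
                .collect(Collectors.toSet());
        // ...then it is safe to mutate the source map from forEach
        expired.forEach(requestTimestamps::remove);

        System.out.println(requestTimestamps.keySet()); // prints [req-2]
    }
}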
From source file:org.nanoframework.concurrent.scheduler.SchedulerFactory.java
/**
 * Closes all started schedulers belonging to the given group.
 * @param groupName the groupName
 */
public void closeGroup(final String groupName) {
    Assert.hasLength(groupName, "groupName must not be null");
    final Set<String> ids = Sets.newHashSet();
    startedScheduler.forEach((id, scheduler) -> {
        if (groupName.equals(scheduler.getConfig().getGroup())) {
            if (!scheduler.isClose()) {
                /* Sync to Etcd by stop method */
                ETCD_SCHEDULER.stopping(scheduler.getConfig().getGroup(), scheduler.getConfig().getId(),
                        scheduler.getAnalysis());
                scheduler.setClose(true);
                stoppingScheduler.put(scheduler.getConfig().getId(), scheduler);
                ids.add(scheduler.getConfig().getId());
            }
        }
    });
    ids.forEach(id -> startedScheduler.remove(id));
}
From source file:org.onosproject.ecord.carrierethernet.app.CarrierEthernetManager.java
/**
 * Applies FC-specific LTP attributes to global LTPs or adds them to the global LTP map if not there.
 *
 * @param ltpSet set of FC-specific LTPs the attributes of which will be applied to the global LTPs
 */
private void applyFcToGlobalLtps(Set<CarrierEthernetLogicalTerminationPoint> ltpSet) {
    ltpSet.forEach(ltp -> {
        if (!ltpMap.containsKey(ltp.id())) {
            // Just add the LTP as it appears at the FC
            addGlobalLtp(ltp);
        } else {
            // Add LTP resources (BWP, CE-VLAN ID, S-TAG) to the existing global LTP
            ltpMap.get(ltp.id()).ni().addEcNi(ltp.ni());
            // Update config identifier
            ltpMap.get(ltp.id()).ni().setCfgId(ltp.ni().cfgId());
        }
    });
}
From source file:com.act.biointerpretation.networkanalysis.MetabolismNetwork.java
/**
 * Trace the pathway back from the given startNode for up to numSteps steps, and return the subgraph of all
 * precursors found. This is intended to supply explanatory pathways for the input node.
 *
 * @param startNode The node to explain.
 * @param numSteps The number of steps back from the node to search.
 * @return A report representing the precursors of the given starting metabolite.
 */
public PrecursorReport getPrecursorReport(NetworkNode startNode, int numSteps) {
    if (numSteps <= 0) {
        throw new IllegalArgumentException("Precursor graph is only well-defined for numSteps > 0");
    }

    MetabolismNetwork subgraph = new MetabolismNetwork();
    Map<NetworkNode, Integer> levelMap = new HashMap<>();
    Set<NetworkNode> frontier = new HashSet<>();
    frontier.add(startNode);
    levelMap.put(startNode, 0);

    for (MutableInt l = new MutableInt(1); l.toInteger() <= numSteps; l.increment()) {
        // Get edges leading into the derivative frontier
        List<NetworkEdge> edges = frontier.stream().flatMap(n -> n.getInEdges().stream())
                .collect(Collectors.toList());
        // Add all of the nodes adjacent to the edges, and the edges themselves, to the subgraph
        edges.forEach(e -> this.getSubstrates(e).forEach(subgraph::addNode));
        edges.forEach(e -> this.getProducts(e).forEach(subgraph::addNode));
        edges.forEach(subgraph::addEdge);
        // Calculate new frontier, excluding already-labeled nodes to avoid cycles
        frontier = edges.stream().flatMap(e -> this.getSubstrates(e).stream()).collect(Collectors.toSet());
        frontier.removeIf(levelMap::containsKey);
        // Label remaining nodes with the appropriate level
        frontier.forEach(n -> levelMap.put(n, l.toInteger()));
    }
    return new PrecursorReport(startNode.getMetabolite(), subgraph, levelMap);
}
From source file:sx.blah.discord.api.internal.DispatchHandler.java
private void ready(ReadyResponse ready) {
    Discord4J.LOGGER.info(LogMarkers.WEBSOCKET, "Connected to Discord Gateway v{}. Receiving {} guilds.",
            ready.v, ready.guilds.length);

    ws.state = DiscordWS.State.READY;
    ws.hasReceivedReady = true; // Websocket received actual ready event

    if (client.ourUser == null)
        client.ourUser = DiscordUtils.getUserFromJSON(shard, ready.user);
    client.getDispatcher().dispatch(new LoginEvent(shard));

    new RequestBuilder(client).setAsync(true).doAction(() -> {
        ws.sessionId = ready.session_id;

        Set<UnavailableGuildObject> waitingGuilds = ConcurrentHashMap.newKeySet(ready.guilds.length);
        waitingGuilds.addAll(Arrays.asList(ready.guilds));

        final AtomicInteger loadedGuilds = new AtomicInteger(0);
        client.getDispatcher().waitFor((GuildCreateEvent e) -> {
            waitingGuilds.removeIf(g -> g.id.equals(e.getGuild().getStringID()));
            return loadedGuilds.incrementAndGet() >= ready.guilds.length;
        }, (long) Math.ceil(Math.sqrt(2 * ready.guilds.length)), TimeUnit.SECONDS);

        waitingGuilds.forEach(guild -> client.getDispatcher()
                .dispatch(new GuildUnavailableEvent(Long.parseUnsignedLong(guild.id))));
        return true;
    }).andThen(() -> {
        if (this.shard.getInfo()[0] == 0) { // pms are only sent to shard 0
            for (ChannelObject pmObj : ready.private_channels) {
                IPrivateChannel pm = (IPrivateChannel) DiscordUtils.getChannelFromJSON(shard, null, pmObj);
                shard.privateChannels.put(pm);
            }
        }

        ws.isReady = true;
        client.getDispatcher().dispatch(new ShardReadyEvent(shard)); // All information for this shard has been received
        return true;
    }).execute();
}
From source file:ai.grakn.graql.internal.reasoner.AtomicTest.java
@Test
public void testAlphaEquivalence_DifferentRelationInequivalentVariants() {
    GraknGraph graph = unificationTestSet.graph();

    HashSet<String> patternStrings = Sets.newHashSet("{$x isa relation1;}", "{($y) isa relation1;}",
            "{($x, $y);}", "{($x, $y) isa relation1;}", "{(role1: $x, role2: $y) isa relation1;}",
            "{(role: $y, role2: $z) isa relation1;}", "{$x ($y, $z) isa relation1;}",
            "{$x (role1: $y, role2: $z) isa relation1;}");

    Set<Atom> atoms = patternStrings.stream().map(s -> conjunction(s, graph))
            .map(p -> ReasonerQueries.atomic(p, graph).getAtom()).collect(toSet());

    atoms.forEach(at -> {
        atoms.stream().filter(a -> a != at).forEach(a -> atomicEquivalence(a, at, false));
    });
}
From source file:com.okta.swagger.codegen.AbstractOktaJavaClientCodegen.java
@Override
public Map<String, Object> postProcessOperations(Map<String, Object> objs) {
    Map<String, Object> resultMap = super.postProcessOperations(objs);
    List<Map<String, String>> imports = (List<Map<String, String>>) objs.get("imports");
    Map<String, Object> operations = (Map<String, Object>) objs.get("operations");
    List<CodegenOperation> codegenOperations = (List<CodegenOperation>) operations.get("operation");

    // find all of the list return values
    Set<String> importsToAdd = new HashSet<>();
    codegenOperations.stream().filter(cgOp -> cgOp.returnType != null)
            .filter(cgOp -> cgOp.returnType.matches(".+List$"))
            .forEach(cgOp -> importsToAdd.add(toModelImport(cgOp.returnType)));

    // the params might have imports too
    codegenOperations.stream().filter(cgOp -> cgOp.allParams != null)
            .forEach(cgOp -> cgOp.allParams.stream().filter(cgParam -> cgParam.isEnum)
                    .forEach(cgParam -> importsToAdd.add(toModelImport(cgParam.dataType))));

    // add each one as an import
    importsToAdd.forEach(className -> {
        Map<String, String> listImport = new LinkedHashMap<>();
        listImport.put("import", className);
        imports.add(listImport);
    });

    return resultMap;
}