List of usage examples for java.util Collection forEach
default void forEach(Consumer<? super T> action)
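Performs the given action for each element of the collection until all elements have been processed or the action throws an exception (Collection inherits this method from Iterable). Before the project-specific examples below, a minimal, self-contained sketch of the call; the class and variable names are illustrative only:

import java.util.Arrays;
import java.util.Collection;

public class ForEachSketch {
    public static void main(String[] args) {
        Collection<String> names = Arrays.asList("alpha", "beta", "gamma");
        // The action can be a lambda...
        names.forEach(name -> System.out.println("lambda: " + name));
        // ...or a method reference; both satisfy Consumer<? super String>.
        names.forEach(System.out::println);
    }
}

The examples that follow are taken from open-source projects and show forEach driving side effects such as event dispatch, cache updates, and pausing or resuming resources.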
From source file:org.wso2.siddhi.extension.input.transport.kafka.KafkaSourceTestCase.java
@Test
public void testAKafkaPauseAndResume() throws InterruptedException {
    try {
        log.info("Test to verify the pause and resume functionality of Kafka source");
        String topics[] = new String[] { "kafka_topic3" };
        createTopic(topics, 2);
        SiddhiManager siddhiManager = new SiddhiManager();
        siddhiManager.setExtension("source.mapper:text", TextSourceMapper.class);
        ExecutionPlanRuntime executionPlanRuntime = siddhiManager.createExecutionPlanRuntime(
                "@Plan:name('TestExecutionPlan') "
                        + "define stream BarStream (symbol string, price float, volume long); "
                        + "@info(name = 'query1') "
                        + "@source(type='kafka', topic='kafka_topic3', group.id='test1', "
                        + "threading.option='partition.wise', "
                        + "bootstrap.servers='localhost:9092', partition.no.list='0,1', "
                        + "@map(type='text'))"
                        + "Define stream FooStream (symbol string, price float, volume long);"
                        + "from FooStream select symbol, price, volume insert into BarStream;");
        executionPlanRuntime.addCallback("BarStream", new StreamCallback() {
            @Override
            public void receive(Event[] events) {
                for (Event event : events) {
                    System.out.println(event);
                    eventArrived = true;
                    count++;
                }
            }
        });
        executionPlanRuntime.start();
        Future eventSender = executorService.submit(new Runnable() {
            @Override
            public void run() {
                kafkaPublisher(topics, 2, 4);
            }
        });
        while (!eventSender.isDone()) {
            Thread.sleep(1000);
        }
        Thread.sleep(2000);
        assertEquals(4, count);
        assertTrue(eventArrived);
        Collection<List<Source>> sources = executionPlanRuntime.getSources();
        // pause the transports
        sources.forEach(e -> e.forEach(Source::pause));
        init2();
        eventSender = executorService.submit(new Runnable() {
            @Override
            public void run() {
                kafkaPublisher(topics, 2, 4);
            }
        });
        while (!eventSender.isDone()) {
            Thread.sleep(1000);
        }
        Thread.sleep(5000);
        assertFalse(eventArrived);
        // resume the transports
        sources.forEach(e -> e.forEach(Source::resume));
        Thread.sleep(2000);
        assertEquals(4, count);
        assertTrue(eventArrived);
        executionPlanRuntime.shutdown();
    } catch (ZkTimeoutException ex) {
        log.warn("No zookeeper may not be available.", ex);
    }
}
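The sources collection above is a Collection<List<Source>>, so pausing and resuming uses a nested forEach: the outer call visits each List<Source>, and the inner call applies the method reference to each Source. A generic sketch of the same pattern, with illustrative types and values:

import java.util.Arrays;
import java.util.Collection;
import java.util.List;

public class NestedForEachSketch {
    public static void main(String[] args) {
        // Each element is itself a collection, so two forEach calls are nested.
        Collection<List<String>> groups = Arrays.asList(
                Arrays.asList("a", "b"),
                Arrays.asList("c"));
        groups.forEach(group -> group.forEach(System.out::println));
    }
}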
From source file:org.ligoj.app.plugin.id.ldap.dao.UserLdapRepository.java
/**
 * Add the user to the given groups. Cache is also updated.
 *
 * @param user
 *            The user to add to the given groups.
 * @param groups
 *            The groups to add, normalized.
 */
protected void addUserToGroups(final UserOrg user, final Collection<String> groups) {
    groups.forEach(g -> groupLdapRepository.addUser(user, g));
}
From source file:org.ligoj.app.plugin.id.ldap.dao.UserLdapRepository.java
/**
 * Remove the user from the given groups. Cache is also updated.
 *
 * @param user
 *            The user to remove from the given groups.
 * @param groups
 *            The groups to remove, normalized.
 */
protected void removeUserFromGroups(final UserOrg user, final Collection<String> groups) {
    groups.forEach(g -> groupLdapRepository.removeUser(user, g));
}
From source file:org.silverpeas.core.webapi.calendar.CalendarWebManager.java
/**
 * Gets all event occurrences associated to users and contained in the time window specified
 * by the start and end date times.<br>
 * Attendees which have answered negatively about their presence are not taken into account.
 * The occurrences are sorted from the lowest to the highest date and mapped by user identifiers.
 * @param currentUserAndComponentInstanceId the current user and the current component instance
 * ids from which the service is requested.
 * @param startDate the start date of the time window.
 * @param endDate the end date of the time window.
 * @param users the users to filter on.
 * @return a list of entities of calendar event occurrences mapped by user identifiers.
 */
protected Map<String, List<CalendarEventOccurrence>> getAllEventOccurrencesByUserIds(
        final Pair<List<String>, User> currentUserAndComponentInstanceId, LocalDate startDate,
        LocalDate endDate, Collection<User> users) {
    // Retrieving the occurrences from personal calendars
    final List<Calendar> personalCalendars = new ArrayList<>();
    users.forEach(u -> personalCalendars.addAll(getCalendarsHandledBy(
            PersonalComponentInstance.from(u, PersonalComponent.getByName("userCalendar").get()).getId())));
    final List<CalendarEventOccurrence> entities = personalCalendars.isEmpty() ? emptyList()
            : Calendar.getTimeWindowBetween(startDate, endDate).filter(f -> f.onCalendar(personalCalendars))
                    .getEventOccurrences();
    entities.addAll(Calendar.getTimeWindowBetween(startDate, endDate).filter(f -> f.onParticipants(users))
            .getEventOccurrences());
    // Getting the occurrences by users
    Map<String, List<CalendarEventOccurrence>> result = new CalendarEventInternalParticipationView(users)
            .apply(entities.stream().distinct().collect(Collectors.toList()));
    final String currentUserId = currentUserAndComponentInstanceId.getRight().getId();
    if (result.containsKey(currentUserId)) {
        List<CalendarEventOccurrence> currentUserOccurrences = result.get(currentUserId);
        // Remove occurrence associated to given user when he is the creator
        currentUserOccurrences.removeIf(calendarEventOccurrence -> {
            CalendarEvent event = calendarEventOccurrence.getCalendarEvent();
            return currentUserAndComponentInstanceId.getLeft()
                    .contains(event.getCalendar().getComponentInstanceId())
                    && event.getCreator().getId().equals(currentUserId);
        });
    } else {
        result.put(currentUserId, emptyList());
    }
    return result;
}
From source file:io.atomix.protocols.gossip.map.AntiEntropyMapDelegate.java
private void processUpdates(Collection<UpdateEntry> updates) {
    if (closed) {
        return;
    }
    updates.forEach(update -> {
        final String key = update.key();
        final MapValue value = update.value() == null ? null : update.value().copy();
        if (value == null || value.isTombstone()) {
            MapValue previousValue = removeInternal(key, Optional.empty(), Optional.ofNullable(value));
            if (previousValue != null && previousValue.isAlive()) {
                notifyListeners(
                        new MapDelegateEvent<>(REMOVE, decodeKey(key), previousValue.get(this::decodeValue)));
            }
        } else {
            counter.incrementCount();
            AtomicReference<byte[]> oldValue = new AtomicReference<>();
            AtomicBoolean updated = new AtomicBoolean(false);
            items.compute(key, (k, existing) -> {
                if (existing == null || value.isNewerThan(existing)) {
                    updated.set(true);
                    oldValue.set(existing != null ? existing.get() : null);
                    return value;
                }
                return existing;
            });
            if (updated.get()) {
                if (oldValue.get() == null) {
                    notifyListeners(new MapDelegateEvent<>(INSERT, decodeKey(key), decodeValue(value.get())));
                } else {
                    notifyListeners(new MapDelegateEvent<>(UPDATE, decodeKey(key), decodeValue(value.get())));
                }
            }
        }
    });
}
From source file:io.atomix.protocols.gossip.map.AntiEntropyMapDelegate.java
private void queueUpdate(UpdateEntry event, Collection<MemberId> peers) {
    if (peers == null) {
        // we have no friends :(
        return;
    }
    peers.forEach(
            node -> senderPending.computeIfAbsent(node, unusedKey -> new EventAccumulator(node)).add(event));
}
From source file:ai.grakn.test.graql.analytics.DegreeTest.java
@Test
public void testDegreeIsPersisted() throws Exception {
    // TODO: Fix on TinkerGraphComputer
    assumeFalse(usingTinker());

    // create a simple graph
    RoleType pet = graph.putRoleType("pet");
    RoleType owner = graph.putRoleType("owner");
    RoleType breeder = graph.putRoleType("breeder");
    RelationType mansBestFriend = graph.putRelationType("mans-best-friend").hasRole(pet).hasRole(owner)
            .hasRole(breeder);
    EntityType person = graph.putEntityType("person").playsRole(owner).playsRole(breeder);
    EntityType animal = graph.putEntityType("animal").playsRole(pet);

    // make one person breeder and owner
    Entity coco = animal.addEntity();
    Entity dave = person.addEntity();
    Relation daveBreedsAndOwnsCoco = mansBestFriend.addRelation().putRolePlayer(pet, coco)
            .putRolePlayer(owner, dave);

    // manual degrees
    Map<String, Long> referenceDegrees = new HashMap<>();
    referenceDegrees.put(coco.getId(), 1L);
    referenceDegrees.put(dave.getId(), 1L);
    referenceDegrees.put(daveBreedsAndOwnsCoco.getId(), 2L);

    graph.commit();

    // compute and persist degrees
    graph.graql().compute().degree().persist().execute();

    // check degrees are correct
    graph = factory.getGraph();
    GraknGraph finalGraph = graph;
    referenceDegrees.entrySet().forEach(entry -> {
        Instance instance = finalGraph.getConcept(entry.getKey());
        assertTrue(instance.resources().iterator().next().getValue().equals(entry.getValue()));
    });

    // check only expected resources exist
    Collection<String> allConcepts = new ArrayList<>();
    ResourceType<Long> rt = graph.getResourceType(AbstractComputeQuery.degree);
    Collection<Resource<Long>> degrees = rt.instances();
    Map<Instance, Long> currentDegrees = new HashMap<>();
    degrees.forEach(degree -> {
        Long degreeValue = degree.getValue();
        degree.ownerInstances().forEach(instance -> currentDegrees.put(instance, degreeValue));
    });

    // check all resources exist and no more
    assertTrue(CollectionUtils.isEqualCollection(currentDegrees.values(), referenceDegrees.values()));

    // persist again and check again
    graph.graql().compute().degree().persist().execute();

    // check only expected resources exist
    graph = factory.getGraph();
    rt = graph.getResourceType(AbstractComputeQuery.degree);
    degrees = rt.instances();
    degrees.forEach(i -> i.ownerInstances().iterator().forEachRemaining(r -> allConcepts.add(r.getId())));

    // check degrees are correct
    GraknGraph finalGraph1 = graph;
    referenceDegrees.entrySet().forEach(entry -> {
        Instance instance = finalGraph1.getConcept(entry.getKey());
        assertTrue(instance.resources().iterator().next().getValue().equals(entry.getValue()));
    });

    degrees = rt.instances();
    currentDegrees.clear();
    degrees.forEach(degree -> {
        Long degreeValue = degree.getValue();
        degree.ownerInstances().forEach(instance -> currentDegrees.put(instance, degreeValue));
    });

    // check all resources exist and no more
    assertTrue(CollectionUtils.isEqualCollection(currentDegrees.values(), referenceDegrees.values()));
}
From source file:com.qwazr.search.index.IndexInstance.java
final <T> void updateDocsValues(final Map<String, Field> fields, final Collection<T> documents)
        throws IOException, InterruptedException {
    if (documents == null || documents.isEmpty())
        return;
    final Semaphore sem = schema.acquireWriteSemaphore();
    try {
        RecordsPoster.UpdateObjectDocValues poster = getDocValuesPoster(fields);
        documents.forEach(poster);
        nrtCommit();
    } finally {
        if (sem != null)
            sem.release();
    }
}
From source file:com.qwazr.search.index.IndexInstance.java
final void updateMappedDocsValues(final Collection<Map<String, Object>> documents)
        throws IOException, ServerException, InterruptedException {
    if (documents == null || documents.isEmpty())
        return;
    final Semaphore sem = schema.acquireWriteSemaphore();
    try {
        RecordsPoster.UpdateMapDocValues poster = getDocValuesPoster();
        documents.forEach(poster);
        nrtCommit();
    } finally {
        if (sem != null)
            sem.release();
    }
}