List of usage examples for com.google.common.collect.Sets.filter
@GwtIncompatible("NavigableSet")
@SuppressWarnings("unchecked")
@CheckReturnValue
public static <E> NavigableSet<E> filter(NavigableSet<E> unfiltered, Predicate<? super E> predicate)
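Before the project samples below, a minimal self-contained sketch of calling this overload (not drawn from any of the projects below; the class name, set contents, and predicate are illustrative assumptions). Note that Sets.filter returns a live view of the backing set, not a copy; the NavigableSet overload has been available since Guava 14.

import java.util.NavigableSet;
import java.util.TreeSet;

import com.google.common.collect.Sets;

public class SetsFilterSketch {
    public static void main(String[] args) {
        NavigableSet<Integer> numbers = new TreeSet<>();
        for (int i = 1; i <= 10; i++) {
            numbers.add(i);
        }

        // The result is a live view backed by "numbers", not a snapshot.
        NavigableSet<Integer> evens = Sets.filter(numbers, (Integer n) -> n % 2 == 0);

        System.out.println(evens);        // [2, 4, 6, 8, 10]
        numbers.add(12);                  // changes to the backing set show through the view
        System.out.println(evens.last()); // 12
    }
}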
From source file:org.voltcore.agreement.AgreementSeeker.java
/**
 * Adds alive and dead graph information from a reporting
 * site survivor set
 *
 * @param reportingHsid the reporting site
 * @param sfm a {@link SiteFailureMessage} containing that
 *        site's survivor set
 */
public void add(long reportingHsid, SiteFailureMessage sfm) {
    // skip if the reporting site did not belong to the pre
    // failure mesh, or the reporting site is reporting itself
    // dead, or none of the sites in the safe transaction map
    // are among the known hsids
    if (!m_hsids.contains(reportingHsid) || !sfm.m_survivors.contains(reportingHsid))
        return;

    Set<Long> survivors = sfm.m_survivors;
    if (Sets.filter(sfm.getObservedFailedSites(), in(m_hsids)).isEmpty()) {
        survivors = m_hsids;
    }

    // dead = pre failure mesh - survivors
    Set<Long> dead = Sets.difference(m_hsids, survivors);

    removeValue(m_dead, reportingHsid);
    // add dead graph nodes
    for (long w : dead) {
        if (!m_hsids.contains(w))
            continue;
        m_dead.put(w, reportingHsid);
    }

    // Remove all that the reporting site thought
    // was alive before this invocation
    removeValue(m_alive, reportingHsid);
    // add alive graph nodes
    for (long s : survivors) {
        if (!m_hsids.contains(s))
            continue;
        m_alive.put(s, reportingHsid);
    }

    for (long s : sfm.getFailedSites()) {
        if (!m_hsids.contains(s))
            continue;
        m_reported.put(s, reportingHsid);
    }
}
From source file:org.apache.fluo.recipes.core.combine.CombineQueueImpl.java
void process(TransactionBase tx, Bytes ntfyRow, Column col, Combiner<K, V> combiner,
        ChangeObserver<K, V> changeObserver) throws Exception {
    Preconditions.checkState(ntfyRow.startsWith(updatePrefix));

    Bytes nextKey = tx.get(ntfyRow, NEXT_COL);

    Span span;
    if (nextKey != null) {
        Bytes startRow = Bytes.builder(ntfyRow.length() + nextKey.length()).append(ntfyRow).append(nextKey)
                .toBytes();
        Span tmpSpan = Span.prefix(ntfyRow);
        Span nextSpan = new Span(new RowColumn(startRow, UPDATE_COL), false, tmpSpan.getEnd(),
                tmpSpan.isEndInclusive());
        span = nextSpan;
    } else {
        span = Span.prefix(ntfyRow);
    }

    Iterator<RowColumnValue> iter = tx.scanner().over(span).fetch(UPDATE_COL).build().iterator();

    Map<Bytes, List<Bytes>> updates = new HashMap<>();

    long approxMemUsed = 0;

    Bytes partiallyReadKey = null;
    boolean setNextKey = false;

    if (iter.hasNext()) {
        Bytes lastKey = null;
        while (iter.hasNext() && approxMemUsed < bufferSize) {
            RowColumnValue rcv = iter.next();
            Bytes curRow = rcv.getRow();

            tx.delete(curRow, UPDATE_COL);

            Bytes serializedKey = getKeyFromUpdateRow(ntfyRow, curRow);
            lastKey = serializedKey;

            List<Bytes> updateList = updates.get(serializedKey);
            if (updateList == null) {
                updateList = new ArrayList<>();
                updates.put(serializedKey, updateList);
            }

            Bytes val = rcv.getValue();
            updateList.add(val);

            approxMemUsed += curRow.length();
            approxMemUsed += val.length();
        }

        if (iter.hasNext()) {
            RowColumnValue rcv = iter.next();
            Bytes curRow = rcv.getRow();

            // check if more updates for last key
            if (getKeyFromUpdateRow(ntfyRow, curRow).equals(lastKey)) {
                // there are still more updates for this key
                partiallyReadKey = lastKey;

                // start next time at the current key
                tx.set(ntfyRow, NEXT_COL, partiallyReadKey);
            } else {
                // start next time at the next possible key
                Bytes nextPossible = Bytes.builder(lastKey.length() + 1).append(lastKey).append(0).toBytes();
                tx.set(ntfyRow, NEXT_COL, nextPossible);
            }

            setNextKey = true;
        } else if (nextKey != null) {
            // clear nextKey
            tx.delete(ntfyRow, NEXT_COL);
        }
    } else if (nextKey != null) {
        tx.delete(ntfyRow, NEXT_COL);
    }

    if (nextKey != null || setNextKey) {
        // If not all data was read, we need to run again in the future. If scanning was started in
        // the middle of the bucket, it's possible there is new data before nextKey that still needs
        // to be processed. If scanning stopped before reading the entire bucket, there may be data
        // after the stop point.
        tx.setWeakNotification(ntfyRow, col);
    }

    BytesBuilder rowBuilder = Bytes.builder();
    rowBuilder.append(dataPrefix);
    rowBuilder.append(ntfyRow.subSequence(updatePrefix.length(), ntfyRow.length()));
    int rowPrefixLen = rowBuilder.getLength();

    Set<Bytes> keysToFetch = updates.keySet();
    if (partiallyReadKey != null) {
        final Bytes prk = partiallyReadKey;
        keysToFetch = Sets.filter(keysToFetch, b -> !b.equals(prk));
    }
    Map<Bytes, Map<Column, Bytes>> currentVals = getCurrentValues(tx, rowBuilder, keysToFetch);

    ArrayList<Change<K, V>> updatesToReport = new ArrayList<>(updates.size());

    for (Entry<Bytes, List<Bytes>> entry : updates.entrySet()) {
        rowBuilder.setLength(rowPrefixLen);
        Bytes currentValueRow = rowBuilder.append(entry.getKey()).toBytes();
        Bytes currVal = currentVals.getOrDefault(currentValueRow, Collections.emptyMap()).get(DATA_COLUMN);

        K kd = serializer.deserialize(entry.getKey().toArray(), keyType);

        if (partiallyReadKey != null && partiallyReadKey.equals(entry.getKey())) {
            // not all updates were read for this key, so requeue the combined updates as an update
            Optional<V> nv = combiner.combine(new InputImpl<>(kd, this::deserVal, entry.getValue()));
            if (nv.isPresent()) {
                addAll(tx, Collections.singletonMap(kd, nv.get()));
            }
        } else {
            Optional<V> nv = combiner.combine(new InputImpl<>(kd, this::deserVal, currVal, entry.getValue()));
            Bytes newVal = nv.isPresent() ? Bytes.of(serializer.serialize(nv.get())) : null;

            if (newVal != null ^ currVal != null || (currVal != null && !currVal.equals(newVal))) {
                if (newVal == null) {
                    tx.delete(currentValueRow, DATA_COLUMN);
                } else {
                    tx.set(currentValueRow, DATA_COLUMN, newVal);
                }

                Optional<V> cvd = Optional.ofNullable(currVal).map(this::deserVal);
                updatesToReport.add(new ChangeImpl<>(kd, cvd, nv));
            }
        }
    }

    // TODO could clear these as converted to objects to avoid double memory usage
    updates.clear();
    currentVals.clear();

    if (updatesToReport.size() > 0) {
        changeObserver.process(tx, updatesToReport);
    }
}
From source file:edu.harvard.med.screensaver.model.screenresults.ScreenResult.java
private SortedSet<AssayPlate> findOrCreateAssayPlatesDataLoaded(int plateNumber, int replicatesDataLoaded) {
    SortedSet<AssayPlate> mostRecentAssayPlatesForPlateNumber = Sets.newTreeSet();
    SortedSet<AssayPlate> allAssayPlatesForPlateNumber = getScreen().findAssayPlates(plateNumber);
    if (!allAssayPlatesForPlateNumber.isEmpty()) {
        final LibraryScreening lastLibraryScreening = ImmutableSortedSet
                .copyOf(Iterables.transform(allAssayPlatesForPlateNumber, AssayPlate.ToLibraryScreening))
                .last();
        assert lastLibraryScreening != null;
        mostRecentAssayPlatesForPlateNumber
                .addAll(Sets.filter(allAssayPlatesForPlateNumber, new Predicate<AssayPlate>() {
                    public boolean apply(AssayPlate ap) {
                        return lastLibraryScreening.equals(ap.getLibraryScreening());
                    }
                }));
    }
    SortedSet<AssayPlate> assayPlatesDataLoaded = Sets.newTreeSet();
    // if there are fewer assay plates screened replicates than we have data
    // for, then a library screening must not have been recorded for the assay
    // plates that were used to generate this data, so we'll create them now
    if (mostRecentAssayPlatesForPlateNumber.size() < replicatesDataLoaded) {
        //log.warn("creating missing assay plate(s) for plate number " + plateNumber);
        for (int r = 0; r < replicatesDataLoaded; r++) {
            assayPlatesDataLoaded.add(getScreen().createAssayPlate(plateNumber, r));
        }
    } else {
        for (AssayPlate assayPlate : mostRecentAssayPlatesForPlateNumber) {
            if (assayPlate.getReplicateOrdinal() < replicatesDataLoaded) {
                assayPlatesDataLoaded.add(assayPlate);
            }
        }
    }
    return assayPlatesDataLoaded;
}
From source file:com.google.devtools.build.lib.skyframe.FilesetEntryFunction.java
private static Set<String> createExclusionSet(Set<String> input) {
    return Sets.filter(input, new Predicate<String>() {
        @Override
        public boolean apply(String e) {
            // Keep the top-level exclusions only. Do not look for "/" but count the path segments
            // instead, in anticipation of future Windows support.
            return new PathFragment(e).segmentCount() == 1;
        }
    });
}
From source file:org.jclouds.nodepool.config.BindBackendComputeService.java
@Provides
@Singleton
@Backend
@Exposed
protected Supplier<Template> makeBackendTemplate(@Backend Supplier<ComputeService> compute,
        @Named(BACKEND_GROUP) final String poolGroupPrefix) {
    return Suppliers.memoize(Suppliers.compose(new Function<ComputeService, Template>() {
        @Override
        public Template apply(ComputeService input) {
            try {
                return input.templateBuilder().build();
            } catch (IllegalStateException e) {
                // if there's no template we must be on byon and there must be at least one node in
                // our group
                Set<? extends NodeMetadata> nodes = Sets.filter(
                        input.listNodesDetailsMatching(NodePredicates.all()),
                        NodePredicates.inGroup(poolGroupPrefix));
                checkState(!nodes.isEmpty(),
                        "service provided no template and no node was in this nodepool's group.");
                final NodeMetadata node = Iterables.get(nodes, 0);
                final Image image = new ImageBuilder().id(node.getId()).location(node.getLocation())
                        .operatingSystem(node.getOperatingSystem()).status(Status.AVAILABLE)
                        .description("physical node").build();
                final Hardware hardware = new HardwareBuilder().id(node.getId()).build();
                return new Template() {
                    @Override
                    public Image getImage() {
                        return image;
                    }

                    @Override
                    public Hardware getHardware() {
                        return hardware;
                    }

                    @Override
                    public Location getLocation() {
                        return node.getLocation();
                    }

                    @Override
                    public TemplateOptions getOptions() {
                        return new TemplateOptions();
                    }

                    @Override
                    public Template clone() {
                        return this;
                    }
                };
            }
        }
    }, compute));
}
From source file:org.apache.bookkeeper.stream.storage.impl.sc.ZkStorageContainerManager.java
private void processMyAssignment(ServerAssignmentData myAssignment) {
    Set<Long> assignedContainerSet = myAssignment.getContainersList().stream().collect(Collectors.toSet());
    Set<Long> liveContainerSet = Sets.newHashSet(liveContainers.keySet());

    Set<Long> containersToStart =
            Sets.newHashSet(Sets.difference(assignedContainerSet, liveContainerSet).immutableCopy());
    Set<Long> containersToStop =
            Sets.newHashSet(Sets.difference(liveContainerSet, assignedContainerSet).immutableCopy());

    // if the containers are already in the pending start/stop list, we don't touch them until they are completed.
    containersToStart =
            Sets.filter(containersToStart, container -> !pendingStartStopContainers.contains(container));
    containersToStop =
            Sets.filter(containersToStop, container -> !pendingStartStopContainers.contains(container));

    if (!containersToStart.isEmpty() || !containersToStop.isEmpty()) {
        log.info(
                "Process container changes:\n\tIdeal = {}\n\tLive = {}\n\t"
                        + "Pending = {}\n\tToStart = {}\n\tToStop = {}",
                assignedContainerSet, liveContainerSet, pendingStartStopContainers,
                containersToStart, containersToStop);
    }

    containersToStart.forEach(this::startStorageContainer);
    containersToStop.forEach(this::stopStorageContainer);
}
From source file:org.dllearner.utilities.examples.AutomaticNegativeExampleFinderSPARQL2.java
private <T extends OWLClassExpression> Set<T> filterByNamespace(Set<T> classes) {
    if (namespace != null) {
        return Sets.filter(classes, input -> input.toString().startsWith(namespace));
    }
    return classes;
}
From source file:com.isotrol.impe3.web20.impl.MembersServiceImpl.java
private MemberEntity fill(MemberEntity entity, MemberDTO dto) {
    final Calendar date = Calendar.getInstance();
    date.setTime(dto.getDate());
    entity.setDate(date);
    entity.setDisplayName(dto.getDisplayName());
    entity.setEmail(dto.getEmail());
    entity.setMemberCode(dto.getCode());
    entity.setName(dto.getName());
    entity.setBlocked(dto.isBlocked());
    // final Set<FavoriteEntity> favorites = entity.getFavorites(); // TODO not covered by the DTO.

    final Set<String> profiles = entity.getProfiles();
    profiles.clear();
    final Set<String> dtopf = dto.getProfiles();
    if (dtopf != null) {
        profiles.addAll(Sets.filter(dtopf, notNull()));
    }

    final Map<String, String> properties = entity.getProperties();
    properties.clear();
    final Map<String, String> dtopr = dto.getProperties();
    if (dtopr != null) {
        properties.putAll(Maps.filterKeys(Maps.filterValues(dtopr, notNull()), notNull()));
    }

    return entity;
}
From source file:org.voltcore.agreement.MeshArbiter.java
/**
 * Notify all survivors when you are closing links to nodes
 *
 * @param decision map whose keys are the kill sites and whose
 *        values are their last known safe transaction ids
 */
protected void notifyOnKill(Set<Long> hsIds, Map<Long, Long> decision) {
    SiteFailureMessage.Builder sfmb = SiteFailureMessage.builder()
            .decisions(decision.keySet())
            .failures(decision.keySet());

    Set<Long> dests = Sets.filter(m_seeker.getSurvivors(), not(equalTo(m_hsId)));
    if (dests.isEmpty())
        return;

    sfmb.survivors(Sets.difference(m_seeker.getSurvivors(), decision.keySet()));
    sfmb.safeTxnIds(getSafeTxnIdsForSites(hsIds));

    SiteFailureMessage sfm = sfmb.build();
    m_mailbox.send(Longs.toArray(dests), sfm);

    m_recoveryLog.info("Agreement, Sending [" + CoreUtils.hsIdCollectionToString(dests) + "] " + sfm);
}
From source file:org.nuxeo.binary.metadata.internals.BinaryMetadataServiceImpl.java
/**
 * Checks, for each binary metadata rule, whether the document is accepted.
 *
 * @return the set of metadata rules which should be processed, sorted by rule order (high to low priority)
 */
protected Set<MetadataRuleDescriptor> checkFilter(final ActionContext actionContext) {
    final ActionManager actionService = Framework.getLocalService(ActionManager.class);
    Set<MetadataRuleDescriptor> filtered = Sets.filter(BinaryMetadataComponent.self.ruleRegistry.contribs,
            new Predicate<MetadataRuleDescriptor>() {
                @Override
                public boolean apply(MetadataRuleDescriptor input) {
                    if (!input.getEnabled()) {
                        return false;
                    }
                    for (String filterId : input.getFilterIds()) {
                        if (!actionService.checkFilter(filterId, actionContext)) {
                            return false;
                        }
                    }
                    return true;
                }
            });
    return filtered;
}