List of usage examples for java.util Set retainAll
boolean retainAll(Collection<?> c);
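Before the source-file examples below, a minimal sketch of the contract (assuming Java 9+ for Set.of): retainAll keeps only the elements also contained in the argument — an in-place set intersection — and returns true exactly when the receiving set was modified.

import java.util.HashSet;
import java.util.Set;

public class RetainAllDemo {
    public static void main(String[] args) {
        Set<String> colors = new HashSet<>(Set.of("red", "green", "blue"));

        // Keep only elements also present in the argument (in-place intersection).
        boolean changed = colors.retainAll(Set.of("red", "orange", "yellow"));

        System.out.println(colors);  // [red]
        System.out.println(changed); // true: "green" and "blue" were removed
    }
}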
From source file:org.accada.hal.impl.sim.multi.GraphicSimulatorServer.java
/**
 * Updates the layered pane with new reader information.
 */
private void updateJLayeredPane() {
    Set controllerReaderIds = controller.getReaderIds();
    Set guiReaderIds = readers.keySet();
    // get new readers
    Set newReaderIds = new TreeSet(controllerReaderIds);
    newReaderIds.removeAll(guiReaderIds);
    // get updated readers
    Set updateReaderIds = new TreeSet(controllerReaderIds);
    updateReaderIds.retainAll(guiReaderIds);
    // get old readers
    Set oldReaderIds = new TreeSet(guiReaderIds);
    oldReaderIds.removeAll(controllerReaderIds);
    // create, update and delete readers
    createReaders(newReaderIds);
    updateReaders(updateReaderIds);
    removeReaders(oldReaderIds);
    jLayeredPane.repaint();
}
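The example above is the classic three-way reconcile: two removeAll calls yield the additions and deletions, and retainAll yields the intersection to refresh. A minimal generic sketch of the same pattern (the names current/previous are illustrative, not from the source):

import java.util.Set;
import java.util.TreeSet;

static void reconcile(Set<Integer> current, Set<Integer> previous) {
    Set<Integer> toCreate = new TreeSet<>(current);
    toCreate.removeAll(previous);   // in current but not yet known

    Set<Integer> toUpdate = new TreeSet<>(current);
    toUpdate.retainAll(previous);   // present on both sides -> refresh

    Set<Integer> toRemove = new TreeSet<>(previous);
    toRemove.removeAll(current);    // known but gone from current

    // create(toCreate); update(toUpdate); remove(toRemove);
}

Copying into fresh TreeSets matters here: removeAll and retainAll mutate the receiver, so the inputs stay untouched.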
From source file:uk.gov.gchq.gaffer.spark.operation.dataframe.FiltersToOperationConverter.java
private AbstractGetRDD<?> applyPropertyFilters(final View derivedView, final AbstractGetRDD<?> operation) {
    final List<Set<String>> groupsRelatedToFilters = new ArrayList<>();
    for (final Filter filter : filters) {
        final Set<String> groupsRelatedToFilter = getGroupsFromFilter(filter);
        if (groupsRelatedToFilter != null && !groupsRelatedToFilter.isEmpty()) {
            groupsRelatedToFilters.add(groupsRelatedToFilter);
        }
        LOGGER.info("Groups {} are related to filter {}", StringUtils.join(groupsRelatedToFilter, ','), filter);
    }
    LOGGER.info("Groups related to filters are: {}", StringUtils.join(groupsRelatedToFilters, ','));

    // Take the intersection of this list of groups - only these groups can be related to the query
    final Set<String> intersection = new HashSet<>(derivedView.getEntityGroups());
    intersection.addAll(derivedView.getEdgeGroups());
    for (final Set<String> groupsRelatedToFilter : groupsRelatedToFilters) {
        intersection.retainAll(groupsRelatedToFilter);
    }
    LOGGER.info("Groups that can be returned are: {}", StringUtils.join(intersection, ','));

    // Update view with filters and add to operation
    final Map<String, List<ConsumerFunctionContext<String, FilterFunction>>> groupToFunctions = new HashMap<>();
    for (final Filter filter : filters) {
        final Map<String, List<ConsumerFunctionContext<String, FilterFunction>>> map = getFunctionsFromFilter(filter);
        for (final Entry<String, List<ConsumerFunctionContext<String, FilterFunction>>> entry : map.entrySet()) {
            if (!groupToFunctions.containsKey(entry.getKey())) {
                groupToFunctions.put(entry.getKey(),
                        new ArrayList<ConsumerFunctionContext<String, FilterFunction>>());
            }
            groupToFunctions.get(entry.getKey()).addAll(entry.getValue());
        }
    }
    LOGGER.info("The following functions will be applied for the given group:");
    for (final Entry<String, List<ConsumerFunctionContext<String, FilterFunction>>> entry : groupToFunctions.entrySet()) {
        LOGGER.info("Group = {}: ", entry.getKey());
        for (final ConsumerFunctionContext<String, FilterFunction> cfc : entry.getValue()) {
            LOGGER.info("\t{} {}", StringUtils.join(cfc.getSelection(), ','), cfc.getFunction());
        }
    }

    boolean updated = false;
    View.Builder builder = new View.Builder();
    for (final String group : derivedView.getEntityGroups()) {
        if (intersection.contains(group)) {
            if (groupToFunctions.get(group) != null) {
                final ViewElementDefinition ved = new ViewElementDefinition.Builder()
                        .merge(derivedView.getEntity(group))
                        .postAggregationFilterFunctions(groupToFunctions.get(group)).build();
                LOGGER.info("Adding the following filter functions to the view for group {}:", group);
                for (final ConsumerFunctionContext<String, FilterFunction> cfc : groupToFunctions.get(group)) {
                    LOGGER.info("\t{} {}", StringUtils.join(cfc.getSelection(), ','), cfc.getFunction());
                }
                builder = builder.entity(group, ved);
                updated = true;
            } else {
                LOGGER.info("Not adding any filter functions to the view for group {}", group);
            }
        }
    }
    for (final String group : derivedView.getEdgeGroups()) {
        if (intersection.contains(group)) {
            if (groupToFunctions.get(group) != null) {
                final ViewElementDefinition ved = new ViewElementDefinition.Builder()
                        .merge(derivedView.getEdge(group))
                        .postAggregationFilterFunctions(groupToFunctions.get(group)).build();
                LOGGER.info("Adding the following filter functions to the view for group {}:", group);
                for (final ConsumerFunctionContext<String, FilterFunction> cfc : groupToFunctions.get(group)) {
                    LOGGER.info("\t{} {}", StringUtils.join(cfc.getSelection(), ','), cfc.getFunction());
                }
                builder = builder.edge(group, ved);
                updated = true;
            } else {
                LOGGER.info("Not adding any filter functions to the view for group {}", group);
            }
        }
    }

    if (updated) {
        operation.setView(builder.build());
    } else {
        operation.setView(derivedView);
    }
    return operation;
}
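The converter above folds retainAll over a list of group sets to intersect them all. Distilled into a standalone helper (hypothetical, not part of the Gaffer API), with an early exit once the intersection is empty:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

static <T> Set<T> intersectAll(Set<T> seed, List<Set<T>> sets) {
    Set<T> result = new HashSet<>(seed); // copy so the seed is not mutated
    for (Set<T> s : sets) {
        result.retainAll(s);
        if (result.isEmpty()) {
            break; // nothing can survive further intersections
        }
    }
    return result;
}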
From source file:org.alfresco.module.org_alfresco_module_rm.model.behaviour.RecordsManagementSearchBehaviour.java
/**
 * On update vital record definition properties behaviour implementation.
 *
 * @param nodeRef node reference
 * @param before  before properties
 * @param after   after properties
 */
public void vitalRecordDefintionUpdateProperties(final NodeRef nodeRef, final Map<QName, Serializable> before,
        final Map<QName, Serializable> after) {
    AuthenticationUtil.runAsSystem(new AuthenticationUtil.RunAsWork<Void>() {
        @Override
        public Void doWork() {
            // Only care about record folders
            if (nodeService.exists(nodeRef) && recordFolderService.isRecordFolder(nodeRef)) {
                Set<QName> props = new HashSet<QName>(1);
                props.add(PROP_REVIEW_PERIOD);
                Set<QName> changed = determineChangedProps(before, after);
                changed.retainAll(props);
                if (!changed.isEmpty()) {
                    updateVitalRecordDefinitionValues(nodeRef);
                }
            }
            return null;
        }
    });
}
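The retainAll-then-isEmpty idiom above answers "did any watched property change?". When the intersection itself is never used, Collections.disjoint expresses the same test without copying or mutating either set — a sketch of the equivalent check:

import java.util.Collections;
import java.util.Set;

static <T> boolean anyWatchedChanged(Set<T> changed, Set<T> watched) {
    // Same result as: changed.retainAll(watched); !changed.isEmpty()
    // but leaves 'changed' intact.
    return !Collections.disjoint(changed, watched);
}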
From source file:Main.java
/**
 * Wraps a set so that it can shrink but never grow: all read and removal
 * operations (including retainAll) delegate to the backing set, while add
 * and addAll throw UnsupportedOperationException.
 */
static <E> Set<E> ungrowableSet(final Set<E> s) {
    return new Set<E>() {
        @Override
        public int size() {
            return s.size();
        }

        @Override
        public boolean isEmpty() {
            return s.isEmpty();
        }

        @Override
        public boolean contains(Object o) {
            return s.contains(o);
        }

        @Override
        public Object[] toArray() {
            return s.toArray();
        }

        @Override
        public <T> T[] toArray(T[] a) {
            return s.toArray(a);
        }

        @Override
        public String toString() {
            return s.toString();
        }

        @Override
        public Iterator<E> iterator() {
            return s.iterator();
        }

        @Override
        public boolean equals(Object o) {
            return s.equals(o);
        }

        @Override
        public int hashCode() {
            return s.hashCode();
        }

        @Override
        public void clear() {
            s.clear();
        }

        @Override
        public boolean remove(Object o) {
            return s.remove(o);
        }

        @Override
        public boolean containsAll(Collection<?> coll) {
            return s.containsAll(coll);
        }

        @Override
        public boolean removeAll(Collection<?> coll) {
            return s.removeAll(coll);
        }

        @Override
        public boolean retainAll(Collection<?> coll) {
            return s.retainAll(coll);
        }

        @Override
        public boolean add(E o) {
            throw new UnsupportedOperationException();
        }

        @Override
        public boolean addAll(Collection<? extends E> coll) {
            throw new UnsupportedOperationException();
        }
    };
}
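A usage sketch for the wrapper above: operations that can only shrink the set, retainAll included, pass straight through, while anything that could grow it throws. (The call site is hypothetical; List.of assumes Java 9+.)

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public static void main(String[] args) {
    Set<String> s = ungrowableSet(new HashSet<>(List.of("a", "b", "c")));
    s.retainAll(List.of("a", "b")); // allowed: retainAll can only remove elements
    s.remove("a");                  // allowed: shrinking is fine
    s.add("d");                     // throws UnsupportedOperationException
}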
From source file:py.una.pol.karaku.dao.entity.interceptors.InterceptorHandler.java
/**
 * Intercepts an attribute of a specific bean.
 *
 * <p>
 * Rules:
 * <ol>
 * <li>If the item is a normal attribute, invoke its corresponding
 * interceptor.</li>
 * <li>If it is a collection annotated with {@link OneToMany} and its
 * {@link CascadeType} is {@link CascadeType#ALL}, interception is
 * propagated to the members of the collection.</li>
 * </ol>
 *
 * @param op    current operation
 * @param field field being processed
 * @param bean  object being intercepted
 */
private void intercept(@Nonnull Operation op, @Nonnull Field field, @Nonnull Object bean) {
    if (field.getAnnotation(OneToMany.class) != null) {
        OneToMany otm = field.getAnnotation(OneToMany.class);
        CascadeType[] cascade = otm.cascade();
        if (cascade != null && ListHelper.contains(cascade, CascadeType.ALL)) {
            field.setAccessible(true);
            Collection<?> c = (Collection<?>) ReflectionUtils.getField(field, bean);
            if (Hibernate.isInitialized(c) && ListHelper.hasElements(c)) {
                for (Object o : c) {
                    this.intercept(op, o);
                }
            }
        }
        return;
    }
    field.setAccessible(true);
    Class<?> type = field.getType();
    Set<Interceptor> typeInterceptors = addAll(byType.get(void.class), byType.get(type));
    Annotation[] annons = field.getAnnotations();
    Set<Interceptor> annonInterceptors = new HashSet<Interceptor>();
    if (byAnnotation.get(void.class) != null) {
        annonInterceptors.addAll(byAnnotation.get(void.class));
    }
    for (Annotation an : annons) {
        if (byAnnotation.get(an.annotationType()) != null) {
            annonInterceptors.addAll(byAnnotation.get(an.annotationType()));
        }
    }
    // Keep only interceptors registered for both the type and an annotation.
    typeInterceptors.retainAll(annonInterceptors);
    for (Interceptor bi : typeInterceptors) {
        if (this.isAssignable(field) && bi.interceptable(op, field, bean)) {
            bi.intercept(op, field, bean);
        }
    }
}
From source file:com.aurel.track.admin.customize.category.filter.execute.loadItems.LoadItemLinksUtil.java
/**
 * Gets all ancestors which are not included in the original query result.
 *
 * @param baseWorkItemBeans the workItems whose ancestors are gathered
 * @return the IDs of all ancestors not present in the base result
 */
private static Set<Integer> getParentHierarchy(List<TWorkItemBean> baseWorkItemBeans, Integer archived,
        Integer deleted) {
    Set<Integer> allAscendentIDsSet = new HashSet<Integer>();
    if (baseWorkItemBeans != null) {
        // to avoid infinite cycles by inconsistent data
        Set<Integer> toRemoveSet = GeneralUtils.createIntegerSetFromBeanList(baseWorkItemBeans);
        List<TWorkItemBean> workItemBeans = baseWorkItemBeans;
        Set<Integer> notIncludedParentIDs;
        int i = 0;
        do {
            notIncludedParentIDs = new HashSet<Integer>();
            if (workItemBeans != null) {
                for (TWorkItemBean workItemBean : workItemBeans) {
                    Integer parentID = workItemBean.getSuperiorworkitem();
                    if (parentID != null && !toRemoveSet.contains(parentID)) {
                        notIncludedParentIDs.add(parentID);
                    }
                }
            }
            i++;
            LOGGER.debug(notIncludedParentIDs.size() + " parents found at " + i + ". level");
            // gather the not yet present parents into the remove set to avoid
            // infinite cycles by inconsistent data
            workItemBeans = null;
            if (!notIncludedParentIDs.isEmpty()) {
                toRemoveSet.addAll(notIncludedParentIDs);
                // get the next level of parent workItems
                workItemBeans = workItemDAO.loadByWorkItemKeys(
                        GeneralUtils.createIntArrFromSet(notIncludedParentIDs), archived, deleted);
                // retain only the workItems with the corresponding archive/delete state
                notIncludedParentIDs.retainAll(GeneralUtils.createIntegerListFromBeanList(workItemBeans));
                // add the not yet present parents to the result
                allAscendentIDsSet.addAll(notIncludedParentIDs);
            }
        } while (!notIncludedParentIDs.isEmpty());
    }
    return allAscendentIDsSet;
}
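One performance detail in the example above: retainAll is handed a List. The default implementation iterates the receiving set and calls contains() on the argument for each element, so a List argument makes the call O(n*m). Wrapping the argument in a HashSet first restores O(n) — a sketch:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

static void retainEfficiently(Set<Integer> ids, List<Integer> beanIds) {
    // contains() on a List is linear; on a HashSet it is O(1).
    ids.retainAll(new HashSet<>(beanIds));
}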
From source file:com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.KubernetesV2SearchProvider.java
private List<Map<String, Object>> getMatches(String query, List<String> types, Map<String, String> filters) {
    String matchQuery = String.format("*%s*", query.toLowerCase());
    Set<String> typeSet = new HashSet<>(types);

    // We add k8s versions of Spinnaker types here to ensure that (for example)
    // replica sets are returned when server groups are requested.
    typeSet.addAll(types.stream().map(t -> {
        try {
            return KubernetesSpinnakerKindMap.SpinnakerKind.fromString(t);
        } catch (IllegalArgumentException e) {
            return null;
        }
    }).filter(Objects::nonNull).map(kindMap::translateSpinnakerKind).flatMap(Collection::stream)
            .map(KubernetesKind::toString).collect(Collectors.toSet()));

    // Remove caches that we can't search
    typeSet.retainAll(allCaches);

    // Search caches directly
    List<Map<String, Object>> results = typeSet.stream()
            .map(type -> cacheUtils.getAllKeysMatchingPattern(type, matchQuery)).flatMap(Collection::stream)
            .map(this::convertKeyToMap).filter(Objects::nonNull).collect(Collectors.toList());

    // Search 'logical' caches (clusters, apps) for indirect matches
    Map<String, List<String>> keyToAllLogicalKeys = getKeysRelatedToLogicalMatches(matchQuery);
    results.addAll(keyToAllLogicalKeys.entrySet().stream().map(kv -> {
        Map<String, Object> result = convertKeyToMap(kv.getKey());
        if (result == null) {
            return null;
        }
        kv.getValue().stream().map(Keys::parseKey).filter(Optional::isPresent).map(Optional::get)
                .filter(LogicalKey.class::isInstance).map(k -> (LogicalKey) k)
                .forEach(k -> result.put(k.getLogicalKind().singular(), k.getName()));
        return result;
    }).collect(Collectors.toList()));

    results = results.stream().filter(r -> typeSet.contains(r.get("type")) || typeSet.contains(r.get("group")))
            .collect(Collectors.toList());

    return results;
}
From source file:edu.uci.ics.asterix.optimizer.rules.RemoveRedundantListifyRule.java
private boolean applyRuleDown(Mutable<ILogicalOperator> opRef, Set<LogicalVariable> varSet,
        IOptimizationContext context) throws AlgebricksException {
    boolean changed = applies(opRef, varSet, context);
    changed |= appliesForReverseCase(opRef, varSet, context);
    AbstractLogicalOperator op = (AbstractLogicalOperator) opRef.getValue();
    VariableUtilities.getUsedVariables(op, varSet);
    if (op.hasNestedPlans()) {
        // Variables used by the parent operators should be live at op.
        Set<LogicalVariable> localLiveVars = new ListSet<LogicalVariable>();
        VariableUtilities.getLiveVariables(op, localLiveVars);
        varSet.retainAll(localLiveVars);
        AbstractOperatorWithNestedPlans aonp = (AbstractOperatorWithNestedPlans) op;
        for (ILogicalPlan p : aonp.getNestedPlans()) {
            for (Mutable<ILogicalOperator> r : p.getRoots()) {
                if (applyRuleDown(r, varSet, context)) {
                    changed = true;
                }
                context.addToDontApplySet(this, r.getValue());
            }
        }
    }
    for (Mutable<ILogicalOperator> i : op.getInputs()) {
        if (applyRuleDown(i, varSet, context)) {
            changed = true;
        }
        context.addToDontApplySet(this, i.getValue());
    }
    return changed;
}
From source file:com.act.lcms.v2.fullindex.Searcher.java
/**
 * Searches an LCMS index for all (time, m/z, intensity) triples within some time and m/z ranges.
 *
 * Note that this method is very much a first-draft/WIP. There are many opportunities for optimization and
 * improvement here, but this works as an initial attempt. This method is littered with TODOs, which once TODone
 * should make this a near optimal method of searching through LCMS readings.
 *
 * @param mzRange The range of m/z values for which to search.
 * @param timeRange The time range for which to search.
 * @return A list of (time, m/z, intensity) triples that fall within the specified ranges.
 * @throws RocksDBException
 * @throws ClassNotFoundException
 * @throws IOException
 */
public List<TMzI> searchIndexInRange(Pair<Double, Double> mzRange, Pair<Double, Double> timeRange)
        throws RocksDBException, ClassNotFoundException, IOException {
    // TODO: gracefully handle the case when only one range is specified.
    // TODO: consider producing some sort of query plan structure that can be used for optimization/explanation.
    DateTime start = DateTime.now();

    /* Demote the time range to floats, as we know that that's how we stored times in the DB. This tight coupling
     * would normally be a bad thing, but given that this class is joined at the hip with Builder necessarily, it
     * doesn't seem like a terrible thing at the moment. */
    Pair<Float, Float> tRangeF = // My kingdom for a functor!
            Pair.of(timeRange.getLeft().floatValue(), timeRange.getRight().floatValue());

    LOGGER.info("Running search for %.6f <= t <= %.6f, %.6f <= m/z <= %.6f", tRangeF.getLeft(),
            tRangeF.getRight(), mzRange.getLeft(), mzRange.getRight());

    // TODO: short circuit these filters. The first failure after success => no more possible hits.
    List<Float> timesInRange = timepointsInRange(tRangeF);
    byte[][] timeIndexBytes = extractValueBytes(ColumnFamilies.TIMEPOINT_TO_TRIPLES, timesInRange, Float.BYTES,
            ByteBuffer::putFloat);
    // TODO: bail if all the timeIndexBytes lengths are zero.

    List<MZWindow> mzWindowsInRange = mzWindowsInRange(mzRange);
    byte[][] mzIndexBytes = extractValueBytes(ColumnFamilies.WINDOW_ID_TO_TRIPLES, mzWindowsInRange,
            Integer.BYTES, (buff, mz) -> buff.putInt(mz.getIndex()));
    // TODO: bail if all the mzIndexBytes are zero.

    /* TODO: if the number of entries in one range is significantly smaller than the other (like an order of
     * magnitude or more), skip extraction of the other set of ids and just filter at the end. This will be
     * especially helpful when the number of ids in the m/z domain is small, as each time point will probably
     * have >10k ids. */
    LOGGER.info("Found/loaded %d matching time ranges, %d matching m/z ranges", timesInRange.size(),
            mzWindowsInRange.size());

    // TODO: there is no need to union the time indices since they are necessarily distinct. Just concatenate instead.
    Set<Long> unionTimeIds = unionIdBuffers(timeIndexBytes);
    Set<Long> unionMzIds = unionIdBuffers(mzIndexBytes);
    // TODO: handle the case where one of the sets is empty specially. Either keep all in the other set or drop all.
    // TODO: we might be able to do this faster by intersecting two sorted lists.
    Set<Long> intersectionIds = new HashSet<>(unionTimeIds);
    /* TODO: this is effectively a hash join, which isn't optimal for sets of wildly different cardinalities.
     * Consider using sort-merge join instead, which will reduce the object overhead (by a lot) and allow us to
     * pass over the union of the ids from each range just once when joining them. Additionally, just skip this
     * whole step and filter at the end if one of the set's sizes is less than 1k or so and the other is large. */
    intersectionIds.retainAll(unionMzIds);
    LOGGER.info("Id intersection results: t = %d, mz = %d, t ^ mz = %d", unionTimeIds.size(), unionMzIds.size(),
            intersectionIds.size());

    List<Long> idsToFetch = new ArrayList<>(intersectionIds);
    Collections.sort(idsToFetch); // Sort ids so we retrieve them in an order that exploits index locality.

    LOGGER.info("Collecting TMzI triples");
    // Collect all the triples for the ids we extracted.
    // TODO: don't manifest all the bytes: just create a stream of results from the cursor to reduce memory overhead.
    List<TMzI> results = new ArrayList<>(idsToFetch.size());
    byte[][] resultBytes = extractValueBytes(ColumnFamilies.ID_TO_TRIPLE, idsToFetch, Long.BYTES,
            ByteBuffer::putLong);
    for (byte[] tmziBytes : resultBytes) {
        results.add(TMzI.readNextFromByteBuffer(ByteBuffer.wrap(tmziBytes)));
    }

    // TODO: do this filtering inline with the extraction. We shouldn't have to load all the triples before filtering.
    LOGGER.info("Performing final filtering");
    int preFilterTMzICount = results.size();
    results = results.stream()
            .filter(tmzi -> tmzi.getTime() >= tRangeF.getLeft() && tmzi.getTime() <= tRangeF.getRight()
                    && tmzi.getMz() >= mzRange.getLeft() && tmzi.getMz() <= mzRange.getRight())
            .collect(Collectors.toList());
    LOGGER.info("Precise filtering results: %d -> %d", preFilterTMzICount, results.size());

    DateTime end = DateTime.now();
    LOGGER.info("Search completed in %dms", end.getMillis() - start.getMillis());

    // TODO: return a stream instead that can load the triples lazily.
    return results;
}
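As the inline TODO notes, new HashSet<>(a) followed by retainAll(b) is effectively a hash join. Short of a full sort-merge join, one cheap refinement is to copy the smaller set and probe the larger one, bounding both the copy and the probe count by the smaller cardinality — a sketch:

import java.util.HashSet;
import java.util.Set;

static <T> Set<T> intersect(Set<T> a, Set<T> b) {
    Set<T> small = a.size() <= b.size() ? a : b;
    Set<T> large = (small == a) ? b : a;
    Set<T> result = new HashSet<>(small);
    result.retainAll(large); // each membership probe against 'large' is O(1) for a hash set
    return result;
}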
From source file:org.mindswap.pellet.KRSSLoader.java
public void verifyTBox(String file, KnowledgeBase kb) throws Exception {
    initTokenizer(new FileReader(file));

    int verifiedCount = 0;
    int token = in.nextToken();
    while (token != ')' && token != StreamTokenizer.TT_EOF) {
        ATermUtils.assertTrue(token == '(');
        verifiedCount++;

        ATermAppl c = null;
        if (peekNext('(')) {
            ATermAppl[] list = parseExprList();
            c = list[0];
            Set eqs = kb.getEquivalentClasses(c);
            for (int i = 1; i < list.length; i++) {
                ATermAppl t = list[i];
                if (!eqs.contains(t))
                    throw new RuntimeException(t + " is not equivalent to " + c);
            }
        } else
            c = parseExpr();

        Set supers = SetUtils.union(kb.getSuperClasses(c, true));
        Set subs = SetUtils.union(kb.getSubClasses(c, true));

        if (log.isDebugEnabled())
            log.debug("Verify (" + verifiedCount + ") " + c + " " + supers + " " + subs);

        if (peekNext('(')) {
            ATermAppl[] terms = parseExprList();
            for (int i = 0; i < terms.length; i++) {
                ATerm t = terms[i];
                if (!supers.contains(t))
                    throw new RuntimeException(t + " is not a superclass of " + c + " " + supers);
            }
        } else
            skipNext();

        if (peekNext('(')) {
            ATermAppl[] terms = parseExprList();
            for (int i = 0; i < terms.length; i++) {
                ATermAppl t = terms[i];
                if (!subs.contains(t)) {
                    Set temp = new HashSet(subs);
                    Set sames = kb.getEquivalentClasses(t);
                    temp.retainAll(sames);
                    if (temp.size() == 0)
                        throw new RuntimeException(t + " is not a subclass of " + c + " " + subs);
                }
            }
        }
        skipNext();

        token = in.nextToken();
    }
    ATermUtils.assertTrue(in.nextToken() == StreamTokenizer.TT_EOF);
}