List of usage examples for com.google.common.collect Iterables limit
public static <T> Iterable<T> limit(final Iterable<T> iterable, final int limitSize)
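For orientation, here is a minimal, self-contained sketch (not taken from any of the source files below; the class name IterablesLimitDemo is just for illustration) of the basic behavior: limit returns a lazy view of at most the first limitSize elements of the source iterable.

import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;

import java.util.List;

public class IterablesLimitDemo {
    public static void main(String[] args) {
        List<String> names = Lists.newArrayList("alpha", "beta", "gamma", "delta");

        // limit returns a lazy view of at most the first two elements; the source is not copied.
        Iterable<String> firstTwo = Iterables.limit(names, 2);
        System.out.println(Lists.newArrayList(firstTwo)); // [alpha, beta]

        // A limit larger than the source simply yields all elements.
        System.out.println(Lists.newArrayList(Iterables.limit(names, 10))); // [alpha, beta, gamma, delta]
    }
}

Note that a negative limitSize throws IllegalArgumentException, which is why several of the examples below guard against negative or null limits before calling Iterables.limit.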
From source file:org.apache.jackrabbit.oak.security.user.PasswordHistory.java
/**
 * Verify that the specified new password is not contained in the history.
 *
 * @param userTree The user tree.
 * @param newPassword The new password
 * @throws ConstraintViolationException If the password is found in the history
 * @throws AccessDeniedException If the editing session cannot access the rep:pwd node.
 */
private void checkPasswordInHistory(@Nonnull Tree userTree, @Nonnull String newPassword)
        throws ConstraintViolationException, AccessDeniedException {
    if (PasswordUtil.isSame(TreeUtil.getString(userTree, UserConstants.REP_PASSWORD), newPassword)) {
        throw new PasswordHistoryException("New password is identical to the current password.");
    }
    Tree pwTree = getPasswordTree(userTree, false);
    if (pwTree.exists()) {
        PropertyState pwHistoryProperty = pwTree.getProperty(UserConstants.REP_PWD_HISTORY);
        if (pwHistoryProperty != null) {
            for (String historyPwHash : Iterables.limit(pwHistoryProperty.getValue(Type.STRINGS), maxSize)) {
                if (PasswordUtil.isSame(historyPwHash, newPassword)) {
                    throw new PasswordHistoryException("New password was found in password history.");
                }
            }
        }
    }
}
From source file:com.xiaomi.linden.cluster.ResultMerger.java
private static LindenResult mergeGroupSearch(LindenSearchRequest lindenRequest, List<LindenResult> resultList) {
    LindenResult mergedResult = resultList.get(0);
    if (!mergedResult.isSetHits()) {
        mergedResult.setHits(new ArrayList<LindenHit>());
    }
    String groupField = lindenRequest.getGroupParam().getGroupField();
    int innerLimit = lindenRequest.getGroupParam().getGroupInnerLimit();
    // traverse the LindenResults from the remaining shards
    for (int i = 1; i < resultList.size(); ++i) {
        LindenResult subResult = resultList.get(i);
        if (!subResult.isSetHits()) {
            continue;
        }
        mergedResult.totalHits += subResult.totalHits;
        mergedResult.totalGroups = Math.max(mergedResult.totalGroups, subResult.totalGroups);
        mergedResult.totalGroupHits += subResult.totalGroupHits;
        // traverse the groups in the pending LindenResult
        for (LindenHit subGroup : subResult.getHits()) {
            String groupName = subGroup.getFields().get(groupField);
            boolean isFound = false;
            // find the group in the merged group list
            for (int j = 0; j < mergedResult.getHitsSize(); ++j) {
                LindenHit mergedHit = mergedResult.getHits().get(j);
                if (mergedHit.getFields().get(groupField).equals(groupName)) {
                    Iterable<LindenHit> groupIterable = Iterables.mergeSorted(
                            ImmutableList.of(subGroup.getGroupHits(), mergedHit.getGroupHits()),
                            new LindenHitCmp(null));
                    List<LindenHit> hits = Lists.newArrayList(Iterables.limit(groupIterable, innerLimit));
                    if (mergedHit.getScore() < subGroup.getScore()) {
                        mergedHit = subGroup;
                    }
                    mergedHit.setGroupHits(hits);
                    mergedResult.getHits().set(j, mergedHit);
                    isFound = true;
                    break;
                }
            }
            if (!isFound) {
                mergedResult.getHits().add(subGroup);
            }
        }
    }
    // sort the groups by score
    Ordering<LindenHit> ordering = new Ordering<LindenHit>() {
        @Override
        public int compare(@Nullable LindenHit left, @Nullable LindenHit right) {
            return Double.compare(left.getScore(), right.getScore());
        }
    };
    List<LindenHit> orderedHits = ordering.greatestOf(mergedResult.getHits(), mergedResult.getHitsSize());
    // keep the groups from offset to offset + size
    int from = lindenRequest.getOffset();
    int size = lindenRequest.getLength();
    if (from < orderedHits.size()) {
        List<LindenHit> subHits = orderedHits.subList(from, Math.min(from + size, orderedHits.size()));
        mergedResult.setHits(subHits);
    } else {
        mergedResult.setHits(new ArrayList<LindenHit>());
    }
    return mergedResult;
}
From source file:gaffer.rest.service.SimpleOperationService.java
protected <OUTPUT> Iterable<OUTPUT> executeGet(final Operation<?, Iterable<OUTPUT>> operation, final Integer n) {
    return null != n ? Iterables.limit(execute(operation), n) : execute(operation);
}
From source file:org.fcrepo.kernel.utils.iterators.RdfStream.java
/**
 * As {@link Iterables#limit(Iterable, int)} while maintaining context.
 *
 * @param limit the maximum number of elements to allow; a negative value means no limit
 * @return a limited version of this stream
 */
public RdfStream limit(final Integer limit) {
    if (limit < 0) {
        return this;
    }
    return withThisContext(Iterables.limit(this, limit));
}
From source file:org.apache.mahout.math.neighborhood.ProjectionSearch.java
/**
 * Searches for the query vector, returning the closest {@code limit} reference vectors.
 *
 * @param query the vector to search for.
 * @param limit the number of results to return.
 * @return a list of Vectors wrapped in WeightedThings where the "thing"'s weight is the
 *         distance.
 */
@Override
public List<WeightedThing<Vector>> search(Vector query, int limit) {
    Set<Vector> candidates = Sets.newHashSet();
    Iterator<? extends Vector> projections = basisMatrix.iterator();
    for (TreeMultiset<WeightedThing<Vector>> v : scalarProjections) {
        Vector basisVector = projections.next();
        WeightedThing<Vector> projectedQuery = new WeightedThing<Vector>(query, query.dot(basisVector));
        // collect up to searchSize candidates on each side of the projected query point
        for (WeightedThing<Vector> candidate : Iterables.concat(
                Iterables.limit(v.tailMultiset(projectedQuery, BoundType.CLOSED), searchSize),
                Iterables.limit(v.headMultiset(projectedQuery, BoundType.OPEN).descendingMultiset(),
                        searchSize))) {
            candidates.add(candidate.getValue());
        }
    }
    // If searchSize * scalarProjections.size() is small enough not to cause much memory pressure,
    // this is probably just as fast as a priority queue here.
    List<WeightedThing<Vector>> top = Lists.newArrayList();
    for (Vector candidate : candidates) {
        top.add(new WeightedThing<Vector>(candidate, distanceMeasure.distance(query, candidate)));
    }
    Collections.sort(top);
    return top.subList(0, Math.min(limit, top.size()));
}
From source file:edu.udo.scaffoldhunter.model.util.Scaffolds.java
private static int[] fillMap(Map<Scaffold, List<Integer>> map, Scaffold scaffold, ConfigMapping mapping,
        final PropertyDefinition propertyDefinition, boolean cumulative) {
    int[] dist = new int[mapping.getIntervals().size()];
    {
        // determine the distribution for the current Scaffold
        Predicate<Molecule> propertyNotNull = new Predicate<Molecule>() {
            @Override
            public boolean apply(Molecule input) {
                return input.getNumPropertyValue(propertyDefinition) != null;
            }
        };
        Set<Molecule> current = Sets.filter(scaffold.getMolecules(), propertyNotNull);
        int i = mapping.getIntervals().size() - 1;
        for (Interval interval : Iterables.limit(Lists.reverse(mapping.getIntervals()),
                mapping.getIntervals().size() - 1)) {
            Set<Molecule> filtered = Sets.filter(current,
                    new LesserOrEqual(propertyDefinition, interval.getLowerBound()));
            dist[i--] = current.size() - filtered.size();
            current = filtered;
        }
        dist[0] = current.size();
    }
    /*
     * determine distributions for children recursively and add them in the
     * cumulative case
     */
    for (Scaffold child : scaffold.getChildren()) {
        int[] childDistribution = fillMap(map, child, mapping, propertyDefinition, cumulative);
        if (cumulative) {
            for (int i = 0; i < dist.length; ++i) {
                dist[i] += childDistribution[i];
            }
        }
    }
    map.put(scaffold, Ints.asList(dist));
    return dist;
}
From source file:org.robotframework.ide.eclipse.main.plugin.project.build.causes.SimilaritiesAnalyst.java
private <T> Collection<T> limit(final Collection<T> elements) {
    return newArrayList(Iterables.limit(elements, limit));
}
From source file:org.apache.jackrabbit.oak.plugins.index.property.jmx.PropertyIndexStats.java
private CompositeData getStatsForIndex(String path, NodeState idx, int maxValueCount, int maxDepth,
        int maxPathCount) throws OpenDataException {
    Map<String, Object> result = new HashMap<String, Object>();

    // Add placeholders
    result.put("path", path);
    result.put("values", new String[0]);
    result.put("paths", new String[0]);
    result.put("valueCount", -1L);
    result.put("pathCount", -1);
    result.put("maxPathCount", maxPathCount);
    result.put("maxDepth", maxDepth);
    result.put("maxValueCount", maxValueCount);

    String status = "No index found at path " + path;
    NodeState data = idx.getChildNode(INDEX_CONTENT_NODE_NAME);
    if (data.exists()) {
        if (idx.getBoolean(UNIQUE_PROPERTY_NAME)) {
            status = "stats not supported for unique indexes";
        } else {
            long childNodeCount = data.getChildNodeCount(maxValueCount);
            if (childNodeCount == Long.MAX_VALUE || childNodeCount > maxValueCount) {
                status = String.format("stats cannot be determined as number of values exceed the max limit of "
                        + "[%d]. Estimated value count [%d]", maxValueCount, childNodeCount);
            } else {
                String[] values = Iterables.toArray(Iterables.limit(data.getChildNodeNames(), maxValueCount),
                        String.class);
                String[] paths = determineIndexedPaths(data.getChildNodeEntries(), maxDepth, maxPathCount);
                result.put("values", values);
                result.put("paths", paths);
                result.put("pathCount", paths.length);
                status = "Result determined and above path list can be safely used based on current indexed data";
            }
            result.put("valueCount", childNodeCount);
        }
    }
    result.put("status", status);
    return new CompositeDataSupport(getType(), result);
}
From source file:com.opengamma.integration.viewer.status.impl.ViewStatusResultAggregatorImpl.java
private List<List<Object>> createRowData(final Map<List<String>, Set<String>> fixedRow2Columns,
        final Set<String> extraColumns, List<ViewColumnType> columnTypes) {

    List<List<String>> rows = Lists.newArrayList(fixedRow2Columns.keySet());
    Comparator<List<String>> rowComparator = new Comparator<List<String>>() {
        @Override
        public int compare(List<String> left, List<String> right) {
            int compare = 0;
            for (int i = 0; i < left.size(); i++) {
                compare = left.get(i).compareTo(right.get(i));
                if (compare != 0) {
                    return compare;
                }
            }
            return compare;
        }
    };
    Collections.sort(rows, rowComparator);

    List<List<Object>> processedRows = Lists.newArrayListWithCapacity(rows.size());
    String[] currentRow = new String[Iterables.getFirst(rows, Lists.newArrayList()).size()];

    for (List<String> row : rows) {
        List<Object> processedRow = Lists.newArrayList();
        // all columns of the row except the last one
        Iterable<String> columns = Iterables.limit(row, row.size() - 1);
        int count = 0;
        for (String col : columns) {
            if (currentRow[count] == null || !col.equals(currentRow[count])) {
                currentRow[count] = col;
                processedRow.add(col);
            } else {
                processedRow.add(EMPTY_STR);
            }
            count++;
        }
        processedRow.add(Iterables.getLast(row));

        for (String col : extraColumns) {
            List<String> keyMembers = Lists.newArrayList(row);
            keyMembers.add(col);
            ViewStatus status = getStatus(keyFromRowValues(keyMembers, columnTypes));
            if (status == null) {
                processedRow.add(EMPTY_STR);
            } else {
                processedRow.add(status);
            }
        }
        processedRows.add(processedRow);
    }
    return processedRows;
}
From source file:org.diqube.executionenv.FlattenedTableInstanceManager.java
@PostConstruct
public void initialize() {
    // Use a CountCleanupStrategy that cleans up everything that was already evicted from the cache: If something
    // was evicted from the cache, we definitely won't offer it again, since we will not use that same versionId
    // again. Therefore we can free up the count memory of those.
    // Additionally we remove the counts of every version that is in #countCleanupCacheEntries. These are old
    // versions. If anybody still needs those versions, they must have flagged those elements in the cache,
    // otherwise their entries will have count 0 and that will most probably lead to them being evicted from the
    // cache on the next run.
    CountCleanupStrategy<Pair<String, String>, UUID> cacheCountCleanupStrategy = (countsForCleanup, allCounts) -> {
        Set<Pair<Pair<String, String>, UUID>> curCountCleanupCacheEntries = new HashSet<>();
        while (!countCleanupCacheEntries.isEmpty()) {
            try {
                curCountCleanupCacheEntries.add(countCleanupCacheEntries.pop());
            } catch (NoSuchElementException e) {
                // swallow -> two threads concurrently traversed countCleanupCacheEntries and our thread did not
                // get another element. That's fine. (Although this will not happen currently, since CountingCache
                // synchronizes.)
            }
        }

        Set<Pair<Pair<String, String>, UUID>> res = Sets.union(countsForCleanup,
                Sets.intersection(allCounts, curCountCleanupCacheEntries));

        logger.trace("Evicting old usage counts (limit): {}", Iterables.limit(res, 100));

        return res;
    };

    MemoryConsumptionProvider<FlattenedTableInfo> cacheMemoryConsumptionProvider = info -> info
            .getFlattenedTable().calculateApproximateSizeInBytes();

    cache = new CountingCache<>(flattenedTableCacheSizeMb * 1024L * 1024L, cacheMemoryConsumptionProvider,
            cacheCountCleanupStrategy);
}