Example usage for com.google.common.collect Sets.filter

Introduction

On this page you can find example usage for com.google.common.collect.Sets.filter, collected from open-source projects.

Prototype

@GwtIncompatible("NavigableSet")
@SuppressWarnings("unchecked")
@CheckReturnValue
public static <E> NavigableSet<E> filter(NavigableSet<E> unfiltered, Predicate<? super E> predicate) 

Document

Returns the elements of a NavigableSet, unfiltered, that satisfy a predicate. The returned set is a live view of unfiltered; changes to one affect the other.
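
A minimal, self-contained sketch of this behavior; the class and variable names are illustrative only and do not come from any of the projects below:

import com.google.common.collect.Sets;

import java.util.NavigableSet;
import java.util.TreeSet;

public class SetsFilterDemo {
    public static void main(String[] args) {
        NavigableSet<Integer> numbers = new TreeSet<>();
        for (int i = 1; i <= 6; i++) {
            numbers.add(i);
        }

        // The result is a live view of the backing set, not a copy.
        NavigableSet<Integer> evens = Sets.filter(numbers, n -> n % 2 == 0);
        System.out.println(evens); // [2, 4, 6]

        // Mutations of the backing set show through the view.
        numbers.add(8);
        System.out.println(evens); // [2, 4, 6, 8]
    }
}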

Usage

From source file:com.google.api.explorer.client.base.ServiceLoader.java

/**
 * Load the directory document from either cache or the wire and notify the specified callback
 * when done.
 */
public void loadServiceDefinitions(final Callback<Set<ServiceDefinition>, String> callback) {
    if (directoryCache == null) {
        googleApi.loadApiDirectory(new AsyncCallback<Set<ServiceDefinition>>() {
            @Override
            public void onSuccess(Set<ServiceDefinition> unfiltered) {
                // Filter the list of services according to the blacklist.
                directoryCache = Sets.filter(unfiltered, new Predicate<ServiceDefinition>() {
                    @Override
                    public boolean apply(ServiceDefinition service) {
                        return !SERVICE_NAME_BLACKLIST.contains(service.getName())
                                && !SERVICE_ID_BLACKLIST.contains(service.getId());
                    }
                });

                callback.onSuccess(directoryCache);
                delegate.directoryLoaded(directoryCache);
            }

            @Override
            public void onFailure(Throwable caught) {
                callback.onFailure(caught.getMessage());
            }

        });
    } else {
        callback.onSuccess(directoryCache);
    }
}

From source file:com.siemens.sw360.moderation.db.ModerationDatabaseHandler.java

private ModerationRequest createStubRequest(String user, boolean isDeleteRequest, String documentId,
        Set<String> moderators) {
    final ModerationRequest request;

    List<ModerationRequest> requestByDocumentId = getRequestByDocumentId(documentId);
    Optional<ModerationRequest> firstModerationRequestOfUser = CommonUtils
            .getFirstModerationRequestOfUser(requestByDocumentId, user);
    if (firstModerationRequestOfUser.isPresent()
            && CommonUtils.isStillRelevant(firstModerationRequestOfUser.get())) {
        request = firstModerationRequestOfUser.get();
    } else {
        request = new ModerationRequest();
        request.setRequestingUser(user);
        request.setDocumentId(documentId);
    }

    request.setTimestamp(System.currentTimeMillis());
    request.setModerationState(ModerationState.PENDING);
    request.setRequestDocumentDelete(isDeleteRequest);
    request.setModerators(Sets.filter(moderators, notEmptyOrNull()));

    return request;

}
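
The notEmptyOrNull() helper above is project-specific and its source is not shown here. Assuming it rejects null references and empty strings, a comparable predicate can be composed from Guava's stock predicates, roughly like this (names are ours):

import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.Sets;

import java.util.Set;

public class NotEmptyOrNullDemo {
    // A plausible equivalent of the project's notEmptyOrNull() helper:
    // rejects null references and empty strings, keeps everything else.
    static Predicate<String> notEmptyOrNull() {
        return Predicates.and(
                Predicates.notNull(),
                Predicates.not(Predicates.equalTo("")));
    }

    public static void main(String[] args) {
        Set<String> moderators = Sets.newHashSet("alice", "", null, "bob");
        // Live view with the blank entries filtered out.
        System.out.println(Sets.filter(moderators, notEmptyOrNull())); // [alice, bob] (order may vary)
    }
}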

From source file:org.opentestsystem.delivery.testadmin.domain.schedule.Schedule.java

/**
 * Returns the ordered timeslots, or a filtered view if the argument is true. The filtered
 * view contains only the timeslots that start at or after the current time.
 *
 * @param rescheduleView whether to return only timeslots starting now or later
 * @return the ordered timeslots, possibly filtered
 */
public TreeSet<ScheduledTimeSlot> getOrderedTimeSlots(final boolean rescheduleView) {

    if (rescheduleView) {
        return Sets.newTreeSet(Sets.filter(this.orderedTimeSlots, new Predicate<ScheduledTimeSlot>() {

            @Override
            public boolean apply(final ScheduledTimeSlot timeSlot) {
                return timeSlot.getStartTime().isEqualNow() || timeSlot.getStartTime().isAfterNow();
            }
        }));
    } else {
        return this.orderedTimeSlots;
    }
}
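
Note the Sets.newTreeSet(...) wrapper in this example: Sets.filter returns a live view, and since the predicate here depends on the current clock, copying the view pins the result to evaluation time. A small sketch of the difference, with illustrative names of our own:

import com.google.common.collect.Sets;

import java.util.Arrays;
import java.util.NavigableSet;
import java.util.Set;
import java.util.TreeSet;

public class ViewVersusCopy {
    public static void main(String[] args) {
        NavigableSet<Integer> backing = new TreeSet<>(Arrays.asList(1, 2, 3));

        Set<Integer> view = Sets.filter(backing, n -> n > 1);                  // live view
        Set<Integer> copy = Sets.newTreeSet(Sets.filter(backing, n -> n > 1)); // snapshot

        backing.add(4);
        System.out.println(view); // [2, 3, 4] -- sees the new element
        System.out.println(copy); // [2, 3]    -- frozen when copied
    }
}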

From source file:org.brooth.jeta.apt.processors.MetaModuleProcessor.java

private Set<? extends Element> getScopeEntities(TypeElement scopeElement) {
    final String scopeClassStr = scopeElement.getQualifiedName().toString();
    final boolean isDefaultScope = defaultScopeStr != null && defaultScopeStr.equals(scopeClassStr);

    return Sets.filter(allMetaEntities, new Predicate<Element>() {
        public boolean apply(Element input) {
            final MetaEntity a = input.getAnnotation(MetaEntity.class);
            String scope = MetacodeUtils.extractClassName(new Runnable() {
                public void run() {
                    a.scope();
                }
            });

            if (scopeClassStr.equals(scope))
                return true;

            if (isVoid(scope)) {
                if (defaultScopeStr == null)
                    throw new ProcessingException("Scope undefined for '" + input.getSimpleName().toString()
                            + "'. "
                            + "You need to set the scope via @MetaEntity(scope) or define default one as 'inject.scope.default' property");
                if (isDefaultScope)
                    return true;
            }

            return false;
        }
    });
}

From source file:com.viadeo.kasper.core.component.query.interceptor.cache.QueryAttributesKeyGenerator.java

final Set<String> retainMissingNames(final Set<String> expectedFieldNames, final Set<Field> discoveredFields) {

    final Set<String> discoveredNames = Sets
            .newHashSet(Iterables.transform(discoveredFields, new Function<Field, String>() {
                @Override
                public String apply(Field input) {
                    return input.getName();
                }
            }));

    return Sets.filter(expectedFieldNames, new Predicate<String>() {
        @Override
        public boolean apply(final String input) {
            return !discoveredNames.contains(input);
        }
    });
}
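
This method hand-rolls a set difference with Sets.filter. Guava's Sets.difference expresses the same computation directly, and like Sets.filter it returns a live view; a hypothetical sketch (names are ours, not from the project):

import com.google.common.collect.Sets;

import java.util.Set;

public class MissingNamesDemo {
    public static void main(String[] args) {
        Set<String> expected = Sets.newHashSet("id", "name", "email");
        Set<String> discovered = Sets.newHashSet("id", "name");

        // Same result as filtering expected by "not contained in discovered".
        Set<String> missing = Sets.difference(expected, discovered);
        System.out.println(missing); // [email]
    }
}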

From source file:org.sosy_lab.cpachecker.util.predicates.weakening.InductiveWeakeningManager.java

/**
 * Find weakening of {@code lemmas} with respect to {@code transition}.
 * This method assumes to- and from- lemmas are the same, and drops both at
 * the same time.
 *
 * @param lemmas Set of uninstantiated lemmas.
 * @return inductive subset of {@code lemmas}
 */
public Set<BooleanFormula> findInductiveWeakeningForRCNF(final SSAMap startingSSA, final PathFormula transition,
        Set<BooleanFormula> lemmas) throws SolverException, InterruptedException {

    // Mapping from selectors to the items they annotate.
    final BiMap<BooleanFormula, BooleanFormula> selectionInfo = HashBiMap.create();

    List<BooleanFormula> fromStateLemmasInstantiated = fmgr.instantiate(lemmas, startingSSA);
    BooleanFormula fromStateLemmasAnnotated = annotateConjunctions(fromStateLemmasInstantiated, selectionInfo);
    BooleanFormula toStateLemmasAnnotated = fmgr.instantiate(fromStateLemmasAnnotated, transition.getSsa());

    final Set<BooleanFormula> toAbstract = findSelectorsToAbstract(selectionInfo, fromStateLemmasAnnotated,
            transition, toStateLemmasAnnotated, startingSSA, lemmas);

    Set<BooleanFormula> out = Sets.filter(lemmas,
            lemma -> (!toAbstract.contains(selectionInfo.inverse().get(fmgr.instantiate(lemma, startingSSA)))));
    assert checkAllMapsTo(out, startingSSA, out, transition.getSsa(), transition.getFormula());

    return out;
}

From source file:co.cask.cdap.data2.transaction.stream.FileStreamAdmin.java

@Override
public void configureGroups(Id.Stream streamId, Map<Long, Integer> groupInfo) throws Exception {
    Preconditions.checkArgument(!groupInfo.isEmpty(), "Consumer group information must not be empty.");

    LOG.info("Configure groups for {}: {}", streamId, groupInfo);

    StreamConfig config = StreamUtils.ensureExists(this, streamId);
    try (StreamConsumerStateStore stateStore = stateStoreFactory.create(config)) {
        Set<StreamConsumerState> states = Sets.newHashSet();
        stateStore.getAll(states);

        // Remove all groups that no longer exist. The offset information in those groups can be discarded.
        Set<StreamConsumerState> removeStates = Sets.newHashSet();
        for (StreamConsumerState state : states) {
            if (!groupInfo.containsKey(state.getGroupId())) {
                removeStates.add(state);
            }
        }

        // For each group, compute the new file offsets if needed
        Set<StreamConsumerState> newStates = Sets.newHashSet();
        for (Map.Entry<Long, Integer> entry : groupInfo.entrySet()) {
            final long groupId = entry.getKey();

            // Create a view of the old states that match the current groupId only.
            mutateStates(groupId, entry.getValue(), Sets.filter(states, new Predicate<StreamConsumerState>() {
                @Override
                public boolean apply(StreamConsumerState state) {
                    return state.getGroupId() == groupId;
                }
            }), newStates, removeStates);
        }

        // Save the states back
        if (!newStates.isEmpty()) {
            stateStore.save(newStates);
            LOG.info("Configure groups new states: {} {}", groupInfo, newStates);
        }
        if (!removeStates.isEmpty()) {
            stateStore.remove(removeStates);
            LOG.info("Configure groups remove states: {} {}", groupInfo, removeStates);
        }
    }
}

From source file:org.opendaylight.controller.md.sal.common.impl.service.TwoPhaseCommit.java

private Optional<RootedChangeSet<P, D>> resolveOperChange(P affectedPath) {
    Map<P, D> originalOper = dataBroker.deepGetBySubpath(transaction.getOriginalOperationalData(),
            affectedPath);
    Map<P, D> createdOper = dataBroker.deepGetBySubpath(transaction.getCreatedOperationalData(), affectedPath);
    Map<P, D> updatedOper = dataBroker.deepGetBySubpath(transaction.getUpdatedOperationalData(), affectedPath);
    Set<P> removedOper = Sets.filter(transaction.getRemovedOperationalData(),
            dataBroker.createIsContainedPredicate(affectedPath));
    return resolveChanges(affectedPath, originalOper, createdOper, updatedOper, removedOper);
}

From source file:dagger.internal.codegen.DuplicateBindingsValidator.java

private String incompatibleBindingsMessage(Key key, ImmutableSet<Binding> duplicateBindings,
        BindingGraph graph) {
    ImmutableSet<dagger.model.Binding> multibindings = duplicateBindings.stream()
            .filter(binding -> binding.kind().isMultibinding()).collect(toImmutableSet());
    verify(multibindings.size() == 1, "expected only one multibinding for %s: %s", key, multibindings);
    StringBuilder message = new StringBuilder();
    java.util.Formatter messageFormatter = new java.util.Formatter(message);
    messageFormatter.format("%s has incompatible bindings or declarations:\n", key);
    message.append(INDENT);
    dagger.model.Binding multibinding = getOnlyElement(multibindings);
    messageFormatter.format("%s bindings and declarations:", multibindingTypeString(multibinding));
    formatDeclarations(message, 2, declarations(graph, multibindings));

    Set<dagger.model.Binding> uniqueBindings = Sets.filter(duplicateBindings,
            binding -> !binding.equals(multibinding));
    message.append('\n').append(INDENT).append("Unique bindings and declarations:");
    formatDeclarations(message, 2, Sets.filter(declarations(graph, uniqueBindings),
            declaration -> !(declaration instanceof MultibindingDeclaration)));
    return message.toString();
}
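
This example mixes the two filtering styles available on Java 8+: java.util.stream (eager, materializes a new collection) and Sets.filter (a lazy view over the original set). A minimal comparison, with illustrative names of our own:

import com.google.common.collect.Sets;

import java.util.Set;
import java.util.TreeSet;
import java.util.stream.Collectors;

public class EagerVersusLazy {
    public static void main(String[] args) {
        Set<String> all = new TreeSet<>(Sets.newHashSet("a", "bb", "ccc"));

        // Eager: the stream pipeline copies the matches into a new set.
        Set<String> eager = all.stream()
                .filter(s -> s.length() > 1)
                .collect(Collectors.toSet());

        // Lazy: Sets.filter merely wraps the original set.
        Set<String> lazy = Sets.filter(all, s -> s.length() > 1);

        all.add("dddd");
        System.out.println(eager.contains("dddd")); // false -- snapshot
        System.out.println(lazy.contains("dddd"));  // true  -- live view
    }
}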

From source file:org.apache.fluo.recipes.map.CollisionFreeMap.java

void process(TransactionBase tx, Bytes ntfyRow, Column col) throws Exception {

    Bytes nextKey = tx.get(ntfyRow, NEXT_COL);

    ScannerConfiguration sc = new ScannerConfiguration();

    if (nextKey != null) {
        Bytes startRow = Bytes.newBuilder(ntfyRow.length() + nextKey.length()).append(ntfyRow).append(nextKey)
                .toBytes();
        Span tmpSpan = Span.prefix(ntfyRow);
        Span nextSpan = new Span(new RowColumn(startRow, UPDATE_COL), false, tmpSpan.getEnd(),
                tmpSpan.isEndInclusive());
        sc.setSpan(nextSpan);
    } else {
        sc.setSpan(Span.prefix(ntfyRow));
    }

    sc.fetchColumn(UPDATE_COL.getFamily(), UPDATE_COL.getQualifier());
    RowIterator iter = tx.get(sc);

    Map<Bytes, List<Bytes>> updates = new HashMap<>();

    long approxMemUsed = 0;

    Bytes partiallyReadKey = null;

    if (iter.hasNext()) {
        Bytes lastKey = null;
        while (iter.hasNext() && approxMemUsed < bufferSize) {
            Entry<Bytes, ColumnIterator> rowCol = iter.next();
            Bytes curRow = rowCol.getKey();

            tx.delete(curRow, UPDATE_COL);

            Bytes serializedKey = getKeyFromUpdateRow(ntfyRow, curRow);
            lastKey = serializedKey;

            List<Bytes> updateList = updates.get(serializedKey);
            if (updateList == null) {
                updateList = new ArrayList<>();
                updates.put(serializedKey, updateList);
            }

            Bytes val = rowCol.getValue().next().getValue();
            updateList.add(val);

            approxMemUsed += curRow.length();
            approxMemUsed += val.length();
        }

        if (iter.hasNext()) {
            Entry<Bytes, ColumnIterator> rowCol = iter.next();
            Bytes curRow = rowCol.getKey();

            // check if more updates for last key
            if (getKeyFromUpdateRow(ntfyRow, curRow).equals(lastKey)) {
                // there are still more updates for this key
                partiallyReadKey = lastKey;

                // start next time at the current key
                tx.set(ntfyRow, NEXT_COL, partiallyReadKey);
            } else {
                // start next time at the next possible key
                Bytes nextPossible = Bytes.newBuilder(lastKey.length() + 1).append(lastKey)
                        .append(new byte[] { 0 }).toBytes();
                tx.set(ntfyRow, NEXT_COL, nextPossible);
            }

            // may not read all data because of mem limit, so notify self
            tx.setWeakNotification(ntfyRow, col);
        } else if (nextKey != null) {
            // clear nextKey
            tx.delete(ntfyRow, NEXT_COL);
        }
    } else if (nextKey != null) {
        tx.delete(ntfyRow, NEXT_COL);
    }

    byte[] dataPrefix = ntfyRow.toArray();
    // TODO this is awful... no sanity check... hard to read
    dataPrefix[Bytes.of(mapId).length() + 1] = 'd';

    BytesBuilder rowBuilder = Bytes.newBuilder();
    rowBuilder.append(dataPrefix);
    int rowPrefixLen = rowBuilder.getLength();

    Set<Bytes> keysToFetch = updates.keySet();
    if (partiallyReadKey != null) {
        final Bytes prk = partiallyReadKey;
        keysToFetch = Sets.filter(keysToFetch, b -> !b.equals(prk));
    }
    Map<Bytes, Map<Column, Bytes>> currentVals = getCurrentValues(tx, rowBuilder, keysToFetch);

    ArrayList<Update<K, V>> updatesToReport = new ArrayList<>(updates.size());

    for (Entry<Bytes, List<Bytes>> entry : updates.entrySet()) {
        rowBuilder.setLength(rowPrefixLen);
        Bytes currentValueRow = rowBuilder.append(entry.getKey()).toBytes();
        Bytes currVal = currentVals.getOrDefault(currentValueRow, Collections.emptyMap()).get(DATA_COLUMN);

        Iterator<V> ui = Iterators.transform(entry.getValue().iterator(), this::deserVal);

        K kd = serializer.deserialize(entry.getKey().toArray(), keyType);

        if (partiallyReadKey != null && partiallyReadKey.equals(entry.getKey())) {
            // not all updates were read for this key, so requeue the combined updates as an update
            Optional<V> nv = combiner.combine(kd, ui);
            if (nv.isPresent()) {
                update(tx, Collections.singletonMap(kd, nv.get()));
            }
        } else {
            Optional<V> nv = combiner.combine(kd, concat(ui, currVal));
            Bytes newVal = nv.isPresent() ? Bytes.of(serializer.serialize(nv.get())) : null;
            if (newVal != null ^ currVal != null || (currVal != null && !currVal.equals(newVal))) {
                if (newVal == null) {
                    tx.delete(currentValueRow, DATA_COLUMN);
                } else {
                    tx.set(currentValueRow, DATA_COLUMN, newVal);
                }

                Optional<V> cvd = Optional.ofNullable(currVal).map(this::deserVal);
                updatesToReport.add(new Update<>(kd, cvd, nv));
            }
        }
    }

    // TODO could clear these as converted to objects to avoid double memory usage
    updates.clear();
    currentVals.clear();

    if (updatesToReport.size() > 0) {
        updateObserver.updatingValues(tx, updatesToReport.iterator());
    }
}