Example usage for java.util Set containsAll

List of usage examples for java.util Set containsAll

Introduction

On this page you can find example usage for java.util Set containsAll.

Prototype

boolean containsAll(Collection<?> c);

Document

Returns true if this set contains all of the elements of the specified collection.
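Before the real-world usages below, here is a minimal, self-contained sketch (class and variable names are chosen purely for illustration) showing the behavior described above: containsAll returns true only when the set contains every element of the argument collection, and it is trivially true for an empty argument.

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class ContainsAllExample {
    public static void main(String[] args) {
        Set<String> granted = new HashSet<>(Arrays.asList("read", "write", "admin"));
        Set<String> required = new HashSet<>(Arrays.asList("read", "write"));

        // true: every required element is present in granted
        System.out.println(granted.containsAll(required));

        // false: required lacks "admin"
        System.out.println(required.containsAll(granted));

        // true: any set contains all elements of an empty collection
        System.out.println(granted.containsAll(Collections.emptySet()));
    }
}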

Usage

From source file:com.wolvereness.overmapped.lib.WellOrdered.java

public static <T, C extends List<? super T>> C process(final C out, final Iterable<? extends T> in,
        final Informer<T> informer) throws WellOrderedException {
    Validate.notNull(out, "Collection out cannot be null");
    Validate.notNull(in, "Token in cannot be null");
    Validate.notNull(informer, "Informer cannot be null");

    final Map<T, Collection<T>> preceding = newHashMap();
    final Map<T, Collection<T>> required = newHashMap();
    final Set<T> pending = newLinkedHashSet(in);

    { // Preprocessing of information from specified informer
        final List<T> buffer = newArrayList();
        for (final T token : pending) {

            // Preferred preceding elements
            informer.addPrecedingPreferencesTo(token, buffer);
            addToAsLinkedList(token, preceding, buffer);
            buffer.clear();

            // Required preceding elements
            informer.addPrecedingTo(token, buffer);
            if (!pending.containsAll(buffer))
                throw new UnmetPrecedingTokenException(token + " cannot be preceded by one of " + buffer
                        + " with only " + pending + " available");
            addToAsLinkedList(token, required, buffer);
            buffer.clear();

            // Preferred proceeding elements
            informer.addProceedingPreferencesTo(token, buffer);
            addToAllLinkedLists(buffer, preceding, token);
            buffer.clear();
        }
    }

    int size = pending.size();
    while (size != 0) {

        { // Start normal processing
            final Iterator<T> tokenIterator = pending.iterator();
            while (tokenIterator.hasNext()) {
                final T token = tokenIterator.next();
                if (
                // Use preceding as primary/first check;
                // required is covered by the fall-back
                handleTokens(token, preceding, pending) && handleTokens(token, required, pending)) {
                    tokenIterator.remove();
                    out.add(token);
                }
            }
        }

        if (size == (size = pending.size())) {
            // Fall-back situation when we can't find a token that's ready
            final Iterator<T> tokenIterator = pending.iterator();
            while (tokenIterator.hasNext()) {
                final T token = tokenIterator.next();
                // At this point, we ignore preferences
                if (handleTokens(token, required, pending)) {
                    tokenIterator.remove();
                    preceding.remove(token);
                    out.add(token);
                    break;
                }
            }

            if (size == (size = pending.size())) {
                // We made no progress; it's circular
                break;
            }
        }
    }

    if (size != 0)
        throw new CircularOrderException("Failed to resolve circular preceding requirements in " + required);

    return out;
}

From source file:org.apache.kylin.cube.CubeCapabilityChecker.java

private static void tryDimensionAsMeasures(Collection<FunctionDesc> unmatchedAggregations,
        CapabilityResult result, Set<TblColRef> dimCols) {

    Iterator<FunctionDesc> it = unmatchedAggregations.iterator();
    while (it.hasNext()) {
        FunctionDesc functionDesc = it.next();

        // let calcite handle count
        if (functionDesc.isCount()) {
            it.remove();
            continue;
        }

        // calcite can do aggregation from columns on-the-fly
        ParameterDesc parameterDesc = functionDesc.getParameter();
        if (parameterDesc == null) {
            continue;
        }
        List<TblColRef> neededCols = parameterDesc.getColRefs();
        if (neededCols.size() > 0 && dimCols.containsAll(neededCols)
                && FunctionDesc.BUILT_IN_AGGREGATIONS.contains(functionDesc.getExpression())) {
            result.influences.add(new CapabilityResult.DimensionAsMeasure(functionDesc));
            it.remove();
            continue;
        }
    }
}

From source file:org.apache.geode.management.internal.beans.QueryDataFunction.java

public static Object queryData(final String query, final String members, final int limit,
        final boolean zipResult, final int queryResultSetLimit, final int queryCollectionsDepth)
        throws Exception {

    if (query == null || query.isEmpty()) {
        return new JsonisedErrorMessage(ManagementStrings.QUERY__MSG__QUERY_EMPTY.toLocalizedString())
                .toString();
    }

    Set<DistributedMember> inputMembers = null;
    if (StringUtils.isNotBlank(members)) {
        inputMembers = new HashSet<>();
        StringTokenizer st = new StringTokenizer(members, ",");
        while (st.hasMoreTokens()) {
            String member = st.nextToken();
            DistributedMember distributedMember = BeanUtilFuncs.getDistributedMemberByNameOrId(member);
            inputMembers.add(distributedMember);
            if (distributedMember == null) {
                return new JsonisedErrorMessage(
                        ManagementStrings.QUERY__MSG__INVALID_MEMBER.toLocalizedString(member)).toString();
            }
        }
    }

    InternalCache cache = (InternalCache) CacheFactory.getAnyInstance();
    try {

        SystemManagementService service = (SystemManagementService) ManagementService
                .getExistingManagementService(cache);
        Set<String> regionsInQuery = compileQuery(cache, query);

        // Validate region existence
        if (regionsInQuery.size() > 0) {
            for (String regionPath : regionsInQuery) {
                DistributedRegionMXBean regionMBean = service.getDistributedRegionMXBean(regionPath);
                if (regionMBean == null) {
                    return new JsonisedErrorMessage(
                            ManagementStrings.QUERY__MSG__REGIONS_NOT_FOUND.toLocalizedString(regionPath))
                                    .toString();
                } else {
                    Set<DistributedMember> associatedMembers = DataCommandsUtils
                            .getRegionAssociatedMembers(regionPath, cache, true);

                    if (inputMembers != null && inputMembers.size() > 0) {
                        if (!associatedMembers.containsAll(inputMembers)) {
                            return new JsonisedErrorMessage(
                                    ManagementStrings.QUERY__MSG__REGIONS_NOT_FOUND_ON_MEMBERS
                                            .toLocalizedString(regionPath)).toString();
                        }
                    }
                }
            }
        } else {
            return new JsonisedErrorMessage(ManagementStrings.QUERY__MSG__INVALID_QUERY
                    .toLocalizedString("Region mentioned in query probably missing /")).toString();
        }

        // Validate
        if (regionsInQuery.size() > 1 && inputMembers == null) {
            for (String regionPath : regionsInQuery) {
                DistributedRegionMXBean regionMBean = service.getDistributedRegionMXBean(regionPath);

                if (regionMBean.getRegionType().equals(DataPolicy.PARTITION.toString())
                        || regionMBean.getRegionType().equals(DataPolicy.PERSISTENT_PARTITION.toString())) {
                    return new JsonisedErrorMessage(
                            ManagementStrings.QUERY__MSG__JOIN_OP_EX.toLocalizedString()).toString();
                }
            }
        }

        String randomRegion = regionsInQuery.iterator().next();

        Set<DistributedMember> associatedMembers = DataCommandsUtils
                .getQueryRegionsAssociatedMembers(regionsInQuery, cache, false);// First
        // available
        // member

        if (associatedMembers != null && associatedMembers.size() > 0) {
            Object[] functionArgs = new Object[6];
            if (inputMembers != null && inputMembers.size() > 0) {// on input
                // members

                functionArgs[DISPLAY_MEMBERWISE] = true;
                functionArgs[QUERY] = query;
                functionArgs[REGION] = randomRegion;
                functionArgs[LIMIT] = limit;
                functionArgs[QUERY_RESULTSET_LIMIT] = queryResultSetLimit;
                functionArgs[QUERY_COLLECTIONS_DEPTH] = queryCollectionsDepth;
                return callFunction(functionArgs, inputMembers, zipResult);
            } else { // Query on any random member
                functionArgs[DISPLAY_MEMBERWISE] = false;
                functionArgs[QUERY] = query;
                functionArgs[REGION] = randomRegion;
                functionArgs[LIMIT] = limit;
                functionArgs[QUERY_RESULTSET_LIMIT] = queryResultSetLimit;
                functionArgs[QUERY_COLLECTIONS_DEPTH] = queryCollectionsDepth;
                return callFunction(functionArgs, associatedMembers, zipResult);
            }

        } else {
            return new JsonisedErrorMessage(ManagementStrings.QUERY__MSG__REGIONS_NOT_FOUND
                    .toLocalizedString(regionsInQuery.toString())).toString();
        }

    } catch (QueryInvalidException qe) {
        return new JsonisedErrorMessage(
                ManagementStrings.QUERY__MSG__INVALID_QUERY.toLocalizedString(qe.getMessage())).toString();
    }
}

From source file:org.deri.iris.queryrewriting.RewritingUtils.java

/**
 * Checks whether there exists a homomorphism from {@link IRule} r1 to {@link IRule} r2 and, if so, returns it in the
 * {@link Map<IVariable,ITerm>} substitution.
 * @pre r1 and r2 have disjoint sets of variables. Since the substitution operates on these variables, the caller has
 *      to take care that the variables are properly renamed before calling this method.
 * @param r1 the first rule.
 * @param r2 the second rule.
 * @param substitution the homomorphism (if any).
 * @return whether the homomorphism exists.
 */
public static boolean mapsTo(final IRule r1, final IRule r2) {
    rep.incrementValue(RewMetric.MAPSTO_CHECK_COUNT);

    if (!r2.getPredicates().containsAll(r1.getPredicates()))
        return false;

    final Set<ILiteral> s1 = r1.getAllLiterals();
    final Set<ILiteral> s2 = r2.getAllLiterals();

    if (s2.containsAll(s1)) {
        MapsToCache.cache(s1, s2, MapsToCache.CacheType.MAPSTO);
        return true;
    }

    if (MapsToCache.inCache(s1, s2, MapsToCache.CacheType.NOT_MAPSTO)) {
        rep.incrementValue(RewMetric.NOT_MAPSTO_CACHE_HITS);
        return false;
    }

    if (MapsToCache.inCache(s1, s2, MapsToCache.CacheType.MAPSTO)) {
        rep.incrementValue(RewMetric.MAPSTO_CACHE_HITS);
        return true;
    }

    final Collection<ILiteral> sr1 = RenamingUtils.canonicalRenaming(s1, "T",
            new HashMap<IVariable, IVariable>());
    final Collection<ILiteral> sr2 = RenamingUtils.canonicalRenaming(s2, "U",
            new HashMap<IVariable, IVariable>());

    if (mapsTo(sr1, sr2)) {
        MapsToCache.cache(s1, s2, MapsToCache.CacheType.MAPSTO);
        return (true);
    } else {
        MapsToCache.cache(s1, s2, MapsToCache.CacheType.NOT_MAPSTO);
        return (false);
    }
}

From source file:com.boundary.zoocreeper.Restore.java

private static ACL readACL(JsonParser jp) throws IOException {
    expectCurrentToken(jp, JsonToken.START_OBJECT);
    String scheme = null;
    String id = null;
    int perms = -1;
    final Set<String> seenFields = Sets.newHashSet();
    while (jp.nextToken() != JsonToken.END_OBJECT) {
        jp.nextValue();
        final String fieldName = jp.getCurrentName();
        seenFields.add(fieldName);
        if (Backup.FIELD_ACL_SCHEME.equals(fieldName)) {
            scheme = jp.getValueAsString();
        } else if (Backup.FIELD_ACL_ID.equals(fieldName)) {
            id = jp.getValueAsString();
        } else if (Backup.FIELD_ACL_PERMS.equals(fieldName)) {
            perms = jp.getIntValue();
        } else {
            throw new IOException("Unexpected field: " + fieldName);
        }
    }
    if (!seenFields.containsAll(REQUIRED_ACL_FIELDS)) {
        throw new IOException("Missing required ACL fields: " + REQUIRED_ACL_FIELDS);
    }
    final Id zkId;
    if (Ids.ANYONE_ID_UNSAFE.getScheme().equals(scheme) && Ids.ANYONE_ID_UNSAFE.getId().equals(id)) {
        zkId = Ids.ANYONE_ID_UNSAFE;
    } else {
        zkId = new Id(scheme, id);
    }
    return new ACL(perms, zkId);
}

From source file:org.apache.kylin.cube.CubeCapabilityChecker.java

public static CapabilityResult check(CubeInstance cube, SQLDigest digest) {
    CapabilityResult result = new CapabilityResult();
    result.capable = false;

    // match joins is ensured at model select

    // dimensions & measures
    Collection<TblColRef> dimensionColumns = getDimensionColumns(digest);
    Collection<FunctionDesc> aggrFunctions = digest.aggregations;
    Collection<TblColRef> unmatchedDimensions = unmatchedDimensions(dimensionColumns, cube);
    Collection<FunctionDesc> unmatchedAggregations = unmatchedAggregations(aggrFunctions, cube);

    // try custom measure types
    tryCustomMeasureTypes(unmatchedDimensions, unmatchedAggregations, digest, cube, result);

    //more tricks
    String rootFactTable = cube.getRootFactTable();
    if (rootFactTable.equals(digest.factTable)) {
        //for query-on-facttable
        //1. dimension as measure

        if (!unmatchedAggregations.isEmpty()) {
            tryDimensionAsMeasures(unmatchedAggregations, result,
                    cube.getDescriptor().listDimensionColumnsIncludingDerived());
        }
    } else {
        //for non query-on-facttable 
        if (cube.getSegments().get(0).getSnapshots().containsKey(digest.factTable)) {

            Set<TblColRef> dimCols = Sets
                    .newHashSet(cube.getModel().findFirstTable(digest.factTable).getColumns());

            //1. all aggregations on lookup table can be done. For distinct count, mark them all DimensionAsMeasures
            // so that the measure has a chance to be upgraded to DimCountDistinctMeasureType in org.apache.kylin.metadata.model.FunctionDesc#reInitMeasureType
            if (!unmatchedAggregations.isEmpty()) {
                Iterator<FunctionDesc> itr = unmatchedAggregations.iterator();
                while (itr.hasNext()) {
                    FunctionDesc functionDesc = itr.next();
                    if (dimCols.containsAll(functionDesc.getParameter().getColRefs())) {
                        itr.remove();
                    }
                }
            }
            tryDimensionAsMeasures(Lists.newArrayList(aggrFunctions), result, dimCols);

            //2. more "dimensions" contributed by snapshot
            if (!unmatchedDimensions.isEmpty()) {
                unmatchedDimensions.removeAll(dimCols);
            }
        } else {
            logger.info("cube {} does not touch lookup table {} at all", cube.getName(), digest.factTable);
        }
    }

    if (!unmatchedDimensions.isEmpty()) {
        logger.info("Exclude cube " + cube.getName() + " because unmatched dimensions: " + unmatchedDimensions);
        return result;
    }

    if (!unmatchedAggregations.isEmpty()) {
        logger.info(
                "Exclude cube " + cube.getName() + " because unmatched aggregations: " + unmatchedAggregations);
        return result;
    }

    if (cube.getStorageType() == IStorageAware.ID_HBASE
            && MassInTupleFilter.containsMassInTupleFilter(digest.filter)) {
        logger.info("Exclude cube " + cube.getName()
                + " because only v2 storage + v2 query engine supports massin");
        return result;
    }

    if (digest.limitPrecedesAggr) {
        logger.info("Exclude cube " + cube.getName() + " because there's limit preceding aggregation");
        return result;
    }

    if (digest.isRawQuery && rootFactTable.equals(digest.factTable)) {
        result.influences.add(new CapabilityInfluence() {
            @Override
            public double suggestCostMultiplier() {
                return 100;
            }
        });
    }

    // cost will be minded by caller
    result.capable = true;
    return result;
}

From source file:org.deri.iris.queryrewriting.RewritingUtils.java

public static Set<IRule> decomposeQuery(final IRule query, final Map<IPosition, Set<IRule>> exPos,
        final List<IRule> tgds) {

    Set<IRule> queryComponents = new LinkedHashSet<IRule>();

    final Set<PositionJoin> exJoins = DepGraphUtils.computeExistentialJoins(query, tgds, exPos);
    final Set<PositionJoin> joins = DepGraphUtils.computePositionJoins(query);

    if (exJoins.containsAll(joins) || (query.getBody().size() == 1)) {
        queryComponents.add(query);
        return queryComponents;
    } else if (exJoins.isEmpty()) {
        // each atom is a component
        final Set<Set<ILiteral>> decomposition = Sets.newLinkedHashSet();
        for (final ILiteral l : query.getBody()) {
            decomposition.add(ImmutableSet.of(l));
        }
        queryComponents = constructQueryComponents(query, exPos, decomposition);
    } else {
        // explore the decomposition space
        final Set<Set<Set<ILiteral>>> currentLevelDecompositions = new LinkedHashSet<Set<Set<ILiteral>>>();

        // create a level 0 decomposition (i.e., only singletons)
        final Set<ILiteral> body = query.getBody();
        final Set<Set<ILiteral>> decomposition = new LinkedHashSet<Set<ILiteral>>();
        for (final ILiteral l : body) {
            decomposition.add(ImmutableSet.of(l));
        }
        currentLevelDecompositions.add(decomposition);

        int level = 1;
        do {
            Set<Set<Set<ILiteral>>> nextLevelDecompositions = new LinkedHashSet<Set<Set<ILiteral>>>();

            for (final Set<Set<ILiteral>> currentDecomposition : currentLevelDecompositions) {
                // check validity of the decomposition
                queryComponents = constructQueryComponents(query, exPos, currentDecomposition);
                if (validDecomposition(queryComponents, exJoins, tgds, exPos))
                    return queryComponents;
            }
            // compute next-level decompositions
            nextLevelDecompositions = mergeDecompositions(currentLevelDecompositions);
            currentLevelDecompositions.addAll(nextLevelDecompositions);
            nextLevelDecompositions.clear();
            level++;
        } while (level < body.size());
    }
    return queryComponents;
}

From source file:com.impetus.kundera.utils.KunderaCoreUtils.java

/**
 * Checks whether all the fields of the partition key are present in the JPA
 * query.
 * 
 * @param filterQueue
 * @param metaModel
 * @param metadata
 */
private static void isCompletePartitionKeyPresentInQuery(Queue filterQueue, MetamodelImpl metaModel,
        EntityMetadata metadata) {
    Set<String> partitionKeyFields = new HashSet<String>();
    populateEmbeddedIdFields(
            metaModel.embeddable(metadata.getIdAttribute().getBindableJavaType()).getAttributes(), metaModel,
            partitionKeyFields);

    Set<String> queryAttributes = new HashSet<String>();
    for (Object object : filterQueue) {
        if (object instanceof FilterClause) {
            FilterClause filter = (FilterClause) object;
            String property = filter.getProperty();
            String filterAttr[] = property.split("\\.");
            for (String s : filterAttr) {
                queryAttributes.add(s);
            }
        }
    }
    if (!queryAttributes.containsAll(partitionKeyFields)) {
        throw new QueryHandlerException("Incomplete partition key fields in query");
    }
}

From source file:com.boundary.zoocreeper.Restore.java

private static BackupZNode readZNode(JsonParser jp, String path) throws IOException {
    expectNextToken(jp, JsonToken.START_OBJECT);
    long ephemeralOwner = 0;
    byte[] data = null;
    final List<ACL> acls = Lists.newArrayList();
    final Set<String> seenFields = Sets.newHashSet();
    while (jp.nextToken() != JsonToken.END_OBJECT) {
        jp.nextValue();
        final String fieldName = jp.getCurrentName();
        seenFields.add(fieldName);
        if (Backup.FIELD_EPHEMERAL_OWNER.equals(fieldName)) {
            ephemeralOwner = jp.getLongValue();
        } else if (Backup.FIELD_DATA.equals(fieldName)) {
            if (jp.getCurrentToken() == JsonToken.VALUE_NULL) {
                data = null;
            } else {
                data = jp.getBinaryValue();
            }
        } else if (Backup.FIELD_ACLS.equals(fieldName)) {
            readACLs(jp, acls);
        } else {
            LOGGER.debug("Ignored field: {}", fieldName);
        }
    }
    if (!seenFields.containsAll(REQUIRED_ZNODE_FIELDS)) {
        throw new IOException("Missing required fields: " + REQUIRED_ZNODE_FIELDS);
    }
    return new BackupZNode(path, ephemeralOwner, data, acls);
}

From source file:net.sf.morph.util.TransformerUtils.java

private static Class[] getClassIntersection(Transformer[] transformers, ClassStrategy strategy) {
    Set s = ContainerUtils.createOrderedSet();
    s.addAll(Arrays.asList(strategy.get(transformers[0])));

    for (int i = 1; i < transformers.length; i++) {
        Set survivors = ContainerUtils.createOrderedSet();
        Class[] c = strategy.get(transformers[i]);
        for (int j = 0; j < c.length; j++) {
            if (s.contains(c[j])) {
                survivors.add(c[j]);
                break;
            }
            if (c[j] == null) {
                break;
            }
            for (Iterator it = s.iterator(); it.hasNext();) {
                Class next = (Class) it.next();
                if (next != null && next.isAssignableFrom(c[j])) {
                    survivors.add(c[j]);
                    break;
                }
            }
        }
        if (!survivors.containsAll(s)) {
            for (Iterator it = s.iterator(); it.hasNext();) {
                Class next = (Class) it.next();
                if (survivors.contains(next) || next == null) {
                    break;
                }
                for (int j = 0; j < c.length; j++) {
                    if (c[j] != null && c[j].isAssignableFrom(next)) {
                        survivors.add(next);
                        break;
                    }
                }
            }
        }
        s = survivors;
    }
    return s.isEmpty() ? CLASS_NONE : (Class[]) s.toArray(new Class[s.size()]);
}