Example usage for com.google.common.collect Sets union

Introduction

On this page you can find example usages of com.google.common.collect Sets.union.

Prototype

public static <E> SetView<E> union(final Set<? extends E> set1, final Set<? extends E> set2) 

Document

Returns an unmodifiable view of the union of two sets.
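
The returned set contains every element that is in either backing set. Because the result is a live SetView rather than a copy, later changes to either backing set show through, and the view's own mutating methods throw UnsupportedOperationException. A minimal sketch of that behavior (the class and variable names are ours, for illustration only):

import com.google.common.collect.Sets;
import com.google.common.collect.Sets.SetView;

import java.util.Set;

public class SetsUnionDemo {
    public static void main(String[] args) {
        Set<String> first = Sets.newHashSet("a", "b");
        Set<String> second = Sets.newHashSet("b", "c");

        SetView<String> union = Sets.union(first, second);
        System.out.println(union.size()); // 3; "b" is counted only once

        // The view is live: a later change to a backing set shows through.
        second.add("d");
        System.out.println(union.contains("d")); // true

        // The view itself is unmodifiable:
        // union.add("e"); // would throw UnsupportedOperationException

        // Snapshot it when the backing sets will keep changing.
        Set<String> frozen = union.immutableCopy();
        System.out.println(frozen.size()); // 4
    }
}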

Usage

From source file:com.jivesoftware.os.tasmo.view.reader.service.ViewProvider.java

Map<TenantAndActor, Set<Id>> checkPermissions(Map<TenantAndActor, Set<Id>> permissionCheckTheseIds) {
    Map<TenantAndActor, Set<Id>> canViewTheseIds = new HashMap<>();
    for (TenantAndActor tenantAndActor : permissionCheckTheseIds.keySet()) { // 1 permissions check call per tenant and actor id tuple.
        Collection<Id> ids = permissionCheckTheseIds.get(tenantAndActor);
        ViewPermissionCheckResult checkResult = viewPermissionChecker.check(tenantAndActor.tenantId,
                tenantAndActor.actorId, new HashSet<>(ids));
        canViewTheseIds.put(tenantAndActor, Sets.union(checkResult.allowed(), checkResult.unknown())); // For now... TODO
    }
    return canViewTheseIds;
}

From source file:de.iteratec.iteraplan.businesslogic.service.ElasticMiContextAndStakeholderManagerInitializationServiceImpl.java

private Map<Role, de.iteratec.iteraplan.elasticmi.permission.Role> createRoles() {
    Map<Role, de.iteratec.iteraplan.elasticmi.permission.Role> roles = Maps.newHashMap();
    Set<String> allAtgNames = allAtgNames();
    Set<String> explicitAtgNames = Sets.newHashSet();
    for (Role iteraplanRole : roleService.loadElementList()) {
        de.iteratec.iteraplan.elasticmi.permission.Role elasticRole = stakeholderManager
                .createRole(iteraplanRole.getRoleName());
        roles.put(iteraplanRole, elasticRole);
        grantBbtPerms(iteraplanRole, elasticRole);
        Set<String> newExplicitAtgNames = grantAtgPerms(iteraplanRole, elasticRole);
        explicitAtgNames = Sets.union(explicitAtgNames, newExplicitAtgNames);
    }
    for (String atgName : allAtgNames) {
        if (!explicitAtgNames.contains(atgName)) {
            ElasticMiFeatureGroupPermission updatePerm = stakeholderManager
                    .findFeatureGroupPermissionByPersistentName(ElasticMiAccessLevel.UPDATE + ":" + atgName);
            for (de.iteratec.iteraplan.elasticmi.permission.Role role : stakeholderManager.getRoles()) {
                role.grantFeatureGroupPermission(updatePerm);
            }
        }
    }
    initSupervisorRole();
    return roles;
}

From source file:org.apache.gobblin.data.management.retention.policy.CombineRetentionPolicy.java

private Set<T> unionDatasetVersions(Collection<Set<T>> sets) {
    if (sets.size() <= 0) {
        return Sets.newHashSet();
    }
    Iterator<Set<T>> it = sets.iterator();
    Set<T> outputSet = it.next();
    while (it.hasNext()) {
        outputSet = Sets.union(outputSet, it.next());
    }
    return outputSet;
}
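
Both this example and the previous one build a union inside a loop, so each iteration wraps the prior result in another view; membership tests on the final set then walk the whole chain of views. A sketch of one alternative, a hypothetical helper of our own that chains the cheap views and copies once at the end:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.Collection;
import java.util.Set;

public final class UnionAll {
    // Chains the cheap views, then copies once to flatten them.
    static <T> ImmutableSet<T> unionAll(Collection<? extends Set<T>> sets) {
        Set<T> view = ImmutableSet.of();
        for (Set<T> s : sets) {
            view = Sets.union(view, s); // builds a view, copies nothing yet
        }
        // A single copy at the end; afterwards contains() no longer
        // walks the nested views on every lookup.
        return ImmutableSet.copyOf(view);
    }
}

Whether the final copy pays off depends on how often the result is queried afterwards; when the union is consumed a single time, the plain chained views used in the examples are perfectly adequate.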

From source file:org.eclipse.incquery.runtime.matchers.context.surrogate.SurrogateQueryRegistry.java

/**
 * @return an unmodifiable set that contains all features with surrogate queries.
 */
public Set<IInputKey> getAllSurrogateQueries() {
    return Sets.union(getRegisteredSurrogateQueriesInternal(), getDynamicSurrogateQueriesInternal());
}

From source file:org.apache.gobblin.source.extractor.extract.QueryBasedSource.java

@Override
public List<WorkUnit> getWorkunits(SourceState state) {
    initLogger(state);
    lineageInfo = LineageInfo.getLineageInfo(state.getBroker());

    List<WorkUnit> workUnits = Lists.newArrayList();

    // Map<String, String> tableNameToEntityMap = Maps.newHashMap();
    Set<SourceEntity> entities = getFilteredSourceEntities(state);

    Map<SourceEntity, State> tableSpecificPropsMap = shouldObtainTablePropsFromConfigStore(state)
            ? getTableSpecificPropsFromConfigStore(entities, state)
            : getTableSpecificPropsFromState(entities, state);
    Map<SourceEntity, Long> prevWatermarksByTable = getPreviousWatermarksForAllTables(state);

    for (SourceEntity sourceEntity : Sets.union(entities, prevWatermarksByTable.keySet())) {

        log.info("Source entity to be processed: {}, carry-over from previous state: {} ", sourceEntity,
                !entities.contains(sourceEntity));

        SourceState combinedState = getCombinedState(state, tableSpecificPropsMap.get(sourceEntity));
        long previousWatermark = prevWatermarksByTable.containsKey(sourceEntity)
                ? prevWatermarksByTable.get(sourceEntity)
                : ConfigurationKeys.DEFAULT_WATERMARK_VALUE;

        // If a table name exists in prevWatermarksByTable (i.e., it has a previous watermark) but does not exist
        // in tableNameToEntityMap, create an empty workunit for it, so that its previous watermark is preserved.
        // This is done by overriding the high watermark to be the same as the previous watermark.
        if (!entities.contains(sourceEntity)) {
            combinedState.setProp(ConfigurationKeys.SOURCE_QUERYBASED_END_VALUE, previousWatermark);
        }

        workUnits.addAll(generateWorkUnits(sourceEntity, combinedState, previousWatermark));
    }

    log.info("Total number of workunits for the current run: " + workUnits.size());
    List<WorkUnit> previousWorkUnits = this.getPreviousWorkUnitsForRetry(state);
    log.info("Total number of incomplete tasks from the previous run: " + previousWorkUnits.size());
    workUnits.addAll(previousWorkUnits);

    int numOfMultiWorkunits = state.getPropAsInt(ConfigurationKeys.MR_JOB_MAX_MAPPERS_KEY,
            ConfigurationKeys.DEFAULT_MR_JOB_MAX_MAPPERS);

    return pack(workUnits, numOfMultiWorkunits);
}

From source file:com.yahoo.yqlplus.engine.internal.plan.IndexedQueryPlanner.java

private void prepareQuery(Set<String> availableJoinColumns, QueryStrategy iq,
        OperatorNode<ExpressionOperator> filter, boolean exact) {
    // if we've issued a SCAN, skip this logic and just scan
    if (iq.scan) {
        return;
    }
    // we can handle the following scenarios:
    //   EQ READ_FIELD *
    //   EQ * READ_FIELD
    //   IN READ_FIELD *
    //   ZIP_MATCH
    //      (appropriate combinations of AND, EQ, and IN are transformed in the ZipMatchOperatorTransform)
    // we'll transform the input filter into
    //   candidate column ZipMatchOperators
    //   everything-else
    // logically these are ANDed together, but only the candidate column filters can be used for indexes
    // we'll match the most precise combination of columns for available indexes, and move any remaining columns to everything-else

    Map<String, OperatorNode<ExpressionOperator>> columns = Maps.newHashMap();
    List<OperatorNode<ExpressionOperator>> others = Lists.newArrayList();
    if (filter.getOperator() == ExpressionOperator.AND) {
        List<OperatorNode<ExpressionOperator>> clauses = filter.getArgument(0);
        for (OperatorNode<ExpressionOperator> clause : clauses) {
            processFilterClause(columns, others, clause);
        }
    } else {
        processFilterClause(columns, others, filter);
    }

    IndexKey index = matchIndex(Sets.union(availableJoinColumns, columns.keySet()), exact);
    if (index == null) {
        iq.scan = true;
        return;
    }

    // splendid, we matched an index -- rearrange the columns / others according to the matched index
    List<String> indexColumns = Lists.newArrayList(index.columnOrder);
    List<OperatorNode<ExpressionOperator>> unmatched = Lists.newArrayList();
    Iterator<Map.Entry<String, OperatorNode<ExpressionOperator>>> cols = columns.entrySet().iterator();
    while (cols.hasNext()) {
        Map.Entry<String, OperatorNode<ExpressionOperator>> clause = cols.next();
        if (!indexColumns.contains(clause.getKey())) {
            unmatched.add(clause.getValue());
            cols.remove();
        } else {
            indexColumns.remove(clause.getKey());
        }
    }
    if (!unmatched.isEmpty()) {
        others.add(OperatorNode.create(ExpressionOperator.AND, unmatched));
    }

    // add the entry to our query strategy
    IndexStrategy indexStrategy = new IndexStrategy(index, indexes.get(index));
    if (others.isEmpty()) {
        indexStrategy.filter = null;
    } else if (others.size() == 1) {
        indexStrategy.filter = others.get(0);
    } else {
        indexStrategy.filter = OperatorNode.create(ExpressionOperator.AND, others);
    }
    indexStrategy.indexFilter = columns;
    if (!indexColumns.isEmpty()) {
        indexStrategy.joinColumns = indexColumns;
    }
    iq.add(indexStrategy);
}

From source file:com.techshroom.wood.module.ModuleDependencySolver.java

ImmutableList<Module> computeDependencyOrder() {
    // Fast-track no module case
    if (this.moduleMap.isEmpty()) {
        return ImmutableList.of();
    }
    // If a node goes from A->B, A must be loaded AFTER B
    MutableGraph<ModuleMetadata> depGraph = GraphBuilder.directed().allowsSelfLoops(false)
            .expectedNodeCount(this.moduleMap.size()).build();
    // Insert all nodes before connecting
    this.moduleMap.values().stream().map(Module::getMetadata).forEach(depGraph::addNode);
    for (Module factory : this.moduleMap.values()) {
        ModuleMetadata data = factory.getMetadata();
        data.getLoadAfterModules().forEach(dep -> {
            Range<SemVer> acceptable = dep.getVersionRange();
            // Here, we must load data after meta, put data->meta
            depGraph.nodes().stream().filter(meta -> acceptable.contains(meta.getVersion())).findAny()
                    .ifPresent(meta -> {
                        // Do a check for existing edges going the other
                        // way
                        if (depGraph.edges().contains(EndpointPair.ordered(meta, data))) {
                            throw new IllegalStateException("Cannot have a two-way dependency. Found between "
                                    + Modules.getBasicRepresentation(data) + " and "
                                    + Modules.getBasicRepresentation(meta));
                        }
                        depGraph.putEdge(data, meta);
                    });
        });
        data.getLoadBeforeModules().forEach(dep -> {
            Range<SemVer> acceptable = dep.getVersionRange();
            // Here, we must load data before meta, put meta->data
            depGraph.nodes().stream().filter(meta -> acceptable.contains(meta.getVersion())).findAny()
                    .ifPresent(meta -> {
                        // Do a check for existing edges going the other
                        // way
                        if (depGraph.edges().contains(EndpointPair.ordered(data, meta))) {
                            throw new IllegalStateException("Cannot have a two-way dependency. Found between "
                                    + Modules.getBasicRepresentation(data) + " and "
                                    + Modules.getBasicRepresentation(meta));
                        }
                        depGraph.putEdge(meta, data);
                    });
        });
        data.getRequiredModules().forEach(dep -> {
            Range<SemVer> acceptable = dep.getVersionRange();
            // Here, we must load data after meta, put data->meta
            ModuleMetadata result = depGraph.nodes().stream()
                    .filter(meta -> acceptable.contains(meta.getVersion())).findAny().orElseThrow(() -> {
                        return new IllegalStateException("Missing required dependency " + dep);
                    });
            // Do a check for existing edges going the other
            // way
            if (depGraph.edges().contains(EndpointPair.ordered(result, data))) {
                throw new IllegalStateException("Cannot have a two-way dependency. Found between "
                        + Modules.getBasicRepresentation(data) + " and "
                        + Modules.getBasicRepresentation(result));
            }
            depGraph.putEdge(data, result);
        });
    }
    // Modules in dependency-loading order
    List<ModuleMetadata> dependencyOrder = new LinkedList<>();
    // The outDegree is the number of dependencies
    Set<ModuleMetadata> noDeps = depGraph.nodes().stream().filter(m -> depGraph.outDegree(m) == 0)
            .collect(Collectors.toSet());
    checkState(!noDeps.isEmpty(), "There must be at least one module with no dependencies.");
    // this set tracks encountered modules (i.e. child nodes)
    // that are not yet known to be satisfied by the modules in dependencyOrder
    Set<ModuleMetadata> encounteredNotSatisfied = new HashSet<>();
    // this set tracks satisfied modules
    // (but not yet added to dependencyOrder)
    // that have not been processed to find other modules
    Set<ModuleMetadata> satisfiedNotProcessed = new HashSet<>(noDeps);
    // Snapshot of the previous round's combined hash, used to detect progress
    int lastDepOrderSize = 0;
    while (!satisfiedNotProcessed.isEmpty() || lastDepOrderSize != Objects.hash(dependencyOrder,
            encounteredNotSatisfied, satisfiedNotProcessed)) {
        lastDepOrderSize = Objects.hash(dependencyOrder, encounteredNotSatisfied, satisfiedNotProcessed);
        // Process satisfied modules
        for (ModuleMetadata node : satisfiedNotProcessed) {
            dependencyOrder.add(node);
            // Load modules that depend on `node`
            // insert them into encountered
            depGraph.predecessors(node).forEach(dependent -> {
                encounteredNotSatisfied.add(dependent);
            });
        }
        // Clear satisfiedNotProcessed, after processing
        satisfiedNotProcessed.clear();
        // Process encountered nodes
        for (ModuleMetadata node : encounteredNotSatisfied) {
            // Calculate the load-after deps that might be satisfiable
            // Basically does a ID check against the available
            // dependencies.
            Set<ModuleDependency> satisfiableLoadAfters = getSatisfiableLoadAfters(depGraph.nodes(),
                    node.getLoadAfterModules());
            Set<ModuleDependency> deps = Sets.union(satisfiableLoadAfters, node.getRequiredModules());
            if (allDependenciesSatisified(dependencyOrder, deps)) {
                satisfiedNotProcessed.add(node);
            }
        }
        // Remove all satisfied
        encounteredNotSatisfied.removeAll(satisfiedNotProcessed);
    }
    if (encounteredNotSatisfied.size() > 0) {
        throw new IllegalStateException("Unsatisfied dependencies: " + encounteredNotSatisfied);
    }
    return FluentIterable.from(dependencyOrder).transform(ModuleMetadata::getId).transform(this.moduleMap::get)
            .toList();
}

From source file:org.eclipse.sirius.business.internal.session.IsModifiedSavingPolicy.java

/**
 * Computes the set of resources to save. This is a safe approximation of
 * the exact sub-set of resources in the scope whose serialization has
 * changed. Saving all the returned resources will produce the same result
 * as saving all the resources in the scope, but in the general case will
 * save far fewer resources (and thus be faster).
 * <p>
 * It may save more resources than strictly needed. For example if resource
 * A (not modified) contains references to elements in resource B
 * (modified), but the only references are to elements in B whose URI will
 * not change. In such a case we will save A anyway. More precise analyses
 * would be possible but cost-prohibitive.
 * <p>
 * {@inheritDoc}
 */
@Override
public Collection<Resource> computeResourcesToSave(Set<Resource> scope, Map<?, ?> options,
        IProgressMonitor monitor) {

    final Map<Object, Object> mergedOptions = new HashMap<Object, Object>(getDefaultSaveOptions());
    if (options != null) {
        mergedOptions.putAll(options);
    }

    Set<Resource> saveable = Sets.newLinkedHashSet(Iterables.filter(scope, new Predicate<Resource>() {

        @Override
        public boolean apply(Resource resourcetoSave) {
            return !ResourceSetSync.isReadOnly(resourcetoSave)
                    && !SiriusUtil.isModelerDescriptionFile(resourcetoSave);
        }

    }));

    /* We must save a resource if it has been logically modified ... */

    Set<Resource> logicallyModified = Sets.newLinkedHashSet(Iterables.filter(saveable, isModified));

    /*
     * ... or it references a resource which has been modified (in which
     * case the URIs to the referenced elements in that resource *may*
     * have changed)...
     */
    Set<Resource> dependOnLogicallyModified = Sets.newLinkedHashSet();
    if (logicallyModified.size() > 0) {
        Iterables.addAll(dependOnLogicallyModified, Iterables
                .filter(Sets.difference(saveable, logicallyModified), new ResourceHasReferenceTo(isModified)));
    }

    Predicate<Resource> exists = new Predicate<Resource>() {
        private URIConverter defaultConverter;

        @Override
        public boolean apply(Resource resourcetoSave) {
            ResourceSet rs = resourcetoSave.getResourceSet();
            URIConverter uriConverter = rs == null ? getDefaultURIConverter() : rs.getURIConverter();
            return uriConverter.exists(resourcetoSave.getURI(), mergedOptions);
        }

        private URIConverter getDefaultURIConverter() {
            if (defaultConverter == null) {
                defaultConverter = new ResourceSetImpl().getURIConverter();
            }
            return defaultConverter;
        }
    };
    Set<Resource> underlyingFileDoesNotExist = Sets
            .newLinkedHashSet(Iterables.filter(saveable, Predicates.not(exists)));
    Set<Resource> isConflictingOrDeleted = Sets
            .newLinkedHashSet(Iterables.filter(saveable, underlyingFileIsDeletedOrConflicting));
    /*
     * or the underlying file is out of date and must be recreated/updated
     * to match the version in memory.
     */
    Set<Resource> toSave = Sets.newLinkedHashSet();
    for (Resource resource : Sets.union(logicallyModified, dependOnLogicallyModified)) {
        if (hasDifferentSerialization(resource, mergedOptions)) {
            toSave.add(resource);
        } else {
            ResourceMigrationMarker.clearMigrationMarker(resource);

        }
    }

    Iterables.addAll(toSave, Sets.union(underlyingFileDoesNotExist, isConflictingOrDeleted));
    /*
     * if we have something to save which has no different serialization
     * then something is fishy...
     */
    return toSave;
}
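
The method above composes Sets.union with Sets.difference and a filtered view to assemble the save set. A minimal standalone sketch of that set-algebra style, with invented integer sets and a made-up predicate standing in for the real ones:

import com.google.common.collect.Sets;

import java.util.Set;

public class SaveSetSketch {
    public static void main(String[] args) {
        Set<Integer> saveable = Sets.newHashSet(1, 2, 3, 4, 5);
        Set<Integer> modified = Sets.newHashSet(1, 2);

        // Not modified themselves, but caught by a predicate, a made-up
        // stand-in for the ResourceHasReferenceTo check above.
        Set<Integer> dependent = Sets.filter(Sets.difference(saveable, modified), n -> n % 2 == 0);

        // Live union of the two groups, the same shape as the loop above.
        Set<Integer> toSave = Sets.union(modified, dependent);
        System.out.println(toSave.size()); // 3, i.e. {1, 2, 4}
    }
}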

From source file:org.eclipse.sirius.common.tools.api.ecore.WorkspaceEPackageRegistry.java

/**
 * {@inheritDoc}
 */
@Override
public Collection<Object> values() {
    return Sets.union(Sets.newLinkedHashSet(super.values()), Sets.newLinkedHashSet(delegated.values()));
}

From source file:org.diqube.executionenv.DefaultExecutionEnvironment.java

@Override
protected Map<String, QueryableColumnShard> delegateGetAllNonTemporaryColumnShards() {
    Map<String, QueryableColumnShard> res = new HashMap<>();

    Set<String> allColNames = new HashSet<>();

    if (tableShard != null) {
        allColNames.addAll(Sets.union(
                Sets.union(tableShard.getDoubleColumns().keySet(), tableShard.getLongColumns().keySet()),
                tableShard.getStringColumns().keySet()));
    }

    for (String colName : allColNames) {
        QueryableColumnShard colShard = getColumnShard(colName);
        res.put(colName, colShard);
    }

    return res;
}