Example usage for com.google.common.collect Sets union

Introduction

On this page you can find example usage for com.google.common.collect Sets.union.

Prototype

public static <E> SetView<E> union(final Set<? extends E> set1, final Set<? extends E> set2) 

Document

Returns an unmodifiable view of the union of two sets.
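
The result is a live, unmodifiable view: it reflects later changes to the two backing sets, and iterating it visits the elements of set1 first, followed by the elements of set2 that are not in set1. Below is a minimal, self-contained sketch of this behavior (class and variable names are illustrative):

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.collect.Sets.SetView;

import java.util.Set;

public class SetsUnionExample {
    public static void main(String[] args) {
        Set<String> first = Sets.newHashSet("a", "b");
        Set<String> second = Sets.newHashSet("b", "c");

        // The view contains every element found in either backing set.
        SetView<String> union = Sets.union(first, second);
        System.out.println(union.size()); // 3

        // The view is live: later changes to a backing set are visible through it.
        second.add("d");
        System.out.println(union.contains("d")); // true

        // Modifying the view itself is not allowed.
        // union.add("e"); // would throw UnsupportedOperationException

        // Take an immutable snapshot when a stable, independent copy is needed.
        ImmutableSet<String> snapshot = union.immutableCopy();
        System.out.println(snapshot.size()); // 4
    }
}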

Usage

From source file:com.google.security.zynamics.binnavi.debug.models.breakpoints.BreakpointManager.java

/**
 * This function enforces the type hierarchy of breakpoints.
 *
 * @param addresses The set of addresses for the breakpoints to be added.
 * @param type The type of the breakpoints to be added.
 *
 * @return The set of breakpoint addresses that were actually set.
 */
private Set<BreakpointAddress> enforceBreakpointHierarchy(final Set<BreakpointAddress> addresses,
        final BreakpointType type) {
    final SetView<BreakpointAddress> alreadyRegularBreakpoints = Sets.intersection(addresses,
            indexedBreakpointStorage.getBreakPointAddresses());
    final SetView<BreakpointAddress> alreadySteppingBreakpoints = Sets.intersection(addresses,
            stepBreakpointStorage.getBreakPointAddresses());
    final SetView<BreakpointAddress> alreadyEchoBreakpoints = Sets.intersection(addresses,
            echoBreakpointStorage.getBreakPointAddresses());

    Set<BreakpointAddress> addressesSet = null;

    switch (type) {
    case REGULAR:
        final SetView<BreakpointAddress> notInRegularBreakpoints = Sets.difference(addresses,
                indexedBreakpointStorage.getBreakPointAddresses());
        removeBreakpoints(alreadySteppingBreakpoints, stepBreakpointStorage);
        removeBreakpoints(alreadyEchoBreakpoints, echoBreakpointStorage);
        addressesSet = notInRegularBreakpoints;
        break;

    case STEP:
        final SetView<BreakpointAddress> notInSteppingBreakpoints = Sets.difference(addresses,
                stepBreakpointStorage.getBreakPointAddresses());
        removeBreakpoints(alreadyEchoBreakpoints, echoBreakpointStorage);
        addressesSet = Sets.difference(notInSteppingBreakpoints, alreadyRegularBreakpoints);
        break;

    case ECHO:
        final SetView<BreakpointAddress> notInEchoBreakPoints = Sets.difference(addresses,
                echoBreakpointStorage.getBreakPointAddresses());
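        // Also drop addresses already registered as stepping or regular breakpoints.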
        addressesSet = Sets.difference(notInEchoBreakPoints,
                Sets.union(alreadySteppingBreakpoints, alreadyRegularBreakpoints));
        break;
    default:
        throw new IllegalStateException("IE00722: Breakpoint of invalid type");

    }
    return addressesSet;
}

From source file:ai.grakn.graql.internal.reasoner.rule.InferenceRule.java

/**
 * @param parentAtom atom containing constraints (parent)
 * @param unifier unifier unifying parent with the rule
 * @return rule with propagated constraints from parent
 */
public InferenceRule propagateConstraints(Atom parentAtom, Unifier unifier) {
    if (!parentAtom.isRelation() && !parentAtom.isResource())
        return this;

    //only transfer value predicates if head has a user specified value variable
    Atom headAtom = head.getAtom();
    if (headAtom.isResource() && ((Resource) headAtom).getMultiPredicate().isEmpty()) {
        Set<ValuePredicate> valuePredicates = parentAtom.getValuePredicates().stream()
                .flatMap(vp -> vp.unify(unifier).stream()).collect(toSet());
        head.addAtomConstraints(valuePredicates);
        body.addAtomConstraints(valuePredicates);
    }

    Set<TypeAtom> unifiedTypes = parentAtom.getTypeConstraints().stream()
            .flatMap(type -> type.unify(unifier).stream()).collect(toSet());

    //set rule body types to sub types of combined query+rule types
    Set<TypeAtom> ruleTypes = body.getTypeConstraints().stream().filter(t -> !t.isRelation()).collect(toSet());
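    // Sets.union gives a lazy view combining the parent-derived types with the rule body types.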
    Set<TypeAtom> allTypes = Sets.union(unifiedTypes, ruleTypes);
    Set<TypeAtom> types = allTypes.stream().filter(ta -> {
        Type type = ta.getType();
        Type subType = allTypes.stream().map(Atom::getType).filter(Objects::nonNull)
                .filter(t -> ReasonerUtils.getSuperTypes(t).contains(type)).findFirst().orElse(null);
        return type == null || subType == null;
    }).collect(toSet());

    ruleTypes.forEach(body::removeAtomic);
    body.addAtomConstraints(types);

    return this;
}

From source file:org.eclipse.sw360.moderation.db.ModerationRequestGenerator.java

protected void dealWithCustomMap(U field) {
    Map<String, Set<String>> updateDocumentMap = CommonUtils
            .nullToEmptyMap((Map<String, Set<String>>) updateDocument.getFieldValue(field));
    Map<String, Set<String>> actualDocumentMap = CommonUtils
            .nullToEmptyMap((Map<String, Set<String>>) actualDocument.getFieldValue(field));
    if (updateDocumentMap.equals(actualDocumentMap)) {
        return;
    }

    Map<String, Set<String>> addMap = new HashMap<>();
    Map<String, Set<String>> deleteMap = new HashMap<>();
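    // Sets.union visits every key present in either the actual or the updated map.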
    for (String key : Sets.union(actualDocumentMap.keySet(), updateDocumentMap.keySet())) {
        Set<String> actualStrings = actualDocumentMap.get(key);
        Set<String> updateStrings = updateDocumentMap.get(key);
        Set<String> addedStrings = getAddedStrings(actualStrings, updateStrings);
        Set<String> deletedStrings = getDeletedStrings(actualStrings, updateStrings);
        if (!addedStrings.isEmpty()) {
            addMap.put(key, addedStrings);
        }
        if (!deletedStrings.isEmpty()) {
            deleteMap.put(key, deletedStrings);
        }
    }

    documentAdditions.setFieldValue(field, addMap);
    documentDeletions.setFieldValue(field, deleteMap);
}

From source file:org.fenixedu.ulisboa.specifications.ui.evaluation.managelooseevaluation.LooseEvaluationController.java

@RequestMapping(value = _CREATE_URI + "{scpId}/{executionSemesterId}", method = RequestMethod.GET)
public String create(@PathVariable("scpId") final StudentCurricularPlan studentCurricularPlan,
        @PathVariable("executionSemesterId") final ExecutionSemester executionSemester, final Model model) {

    model.addAttribute("studentCurricularPlan", studentCurricularPlan);
    model.addAttribute("LooseEvaluationBean_enrolment_options",
            studentCurricularPlan.getEnrolmentsSet().stream()
                    .filter(e -> e.getExecutionPeriod() == executionSemester)
                    .sorted(CurriculumLineServices.COMPARATOR).collect(Collectors.toList()));

    final boolean possibleOldData = executionSemester.getExecutionYear().getEndCivilYear() < 2016;
    final Stream<EvaluationSeason> evaluationSeasons = possibleOldData ? EvaluationSeasonServices.findAll()
            : EvaluationSeasonServices.findByActive(true);
    model.addAttribute("typeValues", evaluationSeasons.sorted(EvaluationSeasonServices.SEASON_ORDER_COMPARATOR)
            .collect(Collectors.toList()));

    model.addAttribute("gradeScaleValues",
            Arrays.<GradeScale>asList(GradeScale.values()).stream().map(
                    l -> new TupleDataSourceBean(((GradeScale) l).name(), ((GradeScale) l).getDescription()))
                    .collect(Collectors.<TupleDataSourceBean>toList()));

    model.addAttribute("improvementSemesterValues",
            ExecutionSemester.readNotClosedPublicExecutionPeriods().stream()
                    .sorted(ExecutionSemester.COMPARATOR_BY_BEGIN_DATE.reversed())
                    .collect(Collectors.toList()));

    model.addAttribute("executionSemester", executionSemester);

    final String url = String.format(
            "/academicAdministration/studentEnrolmentsExtended.do?scpID=%s&executionSemesterID=%s&method=prepare",
            studentCurricularPlan.getExternalId(), executionSemester.getExternalId());

    final String backUrl = GenericChecksumRewriter.injectChecksumInUrl(request.getContextPath(), url, session);
    model.addAttribute("backUrl", backUrl);

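    // Reduce the per-enrolment evaluation sets into a single set with Sets.union, then filter and sort.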
    final List<EnrolmentEvaluation> evaluations = studentCurricularPlan.getEnrolmentsSet().stream()
            .filter(e -> e.getExecutionPeriod() == executionSemester).map(l -> l.getEvaluationsSet())
            .reduce((a, c) -> Sets.union(a, c)).orElse(Sets.newHashSet()).stream()
            .filter(l -> l.getMarkSheet() == null && l.getCompetenceCourseMarkSheet() == null
                    && l.getGrade() != null && !l.getGrade().isEmpty())
            .sorted((x, y) -> CurriculumLineServices.COMPARATOR.compare(x.getEnrolment(), y.getEnrolment()))
            .collect(Collectors.toList());

    model.addAttribute("evaluationsSet", evaluations);

    return jspPage("create");
}

From source file:org.onosproject.store.primitives.impl.DatabaseManager.java

@Activate
public void activate() {
    localNodeId = clusterService.getLocalNode().id();

    Map<PartitionId, Set<NodeId>> partitionMap = Maps.newHashMap();
    clusterMetadataService.getClusterMetadata().getPartitions().forEach(p -> {
        partitionMap.put(p.getId(), Sets.newHashSet(p.getMembers()));
    });

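    // Union all partition member sets, then map each member node to its URI.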
    String[] activeNodeUris = partitionMap.values().stream().reduce((s1, s2) -> Sets.union(s1, s2)).get()
            .stream().map(this::nodeIdToUri).toArray(String[]::new);

    String localNodeUri = nodeIdToUri(clusterMetadataService.getLocalNode().id());
    Protocol protocol = new CopycatCommunicationProtocol(clusterService, clusterCommunicator);

    ClusterConfig clusterConfig = new ClusterConfig().withProtocol(protocol)
            .withElectionTimeout(electionTimeoutMillis(activeNodeUris))
            .withHeartbeatInterval(heartbeatTimeoutMillis(activeNodeUris)).withMembers(activeNodeUris)
            .withLocalMember(localNodeUri);

    CopycatConfig copycatConfig = new CopycatConfig().withName("onos").withClusterConfig(clusterConfig)
            .withDefaultSerializer(new DatabaseSerializer()).withDefaultExecutor(
                    Executors.newSingleThreadExecutor(new NamedThreadFactory("copycat-coordinator-%d")));

    coordinator = new DefaultClusterCoordinator(copycatConfig.resolve());

    Function<PartitionId, Log> logFunction = id -> id.asInt() == 0 ? newInMemoryLog() : newPersistentLog();

    Map<PartitionId, Database> databases = Maps.transformEntries(partitionMap, (k, v) -> {
        String[] replicas = v.stream().map(this::nodeIdToUri).toArray(String[]::new);
        DatabaseConfig config = newDatabaseConfig(String.format("p%s", k), logFunction.apply(k), replicas);
        return coordinator.<Database>getResource(config.getName(),
                config.resolve(clusterConfig).withSerializer(copycatConfig.getDefaultSerializer())
                        .withDefaultExecutor(copycatConfig.getDefaultExecutor()));
    });

    inMemoryDatabase = databases.remove(PartitionId.from(0));

    partitionedDatabase = new PartitionedDatabase("onos-store", databases.values());

    CompletableFuture<Void> status = coordinator.open().thenCompose(v -> CompletableFuture
            .allOf(inMemoryDatabase.open(), partitionedDatabase.open()).whenComplete((db, error) -> {
                if (error != null) {
                    log.error("Failed to initialize database.", error);
                } else {
                    log.info("Successfully initialized database.");
                }
            }));

    Futures.getUnchecked(status);

    AsyncConsistentMap<TransactionId, Transaction> transactions = this
            .<TransactionId, Transaction>consistentMapBuilder().withName("onos-transactions")
            .withSerializer(Serializer.using(KryoNamespaces.API, MapUpdate.class, MapUpdate.Type.class,
                    Transaction.class, Transaction.State.class))
            .buildAsyncMap();

    transactionManager = new TransactionManager(partitionedDatabase, transactions);
    partitionedDatabase.setTransactionManager(transactionManager);

    log.info("Started");
}

From source file:org.sonar.server.computation.issue.IssueComputation.java

private void copyRuleTags(DefaultIssue issue) {
    RuleDto rule = ruleCache.get(issue.ruleKey());
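    // The issue's tags become the union of the rule's user-defined tags and its system tags.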
    issue.setTags(Sets.union(rule.getTags(), rule.getSystemTags()));
}

From source file:org.onosproject.store.consistent.impl.DatabaseManager.java

@Activate
public void activate() {
    localNodeId = clusterService.getLocalNode().id();
    // load database configuration
    File databaseDefFile = new File(PARTITION_DEFINITION_FILE);
    log.info("Loading database definition: {}", databaseDefFile.getAbsolutePath());

    Map<String, Set<NodeInfo>> partitionMap;
    try {
        DatabaseDefinitionStore databaseDefStore = new DatabaseDefinitionStore(databaseDefFile);
        if (!databaseDefFile.exists()) {
            createDefaultDatabaseDefinition(databaseDefStore);
        }
        partitionMap = databaseDefStore.read().getPartitions();
    } catch (IOException e) {
        throw new IllegalStateException("Failed to load database config", e);
    }

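    // Union all partition member sets, then map each member node to its URI.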
    String[] activeNodeUris = partitionMap.values().stream().reduce((s1, s2) -> Sets.union(s1, s2)).get()
            .stream().map(this::nodeToUri).toArray(String[]::new);

    String localNodeUri = nodeToUri(NodeInfo.of(clusterService.getLocalNode()));
    Protocol protocol = new CopycatCommunicationProtocol(clusterService, clusterCommunicator);

    ClusterConfig clusterConfig = new ClusterConfig().withProtocol(protocol)
            .withElectionTimeout(electionTimeoutMillis(activeNodeUris))
            .withHeartbeatInterval(heartbeatTimeoutMillis(activeNodeUris)).withMembers(activeNodeUris)
            .withLocalMember(localNodeUri);

    CopycatConfig copycatConfig = new CopycatConfig().withName("onos").withClusterConfig(clusterConfig)
            .withDefaultSerializer(new DatabaseSerializer()).withDefaultExecutor(
                    Executors.newSingleThreadExecutor(new NamedThreadFactory("copycat-coordinator-%d")));

    coordinator = new DefaultClusterCoordinator(copycatConfig.resolve());

    DatabaseConfig inMemoryDatabaseConfig = newDatabaseConfig(BASE_PARTITION_NAME, newInMemoryLog(),
            activeNodeUris);
    inMemoryDatabase = coordinator.getResource(inMemoryDatabaseConfig.getName(),
            inMemoryDatabaseConfig.resolve(clusterConfig).withSerializer(copycatConfig.getDefaultSerializer())
                    .withDefaultExecutor(copycatConfig.getDefaultExecutor()));

    List<Database> partitions = partitionMap.entrySet().stream().map(entry -> {
        String[] replicas = entry.getValue().stream().map(this::nodeToUri).toArray(String[]::new);
        return newDatabaseConfig(entry.getKey(), newPersistentLog(), replicas);
    }).map(config -> {
        Database db = coordinator.getResource(config.getName(),
                config.resolve(clusterConfig).withSerializer(copycatConfig.getDefaultSerializer())
                        .withDefaultExecutor(copycatConfig.getDefaultExecutor()));
        return db;
    }).collect(Collectors.toList());

    partitionedDatabase = new PartitionedDatabase("onos-store", partitions);

    CompletableFuture<Void> status = coordinator.open().thenCompose(v -> CompletableFuture
            .allOf(inMemoryDatabase.open(), partitionedDatabase.open()).whenComplete((db, error) -> {
                if (error != null) {
                    log.error("Failed to initialize database.", error);
                } else {
                    log.info("Successfully initialized database.");
                }
            }));

    Futures.getUnchecked(status);

    transactionManager = new TransactionManager(partitionedDatabase, consistentMapBuilder());
    partitionedDatabase.setTransactionManager(transactionManager);

    log.info("Started");
}

From source file:org.apache.aurora.scheduler.scheduling.TaskSchedulerImpl.java

private Set<String> scheduleTasks(MutableStoreProvider store, Set<String> ids) {
    LOG.debug("Attempting to schedule tasks {}", ids);
    Map<String, IAssignedTask> tasksById = fetchTasks(store, ids);

    if (tasksById.isEmpty()) {
        // None of the tasks were found in storage.  This could be caused by a task group that was
        // killed by the user, for example.
        return ids;
    }

    // Prepare scheduling context for the tasks
    ITaskConfig task = Iterables.getOnlyElement(
            tasksById.values().stream().map(IAssignedTask::getTask).collect(Collectors.toSet()));
    AttributeAggregate aggregate = AttributeAggregate.getJobActiveState(store, task.getJob());

    // Attempt to schedule using available resources.
    Set<String> launched = assigner.maybeAssign(store,
            ResourceRequest.fromTask(task, executorSettings, aggregate, tierManager), TaskGroupKey.from(task),
            ImmutableSet.copyOf(tasksById.values()), reservations.asMap());

    attemptsFired.addAndGet(tasksById.size());

    // Fall back to preemption for tasks not scheduled above.
    Set<String> unassigned = Sets.difference(tasksById.keySet(), launched);
    unassigned.forEach(taskId -> {
        // TODO(maxim): Now that preemption slots are searched asynchronously, consider
        // retrying a launch attempt within the current scheduling round IFF a reservation is
        // available.
        maybePreemptFor(tasksById.get(taskId), aggregate, store);
    });
    attemptsNoMatch.addAndGet(unassigned.size());

    // Return all successfully launched tasks as well as those that weren't tried (not in PENDING).
    return Sets.union(launched, Sets.difference(ids, tasksById.keySet()));
}

From source file:com.google.gerrit.lucene.LuceneAccountIndex.java

private Set<String> fields(QueryOptions opts) {
    Set<String> fs = opts.fields();
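    // Lazily add the ID field to the requested fields without copying the set.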
    return fs.contains(ID.getName()) ? fs : Sets.union(fs, ImmutableSet.of(ID.getName()));
}

From source file:prm4j.indexing.model.ParametricPropertyModel.java

/**
 * Returns the set of parameter sets X such that only instances i with Dom(i) = X can carry monitors.
 *
 * @return all instance types which can carry monitors
 */
public Set<Set<Parameter<?>>> getMonitorInstanceTypes() {
    final Set<Set<Parameter<?>>> result = new HashSet<Set<Parameter<?>>>();
    for (BaseEvent baseEvent : getParametricProperty().getSpec().getBaseEvents()) {
        for (Set<Parameter<?>> enableParameterSet : getParametricProperty().getEnableParameterSets()
                .get(baseEvent)) {
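            // Each candidate monitor instance type is the union of the event's parameters and an enable set.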
            result.add(Sets.union(baseEvent.getParameters(), enableParameterSet));
        }
    }
    for (BaseEvent creationEvent : getParametricProperty().getCreationEvents()) {
        result.add(creationEvent.getParameters());
    }
    return result;
}