Example usage for java.util Set addAll

List of usage examples for java.util Set addAll

Introduction

In this page you can find the example usage for java.util Set addAll.

Prototype

boolean addAll(Collection<? extends E> c);

Source Link

Document

Adds all of the elements in the specified collection to this set if they're not already present (optional operation).

Usage

From source file:fr.inria.oak.paxquery.algebra.optimizer.rules.PushProjections.java

/**
 * Determines the columns operator {@code op} must produce so that projections
 * can be pushed down: the union of what the operators above require and what
 * {@code op} itself needs from its input(s).
 *
 * @param op                   the operator whose output columns are computed
 * @param columnsRequiredAbove columns required by the operators above {@code op}
 * @param columnsRequiredBelow columns {@code op} requires from its input(s)
 * @return the (sorted) set of columns {@code op} must output
 */
private static Set<ProjectColumn> getOperatorColumns(BaseLogicalOperator op,
        Set<ProjectColumn> columnsRequiredAbove, Set<ProjectColumn> columnsRequiredBelow) {
    final Set<ProjectColumn> outputColumns = Sets.newTreeSet();

    if (op instanceof XMLConstruct || op instanceof XMLTreeConstruct) {
        // Construction operators simply forward what is required below.
        outputColumns.addAll(columnsRequiredBelow);
    } else if (op instanceof LeftOuterNestedJoin) {
        // Nested outer join: columns from the right input are folded into a
        // single nested column; when aggregating, the aggregation result
        // occupies the last output column.
        LeftOuterNestedJoin lonj = (LeftOuterNestedJoin) op;
        final boolean aggregate = lonj instanceof LeftOuterNestedJoinWithAggregation;
        final int aggregationCol = aggregate
                ? ((LeftOuterNestedJoinWithAggregation) lonj).getAggregationColumn()
                : -1;
        Set<Integer> predicateCols = PushdownUtility.getPredicateColumns(lonj.getPred());
        final int numberColsLeftInput = lonj.getLeft().getNRSMD().getColNo();
        // The nested column precedes the aggregation column when aggregating.
        final int nestedColumnPos = aggregate ? lonj.getNRSMD().getColNo() - 2 : lonj.getNRSMD().getColNo() - 1;
        final ProjectColumn nestedColumn = new ProjectColumn(nestedColumnPos);
        for (ProjectColumn column : columnsRequiredAbove) {
            if (column.pos == nestedColumnPos) {
                // Requirements on the nested column are collected inside it.
                for (ProjectColumn nested : column.nestedColumns) {
                    nestedColumn.nestedColumns.add(nested.copy());
                }
            } else {
                outputColumns.add(column.copy());
            }
        }
        for (ProjectColumn column : columnsRequiredBelow) {
            if (column.pos < numberColsLeftInput) {
                // Columns of the left input pass through unchanged.
                outputColumns.add(column.copy());
            } else if (!predicateCols.contains(column.pos)
                    && (!aggregate || column.pos != aggregationCol)) {
                // BUG FIX: the original condition
                //   !cols.contains(column.pos) && aggregate ? column.pos != aggregationCol : true
                // parsed as (!contains && aggregate) ? ... : true, so predicate
                // columns were always added to the nested column. The apparent
                // intent -- exclude predicate columns, and the aggregation
                // column when aggregating -- is made explicit here.
                nestedColumn.nestedColumns.add(column.copy(column.pos - numberColsLeftInput));
            }
        }
        outputColumns.add(nestedColumn);
        if (aggregate) {
            // Always keep the aggregation result column.
            outputColumns.add(new ProjectColumn(lonj.getNRSMD().getColNo() - 1));
        }
    } else if (op instanceof BaseJoinOperator || op instanceof CartesianProduct) {
        outputColumns.addAll(columnsRequiredBelow);
    } else if (op instanceof GroupBy) {
        // Grouping with (optional) aggregation: group-by/reduce-by keys and the
        // nested column are always kept.
        GroupBy gb = (GroupBy) op;
        final boolean aggregate = gb instanceof GroupByWithAggregation;
        final int nestedColumnPos = aggregate ? gb.getNRSMD().getColNo() - 2 : gb.getNRSMD().getColNo() - 1;
        final ProjectColumn nestedColumn = new ProjectColumn(nestedColumnPos);
        for (ProjectColumn column : columnsRequiredAbove) {
            if (column.pos == nestedColumnPos) {
                // Requirements on the nested column are collected inside it.
                for (ProjectColumn nested : column.nestedColumns) {
                    nestedColumn.nestedColumns.add(nested.copy());
                }
            } else {
                outputColumns.add(column.copy());
            }
        }
        outputColumns.add(nestedColumn);
        for (int pos : gb.getGroupByColumns()) {
            outputColumns.add(new ProjectColumn(pos));
        }
        for (int pos : gb.getReduceByColumns()) {
            outputColumns.add(new ProjectColumn(pos));
        }
        if (aggregate) {
            outputColumns.add(new ProjectColumn(gb.getNRSMD().getColNo() - 1));
        }
    } else if (op instanceof Aggregation) {
        // Aggregation: keep the result column unless the aggregation path has
        // exactly one step.
        Aggregation agg = (Aggregation) op;
        outputColumns.addAll(columnsRequiredBelow);
        if (agg.getAggregationPath().length != 1) {
            outputColumns.add(new ProjectColumn(agg.getNRSMD().getColNo() - 1));
        }
    } else if (op instanceof Selection || op instanceof DuplicateElimination) {
        // Selection / duplicate elimination: pass-through.
        outputColumns.addAll(columnsRequiredBelow);
    } else if (op instanceof Navigation) {
        // Navigation: required inputs plus the columns the navigation adds.
        Navigation pnop = (Navigation) op;
        outputColumns.addAll(getRequiredInputColumns(pnop, columnsRequiredAbove));
        outputColumns.addAll(generateColumnSet(op.getNRSMD(), pnop.getChild().getNRSMD().getColNo()));
    } else if (op instanceof XMLScan) {
        // XMLScan: a scan must produce every column of its schema.
        outputColumns.addAll(generateFullColumnSet(op.getNRSMD()));
    }

    return outputColumns;
}

From source file:org.opendatakit.security.server.SecurityServiceUtil.java

/**
 * Given a collection of users, ensure that each user is a registered user
 * (creating a registered user if one doesn't exist).
 * <p>
 * The collection is assumed to be exhaustive. Users not in the list will be
 * marked as removed.
 * </p>
 *
 * @param users the exhaustive list of users to assert
 * @param cc the calling context
 * @return map of users to their Uri strings
 * @throws DatastoreFailureException if a datastore access fails mid-update
 * @throws AccessDeniedException if the current user may not perform the update
 */
private static Map<UserSecurityInfo, String> setUsers(ArrayList<UserSecurityInfo> users, CallingContext cc)
        throws DatastoreFailureException, AccessDeniedException {
    List<UserSecurityInfo> allUsersList = getAllUsers(false, cc);

    // Users currently known but absent from the supplied (exhaustive) list are
    // the ones to be removed.
    Set<UserSecurityInfo> removedUsers = new TreeSet<UserSecurityInfo>();
    removedUsers.addAll(allUsersList);
    removedUsers.removeAll(users);

    Datastore ds = cc.getDatastore();
    User user = cc.getCurrentUser();

    Map<UserSecurityInfo, String> pkMap = new HashMap<UserSecurityInfo, String>();
    try {
        // Mark absent registered users as removed (soft delete).
        for (UserSecurityInfo u : removedUsers) {
            if (u.getType() != UserType.REGISTERED) {
                continue;
            }
            RegisteredUsersTable t;
            if (u.getUsername() == null) {
                // No username: look the user up by e-mail instead.
                t = RegisteredUsersTable.getUniqueUserByEmail(u.getEmail(), ds, user);
            } else {
                t = RegisteredUsersTable.getUniqueUserByUsername(u.getUsername(), ds, user);
            }
            if (t != null) {
                t.setIsRemoved(true);
                ds.putEntity(t, user);
            }
        }
        // Go through all other users. Assert that they exist; this updates the
        // fields to match those specified, and records each primary key.
        for (UserSecurityInfo u : users) {
            if (u.getType() != UserType.REGISTERED) {
                continue;
            }
            RegisteredUsersTable t = RegisteredUsersTable.assertActiveUserByUserSecurityInfo(u, cc);
            pkMap.put(u, t.getUri());
        }
    } catch (ODKDatastoreException e) {
        // Wrap with the cause preserved; don't printStackTrace() here -- the
        // caller decides how the failure is reported (avoids double logging).
        throw new DatastoreFailureException("Incomplete security update", e);
    }
    return pkMap;
}

From source file:fll.scheduler.TableOptimizer.java

/**
 * Walk over the schedule and figure out how tables are grouped. If the
 * schedule uses alternating tables the returned list will have more than 1
 * element.
 */
private static List<List<String>> determineTableGroups(final TournamentSchedule schedule) {
    final Map<LocalTime, Set<String>> tablesAtTime = gatherTablesAtTime(schedule);

    // First pass: merge each time slot's table set into the first existing
    // group it overlaps with; otherwise the set starts a new group.
    final List<Set<String>> tableGroups = new ArrayList<>();
    for (final Map.Entry<LocalTime, Set<String>> entry : tablesAtTime.entrySet()) {
        final Set<String> toFind = entry.getValue();

        boolean found = false;
        for (int i = 0; i < tableGroups.size() && !found; ++i) {
            final Set<String> group = tableGroups.get(i);
            if (containsAny(group, toFind)) {
                // NOTE(review): when toFind itself was previously added as a
                // group, addAll mutates the set stored inside tablesAtTime too
                // (the map value is aliased) -- appears intentional; confirm.
                group.addAll(toFind);
                found = true;
            }
        } // foreach known table group

        if (!found) {
            // create new grouping
            tableGroups.add(toFind);
        }
    } // foreach group of tables in the schedule

    // Second pass: consolidate the existing groups, since a single pass above
    // can leave groups that only became linked by a later merge.
    final List<List<String>> finalGroups = new ArrayList<>();
    if (tableGroups.size() > 1) {
        final List<String> firstGroup = new ArrayList<>(tableGroups.remove(0));
        finalGroups.add(firstGroup);

        while (!tableGroups.isEmpty()) {
            final List<String> toFind = new ArrayList<>(tableGroups.remove(0));

            boolean found = false;
            for (int i = 0; i < finalGroups.size() && !found; ++i) {
                final List<String> group = finalGroups.get(i);
                if (containsAny(group, toFind)) {
                    // NOTE(review): lists may accumulate duplicate table names
                    // here (no set semantics) -- presumably harmless; confirm.
                    group.addAll(toFind);
                    found = true;
                }
            } // foreach known table group

            if (!found) {
                // create new grouping
                finalGroups.add(toFind);
            }
        }

    } else {
        // Zero or one group: just convert the set(s) to list(s).
        for (final Set<String> group : tableGroups) {
            finalGroups.add(new ArrayList<String>(group));
        }
    }
    return finalGroups;

}

From source file:com.espertech.esper.epl.core.ResultSetProcessorFactoryFactory.java

/**
 * Collects every property referenced by the given group-by expression nodes.
 *
 * @param groupByNodes the group-by expressions to inspect
 * @return the set of (stream, property-name) pairs referenced
 * @throws ExprValidationException if any group-by expression references no property
 */
private static Set<Pair<Integer, String>> getGroupByProperties(ExprNode[] groupByNodes)
        throws ExprValidationException {
    Set<Pair<Integer, String>> propertiesGroupBy = new HashSet<Pair<Integer, String>>();

    for (ExprNode groupByNode : groupByNodes) {
        // Visit the expression tree to gather the identifiers it references.
        ExprNodeIdentifierVisitor visitor = new ExprNodeIdentifierVisitor(true);
        groupByNode.accept(visitor);
        List<Pair<Integer, String>> nodeProperties = visitor.getExprProperties();

        // Every group-by expression must reference at least one property.
        if (nodeProperties.isEmpty()) {
            throw new ExprValidationException("Group-by expressions must refer to property names");
        }
        propertiesGroupBy.addAll(nodeProperties);
    }

    return propertiesGroupBy;
}

From source file:com.streamsets.pipeline.lib.util.ProtobufTypeUtil.java

/**
 * Resolves the direct dependencies of {@code file} into built
 * {@link Descriptors.FileDescriptor} instances, recursing to build transitive
 * dependencies that have not been built yet.
 *
 * @param dependenciesMap   previously computed dependency sets, keyed by file name
 * @param fileDescriptorMap previously built file descriptors, keyed by file name
 * @param file              the proto file whose dependencies are resolved
 * @param set               the full descriptor set searched for dependency protos
 * @return the built descriptor for each direct dependency of {@code file},
 *         in dependency-list order
 * @throws StageException if a dependency is missing from the set (PROTOBUF_01)
 *         or fails validation when built (PROTOBUF_07)
 */
private static Set<Descriptors.FileDescriptor> getDependencies(
        Map<String, Set<Descriptors.FileDescriptor>> dependenciesMap,
        Map<String, Descriptors.FileDescriptor> fileDescriptorMap, DescriptorProtos.FileDescriptorProto file,
        DescriptorProtos.FileDescriptorSet set) throws StageException {
    Set<Descriptors.FileDescriptor> result = new LinkedHashSet<>();
    for (String name : file.getDependencyList()) {
        // Locate the proto for this dependency within the descriptor set.
        DescriptorProtos.FileDescriptorProto fileDescriptorProto = null;
        for (DescriptorProtos.FileDescriptorProto fdp : set.getFileList()) {
            if (name.equals(fdp.getName())) {
                fileDescriptorProto = fdp;
                break;
            }
        }
        if (fileDescriptorProto == null) {
            // could not find the message type from all the proto files contained in the descriptor file
            throw new StageException(Errors.PROTOBUF_01, file.getName());
        }
        Descriptors.FileDescriptor fileDescriptor;
        if (fileDescriptorMap.containsKey(fileDescriptorProto.getName())) {
            // Already built -- reuse the cached descriptor.
            fileDescriptor = fileDescriptorMap.get(fileDescriptorProto.getName());
        } else {
            Set<Descriptors.FileDescriptor> deps = new LinkedHashSet<>();
            if (dependenciesMap.containsKey(name)) {
                deps.addAll(dependenciesMap.get(name));
            } else {
                // Recurse to resolve this dependency's own dependencies first.
                deps.addAll(getDependencies(dependenciesMap, fileDescriptorMap, fileDescriptorProto, set));
            }
            try {
                // Zero-length array is the idiomatic Collection.toArray form.
                fileDescriptor = Descriptors.FileDescriptor.buildFrom(fileDescriptorProto,
                        deps.toArray(new Descriptors.FileDescriptor[0]));
            } catch (Descriptors.DescriptorValidationException e) {
                throw new StageException(Errors.PROTOBUF_07, e.getDescription(), e);
            }
        }
        result.add(fileDescriptor);
    }
    return result;
}

From source file:io.seldon.importer.articles.AttributesImporterUtils.java

/**
 * Extracts the tag strings for an article from the elements matched by
 * {@code tagsCssSelector}, plus extra tags derived from the title.
 *
 * @param articleDoc      the parsed article document
 * @param tagsCssSelector CSS selector locating the tag element(s); may be blank
 * @param title           the article title used to derive extra tags
 * @return the set of tags; empty if the selector is blank
 */
public static Set<String> getTags(Document articleDoc, String tagsCssSelector, String title) {
    Set<String> tagSet = new HashSet<String>();

    if (StringUtils.isNotBlank(tagsCssSelector)) {
        Elements tagsElements = articleDoc.select(tagsCssSelector);
        Element tagsElement = tagsElements.first();
        List<String> tagsParts;
        // StringUtils.isNotBlank is null-safe, so the original's separate
        // null check on the "content" attribute was redundant.
        if ((tagsElement != null) && StringUtils.isNotBlank(tagsElement.attr("content"))) {
            // Single element carrying all tags in its "content" attribute.
            tagsParts = AttributesImporterUtils.getTagsPartsFromSingleElement(tagsElement);
        } else {
            // Otherwise each matched element contributes its own tag(s).
            tagsParts = AttributesImporterUtils.getTagsPartsFromMultipleElement(tagsElements);
        }
        List<String> extraTagsParts = AttributesImporterUtils.createExtraTagsPartsFromTitle(title, tagsParts);
        tagSet.addAll(tagsParts);
        tagSet.addAll(extraTagsParts);
    }

    return tagSet;
}

From source file:com.alibaba.jstorm.daemon.worker.Worker.java

/**
 * get current task's output task list/*from  w  ww .ja  v a  2  s. c  om*/
 */
public static Set<Integer> worker_output_tasks(WorkerData workerData) {
    ContextMaker context_maker = workerData.getContextMaker();
    Set<Integer> task_ids = workerData.getTaskids();
    StormTopology topology = workerData.getSysTopology();

    Set<Integer> rtn = new HashSet<Integer>();

    for (Integer taskid : task_ids) {
        TopologyContext context = context_maker.makeTopologyContext(topology, taskid, null);

        // <StreamId, <ComponentId, Grouping>>
        Map<String, Map<String, Grouping>> targets = context.getThisTargets();
        for (Map<String, Grouping> e : targets.values()) {
            for (String componentId : e.keySet()) {
                List<Integer> tasks = context.getComponentTasks(componentId);
                rtn.addAll(tasks);
            }
        }
    }

    return rtn;
}

From source file:edu.stanford.muse.xword.Crossword.java

/** Accumulates the cleaned tokens of every own name, preserving first-seen order. */
public static Set<String> getTabooTokensFromOwnNames(Set<String> ownNames) {
    Set<String> tokens = new LinkedHashSet<String>();
    for (String name : ownNames) {
        tokens.addAll(cleanAndTokenize(name));
    }
    return tokens;
}

From source file:net.sourceforge.fenixedu.domain.Professorship.java

/**
 * Collects the professorships of every curricular course in the given degree
 * curricular plans, optionally restricted to one execution year.
 *
 * @param degreeCurricularPlans the plans whose courses are inspected
 * @param executionYear the year to restrict to, or {@code null} for all years
 * @return the distinct professorships found
 */
public static List<Professorship> readByDegreeCurricularPlansAndExecutionYear(
        List<DegreeCurricularPlan> degreeCurricularPlans, ExecutionYear executionYear) {

    // A set deduplicates professorships reachable through several plans.
    Set<Professorship> result = new HashSet<Professorship>();
    for (DegreeCurricularPlan plan : degreeCurricularPlans) {
        for (CurricularCourse course : plan.getCurricularCoursesSet()) {
            if (executionYear == null) {
                // No year given: consider every associated execution course.
                for (ExecutionCourse executionCourse : course.getAssociatedExecutionCoursesSet()) {
                    result.addAll(executionCourse.getProfessorshipsSet());
                }
            } else {
                // Restrict to the execution courses of the requested year.
                for (ExecutionCourse executionCourse : course
                        .getExecutionCoursesByExecutionYear(executionYear)) {
                    result.addAll(executionCourse.getProfessorshipsSet());
                }
            }
        }
    }
    return new ArrayList<Professorship>(result);
}

From source file:com.frostwire.gui.bittorrent.TorrentUtil.java

/**
 * Gathers the skipped files of every active download manager.
 * NOTE(review): the misspelled name ("Skiped") is kept for caller compatibility.
 */
public static Set<File> getSkipedFiles() {
    Set<File> skipped = new HashSet<File>();
    if (!AzureusStarter.isAzureusCoreStarted()) {
        // Core not running: nothing to collect.
        return skipped;
    }
    List<?> downloadManagers = AzureusStarter.getAzureusCore().getGlobalManager().getDownloadManagers();
    for (Object obj : downloadManagers) {
        skipped.addAll(getSkippedFiles((DownloadManager) obj));
    }
    return skipped;
}