Example usage for java.util Set containsAll

List of usage examples for java.util Set containsAll

Introduction

On this page you can find example usage for java.util Set containsAll.

Prototype

boolean containsAll(Collection<?> c);

Document

Returns true if this set contains all of the elements of the specified collection.
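
Before the real-world examples below, a minimal sketch of the basic semantics (the set contents are illustrative; Set.of requires Java 9+): containsAll returns true when every element of the argument collection is present in the set, and is vacuously true for an empty argument.

import java.util.Set;

public class ContainsAllDemo {
    public static void main(String[] args) {
        Set<String> scopes = Set.of("read", "write", "delete");

        System.out.println(scopes.containsAll(Set.of("read", "write"))); // true: a subset
        System.out.println(scopes.containsAll(Set.of("read", "admin"))); // false: "admin" is missing
        System.out.println(scopes.containsAll(Set.of()));                // true: vacuously satisfied
    }
}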

Usage

From source file:org.mitre.oauth2.token.ChainedTokenGranter.java

@Override
protected OAuth2Authentication getOAuth2Authentication(ClientDetails client, TokenRequest tokenRequest)
        throws AuthenticationException, InvalidTokenException {
    // read and load up the existing token
    String incomingTokenValue = tokenRequest.getRequestParameters().get("token");
    OAuth2AccessTokenEntity incomingToken = tokenServices.readAccessToken(incomingTokenValue);

    // check for scoping in the request, can't up-scope with a chained request
    Set<String> approvedScopes = incomingToken.getScope();
    Set<String> requestedScopes = tokenRequest.getScope();

    if (requestedScopes == null) {
        requestedScopes = new HashSet<>();
    }

    // do a check on the requested scopes -- if they exactly match the client scopes, they were probably shadowed by the token granter
    if (client.getScope().equals(requestedScopes)) {
        requestedScopes = new HashSet<>();
    }

    // if our scopes are a valid subset of what's allowed, we can continue
    if (approvedScopes.containsAll(requestedScopes)) {

        if (requestedScopes.isEmpty()) {
            // if there are no scopes, inherit the original scopes from the token
            tokenRequest.setScope(approvedScopes);
        } else {
            // if scopes were asked for, give only the subset of scopes requested
            // this allows safe downscoping
            tokenRequest.setScope(Sets.intersection(requestedScopes, approvedScopes));
        }

        // NOTE: don't revoke the existing access token

        // create a new access token
        OAuth2Authentication authentication = new OAuth2Authentication(
                getRequestFactory().createOAuth2Request(client, tokenRequest),
                incomingToken.getAuthenticationHolder().getAuthentication().getUserAuthentication());

        return authentication;

    } else {
        throw new InvalidScopeException("Invalid scope requested in chained request", approvedScopes);
    }

}
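
The example relies on Guava's Sets.intersection for the downscoping step. Here is a plain-JDK sketch of the same subset-check-then-intersect idiom (the class and method names are illustrative, not taken from the MITRE code):

import java.util.HashSet;
import java.util.Set;

class Downscope {
    static Set<String> downscope(Set<String> approved, Set<String> requested) {
        if (!approved.containsAll(requested)) {
            throw new IllegalArgumentException("requested scopes exceed approved scopes");
        }
        if (requested.isEmpty()) {
            return new HashSet<>(approved); // no explicit request: inherit the approved scopes
        }
        Set<String> result = new HashSet<>(requested);
        result.retainAll(approved); // plain-JDK stand-in for Sets.intersection; defensive, since after the containsAll guard it is a no-op
        return result;
    }
}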

From source file:org.ut.biolab.medsavant.server.serverapi.UserManager.java

@Override
public void dropRolesForUser(String sessID, String user, Set<UserRole> roles)
        throws RemoteException, SessionExpiredException, SQLException, SecurityException {
    checkAdmin(sessID);
    //Check if any of the given roles are already assigned, and if so remove them from the
    //roles to drop.
    Set<UserRole> assignedRoles = getRolesForUser(sessID, user);
    if (assignedRoles.containsAll(roles)) {
        return;
    } else if (assignedRoles.size() > 0) {
        roles.removeAll(assignedRoles);
    }

    //drop the remaining roles.
    TableSchema raTable = MedSavantDatabase.UserRoleAssignmentTableSchema;
    for (UserRole role : roles) {
        DeleteQuery dq = new DeleteQuery(raTable.getTableName());
        dq.addCondition(BinaryCondition.equalTo(
                raTable.getDBColumn(MedSavantDatabase.UserRoleAssignmentTableSchema.COLUMNNAME_OF_USERNAME),
                user));
        dq.addCondition(BinaryCondition.equalTo(
                raTable.getDBColumn(MedSavantDatabase.UserRoleAssignmentTableSchema.COLUMNNAME_OF_ROLE_ID),
                role.getRoleId()));
        ConnectionController.executeUpdate(sessID, dq.toString());
    }
}
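
Both this method and registerRoleForUser further down use containsAll as a cheap no-op guard before touching the database. A generic sketch of the idiom (hypothetical helper):

import java.util.HashSet;
import java.util.Set;

class ChangeGuard {
    /** Returns the items that still need processing; empty means nothing to do. */
    static <T> Set<T> pending(Set<T> existing, Set<T> requested) {
        if (existing.containsAll(requested)) {
            return new HashSet<>(); // every requested item is already present
        }
        Set<T> remaining = new HashSet<>(requested);
        remaining.removeAll(existing); // keep only the items not yet present
        return remaining;
    }
}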

From source file:org.sakaiproject.adminsiteperms.service.SitePermsService.java

private void initiateSitePermsThread(final User currentUser, final Session currentSession, final String[] perms,
        final String[] types, final String[] roles, final boolean add) throws InterruptedException {
    String permsString = makeStringFromArray(perms);
    String typesString = makeStringFromArray(types);
    String rolesString = makeStringFromArray(roles);
    // exit if we are locked for updates
    if (isLockedForUpdates()) {
        throw new IllegalStateException("Cannot start new perms update, one is already in progress");
    }
    updateStarted = System.currentTimeMillis();
    // update the session with a status message
    String msg = getMessage("siterole.message.processing." + (add ? "add" : "remove"),
            new Object[] { permsString, typesString, rolesString, 0 });
    log.info("STARTED: " + msg + " :: pauseTimeMS=" + pauseTimeMS + ", sitesUntilPause=" + sitesUntilPause
            + ", maxUpdateTimeMS=" + maxUpdateTimeMS);
    updateStatus = "RUNNING";
    updateMessage = msg;
    // set the current user in this thread so they can perform the operations
    Session threadSession = setCurrentUser(currentUser.getId());
    try {
        List<String> permsList = Arrays.asList(perms);
        // now add the perms to all matching roles in all matching sites
        // switched site listing to using ids only - KNL-1125
        List<String> siteIds = siteService.getSiteIds(SelectionType.ANY, types, null, null, SortType.NONE,
                null);
        int pauseTime = 0;
        int sitesCounter = 0;
        int updatesCount = 0;
        int successCount = 0;
        for (String siteId : siteIds) {
            String siteRef = siteService.siteReference(siteId);
            try {
                AuthzGroup ag = authzGroupService.getAuthzGroup(siteRef);
                if (authzGroupService.allowUpdate(ag.getId())) {
                    boolean updated = false;
                    for (String role : roles) {
                        Role r = ag.getRole(role);
                        // if role not found in this group then move on
                        if (r != null) {
                            // get the current perms so we can possibly avoid an update
                            Set<String> current = r.getAllowedFunctions();
                            if (add) {
                                if (!current.containsAll(permsList)) {
                                    // only update if the perms are not already there
                                    r.allowFunctions(permsList);
                                    updated = true;
                                }
                            } else {
                                boolean found = false;
                                for (String perm : permsList) {
                                    if (current.contains(perm)) {
                                        found = true;
                                        break;
                                    }
                                }
                                if (found) {
                                    // only update if at least one perm needs to be removed
                                    r.disallowFunctions(permsList);
                                    updated = true;
                                }
                            }
                        }
                    }
                    if (updated) {
                        // only save if the group was updated
                        authzGroupService.save(ag);
                        updatesCount++;
                        log.info("Added Permissions (" + permsString + ") for roles (" + rolesString
                                + ") to group:" + siteRef);
                    }
                    successCount++;
                    if (updatesCount > 0 && updatesCount % sitesUntilPause == 0) {
                        // pause every 10 (default) sites updated or so for about 1 second (default)
                        Thread.sleep(pauseTimeMS);
                        pauseTime += pauseTimeMS;
                        // make sure the sessions do not timeout
                        threadSession.setActive();
                        currentSession.setActive();
                    }
                } else {
                    log.warn("Cannot update authz group: " + siteRef + ", unable to apply any perms change");
                }
            } catch (GroupNotDefinedException e) {
                log.error("Could not find authz group: " + siteRef + ", unable to apply any perms change");
            } catch (AuthzPermissionException e) {
                log.error("Could not save authz group: " + siteRef + ", unable to apply any perms change");
            }
            sitesCounter++;
            if (!isLockedForUpdates()) {
                // if we are no longer locked for updates then we have a timeout failure
                throw new RuntimeException("Timeout occurred while running site permissions update");
            } else if (sitesCounter % 4 == 0) {
                // update the processor status every few sites processed
                int percentComplete = (int) (sitesCounter * 100) / siteIds.size();
                msg = getMessage("siterole.message.processing." + (add ? "add" : "remove"),
                        new Object[] { permsString, typesString, rolesString, percentComplete });
                updateMessage = msg;
            }
        }
        int failureCount = siteIds.size() - successCount;
        long totalTime = System.currentTimeMillis() - updateStarted;
        int totalSecs = totalTime > 0 ? (int) (totalTime / 1000) : 0;
        int pauseSecs = pauseTime > 0 ? (int) (pauseTime / 1000) : 0;
        msg = getMessage("siterole.message.permissions." + (add ? "added" : "removed"),
                new Object[] { permsString, typesString, rolesString, siteIds.size(), updatesCount,
                        successCount, failureCount, totalSecs, pauseSecs });
        log.info(msg);
        updateMessage = msg;
    } finally {
        // reset the update status
        updateStatus = STATUS_COMPLETE;
        updateStarted = 0;
        // cleanup the session associated with this thread
        threadSession.clear();
        threadSession.invalidate();
    }
}
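
The important containsAll call sits in the add branch: a role is only rewritten, and the group only saved, when at least one permission is actually missing. A condensed sketch of that idempotence check (the permission names are illustrative):

import java.util.HashSet;
import java.util.List;
import java.util.Set;

class PermissionCheck {
    /** True when at least one wanted permission is missing, i.e. a save is required. */
    static boolean updateNeeded(Set<String> current, List<String> wanted) {
        return !current.containsAll(wanted);
    }

    public static void main(String[] args) {
        Set<String> current = new HashSet<>(List.of("site.visit", "site.upd"));
        System.out.println(updateNeeded(current, List.of("site.visit")));             // false
        System.out.println(updateNeeded(current, List.of("site.visit", "site.del"))); // true
    }
}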

From source file:org.talend.designer.core.ui.editor.properties.controllers.HadoopJarSetupController.java

@Override
public Control createControl(Composite subComposite, final IElementParameter param, int numInRow, int nbInRow,
        int top, Control lastControl) {

    final Composite container = subComposite;
    Button subButton = getWidgetFactory().createButton(container, "", SWT.PUSH); //$NON-NLS-1$
    subButton.addSelectionListener(new SelectionAdapter() {

        @Override
        public void widgetSelected(SelectionEvent e) {
            initHadoopVersionType();
            boolean readonly = false;
            String readOnlyIfString = param.getReadOnlyIf();
            if (StringUtils.isNotEmpty(readOnlyIfString)) {
                if (param.isReadOnly(elem.getElementParameters())) {
                    readonly = true;
                }
            }
            // if readonly is already true, this check can be skipped, since its aim is also to determine the read-only state
            if (!readonly) {
                IElementParameter propertyParameter = elem
                        .getElementParameter(EParameterName.PROPERTY_TYPE.getName());
                if (propertyParameter != null) {
                    if (EmfComponent.REPOSITORY.equals(propertyParameter.getValue())) {
                        readonly = true;
                    }
                }
            }
            HadoopCustomVersionDefineDialog customVersionDialog = new HadoopCustomVersionDefineDialog(
                    PlatformUI.getWorkbench().getActiveWorkbenchWindow().getShell(), getCustomVersionMap()) {

                @Override
                protected ECustomVersionType[] getDisplayTypes() {
                    return new ECustomVersionType[] { versionType };
                }
            };

            IElementParameter sparkLocalParam = elem.getElementParameter(HadoopConstants.SPARK_LOCAL_MODE);
            IElementParameter sparkParam = elem.getElementParameter(HadoopConstants.SPARK_MODE);
            boolean isSparkLocalMode = false;

            if (sparkLocalParam != null) {
                isSparkLocalMode = (Boolean) sparkLocalParam.getValue();
            }

            if (sparkParam != null) {
                String sparkMode = null;
                if (isSparkLocalMode) {
                    sparkMode = "LOCAL"; //$NON-NLS-1$
                } else {
                    sparkMode = "" + sparkParam.getValue(); //$NON-NLS-1$
                }
                customVersionDialog.setSparkMode(sparkMode);
                customVersionDialog.setSparkStreamingMode(sparkMode);
            }
            customVersionDialog.setReadonly(readonly);
            Set<String> oldLibList = customVersionDialog.getLibList(versionType.getGroup());
            if (customVersionDialog.open() == Window.OK) {
                Set<String> newLibList = customVersionDialog.getLibList(versionType.getGroup());
                if (oldLibList != null && newLibList != null && oldLibList.size() == newLibList.size()
                        && oldLibList.containsAll(newLibList)) {
                    // nothing changed, so there is nothing to do
                } else {
                    // changed
                    String customJars = customVersionDialog.getLibListStr(versionType.getGroup());
                    executeCommand(new PropertyChangeCommand(elem, EParameterName.HADOOP_CUSTOM_JARS.getName(),
                            StringUtils.trimToEmpty(customJars)));
                }
            }
        }

    });
    subButton.setImage(ImageProvider.getImage(CoreUIPlugin.getImageDescriptor(DOTS_BUTTON)));
    FormData data = new FormData();
    data.left = new FormAttachment(lastControl, 0);
    data.right = new FormAttachment(lastControl, STANDARD_BUTTON_WIDTH, SWT.RIGHT);
    data.top = new FormAttachment(0, top);
    data.height = STANDARD_HEIGHT - 2;
    subButton.setLayoutData(data);

    return container;
}
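
The size-plus-containsAll comparison of oldLibList and newLibList is a hand-rolled set-equality test. The Set.equals contract specifies exactly that (equal size and mutual containment), so for two non-null java.util sets the check can be written more directly; a sketch:

import java.util.Set;

class LibListComparison {
    static boolean unchanged(Set<String> oldLibs, Set<String> newLibs) {
        // Equivalent to: oldLibs.size() == newLibs.size() && oldLibs.containsAll(newLibs)
        return oldLibs != null && oldLibs.equals(newLibs);
    }

    public static void main(String[] args) {
        System.out.println(unchanged(Set.of("a.jar", "b.jar"), Set.of("b.jar", "a.jar"))); // true
        System.out.println(unchanged(Set.of("a.jar"), Set.of("a.jar", "b.jar")));          // false
    }
}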

From source file:org.ut.biolab.medsavant.server.serverapi.UserManager.java

@Override
public void registerRoleForUser(String sessID, String user, Set<UserRole> roles)
        throws RemoteException, SessionExpiredException, SQLException, SecurityException {
    checkAdmin(sessID);

    //Check if any of the given roles are already assigned, and if so remove them from the
    //roles to register.
    Set<UserRole> assignedRoles = getRolesForUser(sessID, user);
    if (assignedRoles.containsAll(roles)) {
        return;
    } else if (assignedRoles.size() > 0) {
        roles.removeAll(assignedRoles);
    }

    //register the remaining roles.
    TableSchema raTable = MedSavantDatabase.UserRoleAssignmentTableSchema;
    for (UserRole role : roles) {
        InsertQuery iq = new InsertQuery(raTable.getTableName());
        iq.addColumn(raTable.getDBColumn(MedSavantDatabase.UserRoleAssignmentTableSchema.COLUMNNAME_OF_ROLE_ID),
                role.getRoleId());
        iq.addColumn(
                raTable.getDBColumn(MedSavantDatabase.UserRoleAssignmentTableSchema.COLUMNNAME_OF_USERNAME),
                user);
        ConnectionController.executeUpdate(sessID, iq.toString());
    }

}

From source file:io.minio.policy.BucketPolicy.java

/**
 * Appends the given statement to the statement list, keeping the statements unique.
 * - If the statement already exists in the statement list, it is ignored.
 * - If a matching statement exists with different conditions, they are merged.
 * - Otherwise the statement is appended to the statement list.
 */
private void appendStatement(Statement statement) {
    for (Statement s : statements) {
        Set<String> aws = s.principal().aws();
        ConditionMap conditions = s.conditions();

        if (s.actions().containsAll(statement.actions()) && s.effect().equals(statement.effect()) && aws != null
                && aws.containsAll(statement.principal().aws()) && conditions != null
                && conditions.equals(statement.conditions())) {
            s.resources().addAll(statement.resources());
            return;
        }

        if (s.resources().containsAll(statement.resources()) && s.effect().equals(statement.effect())
                && aws != null && aws.containsAll(statement.principal().aws()) && conditions != null
                && conditions.equals(statement.conditions())) {
            s.actions().addAll(statement.actions());
            return;
        }

        if (s.resources().containsAll(statement.resources()) && s.actions().containsAll(statement.actions())
                && s.effect().equals(statement.effect()) && aws != null
                && aws.containsAll(statement.principal().aws())) {
            if (conditions != null && conditions.equals(statement.conditions())) {
                return;
            }

            if (conditions != null && statement.conditions() != null) {
                conditions.putAll(statement.conditions());
                return;
            }
        }
    }

    if (!(statement.actions().isEmpty() && statement.resources().isEmpty())) {
        statements.add(statement);
    }
}
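
Here containsAll provides the subset tests that decide whether two policy statements can be merged. A stripped-down sketch of the first merge rule, with the Statement fields replaced by plain sets (names and values are illustrative):

import java.util.HashSet;
import java.util.Set;

class StatementMerge {
    /** Merge rule: if the existing actions cover the new ones, absorb the new resources. */
    static boolean tryMergeResources(Set<String> actions, Set<String> resources,
                                     Set<String> newActions, Set<String> newResources) {
        if (actions.containsAll(newActions)) {
            resources.addAll(newResources);
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        Set<String> actions = new HashSet<>(Set.of("s3:GetObject", "s3:PutObject"));
        Set<String> resources = new HashSet<>(Set.of("arn:aws:s3:::bucket-a/*"));
        tryMergeResources(actions, resources, Set.of("s3:GetObject"), Set.of("arn:aws:s3:::bucket-b/*"));
        System.out.println(resources); // both ARNs: the statements merged
    }
}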

From source file:com.linkedin.pinot.controller.helix.sharding.SegmentAssignmentStrategyTest.java

@Test
public void testTableLevelAndMirroringReplicaGroupSegmentAssignmentStrategy() throws Exception {
    // Create the configuration for segment assignment strategy.
    int numInstancesPerPartition = 5;
    ReplicaGroupStrategyConfig replicaGroupStrategyConfig = new ReplicaGroupStrategyConfig();
    replicaGroupStrategyConfig.setNumInstancesPerPartition(numInstancesPerPartition);
    replicaGroupStrategyConfig.setMirrorAssignmentAcrossReplicaGroups(true);

    // Create table config
    TableConfig tableConfig = new TableConfig.Builder(CommonConstants.Helix.TableType.OFFLINE)
            .setTableName(TABLE_NAME_TABLE_LEVEL_REPLICA_GROUP).setNumReplicas(NUM_REPLICA)
            .setSegmentAssignmentStrategy("ReplicaGroupSegmentAssignmentStrategy").build();

    tableConfig.getValidationConfig().setReplicaGroupStrategyConfig(replicaGroupStrategyConfig);

    // Create the table and upload segments
    _pinotHelixResourceManager.addTable(tableConfig);

    // Wait for table addition
    while (!_pinotHelixResourceManager.hasOfflineTable(TABLE_NAME_TABLE_LEVEL_REPLICA_GROUP)) {
        Thread.sleep(100);
    }

    int numSegments = 20;
    Set<String> segments = new HashSet<>();
    for (int i = 0; i < numSegments; ++i) {
        String segmentName = "segment" + i;
        addOneSegmentWithPartitionInfo(TABLE_NAME_TABLE_LEVEL_REPLICA_GROUP, segmentName, null, 0);
        segments.add(segmentName);
    }

    // Wait for all segments to appear in the ideal state
    while (!allSegmentsPushedToIdealState(TABLE_NAME_TABLE_LEVEL_REPLICA_GROUP, numSegments)) {
        Thread.sleep(100);
    }

    // Build a map from each server to the set of segments assigned to it.
    Map<String, Set<String>> serverToSegments = getServersToSegmentsMapping(
            TABLE_NAME_TABLE_LEVEL_REPLICA_GROUP);

    // Fetch the replica group mapping table
    String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(TABLE_NAME_TABLE_LEVEL_REPLICA_GROUP);
    ReplicaGroupPartitionAssignment partitionAssignment = _partitionAssignmentGenerator
            .getReplicaGroupPartitionAssignment(offlineTableName);

    // Check that each replica group contains all segments of the table.
    for (int group = 0; group < NUM_REPLICA; group++) {
        List<String> serversInReplicaGroup = partitionAssignment.getInstancesfromReplicaGroup(0, group);
        Set<String> segmentsInReplicaGroup = new HashSet<>();
        for (String server : serversInReplicaGroup) {
            segmentsInReplicaGroup.addAll(serverToSegments.get(server));
        }
        Assert.assertTrue(segmentsInReplicaGroup.containsAll(segments));
    }

    // Verify that servers at the same position in each replica group mirror the same segment sets.
    for (int instanceIndex = 0; instanceIndex < numInstancesPerPartition; instanceIndex++) {
        Set<Set<String>> mirroringServerSegments = new HashSet<>();
        for (int group = 0; group < NUM_REPLICA; group++) {
            List<String> serversInReplicaGroup = partitionAssignment.getInstancesfromReplicaGroup(0, group);
            String server = serversInReplicaGroup.get(instanceIndex);
            mirroringServerSegments.add(serverToSegments.get(server));
        }
        Assert.assertEquals(mirroringServerSegments.size(), 1);
    }
}
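
The assertions rely on a common test idiom: union the per-server segment sets, then prove coverage with containsAll. A self-contained sketch without the Pinot fixtures:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

class CoverageCheck {
    public static void main(String[] args) {
        Set<String> expected = Set.of("segment0", "segment1", "segment2");
        // Segments as they might be spread across the servers of one replica group.
        List<Set<String>> perServer = List.of(Set.of("segment0", "segment2"), Set.of("segment1"));

        Set<String> union = new HashSet<>();
        perServer.forEach(union::addAll);

        // A replica group must hold every segment of the table.
        System.out.println(union.containsAll(expected)); // true
    }
}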

From source file:org.drugis.mtc.parameterization.InconsistencyBaselineSearchProblem.java

public boolean isGoal(Map<Study, Treatment> state) {
    if (!state.keySet().containsAll(d_studies)) {
        return false;
    }

    // Calculate covered edges
    Set<FoldedEdge<Treatment, Study>> covered = new HashSet<FoldedEdge<Treatment, Study>>();
    for (Entry<Study, Treatment> entry : state.entrySet()) {
        Treatment t0 = entry.getValue();
        for (Treatment t1 : entry.getKey().getTreatments()) {
            FoldedEdge<Treatment, Study> edge = d_cGraph.findEdge(t0, t1);
            if (edge != null) {
                covered.add(edge);
            }
        }
    }

    if (d_cycleClasses == null) { // Looking for full baseline cover
        return covered.containsAll(d_cGraph.getEdges());
    }

    // Now check that for each cycle class, all cycles have at least (n-1) edges covered,
    // and if the cycle is potentially inconsistent, that at least one has n edges covered.
    for (Entry<Partition, Set<List<Treatment>>> c : d_cycleClasses.entrySet()) {
        Set<List<Treatment>> cycles = c.getValue();
        if (existsTooManyMissing(cycles, covered)) {
            return false;
        } else if (InconsistencyParameterization.isInconsistencyCycle(c.getKey())
                && !existsNoneMissing(cycles, covered)) {
            return false;
        }
    }
    return true;
}
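
The guard at the top shows another frequent shape: state.keySet().containsAll(required) verifies that a partial assignment covers every required key before any more expensive work is done. A minimal sketch with hypothetical types:

import java.util.Map;
import java.util.Set;

class GoalGuard {
    /** Fail fast if any required key has no assignment yet. */
    static boolean coversAll(Map<String, String> state, Set<String> requiredKeys) {
        return state.keySet().containsAll(requiredKeys);
    }

    public static void main(String[] args) {
        Map<String, String> state = Map.of("study1", "drugA", "study2", "drugB");
        System.out.println(coversAll(state, Set.of("study1", "study2"))); // true
        System.out.println(coversAll(state, Set.of("study1", "study3"))); // false
    }
}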

From source file:com.linkedin.pinot.common.partition.StreamPartitionAssignmentGeneratorTest.java

private void verifyPartitionAssignmentFromIdealState(TableConfig tableConfig, IdealState idealState,
        int numPartitions) {
    TestStreamPartitionAssignmentGenerator partitionAssignmentGenerator = new TestStreamPartitionAssignmentGenerator(
            _mockHelixManager);
    PartitionAssignment partitionAssignmentFromIdealState = partitionAssignmentGenerator
            .getStreamPartitionAssignmentFromIdealState(tableConfig, idealState);
    Assert.assertEquals(tableConfig.getTableName(), partitionAssignmentFromIdealState.getTableName());
    Assert.assertEquals(partitionAssignmentFromIdealState.getNumPartitions(), numPartitions);
    // check that latest segments are honoring partition assignment
    Map<String, LLCSegmentName> partitionIdToLatestLLCSegment = partitionAssignmentGenerator
            .getPartitionToLatestSegments(idealState);
    for (Map.Entry<String, LLCSegmentName> entry : partitionIdToLatestLLCSegment.entrySet()) {
        Set<String> idealStateInstances = idealState.getInstanceStateMap(entry.getValue().getSegmentName())
                .keySet();
        List<String> partitionAssignmentInstances = partitionAssignmentFromIdealState
                .getInstancesListForPartition(entry.getKey());
        Assert.assertEquals(idealStateInstances.size(), partitionAssignmentInstances.size());
        Assert.assertTrue(idealStateInstances.containsAll(partitionAssignmentInstances));
    }
}
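
Unlike the set-to-set comparison in the Talend example above, this pair of assertions compares a Set against a List: equal sizes plus containsAll establishes that they hold the same elements, provided the list is duplicate-free. A sketch of the check and its caveat:

import java.util.List;
import java.util.Set;

class SetListComparison {
    /** True if the set and the (duplicate-free) list contain exactly the same elements. */
    static boolean sameElements(Set<String> set, List<String> list) {
        return set.size() == list.size() && set.containsAll(list);
    }

    public static void main(String[] args) {
        System.out.println(sameElements(Set.of("s1", "s2"), List.of("s2", "s1"))); // true
        // Caveat: duplicates in the list can fool the size check.
        System.out.println(sameElements(Set.of("s1", "s2"), List.of("s1", "s1"))); // true, but misleading
    }
}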

From source file:org.intermine.bio.dataconversion.IdResolver.java

/**
 * Return true if the idResolver contains information about a collection of taxon ids.
 * @param taxonIds a collection of taxon ids to check for
 * @return true if the resolver has data about all of these taxon ids
 */
public boolean hasTaxons(Set<String> taxonIds) {
    Set<String> taxonIdSet = new HashSet<String>();
    for (MultiKey key : orgIdMaps.keySet()) {
        taxonIdSet.add((String) key.getKey(0));
    }
    return taxonIdSet.containsAll(taxonIds);
}