Example usage for java.util Set removeAll

List of usage examples for java.util Set removeAll

Introduction

On this page you can find example usage for java.util Set removeAll.

Prototype

boolean removeAll(Collection<?> c);

Document

Removes from this set all of its elements that are contained in the specified collection (optional operation).
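
For example, a minimal sketch of the common set-difference idiom, copying the set first so the original is left untouched (names are illustrative):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class RemoveAllDemo {
    public static void main(String[] args) {
        Set<String> all = new HashSet<>(Arrays.asList("a", "b", "c"));
        Set<String> excluded = new HashSet<>(Arrays.asList("b", "c", "d"));

        // Copy first so removeAll does not mutate the original set
        Set<String> difference = new HashSet<>(all);
        // removeAll returns true if the set changed as a result of the call
        boolean changed = difference.removeAll(excluded);

        System.out.println(difference + " changed=" + changed); // prints [a] changed=true
    }
}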

Usage

From source file:podd.search.impl.SearchCriteriaImpl.java

/**
 * Process the inclusive and exclusive project ids so that there are no overlapping values
 * @return the set of inclusive project ids with exclusive ids and nulls removed
 */
private Set<String> getFinalProjectIdsInclusive() {
    // Clean up by removing any project ids identified in exclusive from the list of inclusive
    Set<String> cleanInclusive = new HashSet<String>();
    if (hasProjectIdsInclusive())
        cleanInclusive.addAll(this.getProjectIdsInclusive());
    if (hasProjectIdsExclusive())
        cleanInclusive.removeAll(this.getProjectIdsExclusive());

    // clean up any null items in the collection
    for (Iterator<String> it = cleanInclusive.iterator(); it.hasNext();) {
        String projectId = it.next();
        if (projectId == null)
            it.remove();
    }

    return cleanInclusive;
}
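
On Java 8 and later, the explicit iterator loop above can be collapsed into a single Collection.removeIf call; a sketch of the equivalent null cleanup (not part of the original source):

// Java 8+: removes every null entry in one pass
cleanInclusive.removeIf(java.util.Objects::isNull);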

From source file:com.wlami.mibox.client.metadata.MetadataWorker.java

public void synchronizeLocalMetadataWithRemoteMetadata(File f, DecryptedMiTree local, DecryptedMiTree remote) {
    log.info("starting incoming synchronization of folder [{}]", f.getAbsolutePath());
    if (local == null) {
        log.debug("local metadata not available for folder [{}]", f.getAbsolutePath());
        // In this case the incoming folder is new and we need to create a
        // local folder
        local = new DecryptedMiTree();
        local.setFolderName(f.getName());
    }
    // Remember which files got processed in the first loop
    Set<String> processedFiles = new HashSet<>();
    // Get all files from the local metadata and compare them to the remote
    // metadata.
    for (String localMFileName : local.getFiles().keySet()) {
        log.debug("Comparing MFiles for [{}]", localMFileName);
        MFile localMFile = local.getFiles().get(localMFileName);
        MFile remoteMFile = remote.getFiles().get(localMFileName);
        // if the remote file is newer than the local file we want to update
        // it
        if (remoteMFile == null || remoteMFile.getLastModified().after(localMFile.getLastModified())) {
            log.info("remote file is newer than local file. will request download for [{}]", localMFileName);
            File file = new File(f, localMFileName);
            updateFileFromMetadata(file, localMFile, remoteMFile);
        }
        // remember which files have been processed by us
        processedFiles.add(localMFileName);
    }
    // Now we want to iterate over all remote files which have not been
    // processed yet. To do this, we remove the already-processed files from
    // the remote file set.
    Set<String> newRemoteFileNames = new HashSet<>(remote.getFiles().keySet());
    newRemoteFileNames.removeAll(processedFiles);
    for (String remoteMFileName : newRemoteFileNames) {
        log.info("incoming new file [{}]", remoteMFileName);
        MFile localMFile = null;
        MFile remoteFile = remote.getFiles().get(remoteMFileName);
        File file = new File(f, remoteMFileName);
        updateFileFromMetadata(file, localMFile, remoteFile);
    }
    // TODO don't forget to update the local metadata if a file got
    // updated!!!

    // And now lets compare the subfolders
    Map<String, EncryptedMiTreeInformation> localSubFolders = local.getSubfolder();
    Map<String, EncryptedMiTreeInformation> remoteSubFolders = remote.getSubfolder();

    Set<String> processedFolders = new HashSet<>();
    for (String localFolderName : localSubFolders.keySet()) {
        EncryptedMiTreeInformation localMiTreeInfo = localSubFolders.get(localFolderName);
        DecryptedMiTree localMiTree = encryptedMiTreeRepo.loadEncryptedMiTree(localMiTreeInfo.getFileName())
                .decrypt(localMiTreeInfo.getKey(), localMiTreeInfo.getIv());
        // TODO what happens if the folder has been deleted on another
        // client
        EncryptedMiTree remoteMiTreeEncrypted = encryptedMiTreeRepo
                .loadRemoteEncryptedMiTree(localMiTreeInfo.getFileName());
        DecryptedMiTree remoteMiTree = remoteMiTreeEncrypted.decrypt(localMiTreeInfo.getKey(),
                localMiTreeInfo.getIv());
        synchronizeLocalMetadataWithRemoteMetadata(new File(f, localFolderName), localMiTree, remoteMiTree);
        processedFolders.add(localFolderName);
    }

    Set<String> newRemoteFolders = new HashSet<>(remoteSubFolders.keySet());
    newRemoteFolders.removeAll(processedFolders);
    for (String remoteFolderName : newRemoteFolders) {
        EncryptedMiTreeInformation localMiTreeInfo = remoteSubFolders.get(remoteFolderName);
        DecryptedMiTree localMiTree = null;
        EncryptedMiTree remoteMiTreeEncrypted = encryptedMiTreeRepo
                .loadRemoteEncryptedMiTree(localMiTreeInfo.getFileName());
        DecryptedMiTree remoteMiTree = remoteMiTreeEncrypted.decrypt(localMiTreeInfo.getKey(),
                localMiTreeInfo.getIv());
        synchronizeLocalMetadataWithRemoteMetadata(new File(f, remoteFolderName), localMiTree, remoteMiTree);
    }
}
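
Note the defensive copies above (new HashSet<>(remote.getFiles().keySet()) and new HashSet<>(remoteSubFolders.keySet())): keySet() is a live view of its map, so calling removeAll directly on the view would also delete the matching entries from the remote metadata. A minimal sketch of the difference, with illustrative names:

Map<String, String> remoteFiles = new HashMap<>();
remoteFiles.put("a.txt", "hash1");
remoteFiles.put("b.txt", "hash2");
Set<String> processed = Collections.singleton("a.txt");

// Safe: operate on a copy; remoteFiles is unchanged
Set<String> onlyRemote = new HashSet<>(remoteFiles.keySet());
onlyRemote.removeAll(processed); // onlyRemote = [b.txt]

// Unsafe here: the key set is a view, so this would also
// remove the "a.txt" entry from remoteFiles itself
// remoteFiles.keySet().removeAll(processed);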

From source file:com.bluexml.side.Integration.alfresco.sql.synchronization.nodeService.NodeServiceImpl.java

private void deleteAllRelatedAssociations(NodeRef nodeRef) {
    Set<AssociationRef> assocs = new HashSet<AssociationRef>();
    assocs.addAll(nodeService.getSourceAssocs(nodeRef, RegexQNamePattern.MATCH_ALL));
    assocs.addAll(nodeService.getTargetAssocs(nodeRef, RegexQNamePattern.MATCH_ALL));
    assocs.removeAll(getProcessedAssociations());

    for (AssociationRef assoc : assocs) {
        if (filterer.acceptAssociationQName(assoc.getTypeQName())) {
            deleteAssociation(assoc.getSourceRef(), assoc.getTargetRef(), assoc.getTypeQName());
        }
    }

    Set<ChildAssociationRef> childAssocs = new HashSet<ChildAssociationRef>();
    childAssocs.addAll(nodeService.getChildAssocs(nodeRef));
    childAssocs.addAll(nodeService.getParentAssocs(nodeRef));
    childAssocs.removeAll(getProcessedChildAssociations());

    for (ChildAssociationRef assoc : childAssocs) {
        if (filterer.acceptAssociationQName(assoc.getTypeQName())) {
            // TODO : check whether the node has several parents => certainly a problem (warn ?) in terms of the composition semantics
            deleteAssociation(assoc.getParentRef(), assoc.getChildRef(), assoc.getTypeQName());
        }
    }

    setProcessed(assocs, childAssocs, OperationType.DELETE);
}

From source file:com.redsqirl.workflow.server.ActionManager.java

public Collection<String> getPackageToNotify() throws RemoteException {
    Set<String> ans = new PackageManager().getAvailablePackageNames(System.getProperty("user.name"));
    ans.removeAll(getFooterPackages());
    return ans;
}
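
As the prototype notes, removeAll is an optional operation, so this relies on getAvailablePackageNames returning a mutable set; an unmodifiable set would throw. A sketch of the failure mode (assumed, not taken from this source):

Set<String> packages = Collections.unmodifiableSet(new HashSet<>(Arrays.asList("pkgA", "pkgB")));
// Throws UnsupportedOperationException: removeAll mutates the receiver
packages.removeAll(Collections.singleton("pkgA"));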

From source file:org.wso2.carbon.identity.application.authentication.framework.handler.request.impl.consent.ConsentMgtPostAuthnHandler.java

private Set<String> getClaimsWithoutConsent(List<String> claimWithConsent, AuthenticationContext context)
        throws PostAuthenticationFailedException {

    List<String> requestedClaims = new ArrayList<>(getSPRequestedLocalClaims(context));
    List<String> mandatoryClaims = new ArrayList<>(getSPMandatoryLocalClaims(context));
    Set<String> consentClaims = getUniqueLocalClaims(requestedClaims, mandatoryClaims);

    consentClaims.removeAll(claimWithConsent);
    consentClaims.removeAll(mandatoryClaims);
    return consentClaims;
}
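
A performance subtlety: AbstractSet.removeAll iterates over the set and calls contains on the argument whenever the argument collection is at least as large as the set, so passing a big List such as claimWithConsent makes each lookup linear and the whole call quadratic. Copying the list into a HashSet first keeps it linear; a sketch, assuming consentClaims is an AbstractSet subclass such as HashSet:

// Avoids the quadratic case when claimWithConsent is a large List
consentClaims.removeAll(new HashSet<>(claimWithConsent));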

From source file:net.sourceforge.fenixedu.domain.SchoolClass.java

public Set<Shift> findAvailableShifts() {
    final ExecutionDegree executionDegree = getExecutionDegree();
    final DegreeCurricularPlan degreeCurricularPlan = executionDegree.getDegreeCurricularPlan();

    final Set<Shift> shifts = new HashSet<Shift>();
    for (final CurricularCourse curricularCourse : degreeCurricularPlan.getCurricularCoursesSet()) {
        if (curricularCourse.hasScopeForCurricularYear(getAnoCurricular(), getExecutionPeriod())) {
            for (final ExecutionCourse executionCourse : curricularCourse.getAssociatedExecutionCoursesSet()) {
                if (executionCourse.getExecutionPeriod() == getExecutionPeriod()) {
                    shifts.addAll(executionCourse.getAssociatedShifts());
                }
            }
        }
    }
    shifts.removeAll(getAssociatedShiftsSet());
    return shifts;
}

From source file:com.kibana.multitenancy.plugin.kibana.KibanaSeed.java

public static void setDashboards(String user, Set<String> projects, Set<String> roles, Client esClient,
        String kibanaIndex, String kibanaVersion) {

    //GET .../.kibana/index-pattern/_search?pretty=true&fields=
    //  compare results to projects; handle any deltas (create, delete?)
    //check projects for default and remove
    for (String project : BLACKLIST_PROJECTS)
        if (projects.contains(project)) {
            logger.debug("Black-listed project '{}' found.  Not adding as an index pattern", project);
            projects.remove(project);
        }

    Set<String> indexPatterns = getIndexPatterns(user, esClient, kibanaIndex);
    logger.debug("Found '{}' Index patterns for user", indexPatterns.size());

    // Check roles here, if user is a cluster-admin we should add .operations to their project? -- correct way to do this?
    logger.debug("Checking for '{}' in users roles '{}'", OPERATIONS_ROLES, roles);
    /*for ( String role : OPERATIONS_ROLES )
       if ( roles.contains(role) ) {
    logger.debug("{} is an admin user", user);
    projects.add(OPERATIONS_PROJECT);
    break;
       }*/

    List<String> sortedProjects = new ArrayList<String>(projects);
    Collections.sort(sortedProjects);

    if (sortedProjects.isEmpty())
        sortedProjects.add(BLANK_PROJECT);

    logger.debug("Setting dashboards given user '{}' and projects '{}'", user, projects);

    // If none have been set yet
    if (indexPatterns.isEmpty()) {
        create(user, sortedProjects, true, esClient, kibanaIndex, kibanaVersion);
        //TODO : Currently it is generating wrong search properties when integrated with ES 2.1
        //createSearchProperties(user, esClient, kibanaIndex);
    } else {
        List<String> common = new ArrayList<String>(indexPatterns);

        // Get a list of all projects that are common
        common.retainAll(sortedProjects);

        sortedProjects.removeAll(common);
        indexPatterns.removeAll(common);

        // for any to create (remaining in projects) call createIndices, createSearchmapping?, create dashboard
        create(user, sortedProjects, false, esClient, kibanaIndex, kibanaVersion);

        // cull any that are in ES but not in OS (remaining in indexPatterns)
        remove(user, indexPatterns, esClient, kibanaIndex);

        common.addAll(sortedProjects);
        Collections.sort(common);
        // Set default index to first index in common if we removed the default
        String defaultIndex = getDefaultIndex(user, esClient, kibanaIndex, kibanaVersion);

        logger.debug("Checking if '{}' contains '{}'", indexPatterns, defaultIndex);

        if (indexPatterns.contains(defaultIndex) || StringUtils.isEmpty(defaultIndex)) {
            logger.debug("'{}' does contain '{}' and common size is {}", indexPatterns, defaultIndex,
                    common.size());
            if (common.size() > 0)
                setDefaultIndex(user, common.get(0), esClient, kibanaIndex, kibanaVersion);
        }

    }
}
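
The retainAll/removeAll sequence in the else branch is the standard idiom for partitioning two collections into three groups: entries common to both, entries only in the desired state (to create), and entries only in the stored state (to remove). A condensed sketch with illustrative names:

List<String> desired = new ArrayList<>(Arrays.asList("a", "b", "c"));
List<String> stored = new ArrayList<>(Arrays.asList("b", "c", "d"));

List<String> common = new ArrayList<>(stored);
common.retainAll(desired);   // [b, c] - already in sync
desired.removeAll(common);   // [a]    - needs creating
stored.removeAll(common);    // [d]    - needs removing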

From source file:com.linkedin.pinot.controller.helix.core.rebalance.ReplicaGroupRebalanceSegmentStrategy.java

/**
 * Modifies the in-memory ideal state to rebalance segments for a table with replica-group based segment assignment
 * @param idealState old ideal state
 * @param tableConfig a table config
 * @param replicaGroupPartitionAssignment a replica group partition assignment
 * @return a rebalanced idealstate
 */
private IdealState rebalanceSegments(IdealState idealState, TableConfig tableConfig,
        ReplicaGroupPartitionAssignment replicaGroupPartitionAssignment) {

    Map<String, Map<String, String>> segmentToServerMapping = idealState.getRecord().getMapFields();
    Map<String, LinkedList<String>> serverToSegments = buildServerToSegmentMapping(segmentToServerMapping);

    List<String> oldServerInstances = new ArrayList<>(serverToSegments.keySet());
    List<String> serverInstances = replicaGroupPartitionAssignment.getAllInstances();

    // Compute added and removed servers
    List<String> addedServers = new ArrayList<>(serverInstances);
    addedServers.removeAll(oldServerInstances);
    List<String> removedServers = new ArrayList<>(oldServerInstances);
    removedServers.removeAll(serverInstances);

    // Add servers to the mapping
    for (String server : addedServers) {
        serverToSegments.put(server, new LinkedList<String>());
    }

    // Remove servers from the mapping
    for (String server : removedServers) {
        serverToSegments.remove(server);
    }

    // Check if the rebalance can cause data inconsistency
    Set<String> segmentsToCover = segmentToServerMapping.keySet();
    Set<String> coveredSegments = new HashSet<>();
    for (Map.Entry<String, LinkedList<String>> entry : serverToSegments.entrySet()) {
        coveredSegments.addAll(entry.getValue());
    }

    coveredSegments.removeAll(segmentsToCover);
    if (!coveredSegments.isEmpty()) {
        LOGGER.warn("Some segments may temporarily be unavailable during the rebalance. "
                + "This may cause incorrect answers for queries.");
    }

    // Fetch replica group configs
    ReplicaGroupStrategyConfig replicaGroupConfig = tableConfig.getValidationConfig()
            .getReplicaGroupStrategyConfig();
    boolean mirrorAssignment = replicaGroupConfig.getMirrorAssignmentAcrossReplicaGroups();
    int numPartitions = replicaGroupPartitionAssignment.getNumPartitions();
    int numReplicaGroups = replicaGroupPartitionAssignment.getNumReplicaGroups();

    // For now, we don't support rebalancing partition-level replica groups, so "numPartitions" will be 1.
    for (int partitionId = 0; partitionId < numPartitions; partitionId++) {
        List<String> referenceReplicaGroup = new ArrayList<>();
        for (int replicaId = 0; replicaId < numReplicaGroups; replicaId++) {
            List<String> serversInReplicaGroup = replicaGroupPartitionAssignment
                    .getInstancesfromReplicaGroup(partitionId, replicaId);
            if (replicaId == 0) {
                // We need to keep the first replica group in case of mirroring.
                referenceReplicaGroup.addAll(serversInReplicaGroup);
            } else if (mirrorAssignment) {
                // Copy the segment assignment from the reference replica group
                for (int i = 0; i < serversInReplicaGroup.size(); i++) {
                    serverToSegments.put(serversInReplicaGroup.get(i),
                            serverToSegments.get(referenceReplicaGroup.get(i)));
                }
                continue;
            }

            // Uniformly distribute the segments among servers in a replica group
            rebalanceReplicaGroup(serversInReplicaGroup, serverToSegments, segmentsToCover);
        }
    }

    // Update Idealstate with rebalanced segment assignment
    Map<String, Map<String, String>> serverToSegmentsMapping = buildSegmentToServerMapping(serverToSegments);
    for (Map.Entry<String, Map<String, String>> entry : serverToSegmentsMapping.entrySet()) {
        idealState.setInstanceStateMap(entry.getKey(), entry.getValue());
    }
    idealState.setReplicas(Integer.toString(numReplicaGroups));

    return idealState;
}

From source file:com.concursive.connect.web.modules.lists.portlets.AddProjectToListPortlet.java

/**
 * Deletes any existing task records that are not specified in the listIds. This
 * method allows a user to specify on the request which lists they want to bookmark
 * to; any existing links that were not specified will be deleted.
 *
 * @param db            - connection to database
 * @param existingTasks - list of tasks already persisted
 * @param listIds       - list ids that were specified by the user
 * @throws SQLException - generated trying to delete records
 */
private void deleteFromLists(Connection db, TaskList existingTasks, Collection<Integer> listIds)
        throws SQLException {
    Set<Integer> deleteTaskIds = new HashSet<Integer>(existingTasks.size());
    for (Task task : existingTasks) {
        deleteTaskIds.add(task.getCategoryId());
    }
    // find all the task ids that were not requested (these will be deleted)
    deleteTaskIds.removeAll(listIds);
    if (deleteTaskIds.size() > 0) {
        for (Task task : existingTasks) {
            if (deleteTaskIds.contains(task.getCategoryId())) {
                task.delete(db);
            }
        }
    }
}

From source file:com.comcast.video.dawg.controller.house.HouseRestController.java

/**
 * Updates tags on a set of devices by DAWG deviceId.
 * The server will attempt to retrieve existing devices by deviceId and apply the
 * requested tag change to those devices. If no device exists for a given id,
 * nothing will happen.
 *
 * @param payload A Map holding the deviceIds to update ("id") and the tags to apply ("tag")
 * @param op Either "add" or "remove", selecting how the tags are applied
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
@RequestMapping(value = "update/tags/{op}", method = { RequestMethod.POST })
@ResponseBody
public void updateTags(@RequestBody Map<String, Object> payload, @PathVariable("op") String op) {

    List<String> id = (List<String>) payload.get("id");
    List<String> tag = (List<String>) payload.get("tag");
    boolean add = op.equals("add");

    if (null == id || id.size() < 1) {
        throw new DawgIllegalArgumentException("Could not update tags.  No ids were provided.");
    }

    if (null == tag || tag.size() < 1) {
        throw new DawgIllegalArgumentException("Could not update tags.  No tags were provided.");
    }

    tag = validateTags(tag);

    List<String> validIds = new ArrayList<String>();

    // Cleanup the ids
    for (String i : id) {
        try {
            validIds.add(clean(i));
        } catch (Exception e) {
            logger.warn("Updating tags, id unparsable. Id=" + i);
        }
    }

    if (validIds.size() < 1) {
        throw new DawgIllegalArgumentException("Could not update tags.  None of the provided ids were valid.");
    }

    // find all the data matching the device ids
    Map<String, Object>[] matches = service.getStbsById(validIds.toArray(new String[validIds.size()]));

    if (null == matches || matches.length < 1) {
        throw new DawgIllegalArgumentException("Could not update tags.  None of the provided existed.");
    }

    // update the tags
    List<PersistableDevice> devicesToUpdate = new ArrayList<PersistableDevice>();
    for (Map<String, Object> match : matches) {
        if (null != match) {
            try {
                Set<String> updated = new HashSet<String>();
                if (match.containsKey("tags")) {
                    updated.addAll((Collection) match.get("tags"));
                }
                if (add) {
                    updated.addAll(tag);
                } else {
                    updated.removeAll(tag);
                }
                match.put("tags", updated);
                devicesToUpdate.add(new PersistableDevice(match));

            } catch (Exception e) {
                logger.warn("Failed to update device.  deviceId=" + match.get("id") + ", data=" + match, e);
            }
        }
    }
    service.upsertStb(devicesToUpdate.toArray(new PersistableDevice[devicesToUpdate.size()]));
}
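
Since both addAll and removeAll report whether the receiver changed, the loop above could in principle skip devices whose tag set is unaffected. A hypothetical variant (the short-circuit is an assumption, not behavior of the original source):

boolean changed = add ? updated.addAll(tag) : updated.removeAll(tag);
if (changed) { // only queue the device for upsert when its tags actually changed
    match.put("tags", updated);
    devicesToUpdate.add(new PersistableDevice(match));
}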