Example usage for java.util Set removeAll

List of usage examples for java.util Set removeAll

Introduction

On this page you can find example usage for java.util Set removeAll.

Prototype

boolean removeAll(Collection<?> c);

Document

Removes from this set all of its elements that are contained in the specified collection (optional operation).
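
A minimal usage sketch (not taken from the examples below): removeAll mutates the set in place and returns true if the set changed as a result of the call. Argument elements that are not present in the set are simply ignored, and since the operation is optional, an unmodifiable set throws UnsupportedOperationException instead.

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class RemoveAllDemo {
    public static void main(String[] args) {
        Set<String> colors = new HashSet<>(Arrays.asList("red", "green", "blue"));
        List<String> toRemove = Arrays.asList("green", "blue", "yellow");

        // Returns true because the set changed; "yellow" is not in the set
        // and is silently skipped.
        boolean changed = colors.removeAll(toRemove);

        System.out.println(changed); // true
        System.out.println(colors);  // [red]
    }
}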

Usage

From source file:com.gcrm.action.crm.ListAccountAction.java

/**
 * Unselects the entities.
 * 
 * @return the SUCCESS result
 */
public String unselect() throws ServiceException {
    TargetList targetList = null;
    Document document = null;
    Set<Account> accounts = null;

    if ("TargetList".equals(this.getRelationKey())) {
        targetList = targetListService.getEntityById(TargetList.class,
                Integer.valueOf(this.getRelationValue()));
        accounts = targetList.getAccounts();
    } else if ("Document".equals(this.getRelationKey())) {
        document = documentService.getEntityById(Document.class, Integer.valueOf(this.getRelationValue()));
        accounts = document.getAccounts();
    }

    if (this.getSeleteIDs() != null) {
        String[] ids = seleteIDs.split(",");
        Collection<Account> selectedAccounts = new ArrayList<Account>();
        for (String id : ids) {
            Integer selectId = Integer.valueOf(id);
            for (Account account : accounts) {
                if (account.getId().intValue() == selectId.intValue()) {
                    selectedAccounts.add(account);
                    break;
                }
            }
        }
        accounts.removeAll(selectedAccounts);
    }

    if ("TargetList".equals(this.getRelationKey())) {
        targetListService.makePersistent(targetList);
    } else if ("Document".equals(this.getRelationKey())) {
        documentService.makePersistent(document);
    }
    return SUCCESS;
}
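
Note the pattern above: matching accounts are first collected into selectedAccounts and only then removed with a single removeAll call, which avoids a ConcurrentModificationException from removing elements of accounts while iterating over it. On Java 8 and later the same filtering can be written with Collection.removeIf; a hypothetical sketch (selectedIds is an invented local, not part of the original source):

Set<Integer> selectedIds = new HashSet<>();
for (String id : seleteIDs.split(",")) {
    selectedIds.add(Integer.valueOf(id));
}
// Removes every account whose id is among the selected ids, in one pass.
accounts.removeIf(account -> selectedIds.contains(account.getId()));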

From source file:com.ikanow.aleph2.analytics.hadoop.assets.UpdatedCombineFileInputFormat.java

@VisibleForTesting
void createSplits(Map<String, Set<OneBlockInfo>> nodeToBlocks, Map<OneBlockInfo, String[]> blockToNodes,
        Map<String, List<OneBlockInfo>> rackToBlocks, long totLength, long maxSize, long minSizeNode,
        long minSizeRack, List<InputSplit> splits) {
    ArrayList<OneBlockInfo> validBlocks = new ArrayList<OneBlockInfo>();
    long curSplitSize = 0;

    int totalNodes = nodeToBlocks.size();
    long totalLength = totLength;

    Multiset<String> splitsPerNode = HashMultiset.create();
    Set<String> completedNodes = new HashSet<String>();

    while (true) {
        // maxSize is allowed to be 0; in that case load smoothing is disabled.

        // process all nodes and create splits that are local to a node. Generate
        // one split per node iteration, and walk over nodes multiple times to
        // distribute the splits across nodes. 
        for (Iterator<Map.Entry<String, Set<OneBlockInfo>>> iter = nodeToBlocks.entrySet().iterator(); iter
                .hasNext();) {
            Map.Entry<String, Set<OneBlockInfo>> one = iter.next();

            String node = one.getKey();

            // Skip the node if it has previously been marked as completed.
            if (completedNodes.contains(node)) {
                continue;
            }

            Set<OneBlockInfo> blocksInCurrentNode = one.getValue();

            // for each block, copy it into validBlocks. Delete it from
            // blockToNodes so that the same block does not appear in
            // two different splits.
            Iterator<OneBlockInfo> oneBlockIter = blocksInCurrentNode.iterator();
            while (oneBlockIter.hasNext()) {
                OneBlockInfo oneblock = oneBlockIter.next();

                // Remove all blocks which may already have been assigned to other
                // splits.
                if (!blockToNodes.containsKey(oneblock)) {
                    oneBlockIter.remove();
                    continue;
                }

                validBlocks.add(oneblock);
                blockToNodes.remove(oneblock);
                curSplitSize += oneblock.length;

                // if the accumulated split size exceeds the maximum, then
                // create this split.
                if (maxSize != 0 && curSplitSize >= maxSize) {
                    // create an input split and add it to the splits array
                    addCreatedSplit(splits, Collections.singleton(node), validBlocks);
                    totalLength -= curSplitSize;
                    curSplitSize = 0;

                    splitsPerNode.add(node);

                    // Remove entries from blocksInCurrentNode so that we don't walk
                    // these again.
                    blocksInCurrentNode.removeAll(validBlocks);
                    validBlocks.clear();

                    // Done creating a single split for this node. Move on to the next
                    // node so that splits are distributed across nodes.
                    break;
                }

            }
            if (validBlocks.size() != 0) {
                // This implies that the last few blocks (or all in case maxSize=0)
                // were not part of a split. The node is complete.

                // if there were any blocks left over and their combined size is
                // larger than minSplitNode, then combine them into one split.
                // Otherwise add them back to the unprocessed pool. It is likely
                // that they will be combined with other blocks from the
                // same rack later on.
                // This condition also kicks in when max split size is not set. All
                // blocks on a node will be grouped together into a single split.
                if (minSizeNode != 0 && curSplitSize >= minSizeNode && splitsPerNode.count(node) == 0) {
                    // No split has been created on this machine yet, so it is OK to add
                    // a smaller one for parallelism. Otherwise, group the blocks in the
                    // rack for balanced split sizes: create an input split and add it
                    // to the splits array.
                    addCreatedSplit(splits, Collections.singleton(node), validBlocks);
                    totalLength -= curSplitSize;
                    splitsPerNode.add(node);
                    // Remove entries from blocksInCurrentNode so that we don't walk this again.
                    blocksInCurrentNode.removeAll(validBlocks);
                    // The node is done. This was the last set of blocks for this node.
                } else {
                    // Put the unplaced blocks back into the pool for later rack-allocation.
                    for (OneBlockInfo oneblock : validBlocks) {
                        blockToNodes.put(oneblock, oneblock.hosts);
                    }
                }
                validBlocks.clear();
                curSplitSize = 0;
                completedNodes.add(node);
            } else { // No in-flight blocks.
                if (blocksInCurrentNode.size() == 0) {
                    // Node is done. All blocks were fit into node-local splits.
                    completedNodes.add(node);
                } // else Run through the node again.
            }
        }

        // Check if node-local assignments are complete.
        if (completedNodes.size() == totalNodes || totalLength == 0) {
            // All nodes have been walked over and marked as completed or all blocks
            // have been assigned. The rest should be handled via rack-local assignment.
            LOG.info("DEBUG: Terminated node allocation with : CompletedNodes: " + completedNodes.size()
                    + ", size left: " + totalLength);
            break;
        }
    }

    // if blocks in a rack are below the specified minimum size, then keep them
    // in 'overflow'. After the processing of all racks is complete, these 
    // overflow blocks will be combined into splits.
    ArrayList<OneBlockInfo> overflowBlocks = new ArrayList<OneBlockInfo>();
    Set<String> racks = new HashSet<String>();

    // Process all racks over and over again until there is no more work to do.
    while (blockToNodes.size() > 0) {

        // Create one split for this rack before moving over to the next rack. 
        // Come back to this rack after creating a single split for each of the 
        // remaining racks.
        // Process one rack location at a time: combine all possible blocks that
        // reside on this rack into one split (constrained by the minimum and
        // maximum split size).

        // iterate over all racks 
        for (Iterator<Map.Entry<String, List<OneBlockInfo>>> iter = rackToBlocks.entrySet().iterator(); iter
                .hasNext();) {

            Map.Entry<String, List<OneBlockInfo>> one = iter.next();
            racks.add(one.getKey());
            List<OneBlockInfo> blocks = one.getValue();

            // for each block, copy it into validBlocks. Delete it from 
            // blockToNodes so that the same block does not appear in 
            // two different splits.
            boolean createdSplit = false;
            for (OneBlockInfo oneblock : blocks) {
                if (blockToNodes.containsKey(oneblock)) {
                    validBlocks.add(oneblock);
                    blockToNodes.remove(oneblock);
                    curSplitSize += oneblock.length;

                    // if the accumulated split size exceeds the maximum, then 
                    // create this split.
                    if (maxSize != 0 && curSplitSize >= maxSize) {
                        // create an input split and add it to the splits array
                        addCreatedSplit(splits, getHosts(racks), validBlocks);
                        createdSplit = true;
                        break;
                    }
                }
            }

            // if we created a split, then just go to the next rack
            if (createdSplit) {
                curSplitSize = 0;
                validBlocks.clear();
                racks.clear();
                continue;
            }

            if (!validBlocks.isEmpty()) {
                if (minSizeRack != 0 && curSplitSize >= minSizeRack) {
                    // if there is a minimum size specified, then create a single split
                    // otherwise, store these blocks into overflow data structure
                    addCreatedSplit(splits, getHosts(racks), validBlocks);
                } else {
                    // There were a few blocks in this rack that 
                    // remained to be processed. Keep them in 'overflow' block list. 
                    // These will be combined later.
                    overflowBlocks.addAll(validBlocks);
                }
            }
            curSplitSize = 0;
            validBlocks.clear();
            racks.clear();
        }
    }

    assert blockToNodes.isEmpty();
    assert curSplitSize == 0;
    assert validBlocks.isEmpty();
    assert racks.isEmpty();

    // Process all overflow blocks
    for (OneBlockInfo oneblock : overflowBlocks) {
        validBlocks.add(oneblock);
        curSplitSize += oneblock.length;

        // This might cause an existing rack location to be re-added,
        // but it should be ok.
        Collections.addAll(racks, oneblock.racks);

        // if the accumulated split size exceeds the maximum, then 
        // create this split.
        if (maxSize != 0 && curSplitSize >= maxSize) {
            // create an input split and add it to the splits array
            addCreatedSplit(splits, getHosts(racks), validBlocks);
            curSplitSize = 0;
            validBlocks.clear();
            racks.clear();
        }
    }

    // Process any remaining blocks.
    if (!validBlocks.isEmpty()) {
        addCreatedSplit(splits, getHosts(racks), validBlocks);
    }
}
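
A performance note on the removeAll calls above: the default AbstractSet.removeAll iterates over whichever side is smaller, and when the set is not larger than the argument it iterates the set and calls contains on the argument - an O(n) lookup when the argument is an ArrayList such as validBlocks. If this ever mattered at scale, a common mitigation (a sketch, not part of the original code) is to pass a HashSet copy so lookups stay constant-time whichever strategy removeAll picks:

// Equivalent to blocksInCurrentNode.removeAll(validBlocks), but with
// O(1) contains() on the argument.
blocksInCurrentNode.removeAll(new HashSet<OneBlockInfo>(validBlocks));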

From source file:de.iteratec.iteraplan.elasticeam.emfimpl.EMFMetamodel.java

@SuppressWarnings("unchecked")
private <T extends TypeExpression> List<T> filterTypes(List<T> candidateTypes) {
    if (disableAccessControl) {
        return candidateTypes;
    }
    if (ElasticeamContextUtil.getCurrentContext().isSupervisor()) {
        return candidateTypes;
    }
    Set<T> result = Sets.newLinkedHashSet();
    for (Role role : ElasticeamContextUtil.getCurrentContext().getRoles()) {
        if (this.readableTypes.containsKey(role)) {
            result.addAll(ListUtils.intersection(this.readableTypes.get(role), candidateTypes));
        }
    }
    for (T type : candidateTypes) {
        if (type instanceof DataTypeExpression) {
            result.add(type);
        }
    }
    Set<RelationshipTypeExpression> invisibleRTEs = Sets.newHashSet();
    for (T type : result) {
        if (type instanceof EMFRelationshipType) {
            EMFRelationshipType rte = (EMFRelationshipType) type;
            for (EReference eRef : rte.getWrapped().getEAllReferences()) {
                if (eRef.getLowerBound() > 0
                        && rte.findRelationshipEndByPersistentName(eRef.getName()) == null) {
                    // required relEnd is invisible for current user => rte is invisible, too
                    invisibleRTEs.add(rte);
                }
            }
        }
    }
    result.removeAll(invisibleRTEs);

    return Lists.newArrayList(result);
}
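
The invisibleRTEs set plays the same role as selectedAccounts in the first example: candidates are collected while iterating result and removed afterwards with removeAll, because removing from result inside the for-each loop would throw a ConcurrentModificationException.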

From source file:dk.statsbiblioteket.doms.domsutil.surveyable.SurveyableCombiner.java

/**
 * Read configuration for classes to survey, and initialise the classes.
 * This will reread the configuration on each call, and initialise any new
 * classes, or any classes that failed to initialise during the last call.
 * Classes that are no longer configured will be removed from the list of
 * surveyed classes. Errors in initialisation will be logged as errors only
 * the first time the initialisation fails.
 */
private void initializeSurveyables() {
    log.trace("Enter initializeSurveyables()");

    synchronized (surveyables) {
        Set<String> configuredClasses = new HashSet<String>();
        Set<String> surveyedClasses = new HashSet<String>();
        Set<String> dummies = new HashSet<String>();
        Set<String> newClassesToSurvey;
        Set<String> noLongerSurveyed;
        Iterator<Surveyable> i;

        Properties config = ConfigCollection.getProperties();
        String classes = config.getProperty(CONFIGURATION_SURVEYABLES_PARAMETER);
        List<String> configuredClassesParameter;
        log.trace("Read configuration: '" + classes + ".");

        // Get set of classes from configuration
        if (classes == null) {
            classes = "";
        }
        configuredClassesParameter = Arrays.asList(classes.split(";"));
        for (String configuredClass : configuredClassesParameter) {
            configuredClasses.add(configuredClass.trim());
        }
        configuredClasses.remove("");
        configuredClasses.remove(NoSurveyable.class.getName());

        // Get set of classes initialised.
        for (Surveyable s : surveyables) {
            surveyedClasses.add(s.getClass().getName());
        }

        // If configuration is empty, warn the first time and insert dummy
        if (configuredClasses.size() == 0) {
            if (!surveyedClasses.contains(NoSurveyable.class.getName()) || (surveyedClasses.size() != 1)) {
                log.warn("No classes specified for surveillance.");
                surveyables.clear();
                surveyables.add(new NoSurveyable());
            }
            return;
        }

        // Remove and remember dummies. Dummies are remembered in order to
        // log errors only the first time initialisations failed.
        i = surveyables.iterator();
        while (i.hasNext()) {
            Surveyable s = i.next();
            if (s.getClass().getName().equals(NoSurveyable.class.getName())) {
                i.remove();
                dummies.add(s.getStatus().getName());
            }
        }
        surveyedClasses.remove(NoSurveyable.class.getName());

        // Initialise newly configured classes, or previously failed. Insert
        // dummy on failure.
        newClassesToSurvey = new HashSet<String>(configuredClasses);
        newClassesToSurvey.removeAll(surveyedClasses);
        for (String classname : newClassesToSurvey) {
            log.info("Initializing class '" + classname + "' for surveillance");
            Surveyable surveyable;
            try {
                surveyable = SurveyableFactory.createSurveyable(classname);
            } catch (Exception e) {
                if (dummies.contains(classname)) {
                    log.debug("Still unable to initialise class for" + " surveillance: '" + classname + "'", e);
                } else {
                    log.error("Unable to initialise class for surveillance:" + " '" + classname + "'", e);
                }
                surveyable = new NoSurveyable(classname);
            }
            surveyables.add(surveyable);
        }

        // Remove classes to no longer survey
        noLongerSurveyed = new HashSet<String>(surveyedClasses);
        noLongerSurveyed.removeAll(configuredClasses);
        for (String classname : noLongerSurveyed) {
            log.debug("Removing class '" + classname + "' from surveillance");
            i = surveyables.iterator();
            while (i.hasNext()) {
                Surveyable s = i.next();
                if (s.getClass().getName().equals(classname)) {
                    i.remove();
                    log.info("Removed class '" + classname + "' from surveillance");
                }
            }
        }
    }
}
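
The two removeAll calls above compute set differences: configuredClasses minus surveyedClasses yields the classes to start surveying, and surveyedClasses minus configuredClasses yields the classes to stop surveying. Because removeAll mutates its receiver, each difference is taken on a defensive HashSet copy. In isolation the idiom looks like this (a generic sketch with made-up data):

Set<String> configured = new HashSet<>(Arrays.asList("A", "B", "C"));
Set<String> surveyed = new HashSet<>(Arrays.asList("B", "C", "D"));

Set<String> toAdd = new HashSet<>(configured);
toAdd.removeAll(surveyed);      // [A] - configured but not yet surveyed

Set<String> toRemove = new HashSet<>(surveyed);
toRemove.removeAll(configured); // [D] - surveyed but no longer configured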

From source file:com.cloudera.director.azure.compute.provider.AzureComputeProviderHelper.java

/**
 * Polls pending tasks until all tasks are complete or the timeout expires.
 * Azure platform operations can range from minutes to one hour.
 *
 * @param tasks            set of submitted tasks
 * @param durationInSecond overall timeout period
 * @param intervalInSecond poll interval
 * @param failedContexts   set of failed task contexts. This set initially contains the
 *                         contexts of all submitted tasks. The context of each successful
 *                         task is removed from this set, so when this call returns, its
 *                         elements are the contexts of the failed tasks.
 * @return number of successful tasks
 */
@SuppressWarnings("PMD.CollapsibleIfStatements")
public int pollPendingTasks(Set<Future<TaskResult>> tasks, int durationInSecond, int intervalInSecond,
        Set<ResourceContext> failedContexts) {
    Set<Future<TaskResult>> responses = new HashSet<>(tasks);
    int succeededCount = 0;
    int timerInMilliSec = durationInSecond * 1000;
    int intervalInMilliSec = intervalInSecond * 1000;

    try {
        while (timerInMilliSec > 0 && responses.size() > 0) {
            Set<Future<TaskResult>> dones = new HashSet<>();
            for (Future<TaskResult> task : responses) {
                try {
                    if (task.isDone()) {
                        dones.add(task);
                        TaskResult tr = task.get();
                        if (tr.isSuccessful()) {
                            succeededCount++;
                            // Remove successful contexts so that what remains are the failed contexts
                            if (failedContexts != null) {
                                if (!failedContexts.remove(tr.getContex())) {
                                    LOG.error(
                                            "ResourceContext {} does not exist in the submitted context list.",
                                            tr.getContex());
                                }
                            }
                        }
                    }
                } catch (ExecutionException e) {
                    LOG.error("Polling of pending tasks encountered an error: ", e);
                }
            }
            responses.removeAll(dones);

            Thread.sleep(intervalInMilliSec);

            timerInMilliSec = timerInMilliSec - intervalInMilliSec;
            LOG.debug("Polling pending tasks: remaining time = " + timerInMilliSec / 1000 + " seconds.");
        }
    } catch (InterruptedException e) {
        LOG.error("Polling of pending tasks was interrupted.", e);
        shutdownTaskRunnerService();
    }

    // Terminate all tasks if we timed out.
    if (timerInMilliSec <= 0 && responses.size() > 0) {
        shutdownTaskRunnerService();
    }

    // Always return the succeeded task count and let the caller decide if any
    // resources need to be cleaned up.
    return succeededCount;
}
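
Collecting completed futures into dones and then calling responses.removeAll(dones) drains them without mutating responses while it is being iterated, so each polling round only inspects tasks that are still outstanding. Under the same requirements a java.util.concurrent.CompletionService could replace the manual poll loop; a hypothetical sketch, where executor stands in for the ExecutorService the tasks were submitted to:

CompletionService<TaskResult> completionService = new ExecutorCompletionService<>(executor);
// ... submit work via completionService.submit(callable) instead of executor.submit ...
Future<TaskResult> done = completionService.poll(intervalInSecond, TimeUnit.SECONDS); // null on timeout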

From source file:com.appdynamics.analytics.processor.event.ElasticSearchEventService.java

private void deleteMovedDocs(Client client, String sourceIndex, String sourceDocType, Set<String> movedDocIds,
        Set<String> unmovedDocIds) {
    if (movedDocIds.isEmpty()) {
        return;
    }
    unmovedDocIds.removeAll(movedDocIds);
    Set<String> movedButNotDeletedDocIds = new HashSet<>(movedDocIds);
    for (int i = 0; i < 5; i++) {
        try {
            BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();
            for (String id : movedDocIds) {
                DeleteRequest indexRequest = (DeleteRequest) client.prepareDelete(sourceIndex, sourceDocType, id)
                        .request();
                bulkRequestBuilder.add(indexRequest);
            }
            BulkResponse bulkResponse = (BulkResponse) bulkRequestBuilder.execute().actionGet();
            if (bulkResponse.hasFailures()) {
                extractFailedDocIds(bulkResponse, movedButNotDeletedDocIds);
                log.error("bulk deletion failures: " + bulkResponse.buildFailureMessage());
            } else {
                movedButNotDeletedDocIds.removeAll(movedDocIds);
                return;
            }
        } catch (Exception e) {
            log.error("Caught exception while trying to execute bulk delete: " + e.getMessage());
        }
    }

    String msg = "Failed to delete moved documents from source index [" + sourceIndex + "] and type ["
            + sourceDocType + "] - bulk response failures occurred in spite of retrying [" + 5
            + "] times. DocIds that were moved but not deleted: ["
            + Joiner.on(",").join(movedButNotDeletedDocIds) + "], docIds that were not moved: ["
            + Joiner.on(",").join(unmovedDocIds) + "]";

    log.error(msg);
    throw new MoveEventsException(msg, movedButNotDeletedDocIds, unmovedDocIds);
}

From source file:com.dell.asm.asmcore.asmmanager.util.ServiceTemplateUtil.java

public void deleteRemovedEncryptionIds(ServiceTemplate oldTemplate, ServiceTemplate newTemplate) {
    Set<String> encryptionIdsToDelete = getEncryptionIds(oldTemplate);
    encryptionIdsToDelete.removeAll(getEncryptionIds(newTemplate));
    for (String id : encryptionIdsToDelete) {
        IEncryptedString encryptedString = getEncryptionDAO().findEncryptedStringById(id);
        if (encryptedString != null)
            getEncryptionDAO().delete(encryptedString);
    }
}
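
This is removeAll as a pure set difference: the encryption IDs present in the old template but absent from the new one are exactly the encrypted strings that are safe to delete. Note that it depends on getEncryptionIds returning a fresh, mutable set each time, since removeAll mutates its receiver.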

From source file:ch.entwine.weblounge.contentrepository.impl.AbstractWritableContentRepository.java

/**
 * {@inheritDoc}
 * 
 * @see ch.entwine.weblounge.contentrepository.impl.AbstractContentRepository#getVersions(ch.entwine.weblounge.common.content.ResourceURI)
 */
@Override
public ResourceURI[] getVersions(ResourceURI uri) throws ContentRepositoryException {
    Set<ResourceURI> uris = new HashSet<ResourceURI>();
    uris.addAll(Arrays.asList(super.getVersions(uri)));

    // Iterate over the resources that are currently being processed
    synchronized (processor) {
        for (ContentRepositoryOperation<?> op : processor.getOperations()) {

            // Is this a resource operation?
            if (!(op instanceof ContentRepositoryResourceOperation<?>))
                continue;

            // Apply the changes to the original resource
            ContentRepositoryResourceOperation<?> resourceOp = (ContentRepositoryResourceOperation<?>) op;

            // Is the resource about to be deleted?
            ResourceURI opURI = resourceOp.getResourceURI();
            if (op instanceof DeleteOperation && equalsByIdOrPath(uri, opURI)) {
                DeleteOperation deleteOp = (DeleteOperation) op;
                List<ResourceURI> deleteCandidates = new ArrayList<ResourceURI>();
                for (ResourceURI u : uris) {
                    if (deleteOp.allVersions() || u.getVersion() == opURI.getVersion()) {
                        deleteCandidates.add(u);
                    }
                }
                uris.removeAll(deleteCandidates);
            }

            // Is the resource simply being updated?
            if (op instanceof PutOperation && equalsByIdOrPath(uri, opURI)) {
                uris.add(opURI);
            }

        }
    }

    return uris.toArray(new ResourceURI[uris.size()]);
}

From source file:edu.umass.cs.reconfiguration.Reconfigurator.java

/**
 * Handles a request to add or delete a reconfigurator from the set of all
 * reconfigurators in NodeConfig. The reconfiguration record corresponding
 * to NodeConfig is stored in the RC records table and the
 * "active replica state" or the NodeConfig info itself is stored in a
 * separate NodeConfig table in the DB.
 * 
 * @param changeRC
 * @param ptasks
 * @return Messaging task typically null. No protocol tasks spawned.
 */
public GenericMessagingTask<?, ?>[] handleReconfigureRCNodeConfig(ReconfigureRCNodeConfig<NodeIDType> changeRC,
        ProtocolTask<NodeIDType, ReconfigurationPacket.PacketType, String>[] ptasks) {
    assert (changeRC.getServiceName().equals(AbstractReconfiguratorDB.RecordNames.RC_NODES.toString()));
    log.log(Level.INFO, "\n\n{0}\n{1} received {2} request {3} from initiator {4}\n{5}", new Object[] {
            separator, this, changeRC.getType(), changeRC.getSummary(), changeRC.getIssuer(), separator });
    if (!this.isPermitted(changeRC)) {
        String errorMessage = " Impermissible node config change request";
        log.severe(this + errorMessage + ": " + changeRC);
        // this.sendRCReconfigurationErrorToInitiator(changeRC).setFailed().setResponseMessage(errorMessage);
        return (new GenericMessagingTask<InetSocketAddress, ServerReconfigurationPacket<NodeIDType>>(
                changeRC.getIssuer(), changeRC.setFailed().setResponseMessage(errorMessage))).toArray();
    }
    // check first if NC is ready for reconfiguration
    ReconfigurationRecord<NodeIDType> ncRecord = this.DB.getReconfigurationRecord(changeRC.getServiceName());
    if (ncRecord == null)
        return null; // possible if startCleanSlate

    if (!ncRecord.isReady()) {
        String errorMessage = " Trying to conduct concurrent node config changes";
        log.warning(this + errorMessage + ": " + changeRC);
        return (new GenericMessagingTask<InetSocketAddress, ServerReconfigurationPacket<NodeIDType>>(
                changeRC.getIssuer(), changeRC.setFailed().setResponseMessage(errorMessage))).toArray();
    }
    // else try to reconfigure even though it may still fail
    Set<NodeIDType> curRCs = ncRecord.getActiveReplicas();
    Set<NodeIDType> newRCs = new HashSet<NodeIDType>(curRCs);
    newRCs.addAll(changeRC.getAddedNodeIDs());
    newRCs.removeAll(changeRC.getDeletedNodeIDs());
    // will use the nodeConfig before the change below.
    if (changeRC.newlyAddedNodes != null || changeRC.deletedNodes != null)
        this.initiateReconfiguration(AbstractReconfiguratorDB.RecordNames.RC_NODES.toString(), ncRecord, newRCs, // this.consistentNodeConfig.getNodeSocketAddress
                (changeRC.getIssuer()), changeRC.getMyReceiver(), null, null, null, changeRC.newlyAddedNodes);
    return null;
}
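
Note that newRCs is built by calling addAll before removeAll: if a node ID appeared in both the added and the deleted sets, the deletion would win. Reversing the two calls would flip that tie-break, so the ordering encodes the tie-breaking policy.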

From source file:io.wcm.wcm.parsys.componentinfo.impl.AllowedComponentsProviderImpl.java

/**
 * Get allowed components for given resource path
 * @param resourcePath Resource path inside content page
 * @return Set of component paths (absolute resource types)
 */
@Override
public Set<String> getAllowedComponents(String resourcePath, ResourceResolver resolver) {
    Set<String> allowedComponents = new HashSet<>();
    Set<String> deniedComponents = new HashSet<>();

    PageManager pageManager = resolver.adaptTo(PageManager.class);
    Page page = pageManager.getContainingPage(resourcePath);
    if (page == null && StringUtils.contains(resourcePath, "/" + JcrConstants.JCR_CONTENT)) {
        // if resource does not exist (e.g. inherited parsys) get page from resource path manually
        page = pageManager.getPage(StringUtils.substringBefore(resourcePath, "/" + JcrConstants.JCR_CONTENT));
    }
    if (page != null) {
        String pageComponentPath = page.getContentResource().getResourceType();
        String relativePath = resourcePath.substring(page.getPath().length() + 1);

        Iterable<ParsysConfig> parSysConfigs = parsysConfigManager.getParsysConfigs(pageComponentPath,
                relativePath, resolver);

        Resource parentResource = null;
        Resource grandParentResource = null;

        for (ParsysConfig pathDef : parSysConfigs) {

            boolean includePathDef = false;
            if (pathDef.getAllowedParents().size() == 0) {
                includePathDef = true;
            } else {
                Resource checkResource = null;
                if (pathDef.getParentAncestorLevel() == 1) {
                    if (parentResource == null) {
                        parentResource = resolver.getResource(resourcePath);
                    }
                    checkResource = parentResource;
                }
                if (pathDef.getParentAncestorLevel() == 2) {
                    if (grandParentResource == null) {
                        grandParentResource = resolver.getResource(resourcePath + "/..");
                    }
                    checkResource = grandParentResource;
                }
                if (checkResource != null) {
                    String resourceType = ResourceType.makeAbsolute(checkResource.getResourceType(), resolver);
                    includePathDef = pathDef.getAllowedParents().contains(resourceType);
                }
            }

            if (includePathDef) {
                allowedComponents.addAll(makeAbsolute(pathDef.getAllowedChildren(), resolver));
                deniedComponents.addAll(makeAbsolute(pathDef.getDeniedChildren(), resolver));
            }

        }

    }

    // filter out denied components
    allowedComponents.removeAll(deniedComponents);

    return allowedComponents;
}
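
The final removeAll implements a deny-overrides policy: a component allowed by one parsys configuration but denied by another ends up excluded, because the denied set is subtracted only after all matching configurations have been merged.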