Example usage for java.util Collection remove

List of usage examples for java.util Collection remove

Introduction

On this page you can find example usages of java.util.Collection.remove.

Prototype

boolean remove(Object o);

Source Link

Document

Removes a single instance of the specified element from this collection, if it is present (optional operation).

Usage

From source file:ubic.gemma.persistence.service.expression.experiment.ExpressionExperimentServiceImpl.java

/**
 * Replaces all raw (and processed) expression data vectors of the experiment with the supplied
 * vectors, deleting any quantitation types that are no longer referenced by the replacements.
 *
 * @param ee         the experiment whose vectors are replaced (reloaded to attach to the session)
 * @param newVectors the replacement vectors; must be non-null and non-empty
 * @return the updated experiment
 * @throws UnsupportedOperationException if {@code newVectors} is null or empty — this method
 *         replaces vectors, it must not be used to erase them
 */
@Override
@Transactional
public ExpressionExperiment replaceRawVectors(ExpressionExperiment ee,
        Collection<RawExpressionDataVector> newVectors) {

    if (newVectors == null || newVectors.isEmpty()) {
        throw new UnsupportedOperationException("Only use this method for replacing vectors, not erasing them");
    }

    // Reload so the entity is attached to the current session.
    ExpressionExperiment eeToUpdate = this.load(ee.getId());

    // Collect the quantitation types of the old vectors; any not reused below gets deleted.
    Collection<QuantitationType> qtsToRemove = new HashSet<>();
    for (RawExpressionDataVector oldV : eeToUpdate.getRawExpressionDataVectors()) {
        qtsToRemove.add(oldV.getQuantitationType());
    }
    rawExpressionDataVectorDao.remove(eeToUpdate.getRawExpressionDataVectors()); // should not be necessary
    processedVectorService.remove(eeToUpdate.getProcessedExpressionDataVectors()); // should not be necessary
    eeToUpdate.getProcessedExpressionDataVectors().clear(); // this should be sufficient
    eeToUpdate.getRawExpressionDataVectors().clear(); // should be sufficient

    // These QTs might still be getting used by the replacement vectors; keep those.
    for (RawExpressionDataVector newVec : newVectors) {
        qtsToRemove.remove(newVec.getQuantitationType());
    }

    for (QuantitationType oldQt : qtsToRemove) {
        quantitationTypeDao.remove(oldQt);
    }

    // Split the vectors up by bioassay dimension, if need be. This could be modified to handle
    // multiple quantitation types if need be.
    Map<BioAssayDimension, Collection<RawExpressionDataVector>> vectorsByDimension = new HashMap<>();
    for (RawExpressionDataVector vec : newVectors) {
        BioAssayDimension b = vec.getBioAssayDimension();
        if (!vectorsByDimension.containsKey(b)) {
            vectorsByDimension.put(b, new HashSet<RawExpressionDataVector>());
        }
        vectorsByDimension.get(b).add(vec);
    }

    // Each dimension's vectors are added as a separate batch.
    for (Collection<RawExpressionDataVector> vectors : vectorsByDimension.values()) {
        ee = this.addRawVectors(eeToUpdate, vectors);
    }
    return ee;
}

From source file:com.adito.activedirectory.ActiveDirectoryUserDatabase.java

/**
 * Loads Active Directory groups matching the given filter into the group container.
 * <p>
 * Every group found by the paged search is stored (or refreshed) in {@code groupContainer},
 * and its key is removed from the snapshot of previously known principal names. When
 * {@code removeMissingEntries} is {@code true}, the names left over afterwards are treated
 * as no longer present in the directory and passed to
 * {@code groupContainer.updateRemovedGroups}.
 *
 * @param filter               LDAP filter fragment used to build the group search filter
 * @param context              LDAP context to run the paged search against
 * @param removeMissingEntries whether groups absent from the search result are purged
 * @throws Exception if the search or the container update fails
 */
private void loadRoles(String filter, InitialLdapContext context, boolean removeMissingEntries)
        throws Exception {
    final Collection<String> groupNames = groupContainer.retrievePrincipalNames();
    PagedResultMapper mapper = new AbstractPagedResultMapper() {
        public void mapSearchResult(SearchResult searchResult) throws NamingException {
            String dn = searchResult.getNameInNamespace();
            Attributes attributes = searchResult.getAttributes();
            // NOTE(review): assumes getAttributeValue never returns null — confirm, else NPE below.
            String commonName = getAttributeValue(attributes, COMMON_NAME_ATTRIBUTE);
            if (commonName.length() != 0) {
                // The RID is derived from the group's SID attribute.
                Long rid = ActiveDirectoryGroup
                        .getRIDFromSID((byte[]) attributes.get(OBJECT_SID_ATTRIBUTE).get());
                ActiveDirectoryGroup group = new ActiveDirectoryGroup(commonName, dn, getEscapedDn(dn), rid,
                        getRealm());
                String[] parents = getParents(attributes);
                String key = groupContainer.storeGroup(group, parents);
                // Still present in the directory, so do not treat it as removed later.
                groupNames.remove(key);
            }
        }
    };

    try {
        String replacedFilter = buildGroupFilter(filter);
        PagedResultTemplate pagedResultTemplate = configuration.getPagedResultTemplate();
        pagedResultTemplate.search(context, replacedFilter, GROUP_ATTRS, mapper);
    } finally {
        // Runs even if the search throws: purge whatever names were not re-discovered.
        if (removeMissingEntries) {
            groupContainer.updateRemovedGroups(groupNames);
        }
    }
}

From source file:com.haulmont.cuba.gui.data.impl.CollectionPropertyDatasourceImpl.java

/**
 * Replaces the element of the underlying collection that is {@code equals} to the given item
 * with the item itself, preserving the element's position where the collection type allows it
 * (lists and {@link LinkedHashSet}s keep their order). Listeners are moved from the old element
 * to the new one, the tracked current item is updated if it was the one replaced, the
 * collection is re-sorted if sorting is configured, and an UPDATE event is fired.
 *
 * @param item the replacement element; must not be null
 */
@SuppressWarnings("unchecked")
public void replaceItem(T item) {
    checkNotNullArgument(item, "item is null");
    Collection<T> collection = getCollection();
    if (collection != null) {
        for (T t : collection) {
            if (t.equals(item)) {
                detachListener(t);
                if (collection instanceof List) {
                    // Replace in place so the element keeps its index.
                    List<T> list = (List<T>) collection;
                    int itemIdx = list.indexOf(t);
                    list.set(itemIdx, item);
                } else if (collection instanceof LinkedHashSet) {
                    // Rebuild through a list so the set's insertion order is preserved.
                    LinkedHashSet<T> set = (LinkedHashSet<T>) collection;

                    List<T> list = new ArrayList<>(set);
                    int itemIdx = list.indexOf(t);
                    list.set(itemIdx, item);

                    set.clear();
                    set.addAll(list);
                } else {
                    // Unordered collection: position does not matter.
                    collection.remove(t);
                    collection.add(item);
                }
                attachListener(item);

                // Keep the datasource's current-item reference pointing at the new instance.
                if (item.equals(this.item)) {
                    this.item = item;
                }
                break;
            }
        }
        if (sortInfos != null)
            doSort();

        fireCollectionChanged(Operation.UPDATE, Collections.singletonList(item));
    }
}

From source file:ubic.gemma.loader.expression.arrayDesign.ArrayDesignSequenceProcessingServiceImpl.java

/**
 * Copy sequences into the original versions, or create new sequences in the DB, as needed.
 * <p>
 * Side effect: the accession of every sequence that was processed is removed from
 * {@code accessionsToFetch}, leaving only the accessions that are still missing.
 *
 * @param accessionsToFetch accessions we were asked to obtain; handled ones are removed
 * @param retrievedSequences sequences fetched from the external source
 * @param taxon taxon assigned to every retrieved sequence before persisting
 * @param force If true, if an existing BioSequence that matches if found in the system, any existing sequence
 *        information in the BioSequence will be overwritten.
 * @return Items that were found, keyed by accession.
 */
private Map<String, BioSequence> findOrUpdateSequences(Collection<String> accessionsToFetch,
        Collection<BioSequence> retrievedSequences, Taxon taxon, boolean force) {

    Map<String, BioSequence> found = new HashMap<String, BioSequence>();
    for (BioSequence sequence : retrievedSequences) {
        if (log.isDebugEnabled())
            log.debug("Processing retrieved sequence: " + sequence);
        sequence.setTaxon(taxon);
        // Persist or update (when force is set) before recording the result.
        sequence = createOrUpdateGenbankSequence(sequence, force);
        String accession = sequence.getSequenceDatabaseEntry().getAccession();
        found.put(accession, sequence);
        // This accession no longer needs to be fetched.
        accessionsToFetch.remove(accession);
    }
    return found;
}

From source file:de.escidoc.core.test.aa.UserAttributeTestBase.java

/**
 * Check if xml is valid and contains all attributes that are in given attributeList and only these attributes.
 *
 * @param attributesXml xml with user-attributes
 * @param userId        userId/*from   w  w w.jav  a2s .c o m*/
 * @param attributeList list of expected user-attributes
 * @throws Exception If anything fails.
 */
//CHECKSTYLE:OFF
protected void assertValidUserAttributes(final String attributesXml, final String userId,
        Collection<String> attributeList) throws Exception {
    //CHECKSTYLE:ON

    assertXmlValidAttributes(attributesXml);

    String href = "/aa/user-account/" + userId + "/resources/attributes";
    Document attributesXmlDocument = getDocument(attributesXml);
    selectSingleNodeAsserted(attributesXmlDocument, "/attributes/@base");
    selectSingleNodeAsserted(attributesXmlDocument, "/attributes[@href = '" + href + "']");
    int count = attributeList.size();
    if (count > 0) {
        selectSingleNodeAsserted(attributesXmlDocument, "/attributes/attribute[" + count + "]");
    }

    // check if every entry from given collection is in the document
    NodeList attributeElements = selectNodeList(attributesXmlDocument, "/attributes/attribute");
    int elementCount = attributeElements.getLength();
    // iterate elements of the xml document
    for (int i = 0; i < elementCount; i++) {
        // check if key value pair is in given map
        String attributeName = attributeElements.item(i).getAttributes().getNamedItem("name").getNodeValue();
        String attributeValue = attributeElements.item(i).getTextContent();
        String isInternal = attributeElements.item(i).getAttributes().getNamedItem("internal").getNodeValue();
        if (!attributeList.contains(attributeName + attributeValue + isInternal)) {
            fail("Unexpected attribute found. [" + attributeName + attributeValue + isInternal + "]");
        }
        attributeList.remove(attributeName + attributeValue + isInternal);
    }
    // all entries should be removed from hashes(-out-of-map), now
    if (!attributeList.isEmpty()) {
        fail("Expected attributes not found. [" + attributeList.toString() + "]");
    }
}

From source file:eu.medsea.mimeutil.MimeUtil2.java

/**
 * Determines the MIME types for the resource behind the given URL.
 * <p>
 * Directories map to the fixed directory MIME type; everything else is delegated to the
 * registered detectors. The supplied {@code unknownMimeType} is stripped from detector
 * results and only added when detection yields nothing at all, so the returned collection
 * is never empty. A {@code null} URL is logged as an error and likewise yields only the
 * unknown type.
 *
 * @param url             the resource to inspect; may be null
 * @param unknownMimeType fallback type returned when nothing could be detected
 * @return the detected MIME types, never empty
 * @throws MimeException if detection fails
 */
public final Collection getMimeTypes(final URL url, final MimeType unknownMimeType) throws MimeException {
    final Collection mimeTypes = new MimeTypeHashSet();

    if (url == null) {
        log.error("URL reference cannot be null.");
    } else {
        if (log.isDebugEnabled()) {
            log.debug("Getting MIME types for URL [" + url + "].");
        }

        final File asFile = new File(url.getPath());
        if (asFile.isDirectory()) {
            // Directories get a fixed pseudo MIME type.
            mimeTypes.add(MimeUtil2.DIRECTORY_MIME_TYPE);
        } else {
            // Delegate to the registered detectors (file-name and stream based).
            mimeTypes.addAll(mimeDetectorRegistry.getMimeTypes(url));
            // Detectors may report the "unknown" placeholder; drop it from real results.
            mimeTypes.remove(unknownMimeType);
        }
    }

    // An empty result means detection failed entirely; fall back to the caller's unknown type.
    if (mimeTypes.isEmpty()) {
        mimeTypes.add(unknownMimeType);
    }
    if (log.isDebugEnabled()) {
        log.debug("Retrieved MIME types [" + mimeTypes.toString() + "]");
    }
    return mimeTypes;
}

From source file:org.fao.geonet.services.user.Update.java

/**
 * Synchronizes the persisted user/group/profile associations with the requested list:
 * associations not re-requested are deleted, new group/profile pairs are added, and
 * pairs that already exist are left untouched. A Reviewer profile additionally implies
 * an Editor association for the same group.
 *
 * @param user       the user whose group memberships are updated
 * @param userGroups the requested group/profile pairs
 * @throws Exception if repository access fails
 */
private void setUserGroups(final User user, List<GroupElem> userGroups) throws Exception {
    UserGroupRepository userGroupRepository = ApplicationContextHolder.get().getBean(UserGroupRepository.class);
    GroupRepository groupRepository = ApplicationContextHolder.get().getBean(GroupRepository.class);

    Collection<UserGroup> all = userGroupRepository.findAll(UserGroupSpecs.hasUserId(user.getId()));

    // Have a quick reference of existing groups and profiles for this user
    Set<String> listOfAddedProfiles = new HashSet<String>();
    for (UserGroup ug : all) {
        String key = ug.getProfile().name() + ug.getGroup().getId();
        if (!listOfAddedProfiles.contains(key)) {
            listOfAddedProfiles.add(key);
        }
    }

    // We start removing all old usergroup objects. We will remove the
    // explicitly defined for this call
    Collection<UserGroup> toRemove = new ArrayList<UserGroup>();
    toRemove.addAll(all);

    // New pairs of group-profile we need to add
    Collection<UserGroup> toAdd = new ArrayList<UserGroup>();

    // For each of the parameters on the request, make sure the group is
    // updated.
    for (GroupElem element : userGroups) {
        Integer groupId = element.getId();
        Group group = groupRepository.findOne(groupId);
        String profile = element.getProfile();
        // The user has a new group and profile

        // Combine all groups editor and reviewer groups
        if (profile.equals(Profile.Reviewer.name())) {
            // A Reviewer implicitly gets an Editor association in the same group.
            final UserGroup userGroup = new UserGroup().setGroup(group).setProfile(Profile.Editor)
                    .setUser(user);
            String key = Profile.Editor.toString() + group.getId();
            if (!listOfAddedProfiles.contains(key)) {
                toAdd.add(userGroup);
                listOfAddedProfiles.add(key);
            }

            // If the user is already part of this group with this profile,
            // leave it alone:
            // NOTE(review): 'g.getGroup().getId() == groupId' relies on getId() returning a
            // primitive int (unboxing groupId); if both sides were Integer objects this would
            // compare references — confirm the id type.
            for (UserGroup g : all) {
                if (g.getGroup().getId() == groupId && g.getProfile().equals(Profile.Editor)) {
                    toRemove.remove(g);
                }
            }
        }

        final UserGroup userGroup = new UserGroup().setGroup(group)
                .setProfile(Profile.findProfileIgnoreCase(profile)).setUser(user);
        String key = profile + group.getId();
        if (!listOfAddedProfiles.contains(key)) {
            toAdd.add(userGroup);
            listOfAddedProfiles.add(key);

        }

        // If the user is already part of this group with this profile,
        // leave it alone:
        for (UserGroup g : all) {
            if (g.getGroup().getId() == groupId && g.getProfile().name().equalsIgnoreCase(profile)) {
                toRemove.remove(g);
            }
        }
    }

    // Remove deprecated usergroups (if any)
    userGroupRepository.delete(toRemove);

    // Add only new usergroups (if any)
    userGroupRepository.save(toAdd);

}

From source file:uk.nhs.cfh.dsp.snomed.normaliser.impl.NormalFormGeneratorImpl.java

/**
 * Gets the non redundant merged relationships.
 * <p>
 * Pairwise-compares the given relationship expressions and removes the redundant member of
 * each subsuming/subsumed or equivalent pair from the passed-in collection. The input
 * collection is mutated in place and returned.
 * <p>
 * NOTE(review): the comparisons iterate over a snapshot taken before any removal, so an
 * element already removed from {@code relationships} can still participate in later
 * comparisons and trigger further removals — confirm this is the intended semantics.
 *
 * @param relationships the relationships (mutated in place)
 * @return the non redundant merged relationships (same collection instance)
 */
private Collection<SnomedRelationshipPropertyExpression> getNonRedundantMergedRelationships(
        Collection<SnomedRelationshipPropertyExpression> relationships) {

    // Snapshot for stable pairwise iteration; removals below only affect 'relationships'.
    List<SnomedRelationshipPropertyExpression> relationshipList = new ArrayList<SnomedRelationshipPropertyExpression>(
            relationships);
    for (int i = 0; i < relationshipList.size(); i++) {
        for (int j = i + 1; j < relationshipList.size(); j++) {
            SnomedRelationshipPropertyExpression r1 = relationshipList.get(i);
            SnomedRelationshipPropertyExpression r2 = relationshipList.get(j);
            // r2 is normalized to its short normal form before comparison; r1 is not.
            ExpressionComparator.Subsumption_Relation relation = expressionComparator.getSubsumptionRelation(r1,
                    getExpressionWithShortNormalFormAsValue(r2));
            if (ExpressionComparator.Subsumption_Relation.SUBSUMED_BY == relation) {
                // NOTE(review): removal direction for SUBSUMED_BY/SUBSUMES looks asymmetric to
                // the relation names — verify r2 (not r1) is the redundant one here.
                relationships.remove(r2);
            } else if (ExpressionComparator.Subsumption_Relation.SUBSUMES == relation) {
                relationships.remove(r1);
            } else if (ExpressionComparator.Subsumption_Relation.SAME == relation) {
                // remove one relationship of the equivalent pair
                relationships.remove(r2);
            }
        }
    }

    return relationships;
}

From source file:com.nextep.designer.sqlgen.postgre.impl.PostgreSqlCapturer.java

@Override
public Collection<IIndex> getIndexes(ICaptureContext context, IProgressMonitor monitor) {
    final Map<String, IIndex> indexMap = new HashMap<String, IIndex>();
    final Collection<IIndex> indexes = getPostgresIndexes(context, monitor);

    // Processing indexes to remove PK or UK indexes (DES-694)
    /*/*w w w.ja v  a2  s. co  m*/
     * FIXME [BGA] Now that indexes are captured by a PostgreSQL specific implementation, PK or
     * UK indexes can be filtered out upstream, so this block of code should not be necessary
     * anymore.
     */
    for (IIndex index : new ArrayList<IIndex>(indexes)) {
        final Object obj = context.getCapturedObject(IElementType.getInstance(UniqueKeyConstraint.TYPE_ID),
                CaptureHelper.getUniqueIndexName(index));
        if (obj != null) {
            indexes.remove(index);
        } else {
            indexMap.put(index.getIndexName(), index);
        }
    }
    // Fetching index tablespaces
    fillTablespaces("i", context, indexMap, IIndexPhysicalProperties.class); //$NON-NLS-1$
    return indexes;
}

From source file:org.nabucco.alfresco.enhScriptEnv.common.script.registry.VersionRegisterableScriptClasspathScanner.java

/**
 * Matches and processes a folder level defining a range of versions a script is applicable to.
 *
 * @param resourcePattern
 *            the resource pattern for the current level
 * @param patternStack
 *            the stack of patterns evaluated thus far
 * @param versionDataContainer
 *            the container for collection version information
 * @param subRegistry
 *            the sub-registry to register the script in or {@code null} if no sub-registry is to be used
 * @param fileName
 *            the name of the file currently evaluated
 * @throws IOException
 *             if any exception occurs handling the resource
 * @throws IllegalStateException
 *             if {@code fileName} does not actually match the version range pattern
 */
protected void matchVersionRange(final String resourcePattern, final Collection<String> patternStack,
        final VersionRegisterableScriptAdapter<Script> versionDataContainer, final String subRegistry,
        final String fileName) throws IOException {
    LOGGER.debug("Matched version range {} fragment", fileName);

    final Matcher matcher = Pattern.compile(VERSION_RANGE_PATTERN).matcher(fileName);
    // Previously the find() result was ignored; a non-matching name would surface as a bare
    // IllegalStateException from group(). Fail explicitly with a diagnostic message instead.
    if (!matcher.find()) {
        throw new IllegalStateException(
                "File name " + fileName + " does not match the version range pattern");
    }

    final String lowerBoundExclusivity = matcher.group(2);
    final String lowerVersion = matcher.group(3);
    final String upperVersion = matcher.group(6);
    final String upperBoundExclusivity = matcher.group(8);

    versionDataContainer.setAppliesFromExclusive("[".equals(lowerBoundExclusivity));
    versionDataContainer.setAppliesToExclusive("]".equals(upperBoundExclusivity));
    if (lowerVersion != null && lowerVersion.length() != 0) {
        versionDataContainer.setAppliesFrom(lowerVersion);
    }
    if (upperVersion != null && upperVersion.length() != 0) {
        versionDataContainer.setAppliesTo(upperVersion);
    }

    patternStack.add(VERSION_RANGE_PATTERN);
    try {
        this.scanNextLevel(resourcePattern + "/" + fileName, patternStack, versionDataContainer, subRegistry);
    } finally {
        // Reset the container so sibling levels start from a clean version state.
        versionDataContainer.setAppliesFromExclusive(false);
        versionDataContainer.setAppliesToExclusive(false);
        versionDataContainer.setAppliesFrom((VersionNumber) null);
        versionDataContainer.setAppliesTo((VersionNumber) null);
        patternStack.remove(VERSION_RANGE_PATTERN);
    }
}