Example usage for java.util Set retainAll

List of usage examples for java.util Set retainAll

Introduction

On this page you can find example usage for java.util Set retainAll.

Prototype

boolean retainAll(Collection<?> c);

Document

Retains only the elements in this set that are contained in the specified collection (optional operation).
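
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the projects listed under Usage) illustrating the documented behavior: retainAll keeps only the elements that also appear in the argument collection and returns true if the set was modified as a result.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class RetainAllDemo {
    public static void main(String[] args) {
        Set<String> colors = new HashSet<>(Arrays.asList("red", "green", "blue", "yellow"));

        // keep only the elements that also appear in the argument collection
        boolean changed = colors.retainAll(Arrays.asList("green", "blue", "purple"));

        System.out.println(changed); // true, because "red" and "yellow" were removed
        System.out.println(colors);  // e.g. [green, blue] (HashSet does not guarantee order)
    }
}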

Usage

From source file:org.jax.haplotype.analysis.EMMAAssociationTest.java

/**
 * Emma scan using flat files
 * @param genoFileName
 *          the genotype file
 * @param aAlleleColumn
 *          the column index for the A allele
 * @param bAlleleColumn
 *          the column index for the B allele
 * @param firstGenotypeColumn
 *          the 1st genotype column
 * @param lastGenotypeColumnExclusive
 *          the index after the last genotype column. You can use -1 to indicate that
 *          all of the remaining columns after firstGenotypeColumn are
 *          genotype columns
 * @param phenoFileName
 *          the phenotype file
 * @param phenotype
 *          the name of the phenotype to scan (you can use null here only
 *          if there is only a single phenotype in the phenotype file)
 * @param sexToScan
 *          the sex type used to filter phenotype data
 * @return
 *          the flattened kinship matrix
 * @throws IllegalFormatException
 *          if there is a problem with how the file is formatted
 * @throws IOException
 *          if there is a problem with file IO while reading the flat file
 */
public double[] emmaScan(String genoFileName, int aAlleleColumn, int bAlleleColumn, int firstGenotypeColumn,
        int lastGenotypeColumnExclusive, String phenoFileName, String phenotype, SexFilter sexToScan)
        throws IllegalFormatException, IOException {
    // start with the geno headers
    FlatFileReader genoFFR = new FlatFileReader(new FileReader(genoFileName), CommonFlatFileFormat.CSV_UNIX);
    String[] currRow = genoFFR.readRow();
    if (currRow == null) {
        throw new IllegalFormatException("Failed to read the header");
    }

    if (lastGenotypeColumnExclusive == -1) {
        lastGenotypeColumnExclusive = currRow.length;
    }
    String[] headerStrains = new String[lastGenotypeColumnExclusive - firstGenotypeColumn];
    for (int i = 0; i < headerStrains.length; i++) {
        headerStrains[i] = currRow[i + firstGenotypeColumn];
    }

    // now get the strains in common with phenotype data
    MPDIndividualStrainPhenotypeParser phenoParser = new MPDIndividualStrainPhenotypeParser();
    FileInputStream phenoIn = new FileInputStream(phenoFileName);
    Set<String> phenoStrains = phenoParser.parseAvailableStrainNames(phenoIn);
    phenoIn.close();

    Set<String> commonStrainSet = new HashSet<String>(phenoStrains);
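    // intersect the phenotype strains with the strains named in the genotype header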
    commonStrainSet.retainAll(Arrays.asList(headerStrains));
    int strainCount = commonStrainSet.size();
    String[] commonStrainArray = commonStrainSet.toArray(new String[0]);
    Arrays.sort(commonStrainArray);
    int[] commonStrainIndices = new int[strainCount];

    {
        List<String> headerRowList = Arrays.asList(currRow);
        for (int i = 0; i < strainCount; i++) {
            commonStrainIndices[i] = headerRowList.indexOf(commonStrainArray[i]);
        }
    }

    // TODO need failure if there are fewer than 3 strains in common

    // read the phenotype data
    if (phenotype == null || phenotype.length() == 0) {
        phenoIn = new FileInputStream(phenoFileName);
        Set<String> phenos = phenoParser.parseAvailablePhenotypes(phenoIn);
        phenoIn.close();

        if (phenos.size() != 1) {
            throw new IllegalFormatException();
        } else {
            phenotype = phenos.iterator().next();
        }
    }

    if (LOG.isLoggable(Level.FINE)) {
        LOG.fine("the phenotype is: " + phenotype);
    }

    phenoIn = new FileInputStream(phenoFileName);
    Map<String, List<Double>> phenoData = phenoParser.parsePhenotypesFromStream(phenotype, phenoIn, sexToScan,
            commonStrainSet);

    if (LOG.isLoggable(Level.FINE)) {
        LOG.fine("the # of phenotypes is: " + phenoData.size());
    }

    double[] phenotypeMeans = new double[phenoData.size()];
    for (int i = 0; i < strainCount; i++) {
        phenotypeMeans[i] = 0.0;
        List<Double> currData = phenoData.get(commonStrainArray[i]);
        for (Double measure : currData) {
            phenotypeMeans[i] += measure;
        }
        phenotypeMeans[i] /= currData.size();
    }

    // read the genotype data
    List<double[]> callValues = new LinkedList<double[]>();
    while ((currRow = genoFFR.readRow()) != null) {
        double[] currSnpGenos = new double[strainCount];
        for (int strainIndex = 0; strainIndex < strainCount; strainIndex++) {
            currSnpGenos[strainIndex] = toCallValue(currRow[aAlleleColumn], currRow[bAlleleColumn],
                    currRow[commonStrainIndices[strainIndex]]);
        }
        callValues.add(currSnpGenos);
    }

    // flatten the genotype matrix
    double[] flatCallValues = new double[strainCount * callValues.size()];
    Iterator<double[]> iter = callValues.iterator();
    for (int rowIndex = 0; iter.hasNext(); rowIndex++) {
        double[] currSnpGenos = iter.next();
        for (int strainIndex = 0; strainIndex < currSnpGenos.length; strainIndex++) {
            int flatIndex = rowIndex * strainCount + strainIndex;
            flatCallValues[flatIndex] = currSnpGenos[strainIndex];
        }
    }

    // calculate kinship matrix and do the scan
    double[] kinship = calculateKinship(strainCount, flatCallValues);
    return emmaScan(strainCount, phenotypeMeans, flatCallValues, kinship);
}

From source file:com.netflix.discovery.shared.Applications.java

/**
 * Shuffle the instances and filter for only {@link InstanceStatus#UP} if
 * required.
 *
 */
private void shuffleAndFilterInstances(Map<String, AbstractQueue<InstanceInfo>> srcMap,
        Map<String, AtomicReference<List<InstanceInfo>>> destMap, Map<String, AtomicLong> vipIndexMap,
        boolean filterUpInstances) {
    for (Map.Entry<String, AbstractQueue<InstanceInfo>> entries : srcMap.entrySet()) {
        AbstractQueue<InstanceInfo> instanceInfoQueue = entries.getValue();
        List<InstanceInfo> l = new ArrayList<InstanceInfo>(instanceInfoQueue);
        if (filterUpInstances) {
            Iterator<InstanceInfo> it = l.iterator();

            while (it.hasNext()) {
                InstanceInfo instanceInfo = it.next();
                if (!InstanceStatus.UP.equals(instanceInfo.getStatus())) {
                    it.remove();
                }
            }
        }
        Collections.shuffle(l);
        AtomicReference<List<InstanceInfo>> instanceInfoList = destMap.get(entries.getKey());
        if (instanceInfoList == null) {
            instanceInfoList = new AtomicReference<List<InstanceInfo>>(l);
            destMap.put(entries.getKey(), instanceInfoList);
        }
        instanceInfoList.set(l);
        vipIndexMap.put(entries.getKey(), new AtomicLong(0));
    }

    // finally remove all vips that have been completely deleted (i.e. missing) from srcMap
    Set<String> srcVips = srcMap.keySet();
    Set<String> destVips = destMap.keySet();
    destVips.retainAll(srcVips);
}

From source file:com.qmetry.qaf.automation.step.client.AbstractScenarioFileParser.java

/**
 * To apply groups and enabled filter
 * 
 * @param includeGroups
 * @param excludeGroups
 * @param metadata
 * @return
 */
@SuppressWarnings("unchecked")
protected boolean include(List<String> includeGroups, List<String> excludeGroups,
        Map<String, Object> metadata) {
    // check for enabled
    if (metadata.containsKey("enabled") && !((Boolean) metadata.get("enabled")))
        return false;

    Set<Object> groups = new HashSet<Object>(
            metadata.containsKey(ScenarioFactory.GROUPS) ? (List<String>) metadata.get(ScenarioFactory.GROUPS)
                    : new ArrayList<String>());
    if (!includeGroups.isEmpty()) {
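        // keep only the scenario's groups that appear in the include list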
        groups.retainAll(includeGroups);
    }
    groups.removeAll(excludeGroups);
    return (!groups.isEmpty() || (includeGroups.isEmpty() && excludeGroups.isEmpty()));
}

From source file:org.gradoop.flink.model.impl.operators.matching.single.cypher.planning.planner.greedy.GreedyPlanner.java

/**
 * Computes the overlapping query variables of the specified entries.
 *
 * @param firstEntry first entry
 * @param secondEntry second entry
 * @return variables that are available in both input entries
 */
private List<String> getOverlap(PlanTableEntry firstEntry, PlanTableEntry secondEntry) {
    Set<String> overlap = firstEntry.getAllVariables();
    overlap.retainAll(secondEntry.getAllVariables());
    return new ArrayList<>(overlap);
}

From source file:uk.gov.gchq.gaffer.data.elementdefinition.view.View.java

private Map<String, ViewElementDefinition> expandGlobalDefinitions(
        final Map<String, ViewElementDefinition> elements, final Set<String> groups,
        final List<GlobalViewElementDefinition> globalElements, final boolean skipMissingGroups) {

    final Map<String, ViewElementDefinition> newElements = new LinkedHashMap<>();
    for (final GlobalViewElementDefinition globalElement : globalElements) {
        final Set<String> globalGroups;
        if (null != globalElement.groups) {
            globalGroups = new HashSet<>(globalElement.groups);
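            // retainAll returns true if the set changed, i.e. the global element referenced a group not present in the view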
            final boolean hasMissingGroups = globalGroups.retainAll(groups);
            if (hasMissingGroups) {
                if (!skipMissingGroups) {
                    final Set<String> missingGroups = new HashSet<>(globalElement.groups);
                    missingGroups.removeAll(groups);
                    throw new IllegalArgumentException(
                            "A global element definition is invalid, these groups do not exist: "
                                    + missingGroups);
                }
            }
        } else {
            globalGroups = groups;
        }
        for (final String group : globalGroups) {
            final ViewElementDefinition.Builder builder = new ViewElementDefinition.Builder();
            if (newElements.containsKey(group)) {
                builder.merge(newElements.get(group));
            }
            builder.merge(globalElement.clone());
            newElements.put(group, builder.build());
        }
    }

    if (null != elements) {
        for (final Map.Entry<String, ViewElementDefinition> entry : elements.entrySet()) {
            final String group = entry.getKey();
            if (newElements.containsKey(group)) {
                newElements.put(group, new ViewElementDefinition.Builder().merge(newElements.get(group))
                        .merge(entry.getValue()).build());
            } else {
                newElements.put(group, entry.getValue());
            }
        }
    }

    return Collections.unmodifiableMap(newElements);
}

From source file:io.minio.policy.BucketPolicy.java

/**
 * Removes all statements for the given prefix.
 */
private void removeStatements(String prefix) {
    String bucketResource = Constants.AWS_RESOURCE_PREFIX + bucketName;
    String objectResource = Constants.AWS_RESOURCE_PREFIX + bucketName + "/" + prefix + "*";
    boolean[] inUse = getInUsePolicy(prefix);
    boolean readOnlyInUse = inUse[0];
    boolean writeOnlyInUse = inUse[1];

    List<Statement> out = new ArrayList<Statement>();
    Set<String> s3PrefixValues = new HashSet<String>();
    List<Statement> readOnlyBucketStatements = new ArrayList<Statement>();

    for (Statement statement : statements) {
        if (!statement.isValid(bucketName)) {
            out.add(statement);
            continue;
        }

        if (statement.resources().contains(bucketResource)) {
            if (statement.conditions() != null) {
                statement.removeBucketActions(prefix, bucketResource, false, false);
            } else {
                statement.removeBucketActions(prefix, bucketResource, readOnlyInUse, writeOnlyInUse);
            }
        } else if (statement.resources().contains(objectResource)) {
            statement.removeObjectActions(objectResource);
        }

        if (!statement.actions().isEmpty()) {
            if (statement.resources().contains(bucketResource)
                    && statement.actions().containsAll(Constants.READ_ONLY_BUCKET_ACTIONS)
                    && statement.effect().equals("Allow") && statement.principal().aws().contains("*")) {

                if (statement.conditions() != null) {
                    ConditionKeyMap stringEqualsValue = statement.conditions().get("StringEquals");
                    if (stringEqualsValue != null) {
                        Set<String> values = stringEqualsValue.get("s3:prefix");
                        if (values != null) {
                            for (String v : values) {
                                s3PrefixValues.add(bucketResource + "/" + v + "*");
                            }
                        }
                    }
                } else if (!s3PrefixValues.isEmpty()) {
                    readOnlyBucketStatements.add(statement);
                    continue;
                }
            }

            out.add(statement);
        }
    }

    boolean skipBucketStatement = true;
    String resourcePrefix = Constants.AWS_RESOURCE_PREFIX + bucketName + "/";
    for (Statement statement : out) {
        Set<String> intersection = new HashSet<String>(s3PrefixValues);
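        // intersect the collected s3:prefix resources with this statement's resources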
        intersection.retainAll(statement.resources());

        if (!statement.resources().startsWith(resourcePrefix).isEmpty() && intersection.isEmpty()) {
            skipBucketStatement = false;
            break;
        }
    }

    for (Statement statement : readOnlyBucketStatements) {
        Set<String> aws = statement.principal().aws();
        if (skipBucketStatement && statement.resources().contains(bucketResource)
                && statement.effect().equals("Allow") && aws != null && aws.contains("*")
                && statement.conditions() == null) {
            continue;
        }

        out.add(statement);
    }

    if (out.size() == 1) {
        Statement statement = out.get(0);
        Set<String> aws = statement.principal().aws();
        if (statement.resources().contains(bucketResource)
                && statement.actions().containsAll(Constants.COMMON_BUCKET_ACTIONS)
                && statement.effect().equals("Allow") && aws != null && aws.contains("*")
                && statement.conditions() == null) {
            out = new ArrayList<Statement>();
        }
    }

    statements = out;
}

From source file:ubic.BAMSandAllen.MatrixPairs.ConnectivityAndAllenDataPair.java

public void makeVirtualConnectivityRegionColumn(String bName) {
    // if ( squareConnectivity ) log.warn( "Virtual regions incompatible with square connectivity" );
    // put in a datastructure for convertAnametoBname
    // take the cols and OR them together
    Set<String> aNames = convertBNametoA(bName);
    // only those we have data for
    aNames.retainAll(matrixA.getColNames());

    log.info("Making virtual region for:" + matrixB.getName() + " " + bName + " -> " + aNames);

    virtualRegions.add(bName);
    double[] newColValues = new double[matrixA.rows()];

    for (String aName : aNames) {
        double[] matrixColumn = matrixA.getColumnByName(aName);
        for (int row = 0; row < matrixColumn.length; row++) {
            // logical OR operation
            if (matrixColumn[row] == 1) {
                newColValues[row] = 1;
            }
        }
    }

    // remove the three cols and add a new one - use the Allen Name
    matrixA = matrixA.removeColumns(aNames);
    matrixA = matrixA.addColumn(bName, newColValues);

}

From source file:spade.query.scaffold.BerkeleyDB.java

@Override
public Map<String, Set<String>> getPaths(String source_hash, String destination_hash, int maxLength) {

    Map<String, Set<String>> lineageUp = getLineage(source_hash, DIRECTION_ANCESTORS, maxLength);
    Map<String, Set<String>> lineageDown = getLineage(destination_hash, DIRECTION_DESCENDANTS, maxLength);
    Map<String, Set<String>> paths = new HashMap<>();
    Set<String> keys = lineageUp.keySet();
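    // keySet() is a live view, so retainAll also drops the corresponding entries from lineageUp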
    keys.retainAll(lineageDown.keySet());

    for (String key : keys) {
        Set<String> pathEntry = paths.get(key);
        if (pathEntry == null) {
            pathEntry = new HashSet<>();
        }
        pathEntry.addAll(lineageUp.get(key));
        pathEntry.addAll(lineageDown.get(key));
        // store the merged entry back into the result map; without this the returned map stays empty
        paths.put(key, pathEntry);
    }

    return paths;
}

From source file:edu.stanford.muse.email.Filter.java

public boolean matches(Document d) throws ReadContentsException {
    // look for any reason to return false, if none of them fire, return true.

    DatedDocument dd = null;
    if (d instanceof DatedDocument) {
        dd = (DatedDocument) d;
        if (!matchesDate(dd))
            return false;
    }

    if (keywords != null && keywords.size() > 0) {
        log.warn("Filtering by keywords during fetch&index is currently disabled");
        Util.softAssert(false);
        //         String s = d.getContents().toLowerCase();
        //         // check for all keywords, if any absent return false
        //         for (String keyword: keywords)
        //            if (s.indexOf(keyword) < 0)
        //               return false;
    }

    // extra checks for email doc
    if (d instanceof EmailDocument) {
        // check if any of the people involved in this message are one of personContacts
        EmailDocument ed = (EmailDocument) d;

        if (personContacts.size() > 0) {
            // if we don't have an address book, assume that it's a match
            // (this filter might be getting called from EmailFetcher.run(),
            // which is still in the process of building the addr book;
            // if this is the case, we will explicitly apply the filter again, so it's ok)
            if (addressBook != null) {
                List<String> list = ed.getAllAddrs();
                Set<Contact> contactsInThisMessage = new LinkedHashSet<Contact>();
                for (String s : list) {
                    Contact c = addressBook.lookupByEmail(s);
                    if (c != null)
                        contactsInThisMessage.add(c);
                }

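                // keep only the contacts on this message that are also in personContacts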
                contactsInThisMessage.retainAll(personContacts);
                if (contactsInThisMessage.size() == 0)
                    return false;
            }
        }

        if (sentMessagesOnly) {
            if (ownCI != null) {
                String fromEmail = ed.getFromEmailAddress();
                Set<String> ownAddrs = ownCI.getEmails();
                if (!ownAddrs.contains(fromEmail))
                    return false;
            } else {
                log.warn(
                        "WARNING: user error: trying to use sent-only option without setting user's own contact info");
                // in this case, we assume a match implicitly because we don't want to filter out all messages
            }
        }
    }
    return true;
}

From source file:org.finra.herd.service.impl.CurrentUserServiceImpl.java

/**
 * Filters a set of roles based on a list of role values specific for herd.
 *
 * @param roles A given set of roles
 *
 * @return Valid roles from the specified set of roles
 */
private Set<String> getValidSecurityRoles(final Set<String> roles) {
    // Copy the set of specified roles to another set
    Set<String> incomingRoles = new HashSet<>(roles);

    // Copy the roles to a set for easier computation
    Set<SecurityRoleEntity> securityRoleEntities = new HashSet<>(securityRoleDao.getAllSecurityRoles());

    // Collect all security role codes from the entities
    Set<String> securityRoles = securityRoleEntities.stream().map(SecurityRoleEntity::getCode)
            .collect(Collectors.toSet());

    // The Set of valid roles is the intersection of the two collections
    incomingRoles.retainAll(securityRoles);

    // Return valid roles
    return incomingRoles;
}