Example usage for java.util Set retainAll

List of usage examples for java.util Set retainAll

Introduction

On this page you can find usage examples for java.util Set retainAll.

Prototype

boolean retainAll(Collection<?> c);

Document

Retains only the elements in this set that are contained in the specified collection (optional operation).
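
As a quick, self-contained illustration of the prototype above: retainAll mutates the receiving set in place and returns true if the set changed as a result. A minimal sketch:

import java.util.HashSet;
import java.util.Set;

public class RetainAllDemo {
    public static void main(String[] args) {
        Set<String> colors = new HashSet<>(Set.of("red", "green", "blue"));
        Set<String> warm = Set.of("red", "orange", "yellow");

        // retainAll mutates the receiver: only elements also in 'warm' survive
        boolean changed = colors.retainAll(warm);

        System.out.println(changed); // true, the set was modified
        System.out.println(colors);  // [red]
    }
}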

Usage

From source file:nl.knaw.huc.di.tag.tagml.xml.exporter.XMLBuilder.java

@Override
public void exitOpenTag(final TAGMarkup markup) {
    Set<String> layers = markup.getLayers();
    layers.retainAll(relevantLayers);
    if (useTrojanHorse) {
        String thId = markup.getTag() + thIdCounter.getAndIncrement();
        thIds.put(markup, thId);
        final String thDoc = getThDoc(layers);

        String id = markup.isAnonymous() ? "soleId" : "sId";
        xmlBuilder.append(" th:doc=\"").append(thDoc).append("\"").append(" th:").append(id).append("=\"")
                .append(thId).append("\"/");

    } else if (markup.isAnonymous()) {
        xmlBuilder.append("/");
    }
    xmlBuilder.append(">");
}

From source file:nl.knaw.huc.di.tag.tagml.xml.exporter.XMLBuilder.java

@Override
public void exitCloseTag(final TAGMarkup markup) {
    String markupName = getMarkupName(markup);
    Set<String> layers = markup.getLayers();
    layers.retainAll(relevantLayers);
    if (markup.isAnonymous()) {
        return;
    }
    xmlBuilder.append("<");
    if (!useTrojanHorse) {
        xmlBuilder.append("/");
    }
    xmlBuilder.append(markupName);
    if (useTrojanHorse) {
        final String thDoc = getThDoc(layers);
        String thId = thIds.remove(markup);
        xmlBuilder.append(" th:doc=\"").append(thDoc).append("\"").append(" th:eId=\"").append(thId)
                .append("\"/");
    }
    xmlBuilder.append(">");
}
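
Both XMLBuilder callbacks above use the same idiom: narrowing a markup's layer set to the layers the exporter considers relevant. A minimal sketch of that idiom with plain JDK sets (markupLayers and relevantLayers are hypothetical stand-ins for markup.getLayers() and the exporter's field, not the TAGMarkup API):

import java.util.HashSet;
import java.util.Set;

public class LayerFilterSketch {
    public static void main(String[] args) {
        Set<String> markupLayers = new HashSet<>(Set.of("base", "notes", "meta"));
        Set<String> relevantLayers = Set.of("base", "notes");

        // keep only the layers the exporter cares about
        markupLayers.retainAll(relevantLayers);
        System.out.println(markupLayers); // [base, notes] (iteration order may vary)
    }
}

Note that the originals call retainAll directly on the set returned by getLayers(); if that set is the markup's internal state, the call mutates it, and a defensive copy may be the safer choice.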

From source file:org.syncope.core.notification.NotificationManager.java

/**
 * Create notification tasks for each notification matching the passed
 * workflow result.
 * @param wfResult workflow result
 * @throws NotFoundException if the user contained in the workflow result
 * cannot be found
 */
public void createTasks(final WorkflowResult<Long> wfResult) throws NotFoundException {

    SyncopeUser user = userDAO.find(wfResult.getResult());
    if (user == null) {
        throw new NotFoundException("User " + wfResult.getResult());
    }

    final String emailSchema = confDAO.find("email.schema", "email").getValue();

    for (Notification notification : notificationDAO.findAll()) {
        if (searchDAO.matches(user, notification.getAbout())) {
            Set<String> events = new HashSet<String>(notification.getEvents());
            events.retainAll(wfResult.getPerformedTasks());

            if (!events.isEmpty()) {
                LOG.debug("Creating notification task for events {} about {}", events, user);

                taskDAO.save(getNotificationTask(notification, user, emailSchema));
            } else {
                LOG.debug("No events found about {}", user);
            }
        }
    }
}
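
Here retainAll intersects a notification's subscribed events with the tasks the workflow actually performed, using a defensive copy so the Notification entity's own event set stays untouched. A minimal sketch of the same pattern (subscribedEvents and performedTasks are hypothetical stand-ins):

import java.util.HashSet;
import java.util.Set;

public class EventOverlapSketch {
    public static void main(String[] args) {
        Set<String> subscribedEvents = Set.of("create", "update", "delete");
        Set<String> performedTasks = Set.of("update", "activate");

        // copy first so the subscription's own event set is left intact
        Set<String> matching = new HashSet<>(subscribedEvents);
        matching.retainAll(performedTasks);

        if (!matching.isEmpty()) {
            System.out.println("would create a task for: " + matching); // [update]
        }
    }
}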

From source file:it.smartcommunitylab.aac.apikey.APIKeyManager.java

/**
 * Update key scopes.
 * @param key the API key to update
 * @param scopes the requested scopes
 * @return the updated {@link APIKey}
 * @throws EntityNotFoundException if the key does not exist
 */
public APIKey updateKeyScopes(String key, Set<String> scopes) throws EntityNotFoundException {
    APIKeyEntity entity = keyRepo.findOne(key);
    if (entity != null) {
        if (scopes != null) {
            ClientDetailsEntity client = clientRepo.findByClientId(entity.getClientId());
            Set<String> targetScopes = new HashSet<>(scopes);
            targetScopes.retainAll(client.getScope());
            entity.setScope(StringUtils.collectionToCommaDelimitedString(targetScopes));
        }

        keyRepo.save(entity);
        APIKey result = new APIKey(entity);
        log.debug("Update API Key data " + key);
        keyCache.put(key, result);
        return result;
    }
    throw new EntityNotFoundException(key);
}
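
The retainAll call above silently narrows the requested scopes to those the client is actually allowed; the createKey example further down uses the same idiom. A minimal, JDK-only sketch of the pattern (requested and allowedByClient are hypothetical stand-ins):

import java.util.HashSet;
import java.util.Set;

public class ScopeNarrowingSketch {
    public static void main(String[] args) {
        Set<String> requested = Set.of("profile", "email", "admin");
        Set<String> allowedByClient = Set.of("profile", "email");

        Set<String> target = new HashSet<>(requested);
        target.retainAll(allowedByClient); // silently drops "admin"
        System.out.println(target);        // [profile, email] (order may vary)
    }
}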

From source file:org.apache.zeppelin.notebook.AuthorizationService.java

// true when b is empty, or when a and b share at least one element
private boolean isMember(Set<String> a, Set<String> b) {
    Set<String> intersection = new HashSet<>(b);
    intersection.retainAll(a);
    return b.isEmpty() || !intersection.isEmpty();
}
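
A small runnable sketch of the same check, showing the three cases it distinguishes (an empty b is treated as a match):

import java.util.HashSet;
import java.util.Set;

public class IsMemberDemo {
    static boolean isMember(Set<String> a, Set<String> b) {
        Set<String> intersection = new HashSet<>(b);
        intersection.retainAll(a);
        return b.isEmpty() || !intersection.isEmpty();
    }

    public static void main(String[] args) {
        Set<String> userRoles = Set.of("dev", "ops");
        System.out.println(isMember(userRoles, Set.of()));        // true: empty set matches everyone
        System.out.println(isMember(userRoles, Set.of("ops")));   // true: sets overlap
        System.out.println(isMember(userRoles, Set.of("admin"))); // false: no overlap
    }
}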

From source file:it.smartcommunitylab.aac.apikey.APIKeyManager.java

/**
 * Create a new key for the specified client app.
 * @param clientId the client app identifier
 * @param validity the key validity period
 * @param data additional key data
 * @param scopes the requested scopes
 * @return the newly created {@link APIKey}
 * @throws EntityNotFoundException if the specified client does not exist
 */
public APIKey createKey(String clientId, Long validity, Map<String, Object> data, Set<String> scopes)
        throws EntityNotFoundException {
    ClientDetailsEntity client = clientRepo.findByClientId(clientId);
    if (client == null)
        throw new EntityNotFoundException("Client not found: " + clientId);

    APIKeyEntity entity = new APIKeyEntity();
    entity.setAdditionalInformation(APIKey.toDataString(data));
    entity.setValidity(validity);
    entity.setClientId(clientId);
    entity.setApiKey(UUID.randomUUID().toString());
    entity.setIssuedTime(System.currentTimeMillis());
    entity.setUserId(client.getDeveloperId());
    entity.setUsername(userManager.getUserInternalName(client.getDeveloperId()));
    entity.setRoles(APIKey.toRolesString(userManager.getUserRoles(client.getDeveloperId())));
    if (scopes != null && !scopes.isEmpty()) {
        Set<String> targetScopes = new HashSet<>(scopes);
        targetScopes.retainAll(client.getScope());
        entity.setScope(StringUtils.collectionToCommaDelimitedString(targetScopes));
    }
    keyRepo.save(entity);
    log.debug("Saved API Key  " + entity.getApiKey());

    APIKey result = new APIKey(entity);
    keyCache.put(result.getApiKey(), result);
    return result;
}

From source file:org.rhq.enterprise.server.content.metadata.ContentSourceMetadataManagerBean.java

/**
 * Return a new Set with the elements present in both passed collections.
 *
 * @param  first  First set
 * @param  second Second set
 *
 * @return a new set with the elements contained in both first and second
 */
private <T> Set<T> intersection(Set<T> first, Set<T> second) {
    Set<T> result = new HashSet<T>();

    if ((first != null) && (second != null)) {
        result.addAll(first);
        result.retainAll(second);
    }

    return result;
}
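
A minimal usage sketch, with the null-safe helper reproduced so it runs standalone:

import java.util.HashSet;
import java.util.Set;

public class IntersectionDemo {
    static <T> Set<T> intersection(Set<T> first, Set<T> second) {
        Set<T> result = new HashSet<T>();
        if ((first != null) && (second != null)) {
            result.addAll(first);
            result.retainAll(second);
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(intersection(Set.of(1, 2, 3), Set.of(2, 3, 4))); // [2, 3] (order may vary)
        System.out.println(intersection(null, Set.of(1)));                  // [] -- null input yields empty set
    }
}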

From source file:HSqlManager.java

private static void commonInitialize(int bps, Connection connection) throws SQLException, IOException {
    String base = new File("").getAbsolutePath();
    CSV.makeDirectory(new File(base + "/PhageData"));
    INSTANCE = ImportPhagelist.getInstance();
    INSTANCE.parseAllPhages(bps);
    written = true;
    Connection db = connection;
    db.setAutoCommit(false);
    Statement stat = db.createStatement();
    stat.execute("SET FILES LOG FALSE\n");
    PreparedStatement st = db.prepareStatement("Insert INTO Primerdb.Primers"
            + "(Bp,Sequence, CommonP, UniqueP, Picked, Strain, Cluster)" + " Values(?,?,true,false,false,?,?)");
    ResultSet call = stat.executeQuery("Select * From Primerdb.Phages;");
    List<String[]> phages = new ArrayList<>();
    while (call.next()) {
        String[] r = new String[3];
        r[0] = call.getString("Strain");
        r[1] = call.getString("Cluster");
        r[2] = call.getString("Name");
        phages.add(r);
    }
    phages.parallelStream().map(x -> x[0]).collect(Collectors.toSet()).parallelStream().forEach(x -> {
        phages.stream().filter(y -> y[0].equals(x)).map(y -> y[1]).collect(Collectors.toSet()).forEach(z -> {
            try {
                List<String> clustphages = phages.stream().filter(a -> a[0].equals(x) && a[1].equals(z))
                        .map(a -> a[2]).collect(Collectors.toList());
                Set<String> primers = Collections.synchronizedSet(CSV
                        .readCSV(base + "/PhageData/" + Integer.toString(bps) + clustphages.get(0) + ".csv"));
                clustphages.remove(0);
                clustphages.parallelStream().forEach(phage -> {
                    primers.retainAll(
                            CSV.readCSV(base + "/PhageData/" + Integer.toString(bps) + phage + ".csv"));
                });
                int i = 0;
                for (CharSequence a : primers) {
                    try {
                        //finish update
                        st.setInt(1, bps);
                        st.setString(2, a.toString());
                        st.setString(3, x);
                        st.setString(4, z);
                        st.addBatch();
                    } catch (SQLException e) {
                        e.printStackTrace();
                        System.out.println("Error occurred at " + x + " " + z);
                    }
                    i++;
                    if (i == 1000) {
                        i = 0;
                        st.executeBatch();
                        db.commit();
                    }
                }
                if (i > 0) {
                    st.executeBatch();
                    db.commit();
                }
            } catch (SQLException e) {
                e.printStackTrace();
                System.out.println("Error occurred at " + x + " " + z);
            }
        });
    });
    stat.execute("SET FILES LOG TRUE\n");
    st.close();
    stat.close();
    System.out.println("Common Updated");
}
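
Stripped of the database and CSV plumbing, the core of this example is folding retainAll over a list of sets so that only the primers common to every phage survive. A minimal sketch of that fold, with inline data standing in for CSV.readCSV:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class CommonElementsSketch {
    public static void main(String[] args) {
        List<Set<String>> perPhagePrimers = new ArrayList<>();
        perPhagePrimers.add(Set.of("AAA", "CCC", "GGG"));
        perPhagePrimers.add(Set.of("CCC", "GGG", "TTT"));
        perPhagePrimers.add(Set.of("GGG", "CCC"));

        // start from the first set, then repeatedly retain each following set
        Set<String> common = new HashSet<>(perPhagePrimers.get(0));
        for (Set<String> primers : perPhagePrimers.subList(1, perPhagePrimers.size())) {
            common.retainAll(primers);
        }
        System.out.println(common); // [CCC, GGG] (order may vary)
    }
}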

From source file:net.big_oh.algorithms.graph.clique.BronKerboschMaximalCliqueFinder.java

private Set<Set<V>> bronKerbosch1(Set<V> r, Set<V> p, Set<V> x, Graph<V> g, int minCliqueSize,
        int searchDepth) {

    if (logger.isDebugEnabled()) {
        logger.debug("Depth:" + searchDepth + " - R:" + r.size() + " - P:" + p.size() + " - X:" + x.size());
    }

    // base case ...
    // if P and X are both empty, then we're done
    if (p.isEmpty() && x.isEmpty()) {
        if (r.size() >= minCliqueSize) {
            // the maximal clique contained in r is big enough to report
            return Collections.singleton(r);
        } else {
            // the maximal clique contained in r is too small
            return Collections.emptySet();
        }
    }

    // recursive case ...
    Set<Set<V>> maximalCliques = new HashSet<Set<V>>();

    Set<V> pCopy = new HashSet<V>(p);
    for (V v : pCopy) {

        // rPrime = R ∪ {v}
        Set<V> rPrime = new HashSet<V>(r);
        rPrime.add(v);

        // pPrime = P ∩ N(v)
        Set<V> pPrime = new HashSet<V>(p);
        pPrime.retainAll(g.getAllNeighbors(v));

        // xPrime = X ∩ N(v)
        Set<V> xPrime = new HashSet<V>(x);
        xPrime.retainAll(g.getAllNeighbors(v));

        maximalCliques.addAll(bronKerbosch1(rPrime, pPrime, xPrime, g, minCliqueSize, searchDepth + 1));

        // P := P \ {v}
        p.remove(v);

        // X := X ∪ {v}
        x.add(v);

    }

    return maximalCliques;

}
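
For reference, here is a compact, self-contained variant of the same recursion that replaces the project's Graph<V> interface with a plain adjacency map; the retainAll calls implement the P ∩ N(v) and X ∩ N(v) steps exactly as above:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class BronKerboschSketch {
    static Map<String, Set<String>> adj = new HashMap<>();

    static void addEdge(String a, String b) {
        adj.computeIfAbsent(a, k -> new HashSet<>()).add(b);
        adj.computeIfAbsent(b, k -> new HashSet<>()).add(a);
    }

    static void bronKerbosch(Set<String> r, Set<String> p, Set<String> x, List<Set<String>> out) {
        if (p.isEmpty() && x.isEmpty()) {
            out.add(new HashSet<>(r)); // r is a maximal clique
            return;
        }
        for (String v : new HashSet<>(p)) {
            Set<String> rPrime = new HashSet<>(r);
            rPrime.add(v); // R ∪ {v}

            Set<String> pPrime = new HashSet<>(p);
            pPrime.retainAll(adj.getOrDefault(v, Set.of())); // P ∩ N(v)

            Set<String> xPrime = new HashSet<>(x);
            xPrime.retainAll(adj.getOrDefault(v, Set.of())); // X ∩ N(v)

            bronKerbosch(rPrime, pPrime, xPrime, out);
            p.remove(v);
            x.add(v);
        }
    }

    public static void main(String[] args) {
        addEdge("a", "b");
        addEdge("b", "c");
        addEdge("a", "c");
        addEdge("c", "d");
        List<Set<String>> cliques = new ArrayList<>();
        bronKerbosch(new HashSet<>(), new HashSet<>(adj.keySet()), new HashSet<>(), cliques);
        System.out.println(cliques); // [[a, b, c], [c, d]] (order may vary)
    }
}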

From source file:net.recommenders.rival.evaluation.statistics.ConfidenceInterval.java

/**
 * Method that takes two metrics as parameters. It will compute the
 * differences between both (only considering the keys in the overlap)
 *
 * @param <V> type of keys for metrics
 * @param alpha probability of incorrectly rejecting the null hypothesis (1
 * - confidence_level)
 * @param baselineMetricPerDimension baseline metric, one value for each
 * dimension
 * @param testMetricPerDimension test metric, one value for each dimension
 * @param pairedSamples flag to indicate if the comparison should be made
 * for the distribution of difference scores (when true) or for the
 * distribution of differences between means
 * @return array with the confidence interval: [mean - margin of error, mean
 * + margin of error]
 */
public <V> double[] getConfidenceInterval(final double alpha, final Map<V, Double> baselineMetricPerDimension,
        final Map<V, Double> testMetricPerDimension, final boolean pairedSamples) {
    if (pairedSamples) {
        Set<V> overlap = new HashSet<V>(baselineMetricPerDimension.keySet());
        overlap.retainAll(testMetricPerDimension.keySet());

        // paired or matched samples --> analyse distribution of difference scores
        SummaryStatistics differences = new SummaryStatistics();
        for (V key : overlap) {
            double diff = Math.abs(testMetricPerDimension.get(key) - baselineMetricPerDimension.get(key));
            differences.addValue(diff);
        }
        return getConfidenceInterval(alpha / 2, (int) differences.getN() - 1, (int) differences.getN(),
                differences.getStandardDeviation(), differences.getMean());
    } else {
        // independent samples --> analyse distribution of differences between means
        SummaryStatistics statsBaseline = new SummaryStatistics();
        for (double d : baselineMetricPerDimension.values()) {
            statsBaseline.addValue(d);
        }
        SummaryStatistics statsTest = new SummaryStatistics();
        for (double d : testMetricPerDimension.values()) {
            statsTest.addValue(d);
        }
        long dfT = statsBaseline.getN() + statsTest.getN() - 2;
        double sDif = Math.sqrt((1.0 / statsBaseline.getN() + 1.0 / statsTest.getN())
                * (statsBaseline.getVariance() * (statsBaseline.getN() - 1)
                        + statsTest.getVariance() * (statsTest.getN() - 1)));
        double mDif = Math.abs(statsTest.getMean() - statsBaseline.getMean());
        return getConfidenceInterval(alpha, (int) dfT, (int) dfT, sDif, mDif);
    }
}
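
The paired-samples branch relies on retainAll to restrict the comparison to the dimensions present in both metric maps. A minimal sketch of that key-overlap step with plain JDK maps:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class KeyOverlapSketch {
    public static void main(String[] args) {
        Map<String, Double> baseline = Map.of("u1", 0.30, "u2", 0.25, "u3", 0.40);
        Map<String, Double> test = Map.of("u2", 0.35, "u3", 0.45, "u4", 0.50);

        // keep only the dimensions both maps know about
        Set<String> overlap = new HashSet<>(baseline.keySet());
        overlap.retainAll(test.keySet());

        for (String key : overlap) {
            double diff = Math.abs(test.get(key) - baseline.get(key));
            System.out.println(key + " -> " + diff);
        }
    }
}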