Example usage for java.util Collection contains

List of usage examples for java.util Collection contains

Introduction

On this page you can find example usage of java.util.Collection.contains(Object o).

Prototype

boolean contains(Object o);

Document

Returns true if this collection contains the specified element.
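
More formally, contains(o) returns true if and only if the collection holds at least one element e such that Objects.equals(o, e); implementations that cannot hold null or an incompatible type are permitted to throw NullPointerException or ClassCastException instead. A minimal, self-contained sketch:

import java.util.Arrays;
import java.util.Collection;

public class ContainsDemo {
    public static void main(String[] args) {
        Collection<String> names = Arrays.asList("alice", "bob", null);
        System.out.println(names.contains("alice")); // true
        System.out.println(names.contains("carol")); // false
        System.out.println(names.contains(null));    // true here; null-hostile collections throw NPE instead
    }
}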

Usage

From source file:podd.util.PoddWebappTestUtil.java

public static <T> void checkMembers(boolean checkGetterMethods, Set<String> skipList, Collection<T> collection,
        T... instances) {
    if (null == instances) {
        assertTrue(collection.isEmpty());
    } else {
        assertEquals(instances.length, collection.size());
        for (T instance : instances) {
            assertTrue("Containing: " + instance, collection.contains(instance));
        }
        if (checkGetterMethods) {
            for (T obj1 : collection) {
                for (T obj2 : instances) {
                    if (obj1.equals(obj2)) {
                        checkGetterMethods(obj1, obj2, skipList);
                        break;
                    }
                }
            }
        }
    }
}
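
A hypothetical call site, just to show the shape of the arguments (project.getMembers(), alice, and bob are invented for illustration; skipList names getter properties to ignore when checkGetterMethods is true):

// Assert that the persisted collection contains exactly the expected objects,
// comparing getters but skipping the generated "id" property.
checkMembers(true, Collections.singleton("id"), project.getMembers(), alice, bob);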

From source file:javadepchecker.Main.java

/**
 * Check if a dependency is needed by a given package
 *
 * @param pkg Gentoo package name
 * @param deps collection of dependencies for the package
 * @return true if the dependency is needed, false otherwise
 * @throws IOException
 */
private static boolean depNeeded(String pkg, Collection<String> deps) throws IOException {
    Collection<String> jars = getPackageJars(pkg);

    // Empty jar list means a virtual package provided by the VM itself; assume the dependency is needed
    if (jars.isEmpty()) {
        return true;
    }
    for (String jarName : jars) {
        JarFile jar = new JarFile(jarName);
        for (Enumeration<JarEntry> e = jar.entries(); e.hasMoreElements();) {
            String name = e.nextElement().getName();
            if (deps.contains(name)) {
                return true;
            }
        }
    }
    return false;
}
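
One detail worth noting: the JarFile opened above is never closed. A sketch of the same scan loop using try-with-resources (an assumption about how one might restructure it, not the project's code) releases the file handle even on the early return:

for (String jarName : jars) {
    try (JarFile jar = new JarFile(jarName)) {            // closed automatically, even on return
        for (Enumeration<JarEntry> e = jar.entries(); e.hasMoreElements();) {
            if (deps.contains(e.nextElement().getName())) {
                return true;
            }
        }
    }
}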

From source file:com.cloudera.oryx.ml.mllib.als.AUC.java

static double areaUnderCurve(JavaSparkContext sparkContext, MatrixFactorizationModel mfModel,
        JavaRDD<Rating> positiveData) {

    // This does not use Spark's BinaryClassificationMetrics.areaUnderROC because it
    // is intended to operate on one large set of (score,label) pairs. The computation
    // here is really many small AUC problems, for which a much faster direct computation
    // is available.

    // Extract all positive (user,product) pairs
    JavaPairRDD<Integer, Integer> positiveUserProducts = positiveData
            .mapToPair(new PairFunction<Rating, Integer, Integer>() {
                @Override
                public Tuple2<Integer, Integer> call(Rating rating) {
                    return new Tuple2<>(rating.user(), rating.product());
                }
            });

    JavaPairRDD<Integer, Iterable<Rating>> positivePredictions = predictAll(mfModel, positiveData,
            positiveUserProducts);

    // All distinct item IDs, to be broadcast
    final Broadcast<List<Integer>> allItemIDsBC = sparkContext
            .broadcast(positiveUserProducts.values().distinct().collect());

    JavaPairRDD<Integer, Integer> negativeUserProducts = positiveUserProducts.groupByKey()
            .flatMapToPair(new PairFlatMapFunction<Tuple2<Integer, Iterable<Integer>>, Integer, Integer>() {
                private final RandomGenerator random = RandomManager.getRandom();

                @Override
                public Iterable<Tuple2<Integer, Integer>> call(
                        Tuple2<Integer, Iterable<Integer>> userIDsAndItemIDs) {
                    Integer userID = userIDsAndItemIDs._1();
                    Collection<Integer> positiveItemIDs = Sets.newHashSet(userIDsAndItemIDs._2());
                    int numPositive = positiveItemIDs.size();
                    Collection<Tuple2<Integer, Integer>> negative = new ArrayList<>(numPositive);
                    List<Integer> allItemIDs = allItemIDsBC.value();
                    int numItems = allItemIDs.size();
                    // Sample about as many negative examples as positive
                    for (int i = 0; i < numItems && negative.size() < numPositive; i++) {
                        Integer itemID = allItemIDs.get(random.nextInt(numItems));
                        if (!positiveItemIDs.contains(itemID)) {
                            negative.add(new Tuple2<>(userID, itemID));
                        }
                    }
                    return negative;
                }
            });

    JavaPairRDD<Integer, Iterable<Rating>> negativePredictions = predictAll(mfModel, positiveData,
            negativeUserProducts);

    return positivePredictions.join(negativePredictions).values()
            .mapToDouble(new DoubleFunction<Tuple2<Iterable<Rating>, Iterable<Rating>>>() {
                @Override
                public double call(Tuple2<Iterable<Rating>, Iterable<Rating>> t) {
                    // AUC is also the probability that random positive examples
                    // rank higher than random examples at large. Here we compare all random negative
                    // examples to all positive examples and report the totals as an alternative
                    // computation for AUC
                    long correct = 0;
                    long total = 0;
                    for (Rating positive : t._1()) {
                        for (Rating negative : t._2()) {
                            if (positive.rating() > negative.rating()) {
                                correct++;
                            }
                            total++;
                        }
                    }
                    return (double) correct / total;
                }
            }).mean();
}
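
The relevant contains call here is in the negative-sampling loop: the positive item IDs are copied into a HashSet so each membership test is O(1) while candidates are drawn at random. Stripped of the RDD plumbing, the idea looks roughly like this (hypothetical data):

Set<Integer> positiveItemIDs = new HashSet<>(Arrays.asList(1, 5, 9));
List<Integer> allItemIDs = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9);
Random random = new Random();
List<Integer> negativeItemIDs = new ArrayList<>(positiveItemIDs.size());
// Sample about as many negative examples as there are positive ones
for (int i = 0; i < allItemIDs.size() && negativeItemIDs.size() < positiveItemIDs.size(); i++) {
    Integer candidate = allItemIDs.get(random.nextInt(allItemIDs.size()));
    if (!positiveItemIDs.contains(candidate)) {   // O(1) lookup on the HashSet
        negativeItemIDs.add(candidate);
    }
}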

From source file:com.cloudera.oryx.app.batch.mllib.als.Evaluation.java

/**
 * Computes AUC (area under the ROC curve) as a recommender evaluation metric.
 * Really, it computes what might be described as "Mean AUC", as it computes AUC per
 * user and averages them.
 */
static double areaUnderCurve(JavaSparkContext sparkContext, MatrixFactorizationModel mfModel,
        JavaRDD<Rating> positiveData) {

    // This does not use Spark's BinaryClassificationMetrics.areaUnderROC because it
    // is intended to operate on one large set of (score,label) pairs. The computation
    // here is really many small AUC problems, for which a much faster direct computation
    // is available.

    // Extract all positive (user,product) pairs
    JavaPairRDD<Integer, Integer> positiveUserProducts = positiveData
            .mapToPair(rating -> new Tuple2<>(rating.user(), rating.product()));

    JavaPairRDD<Integer, Iterable<Rating>> positivePredictions = predictAll(mfModel, positiveData,
            positiveUserProducts);

    // All distinct item IDs, to be broadcast
    Broadcast<List<Integer>> allItemIDsBC = sparkContext
            .broadcast(positiveUserProducts.values().distinct().collect());

    JavaPairRDD<Integer, Integer> negativeUserProducts = positiveUserProducts.groupByKey()
            .flatMapToPair(new PairFlatMapFunction<Tuple2<Integer, Iterable<Integer>>, Integer, Integer>() {
                private final RandomGenerator random = RandomManager.getRandom();

                @Override
                public Iterable<Tuple2<Integer, Integer>> call(
                        Tuple2<Integer, Iterable<Integer>> userIDsAndItemIDs) {
                    Integer userID = userIDsAndItemIDs._1();
                    Collection<Integer> positiveItemIDs = Sets.newHashSet(userIDsAndItemIDs._2());
                    int numPositive = positiveItemIDs.size();
                    Collection<Tuple2<Integer, Integer>> negative = new ArrayList<>(numPositive);
                    List<Integer> allItemIDs = allItemIDsBC.value();
                    int numItems = allItemIDs.size();
                    // Sample about as many negative examples as positive
                    for (int i = 0; i < numItems && negative.size() < numPositive; i++) {
                        Integer itemID = allItemIDs.get(random.nextInt(numItems));
                        if (!positiveItemIDs.contains(itemID)) {
                            negative.add(new Tuple2<>(userID, itemID));
                        }
                    }
                    return negative;
                }
            });

    JavaPairRDD<Integer, Iterable<Rating>> negativePredictions = predictAll(mfModel, positiveData,
            negativeUserProducts);

    return positivePredictions.join(negativePredictions).values().mapToDouble(t -> {
        // AUC is also the probability that random positive examples
        // rank higher than random examples at large. Here we compare all random negative
        // examples to all positive examples and report the totals as an alternative
        // computation for AUC
        long correct = 0;
        long total = 0;
        for (Rating positive : t._1()) {
            for (Rating negative : t._2()) {
                if (positive.rating() > negative.rating()) {
                    correct++;
                }
                total++;
            }
        }
        if (total == 0) {
            return 0.0;
        }
        return (double) correct / total;
    }).mean();
}

From source file:fll.scheduler.TableOptimizer.java

/**
 * Check if any elements in set2 are in set1.
 */
private static boolean containsAny(final Collection<String> set1, final Collection<String> set2) {
    for (final String needle : set2) {
        if (set1.contains(needle)) {
            return true;
        }
    }
    return false;
}
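
The JDK provides the same check out of the box: java.util.Collections.disjoint(c1, c2) returns true when the two collections have no element in common, so an equivalent one-liner (shown only as a sketch) would be:

private static boolean containsAny(final Collection<String> set1, final Collection<String> set2) {
    return !Collections.disjoint(set1, set2);
}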

From source file:de.tudarmstadt.ukp.dkpro.core.corenlp.internal.DKPro2CoreNlp.java

@SuppressWarnings("unchecked")
public static <T extends HasWord> List<T> applyPtbEscaping(List<T> words, Collection<String> quoteBegin,
        Collection<String> quoteEnd) {
    PTBEscapingProcessor<T, String, Word> escaper = new PTBEscapingProcessor<T, String, Word>();
    // Apply escaper to the whole sentence, not to each token individually. The
    // escaper takes context into account, e.g. when transforming regular double
    // quotes into PTB opening and closing quotes (`` and '').
    words = (List<T>) escaper.apply(words);

    for (HasWord w : words) {
        if (quoteBegin != null && quoteBegin.contains(w.word())) {
            w.setWord("``");
        } else if (quoteEnd != null && quoteEnd.contains(w.word())) {
            w.setWord("\'\'");
        }
    }

    return words;
}
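
A hypothetical call, assuming Stanford CoreNLP's Word class (which implements HasWord); the guillemet quote sets are purely illustrative:

List<Word> tokens = new ArrayList<>(Arrays.asList(new Word("«"), new Word("Bonjour"), new Word("»")));
List<Word> escaped = applyPtbEscaping(tokens,
        Collections.singleton("«"),   // tokens to rewrite as the PTB opening quote ``
        Collections.singleton("»"));  // tokens to rewrite as the PTB closing quote ''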

From source file:com.cloudera.oryx.app.mllib.als.Evaluation.java

/**
 * Computes AUC (area under the ROC curve) as a recommender evaluation metric.
 * Really, it computes what might be described as "Mean AUC", as it computes AUC per
 * user and averages them.
 */
static double areaUnderCurve(JavaSparkContext sparkContext, MatrixFactorizationModel mfModel,
        JavaRDD<Rating> positiveData) {

    // This does not use Spark's BinaryClassificationMetrics.areaUnderROC because it
    // is intended to operate on one large set of (score,label) pairs. The computation
    // here is really many small AUC problems, for which a much faster direct computation
    // is available.

    // Extract all positive (user,product) pairs
    JavaPairRDD<Integer, Integer> positiveUserProducts = positiveData
            .mapToPair(new PairFunction<Rating, Integer, Integer>() {
                @Override
                public Tuple2<Integer, Integer> call(Rating rating) {
                    return new Tuple2<>(rating.user(), rating.product());
                }
            });

    JavaPairRDD<Integer, Iterable<Rating>> positivePredictions = predictAll(mfModel, positiveData,
            positiveUserProducts);

    // All distinct item IDs, to be broadcast
    final Broadcast<List<Integer>> allItemIDsBC = sparkContext
            .broadcast(positiveUserProducts.values().distinct().collect());

    JavaPairRDD<Integer, Integer> negativeUserProducts = positiveUserProducts.groupByKey()
            .flatMapToPair(new PairFlatMapFunction<Tuple2<Integer, Iterable<Integer>>, Integer, Integer>() {
                private final RandomGenerator random = RandomManager.getRandom();

                @Override
                public Iterable<Tuple2<Integer, Integer>> call(
                        Tuple2<Integer, Iterable<Integer>> userIDsAndItemIDs) {
                    Integer userID = userIDsAndItemIDs._1();
                    Collection<Integer> positiveItemIDs = Sets.newHashSet(userIDsAndItemIDs._2());
                    int numPositive = positiveItemIDs.size();
                    Collection<Tuple2<Integer, Integer>> negative = new ArrayList<>(numPositive);
                    List<Integer> allItemIDs = allItemIDsBC.value();
                    int numItems = allItemIDs.size();
                    // Sample about as many negative examples as positive
                    for (int i = 0; i < numItems && negative.size() < numPositive; i++) {
                        Integer itemID = allItemIDs.get(random.nextInt(numItems));
                        if (!positiveItemIDs.contains(itemID)) {
                            negative.add(new Tuple2<>(userID, itemID));
                        }
                    }
                    return negative;
                }
            });

    JavaPairRDD<Integer, Iterable<Rating>> negativePredictions = predictAll(mfModel, positiveData,
            negativeUserProducts);

    return positivePredictions.join(negativePredictions).values()
            .mapToDouble(new DoubleFunction<Tuple2<Iterable<Rating>, Iterable<Rating>>>() {
                @Override
                public double call(Tuple2<Iterable<Rating>, Iterable<Rating>> t) {
                    // AUC is also the probability that random positive examples
                    // rank higher than random examples at large. Here we compare all random negative
                    // examples to all positive examples and report the totals as an alternative
                    // computation for AUC
                    long correct = 0;
                    long total = 0;
                    for (Rating positive : t._1()) {
                        for (Rating negative : t._2()) {
                            if (positive.rating() > negative.rating()) {
                                correct++;
                            }
                            total++;
                        }
                    }
                    return (double) correct / total;
                }
            }).mean();
}

From source file:eu.ggnet.dwoss.report.assist.ReportUtil.java

/**
 * Returns all Lines of the Report for Category Invoiced.
 * This consists of:
 * <ul>
 * <li>Position of Type Capital Asset</li>
 * <li>Position of Type Invoice, with no References</li>
 * <li>Position of Type UNIT_ANNEX in DocumentType CREDIT_MEMO/ANNULATION_INVOICE and a Referencing Invoice in the same report.</li>
 * </ul>
 * <p>
 * The collection must not contain null values.
 * <p>
 * @param lines the report lines to filter
 * @return all Lines of the Report for Category Invoiced.
 */
//TODO: We could also subtract the value of a unit annex from the invoice and not return the unit annex at all.
//But consider the impact in the UI, especially if we allow selection of such a "combined" line.
public static NavigableSet<ReportLine> filterInvoiced(Collection<ReportLine> lines) {
    NavigableSet<ReportLine> result = new TreeSet<>();
    for (ReportLine line : lines) {
        if (line.getDocumentType() == CAPITAL_ASSET)
            result.add(line);
        if (line.getDocumentType() == INVOICE && !line.isFullRepayedIn(lines))
            result.add(line);
        if (line.isPartialRepayment() && !line.isFullRepayedIn(lines)) {
            ReportLine invoiceRef = line.getSingleReference(INVOICE);
            if (invoiceRef == null)
                /*  No Invoice exists, probably before 2014 */ result.add(line);
            else if (lines.contains(invoiceRef))
                result.add(line);
        }
    }
    return result;
}
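
Since lines is only declared as a Collection, the lines.contains(invoiceRef) check may be a linear scan for every partial repayment. If callers pass large lists, copying them once into a hash-based set keeps those lookups constant time (a sketch, assuming ReportLine has consistent equals/hashCode):

Set<ReportLine> lineSet = new HashSet<>(lines);    // build once, before the loop
// ... inside the loop, the reference check becomes:
// else if (lineSet.contains(invoiceRef)) result.add(line);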

From source file:doc.action.SelectedDocsUtils.java

public static Collection saveSelectedDocsIDs(HttpServletRequest request) {
    //System.out.println("start");
    Collection documents = (Collection) request.getSession().getAttribute(Constant.SELECTED_PRESIDENTS);
    if (documents == null) {
        documents = new ArrayList();
        request.getSession().setAttribute(Constant.SELECTED_PRESIDENTS, documents);
    }

    Enumeration parameterNames = request.getParameterNames();
    while (parameterNames.hasMoreElements()) {
        String parameterName = (String) parameterNames.nextElement();
        if (parameterName.startsWith("chkbx_")) {
            String docId = StringUtils.substringAfter(parameterName, "chkbx_");
            String parameterValue = request.getParameter(parameterName);
            if (parameterValue.equals(Constant.SELECTED)) {
                if (!documents.contains(docId)) {
                    documents.add(docId);
                }
            } else {
                documents.remove(docId);
            }
        }
    }

    return documents;
}
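
Two small observations on this snippet: the raw Collection forces unchecked operations, and contains/remove on an ArrayList are linear scans. Keeping the same session attribute and Constant names as above (an assumption, not a change to the project), a LinkedHashSet would preserve insertion order while making both operations O(1):

@SuppressWarnings("unchecked")
Collection<String> documents = (Collection<String>) request.getSession()
        .getAttribute(Constant.SELECTED_PRESIDENTS);
if (documents == null) {
    documents = new LinkedHashSet<>();   // ordered, duplicate-free, O(1) contains/remove
    request.getSession().setAttribute(Constant.SELECTED_PRESIDENTS, documents);
}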

From source file:fr.certu.chouette.manager.AbstractNeptuneManager.java

/**
 * Merges the source collection into the target one:<br/>
 * adds each source object that is not already contained in the target.
 *
 * @param <U>
 *           type of source and target entries
 * @param target
 *           collection to fill
 * @param source
 *           objects to add
 */
protected static <U> void mergeCollection(Collection<U> target, Collection<U> source) {
    if (source == null || source.isEmpty())
        return;
    for (U object : source) {
        if (!target.contains(object)) {
            target.add(object);
        }
    }
}
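
A minimal usage sketch with hypothetical data:

List<String> target = new ArrayList<>(Arrays.asList("A", "B"));
mergeCollection(target, Arrays.asList("B", "C"));
// target is now [A, B, C]; "B" was skipped by the target.contains(object) check

Note that contains on a List is O(n) per element, so for large collections a Set-backed target merges considerably faster.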