Example usage for java.util Set stream

List of usage examples for java.util Set stream

Introduction

On this page you can find example usages of java.util.Set.stream().

Prototype

default Stream<E> stream() 

Document

Returns a sequential Stream with this collection as its source.
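
As a quick, self-contained illustration (not taken from any of the projects below), sorting a Set's elements through its stream:

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class SetStreamExample {
    public static void main(String[] args) {
        // A small sample set; Set.of requires Java 9 or later.
        Set<String> tags = Set.of("beta", "Alpha", "gamma");

        // stream() returns a sequential Stream with the set as its source.
        List<String> sorted = tags.stream()
                .sorted(String.CASE_INSENSITIVE_ORDER)
                .collect(Collectors.toList());

        System.out.println(sorted); // prints [Alpha, beta, gamma]
    }
}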

Usage

From source file:com.netflix.genie.core.jpa.specifications.JpaSpecificationUtils.java

/**
 * Get the sorted like statement for tags used in specification queries.
 *
 * @param tags The tags to use. Not null.
 * @return The tags, sorted while ignoring case, delimited with the percent symbol.
 */
public static String getTagLikeString(@NotNull final Set<String> tags) {
    final StringBuilder builder = new StringBuilder();
    tags.stream().filter(StringUtils::isNotBlank).sorted(String.CASE_INSENSITIVE_ORDER)
            .forEach(tag -> builder.append(PERCENT).append(CommonFieldsEntity.TAG_DELIMITER).append(tag)
                    .append(CommonFieldsEntity.TAG_DELIMITER));
    return builder.append(PERCENT).toString();
}
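
A hypothetical call to the method above, assuming PERCENT is "%" and CommonFieldsEntity.TAG_DELIMITER is "|" (neither constant is shown in the snippet):

// Assumed constant values: PERCENT = "%", TAG_DELIMITER = "|".
Set<String> tags = new HashSet<>(Arrays.asList("Test", "prod", " "));
String pattern = getTagLikeString(tags);
// The blank entry is filtered out and the rest are sorted case-insensitively:
// pattern -> "%|prod|%|Test|%"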

From source file:com.netflix.genie.web.data.repositories.jpa.specifications.JpaSpecificationUtils.java

/**
 * Get the sorted like statement for tags used in specification queries.
 *
 * @param tags The tags to use. Not null.
 * @return The tags, sorted while ignoring case, delimited with the percent symbol.
 */
static String getTagLikeString(@NotNull final Set<String> tags) {
    final StringBuilder builder = new StringBuilder();
    tags.stream().filter(StringUtils::isNotBlank).sorted(String.CASE_INSENSITIVE_ORDER)
            .forEach(tag -> builder.append(PERCENT).append(TAG_DELIMITER).append(tag).append(TAG_DELIMITER));
    return builder.append(PERCENT).toString();
}

From source file:edu.pitt.dbmi.ccd.db.specification.AnnotationTargetSpecification.java

private static List<Predicate> inTitleOrAddressOrName(Root<AnnotationTarget> root, CriteriaBuilder cb,
        Set<String> terms) {
    return terms.stream().map(t -> containsLike(t))
            .map(t -> cb.or(titleContains(root, cb, t),
                    (root.get(ADDRESS) != null) ? addressContains(root, cb, t) : fileNameContains(root, cb, t)))
            .collect(Collectors.toList());
}

From source file:edu.pitt.dbmi.ccd.db.specification.AnnotationTargetSpecification.java

private static List<Predicate> notInTitleOrAddressOrName(Root<AnnotationTarget> root, CriteriaBuilder cb,
        Set<String> terms) {
    return terms.stream().map(t -> containsLike(t))
            .map(t -> cb.not(
                    cb.or(titleContains(root, cb, t), (root.get(ADDRESS) != null) ? addressContains(root, cb, t)
                            : fileNameContains(root, cb, t))))
            .collect(Collectors.toList());
}

From source file:com.github.robozonky.app.version.UpdateMonitor.java

static String findFirstStable(final Set<String> versions) {
    return versions.stream().filter(UpdateMonitor::isStable).findFirst()
            .orElseThrow(() -> new IllegalStateException("No stable version found."));
}

From source file:com.thinkbiganalytics.nifi.rest.model.flow.NiFiFlowConnectionConverter.java

/**
 * Convert a Nifi Connection {@link ConnectionDTO} to a simplified {@link NifiFlowConnection}
 *
 * @param connectionDTO the Nifi connection
 * @return the converted simplified connection
 */
public static NifiFlowConnection toNiFiFlowConnection(ConnectionDTO connectionDTO) {
    if (connectionDTO != null) {
        String name = connectionDTO.getName();
        if (StringUtils.isBlank(name)) {
            Set<String> relationships = connectionDTO.getSelectedRelationships();
            if (relationships != null) {
                name = relationships.stream().collect(Collectors.joining(","));
            }
        }
        NifiFlowConnection connection = new NifiFlowConnection(connectionDTO.getId(), name,
                connectionDTO.getSource() != null ? connectionDTO.getSource().getId() : null,
                connectionDTO.getDestination() != null ? connectionDTO.getDestination().getId() : null);
        return connection;
    }
    return null;
}
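
In the method above, Set.stream() only builds a fallback connection name by joining the selected relationship names; a standalone sketch of that step (values are illustrative):

// A LinkedHashSet keeps the join order predictable for the example.
Set<String> relationships = new LinkedHashSet<>(Arrays.asList("success", "failure"));
String name = relationships.stream().collect(Collectors.joining(","));
// name -> "success,failure"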

From source file:com.github.horrorho.inflatabledonkey.cloud.AssetPool.java

static Set<Asset> validate(Set<Asset> assets) {
    return assets.stream().filter(u -> {
        if (u.fileSignature().isPresent()) {
            return true;
        }
        logger.warn("-- validate() - missing file signature: {}", u);
        return false;
    }).collect(toSet());
}

From source file:com.movies.jsf.JsfUtil.java

public static String getReturnUrl(String address, Map<String, Object> parameterMap) {
    StringBuilder builder = new StringBuilder().append(address);
    if (MapUtils.isEmpty(parameterMap)) {
        throw new IllegalArgumentException("Parameter map can not be empty!");
    } else {
        Set<Entry<String, Object>> set = parameterMap.entrySet();
        set.stream().forEach((entry) -> {
            builder.append(QUESTION).append(REDIRECT).append(AND).append(entry.getKey()).append(EQUALS)
                    .append(entry.getValue());
        });
    }
    return builder.toString();
}
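
A hypothetical call to the method above, assuming QUESTION is "?", REDIRECT is "faces-redirect=true", AND is "&" and EQUALS is "=" (none of these constants are shown in the snippet):

// Assumed constant values: QUESTION = "?", REDIRECT = "faces-redirect=true", AND = "&", EQUALS = "=".
Map<String, Object> params = new LinkedHashMap<>();
params.put("movieId", 42);
String url = getReturnUrl("details.xhtml", params);
// url -> "details.xhtml?faces-redirect=true&movieId=42"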

From source file:edu.brandeis.wisedb.scheduler.experiments.SkewDistributionExperiment.java

public static void calculateBurn(int samples) throws Exception {
    TightenableSLA sla = PercentSLA.nintyTenSLA();
    //TightenableSLA sla = new SimpleLatencyModelSLA(9 * 60 * 1000, 1);
    //TightenableSLA sla = PerQuerySLA.getLatencyTimesN(2.0);
    //TightenableSLA sla = new AverageLatencyModelSLA(7 * 60 * 1000, 1);
    QueryTimePredictor qtp = new QueryTimePredictor();

    File f = new File("distSkew.csv");
    if (f.exists())
        f.delete();

    try (Trainer t = new Trainer("distSkew.csv", sla)) {
        t.train(2000, 12);
    }

    DTSearcher dt = new DTSearcher("distSkew.csv", qtp, sla);
    AStarGraphSearch astar = new AStarGraphSearch(new UnassignedQueryTimeHeuristic(qtp), sla, qtp);
    //FirstFitDecreasingGraphSearch astar = new FirstFitDecreasingGraphSearch(sla, qtp);

    ChiSquareTest cst = new ChiSquareTest();
    ChiSquaredDistribution cqd = new ChiSquaredDistribution(qtp.QUERY_TYPES.length - 1);
    double[] expected = Arrays.stream(qtp.QUERY_TYPES).mapToDouble(i -> 20.0 / (qtp.QUERY_TYPES.length))
            .toArray();

    System.out.println("Chi\tDT\tOpt");

    for (int i = 0; i < samples; i++) {
        Set<ModelQuery> smp = ModelWorkloadGenerator.randomQueries(20);

        // reject samples that don't have at least one of each query type
        long repr = smp.stream().mapToInt(q -> q.getType()).distinct().count();
        if (repr != qtp.QUERY_TYPES.length) {
            i--;
            continue;
        }

        Map<Integer, List<ModelQuery>> groups = smp.stream().collect(Collectors.groupingBy(q -> q.getType()));

        long[] obs = Arrays.stream(qtp.QUERY_TYPES).mapToLong(v -> groups.get(v).size()).toArray();

        double chi = cst.chiSquare(expected, obs);
        chi = cqd.cumulativeProbability(chi);

        Cost dtCost = dt.getCostForQueries(smp, sla);
        Cost optCost = astar.getCostForQueries(smp, sla);

        System.out.println(chi + "\t" + dtCost.getTotalCost() + "\t" + optCost.getTotalCost());
    }

}

From source file:Inmemantlr.java

private static Set<File> getFilesForOption(CommandLine cmd, String opt) {
    Set<File> ret = new HashSet<>();
    if (cmd.hasOption(opt)) {
        Set<String> us = new HashSet<>();
        us.addAll(Arrays.asList(cmd.getOptionValues(opt)));
        ret.addAll(us.stream().map(File::new).collect(Collectors.toSet()));
    }
    return ret;
}
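
A hypothetical invocation of the method above, assuming the command line was parsed from arguments such as "-g Hello.g4 Expr.g4" (the option name and file names are illustrative):

Set<File> grammars = getFilesForOption(cmd, "g");
// grammars then contains new File("Hello.g4") and new File("Expr.g4");
// if the option is absent, an empty set is returned.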