Example usage for java.util HashSet stream

List of usage examples for java.util HashSet stream

Introduction

On this page you can find example usages for java.util HashSet stream.

Prototype

default Stream<E> stream() 

Document

Returns a sequential Stream with this collection as its source.
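
As a minimal, standalone sketch (not taken from any project below), streaming a HashSet typically looks like this; the stream is sequential and, for a HashSet, has no defined encounter order:

import java.util.HashSet;
import java.util.List;
import java.util.stream.Collectors;

public class HashSetStreamDemo {
    public static void main(String[] args) {
        HashSet<String> names = new HashSet<>();
        names.add("alpha");
        names.add("beta");
        names.add("gamma");

        // stream() is inherited from Collection; a HashSet stream's
        // encounter order is unspecified.
        List<String> upper = names.stream()
                .map(String::toUpperCase)
                .collect(Collectors.toList());

        System.out.println(upper);
    }
}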

Usage

From source file:ai.grakn.graql.internal.util.StringConverter.java

/**
 * @return all Graql keywords
 */
private static Stream<String> getKeywords() {
    HashSet<String> keywords = new HashSet<>();

    for (int i = 1; GraqlLexer.VOCABULARY.getLiteralName(i) != null; i++) {
        String name = GraqlLexer.VOCABULARY.getLiteralName(i);
        keywords.add(name.replaceAll("'", ""));
    }

    return keywords.stream().filter(keyword -> !ALLOWED_ID_KEYWORDS.contains(keyword));
}
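
The core idiom above is filtering a HashSet's stream against another collection. A minimal standalone sketch of the same pattern, with hypothetical keyword data:

import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;

public class FilterDemo {
    public static void main(String[] args) {
        HashSet<String> keywords = new HashSet<>(Set.of("match", "insert", "min", "max"));
        Set<String> allowed = Set.of("min", "max");

        // Keep only the keywords that are not in the allowed set.
        Set<String> filtered = keywords.stream()
                .filter(k -> !allowed.contains(k))
                .collect(Collectors.toSet());

        System.out.println(filtered); // [match, insert] in some order
    }
}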

From source file:org.apache.sysml.hops.codegen.opt.PlanAnalyzer.java

public static Collection<PlanPartition> analyzePlanPartitions(CPlanMemoTable memo, ArrayList<Hop> roots,
        boolean ext) {
    //determine connected sub graphs of plans
    Collection<HashSet<Long>> parts = getConnectedSubGraphs(memo, roots);

    //determine roots and materialization points
    Collection<PlanPartition> ret = new ArrayList<>();
    for (HashSet<Long> partition : parts) {
        HashSet<Long> R = getPartitionRootNodes(memo, partition);
        HashSet<Long> I = getPartitionInputNodes(R, partition, memo);
        ArrayList<Long> M = getMaterializationPoints(R, partition, memo);
        HashSet<Long> Pnpc = getNodesWithNonPartitionConsumers(R, partition, memo);
        InterestingPoint[] Mext = !ext ? null : getMaterializationPointsExt(R, partition, M, memo);
        boolean hasOuter = partition.stream().anyMatch(k -> memo.contains(k, TemplateType.OUTER));
        ret.add(new PlanPartition(partition, R, I, Pnpc, M, Mext, hasOuter));
    }

    return ret;
}
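
The HashSet stream usage here is partition.stream().anyMatch(...), which short-circuits as soon as one element satisfies the predicate. A minimal sketch of that pattern, with hypothetical IDs standing in for the memo-table lookup:

import java.util.HashSet;
import java.util.Set;

public class AnyMatchDemo {
    public static void main(String[] args) {
        HashSet<Long> partition = new HashSet<>(Set.of(1L, 5L, 9L));
        Set<Long> outerTemplates = Set.of(9L, 42L); // stands in for memo.contains(k, OUTER)

        // anyMatch stops at the first matching element.
        boolean hasOuter = partition.stream().anyMatch(outerTemplates::contains);
        System.out.println(hasOuter); // true
    }
}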

From source file:org.apache.samza.system.kafka.KafkaSystemAdmin.java

/**
 * A helper method that takes oldest, newest, and upcoming offsets for each
 * system stream partition, and creates a single map from stream name to
 * SystemStreamMetadata.
 *
 * @param newestOffsets map of SSP to newest offset
 * @param oldestOffsets map of SSP to oldest offset
 * @param upcomingOffsets map of SSP to upcoming offset
 * @return a {@link Map} from {@code system} to {@link SystemStreamMetadata}
 */
@VisibleForTesting
static Map<String, SystemStreamMetadata> assembleMetadata(Map<SystemStreamPartition, String> oldestOffsets,
        Map<SystemStreamPartition, String> newestOffsets, Map<SystemStreamPartition, String> upcomingOffsets) {
    HashSet<SystemStreamPartition> allSSPs = new HashSet<>();
    allSSPs.addAll(oldestOffsets.keySet());
    allSSPs.addAll(newestOffsets.keySet());
    allSSPs.addAll(upcomingOffsets.keySet());

    Map<String, SystemStreamMetadata> assembledMetadata = allSSPs.stream()
            .collect(Collectors.groupingBy(SystemStreamPartition::getStream)).entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, entry -> {
                Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetadata = entry
                        .getValue().stream()
                        .collect(Collectors.toMap(SystemStreamPartition::getPartition,
                                ssp -> new SystemStreamMetadata.SystemStreamPartitionMetadata(
                                        oldestOffsets.getOrDefault(ssp, null),
                                        newestOffsets.getOrDefault(ssp, null), upcomingOffsets.get(ssp))));
                return new SystemStreamMetadata(entry.getKey(), partitionMetadata);
            }));

    return assembledMetadata;
}
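
The pattern above is a two-level collection: groupingBy buckets the set's elements, then toMap transforms each bucket into the final value. A reduced sketch of the same shape, using plain strings instead of SystemStreamPartition:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class GroupThenMapDemo {
    public static void main(String[] args) {
        HashSet<String> ssps = new HashSet<>(Set.of("orders-0", "orders-1", "users-0"));

        // Group by the "stream name" prefix, then summarize each group.
        Map<String, Integer> partitionCounts = ssps.stream()
                .collect(Collectors.groupingBy(s -> s.substring(0, s.indexOf('-'))))
                .entrySet().stream()
                .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().size()));

        System.out.println(partitionCounts); // {orders=2, users=1}
    }
}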

From source file:no.ntnu.okse.web.controller.SubscriberController.java

/**
 * This method deletes all the subscribers registered in the SubscriptionService.
 *
 * @return A JSON serialized string
 */
@RequestMapping(method = RequestMethod.DELETE, value = DELETE_ALL_SUBSCRIBERS)
public @ResponseBody String deleteAllSubscribers() {
    SubscriptionService ss = SubscriptionService.getInstance();
    HashSet<Subscriber> allSubscribers = ss.getAllSubscribers();
    allSubscribers.stream().forEach(s -> ss.removeSubscriber(s));
    return "{ \"deleted\" :true }";
}
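
Note that this is only safe if getAllSubscribers() returns a copy: streaming a HashSet while removing from that same set would fail fast with a ConcurrentModificationException. A minimal sketch of draining a set through a defensive copy:

import java.util.HashSet;
import java.util.Set;

public class DrainDemo {
    public static void main(String[] args) {
        HashSet<String> live = new HashSet<>(Set.of("sub-1", "sub-2"));

        // Stream a snapshot so the removals do not modify the set being iterated.
        new HashSet<>(live).stream().forEach(live::remove);

        System.out.println(live.isEmpty()); // true
    }
}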

From source file:no.ntnu.okse.web.controller.TopicController.java

/**
 * This method returns all topics registered in the TopicService
 *
 * @return A JSON serialization of all registered topics
 */
@RequestMapping(method = RequestMethod.GET, value = GET_ALL_TOPICS)
public @ResponseBody List<HashMap<String, Object>> getAlltopics() {
    TopicService ts = TopicService.getInstance();
    SubscriptionService ss = SubscriptionService.getInstance();
    HashSet<Topic> allTopics = ts.getAllTopics();

    List<HashMap<String, Object>> results = new ArrayList<>();

    allTopics.stream().forEach(t -> {
        int subscribers = ss.getAllSubscribersForTopic(t.getFullTopicString()).size();
        HashMap<String, Object> topicInfo = new HashMap<String, Object>() {
            {
                put("subscribers", subscribers);
                put("topic", t);
            }
        };
        results.add(topicInfo);
    });

    results.sort((t1, t2) -> ((Topic) t1.get("topic")).getFullTopicString()
            .compareTo(((Topic) t2.get("topic")).getFullTopicString()));

    return results;
}
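
The forEach-into-a-list step followed by results.sort(...) could also be written as one map/sorted/collect pipeline. A standalone sketch of that alternative, with plain strings standing in for Topic:

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class MapSortDemo {
    public static void main(String[] args) {
        HashSet<String> topics = new HashSet<>(Set.of("news/tech", "news/sports", "alerts"));

        // map stands in for the per-topic enrichment; sorted replaces results.sort.
        List<String> sorted = topics.stream()
                .map(t -> "topic:" + t)
                .sorted()
                .collect(Collectors.toList());

        System.out.println(sorted); // [topic:alerts, topic:news/sports, topic:news/tech]
    }
}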

From source file:org.ecloudmanager.web.faces.DeploymentActionController.java

private void setStatusClass(CyNode node, Action.Status status) {
    String statusClass = status.name().toLowerCase();
    HashSet<String> classes = Sets.newHashSet(node.getClasses().split(" "));
    if (classes.contains(statusClass)) {
        return;
    }
    HashSet<Action.Status> classesToRemove = Sets.newHashSet(Action.Status.values());
    classesToRemove.remove(status);
    classes.removeAll(
            classesToRemove.stream().map(Enum::name).map(String::toLowerCase).collect(Collectors.toSet()));
    classes.add(statusClass);
    node.setClasses(StringUtils.join(classes, " ").trim());
}
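
The stream step maps enum constants to their lowercase names and collects them into a Set for removeAll. A standalone sketch of that transformation, with a hypothetical Status enum:

import java.util.EnumSet;
import java.util.Set;
import java.util.stream.Collectors;

public class EnumNamesDemo {
    enum Status { PENDING, RUNNING, DONE }

    public static void main(String[] args) {
        Set<String> names = EnumSet.allOf(Status.class).stream()
                .map(Enum::name)
                .map(String::toLowerCase)
                .collect(Collectors.toSet());

        System.out.println(names); // [pending, running, done] in some order
    }
}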

From source file:org.mycore.common.MCRUtils.java

/**
 * Merges two HashSets of MyCoRe IDs according to specific rules.
 *
 * @see #COMMAND_OR
 * @see #COMMAND_AND
 * @see #COMMAND_XOR
 * @param set1
 *            1st HashSet to be merged
 * @param set2
 *            2nd HashSet to be merged
 * @param operation
 *            available COMMAND_XYZ
 * @return merged HashSet
 * @deprecated use {@link Stream}s for this
 */
@Deprecated
public static <T> HashSet<T> mergeHashSets(HashSet<? extends T> set1, HashSet<? extends T> set2,
        char operation) {
    Predicate<T> inSet1 = set1::contains;
    Predicate<T> inSet2 = set2::contains;
    Predicate<T> op;

    switch (operation) {
    case COMMAND_OR:
        op = t -> true;//inSet1.or(inSet2);
        break;

    case COMMAND_AND:
        op = inSet1.and(inSet2);
        break;

    case COMMAND_XOR:
        op = inSet1.and(inSet2).negate();
        break;

    default:
        throw new IllegalArgumentException("operation not permitted: " + operation);
    }

    return Stream.concat(set1.stream(), set2.stream()).filter(op)
            .collect(Collectors.toCollection(HashSet::new));
}
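
The deprecation note points at exactly this kind of Stream.concat + filter + toCollection pipeline. A minimal sketch of the OR and AND merges written directly with streams, on hypothetical integer sets:

import java.util.HashSet;
import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class MergeDemo {
    public static void main(String[] args) {
        HashSet<Integer> a = new HashSet<>(Set.of(1, 2, 3));
        HashSet<Integer> b = new HashSet<>(Set.of(3, 4));

        // OR: union of both sets (duplicates collapse in the HashSet).
        HashSet<Integer> union = Stream.concat(a.stream(), b.stream())
                .collect(Collectors.toCollection(HashSet::new));

        // AND: elements present in both sets.
        Predicate<Integer> inBoth = x -> a.contains(x) && b.contains(x);
        HashSet<Integer> intersection = Stream.concat(a.stream(), b.stream())
                .filter(inBoth)
                .collect(Collectors.toCollection(HashSet::new));

        System.out.println(union);        // [1, 2, 3, 4] in some order
        System.out.println(intersection); // [3]
    }
}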

From source file:com.ludgerpeters.acl.UserAclManagerImp.java

public boolean checkUserPermissions(String userId, String permissions[]) {
    HashSet<String> permissionSet = new HashSet<>();
    Arrays.asList(permissions).forEach(s -> {
        permissionSet.add(s);
        String[] split = s.split("\\.");
        IntStream.range(0, split.length).forEach(i -> {
            String join = "";
            for (int j = 0; j < i; j++) {
                join += split[j] + ".";
            }
            join += "*";
            permissionSet.add(join);

        });
    });
    Set<String> userPermissions = userRepository.getPermissions(userId);
    return permissionSet.stream().anyMatch(userPermissions::contains);
}
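
The inner index loop that builds each wildcard prefix can also be expressed with a joining collector. A sketch of the same expansion on one hypothetical permission string:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class WildcardDemo {
    public static void main(String[] args) {
        String permission = "acl.user.read";
        List<String> parts = Arrays.asList(permission.split("\\."));

        // For "acl.user.read" this yields "*", "acl.*", "acl.user.*".
        HashSet<String> expanded = IntStream.range(0, parts.size())
                .mapToObj(i -> parts.subList(0, i).stream()
                        .collect(Collectors.joining(".", "", i == 0 ? "*" : ".*")))
                .collect(Collectors.toCollection(HashSet::new));
        expanded.add(permission);

        System.out.println(expanded); // the literal permission plus its wildcard prefixes
    }
}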

From source file:structuredPredictionNLG.DatasetParser.java

/**
 * Chooses the next value to mention for the given attribute.
 *
 * @param attribute the attribute whose value is to be chosen
 * @param attrValuesToBeMentioned the "attribute=value" pairs still to be mentioned
 * @return the chosen value, or an empty string if no value is relevant
 */
public String chooseNextValue(String attribute, HashSet<String> attrValuesToBeMentioned) {
    HashMap<String, Integer> relevantValues = new HashMap<>();
    attrValuesToBeMentioned.stream().forEach((attrValue) -> {
        String attr = attrValue.substring(0, attrValue.indexOf('='));
        String value = attrValue.substring(attrValue.indexOf('=') + 1);
        if (attr.equals(attribute)) {
            relevantValues.put(value, 0);
        }
    });
    if (!relevantValues.isEmpty()) {
        if (relevantValues.keySet().size() == 1) {
            for (String value : relevantValues.keySet()) {
                return value;
            }
        } else {
            String bestValue = "";
            int minIndex = Integer.MAX_VALUE;
            for (String value : relevantValues.keySet()) {
                if (value.startsWith("x")) {
                    int vI = Integer.parseInt(value.substring(1));
                    if (vI < minIndex) {
                        minIndex = vI;
                        bestValue = value;
                    }
                }
            }
            if (!bestValue.isEmpty()) {
                return bestValue;
            }
            for (ArrayList<String> mentionedValueSeq : observedAttrValueSequences) {
                boolean doesSeqContainValues = true;
                minIndex = Integer.MAX_VALUE;
                for (String value : relevantValues.keySet()) {
                    int index = mentionedValueSeq.indexOf(attribute + "=" + value);
                    if (index != -1 && index < minIndex) {
                        minIndex = index;
                        bestValue = value;
                    } else if (index == -1) {
                        doesSeqContainValues = false;
                    }
                }
                if (doesSeqContainValues) {
                    relevantValues.put(bestValue, relevantValues.get(bestValue) + 1);
                }
            }
            int max = -1;
            for (String value : relevantValues.keySet()) {
                if (relevantValues.get(value) > max) {
                    max = relevantValues.get(value);
                    bestValue = value;
                }
            }
            return bestValue;
        }
    }
    return "";
}
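
The opening forEach splits each "attr=value" string on '='; the same parse can be written as a filter/collect pipeline. A sketch with hypothetical pairs:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class ParsePairsDemo {
    public static void main(String[] args) {
        HashSet<String> pairs = new HashSet<>(Set.of("food=x1", "area=riverside", "food=x2"));

        // Keep only values belonging to the attribute of interest.
        Map<String, Integer> relevantValues = pairs.stream()
                .filter(p -> p.substring(0, p.indexOf('=')).equals("food"))
                .collect(Collectors.toMap(p -> p.substring(p.indexOf('=') + 1), p -> 0));

        System.out.println(relevantValues); // {x1=0, x2=0}
    }
}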

From source file:br.unicamp.ic.recod.gpsi.applications.gpsiJGAPSelectorEvolver.java

@Override
public void run() throws InvalidConfigurationException, InterruptedException, Exception {

    int i, j, k;
    byte nFolds = 5;
    gpsiDescriptor descriptor;
    gpsiMLDataset mlDataset;
    gpsiVoxelRawDataset dataset;
    GPGenotype gp;
    double[][] fitnessCurves;
    String[] curveLabels = new String[] { "train", "train_val", "val" };
    double bestScore, currentScore;
    IGPProgram current;
    IGPProgram[] elite = null;

    Mean mean = new Mean();
    StandardDeviation sd = new StandardDeviation();

    double validationScore, trainScore;
    double[][][] samples;

    for (byte f = 0; f < nFolds; f++) {

        System.out.println("\nRun " + (f + 1) + "\n");

        rawDataset.assignFolds(new byte[] { f, (byte) ((f + 1) % nFolds), (byte) ((f + 2) % nFolds) },
                new byte[] { (byte) ((f + 3) % nFolds) }, new byte[] { (byte) ((f + 4) % nFolds) });
        dataset = (gpsiVoxelRawDataset) rawDataset;
        gp = create(config, dataset.getnBands(), fitness, null);

        // 0: train, 1: train_val, 2: val
        fitnessCurves = new double[super.numGenerations + numGenerationsSel][];
        bestScore = -Double.MAX_VALUE;

        if (validation > 0)
            elite = new IGPProgram[validation];

        for (int generation = 0; generation < numGenerationsSel; generation++) {

            gp.evolve(1);
            gp.getGPPopulation().sortByFitness();

            if (validation > 0)
                elite = mergeElite(elite, gp.getGPPopulation().getGPPrograms(), generation);

            if (this.dumpGens) {

                double[][][] dists;
                descriptor = new gpsiScalarSpectralIndexDescriptor(
                        new gpsiJGAPVoxelCombiner(fitness.getB(), gp.getGPPopulation().getGPPrograms()[0]));
                mlDataset = new gpsiMLDataset(descriptor);
                mlDataset.loadWholeDataset(rawDataset, true);

                dists = (new gpsiWholeSampler()).sample(mlDataset.getTrainingEntities(), this.classLabels);
                for (i = 0; i < this.classLabels.length; i++) {
                    stream.register(new gpsiDoubleCsvIOElement(dists[i], null,
                            "gens/f" + (f + 1) + "/" + classLabels[i] + "/" + (generation + 1) + ".csv"));
                }

            }

            fitnessCurves[generation] = new double[] { gp.getAllTimeBest().getFitnessValue() - 1.0 };
            System.out.printf("%3dg: %.4f\n", generation + 1, fitnessCurves[generation][0]);

        }

        HashSet<Integer> variables = new HashSet<>();
        for (IGPProgram ind : elite) {
            for (CommandGene node : ind.getChromosome(0).getFunctions()) {
                if (node instanceof Variable) {
                    variables.add(Integer.parseInt(node.getName().replace('b', '0')));
                }
            }
        }

        int[] vars = variables.stream().mapToInt(p -> p).toArray();
        Arrays.sort(vars);
        stream.register(new gpsiStringIOElement(Arrays.toString(vars), "selected_bands/f" + (f + 1) + ".out"));

        gp = create(config, dataset.getnBands(), fitness, vars);
        gp.addFittestProgram(elite[0]);

        for (int generation = numGenerationsSel; generation < numGenerationsSel
                + super.numGenerations; generation++) {

            gp.evolve(1);
            gp.getGPPopulation().sortByFitness();

            if (validation > 0)
                elite = mergeElite(elite, gp.getGPPopulation().getGPPrograms(), generation);

            if (this.dumpGens) {

                double[][][] dists;
                descriptor = new gpsiScalarSpectralIndexDescriptor(
                        new gpsiJGAPVoxelCombiner(fitness.getB(), gp.getGPPopulation().getGPPrograms()[0]));
                mlDataset = new gpsiMLDataset(descriptor);
                mlDataset.loadWholeDataset(rawDataset, true);

                dists = (new gpsiWholeSampler()).sample(mlDataset.getTrainingEntities(), this.classLabels);
                for (i = 0; i < this.classLabels.length; i++) {
                    stream.register(new gpsiDoubleCsvIOElement(dists[i], null,
                            "gens/f" + (f + 1) + "/" + classLabels[i] + "/" + (generation + 1) + ".csv"));
                }

            }

            fitnessCurves[generation] = new double[] { gp.getAllTimeBest().getFitnessValue() - 1.0 };
            System.out.printf("%3dg: %.4f\n", generation + 1, fitnessCurves[generation][0]);

        }

        best = new IGPProgram[2];
        best[0] = gp.getAllTimeBest();
        for (i = 0; i < super.validation; i++) {

            current = elite[i];

            descriptor = new gpsiScalarSpectralIndexDescriptor(
                    new gpsiJGAPVoxelCombiner(fitness.getB(), current));
            mlDataset = new gpsiMLDataset(descriptor);
            mlDataset.loadWholeDataset(rawDataset, true);

            samples = this.fitness.getSampler().sample(mlDataset.getValidationEntities(), classLabels);

            validationScore = fitness.getScore().score(samples);
            trainScore = current.getFitnessValue() - 1.0;

            currentScore = mean.evaluate(new double[] { trainScore, validationScore })
                    - sd.evaluate(new double[] { trainScore, validationScore });

            if (currentScore > bestScore) {
                best[1] = current;
                bestScore = currentScore;
            }

        }

        stream.register(new gpsiDoubleCsvIOElement(fitnessCurves, curveLabels, "curves/f" + (f + 1) + ".csv"));

        System.out.println("Best solution for trainning: " + gp.getAllTimeBest().toStringNorm(0));
        stream.register(new gpsiStringIOElement(gp.getAllTimeBest().toStringNorm(0),
                "programs/f" + (f + 1) + "train.program"));

        if (validation > 0) {
            System.out.println("Best solution for trainning and validation: " + best[1].toStringNorm(0));
            stream.register(new gpsiStringIOElement(best[1].toStringNorm(0),
                    "programs/f" + (f + 1) + "train_val.program"));
        }

        descriptor = new gpsiScalarSpectralIndexDescriptor(new gpsiJGAPVoxelCombiner(fitness.getB(), best[0]));
        gpsi1NNToMomentScalarClassificationAlgorithm classificationAlgorithm = new gpsi1NNToMomentScalarClassificationAlgorithm(
                new Mean());
        gpsiClassifier classifier = new gpsiClassifier(descriptor, classificationAlgorithm);

        classifier.fit(this.rawDataset.getTrainingEntities());
        classifier.predict(this.rawDataset.getTestEntities());

        int[][] confusionMatrix = classifier.getConfusionMatrix();

        stream.register(new gpsiIntegerCsvIOElement(confusionMatrix, null,
                "confusion_matrices/f" + (f + 1) + "_train.csv"));

        if (validation > 0) {
            descriptor = new gpsiScalarSpectralIndexDescriptor(
                    new gpsiJGAPVoxelCombiner(fitness.getB(), best[1]));
            classificationAlgorithm = new gpsi1NNToMomentScalarClassificationAlgorithm(new Mean());
            classifier = new gpsiClassifier(descriptor, classificationAlgorithm);

            classifier.fit(this.rawDataset.getTrainingEntities());
            classifier.predict(this.rawDataset.getTestEntities());

            confusionMatrix = classifier.getConfusionMatrix();

            stream.register(new gpsiIntegerCsvIOElement(confusionMatrix, null,
                    "confusion_matrices/f" + (f + 1) + "_train_val.csv"));

        }

    }

}
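
The HashSet stream usage buried in this run() method is the unboxing pipeline variables.stream().mapToInt(p -> p).toArray(). A minimal standalone sketch of that step:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class UnboxDemo {
    public static void main(String[] args) {
        HashSet<Integer> variables = new HashSet<>(Set.of(7, 2, 5));

        // mapToInt unboxes Integer -> int; sort afterwards, since a HashSet
        // has no defined iteration order.
        int[] vars = variables.stream().mapToInt(Integer::intValue).toArray();
        Arrays.sort(vars);

        System.out.println(Arrays.toString(vars)); // [2, 5, 7]
    }
}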