Example usage for java.util ArrayList stream

List of usage examples for java.util ArrayList stream

Introduction

On this page you can find example usages of java.util.ArrayList.stream().

Prototype

default Stream<E> stream() 

Source Link

Document

Returns a sequential Stream with this collection as its source.

Usage

From source file:com.eventsourcing.index.NavigableIndexTest.java

@Test
public void retrieveGreater() {
    // Build an indexed collection of cars with a sorted key index on price.
    IndexedCollection<EntityHandle<Car>> cars = new ConcurrentIndexedCollection<>();
    SortedKeyStatisticsIndex<String, EntityHandle<Car>> priceIndex = onAttribute(Car.PRICE);
    priceIndex.clear(noQueryOptions());
    cars.addIndex(priceIndex);

    cars.addAll(CarFactory.createCollectionOfCars(10));

    // Strictly greater-than 8500.00 matches only the single most expensive car.
    try (ResultSet<EntityHandle<Car>> strictlyGreater = cars.retrieve(greaterThan(Car.PRICE, 8500.00))) {
        assertEquals(strictlyGreater.size(), 1);
        assertEquals(strictlyGreater.uniqueResult().get().getModel(), "M6");
    }

    // Greater-or-equal additionally includes the car priced at exactly 8500.00.
    try (ResultSet<EntityHandle<Car>> greaterOrEqual = cars
            .retrieve(greaterThanOrEqualTo(Car.PRICE, 8500.00))) {
        assertEquals(greaterOrEqual.size(), 2);
        ArrayList<EntityHandle<Car>> handles = Lists.newArrayList(greaterOrEqual.iterator());
        assertTrue(handles.stream().anyMatch(handle -> handle.get().getModel().contentEquals("M6")));
        assertTrue(handles.stream().anyMatch(handle -> handle.get().getModel().contentEquals("Prius")));
    }

}

From source file:com.thoughtworks.go.server.service.ElasticAgentPluginService.java

/**
 * Posts create-agent requests for elastic jobs that are newly scheduled
 * (present in {@code newPlan} but not in {@code old}) or "starving" — already
 * requested once but still waiting longer than the starvation threshold.
 * Plans without a cluster profile are cancelled; plans whose plugin is not
 * registered raise a server-health error instead.
 *
 * @param old     job plans from the previous scheduling cycle
 * @param newPlan job plans from the current scheduling cycle
 */
public void createAgentsFor(List<JobPlan> old, List<JobPlan> newPlan) {
    // Collect elastic jobs that were requested before but have waited longer
    // than the starvation threshold; they get a fresh create-agent message.
    Collection<JobPlan> starvingJobs = new ArrayList<>();
    for (JobPlan jobPlan : newPlan) {
        if (jobPlan.requiresElasticAgent()) {
            if (!jobCreationTimeMap.containsKey(jobPlan.getJobId())) {
                // Never requested yet — picked up below via the set difference.
                continue;
            }
            long lastTryTime = jobCreationTimeMap.get(jobPlan.getJobId());
            if ((timeProvider.currentTimeMillis() - lastTryTime) >= goConfigService
                    .elasticJobStarvationThreshold()) {
                starvingJobs.add(jobPlan);
            }
        }
    }

    // Candidate set: newly scheduled plans plus the starving ones.
    ArrayList<JobPlan> jobsThatRequireAgent = new ArrayList<>();
    jobsThatRequireAgent.addAll(Sets.difference(new HashSet<>(newPlan), new HashSet<>(old)));
    jobsThatRequireAgent.addAll(starvingJobs);

    List<JobPlan> plansThatRequireElasticAgent = jobsThatRequireAgent.stream().filter(isElasticAgent())
            .collect(Collectors.toList());
    //      messageTimeToLive is lesser than the starvation threshold to ensure there are no duplicate create agent message
    long messageTimeToLive = goConfigService.elasticJobStarvationThreshold() - 10000;

    for (JobPlan plan : plansThatRequireElasticAgent) {
        // Record the attempt time so the starvation check above can retry later.
        jobCreationTimeMap.put(plan.getJobId(), timeProvider.currentTimeMillis());
        ElasticProfile elasticProfile = plan.getElasticProfile();
        ClusterProfile clusterProfile = plan.getClusterProfile();
        if (clusterProfile == null) {
            // Elastic profiles created before 19.3.0 lack cluster information;
            // the job cannot be dispatched, so cancel it with an explanation.
            String cancellationMessage = "\nThis job was cancelled by GoCD. The version of your GoCD server requires elastic profiles to be associated with a cluster(required from Version 19.3.0). "
                    + "This job is configured to run on an Elastic Agent, but the associated elastic profile does not have information about the cluster.  \n\n"
                    + "The possible reason for the missing cluster information on the elastic profile could be, an upgrade of the GoCD server to a version >= 19.3.0 before the completion of the job.\n\n"
                    + "A re-run of this job should fix this issue.";
            logToJobConsole(plan.getIdentifier(), cancellationMessage);
            scheduleService.cancelJob(plan.getIdentifier());
        } else if (elasticAgentPluginRegistry.has(clusterProfile.getPluginId())) {
            // Plugin is available: queue the create-agent message and clear any
            // previous "unable to find agent" health warning for this job.
            String environment = environmentConfigService.envForPipeline(plan.getPipelineName());
            createAgentQueue.post(
                    new CreateAgentMessage(goConfigService.serverConfig().getAgentAutoRegisterKey(),
                            environment, elasticProfile, clusterProfile, plan.getIdentifier()),
                    messageTimeToLive);
            serverHealthService.removeByScope(HealthStateScope.forJob(plan.getIdentifier().getPipelineName(),
                    plan.getIdentifier().getStageName(), plan.getIdentifier().getBuildName()));
        } else {
            // Plugin missing or unregistered: surface a server-health error.
            String jobConfigIdentifier = plan.getIdentifier().jobConfigIdentifier().toString();
            String description = format(
                    "Plugin [%s] associated with %s is missing. Either the plugin is not "
                            + "installed or could not be registered. Please check plugins tab "
                            + "and server logs for more details.",
                    clusterProfile.getPluginId(), jobConfigIdentifier);
            serverHealthService.update(ServerHealthState.error(
                    format("Unable to find agent for %s", jobConfigIdentifier), description,
                    HealthStateType.general(HealthStateScope.forJob(plan.getIdentifier().getPipelineName(),
                            plan.getIdentifier().getStageName(), plan.getIdentifier().getBuildName()))));
            LOGGER.error(description);
        }
    }
}

From source file:org.ow2.proactive.workflow_catalog.rest.controller.WorkflowRevisionControllerQueryIntegrationTest.java

/**
 * Runs the query assertion for the given scenario: issues the matching REST
 * call, verifies the HTTP status and compares the returned workflow revision
 * names against the expected set.
 */
private void executeTest(TypeTest typeTest) {
    ValidatableResponse response = null;
    Set<String> expected = null;

    switch (typeTest) {
    case WORKFLOWS:
        expected = assertion.expectedMostRecentWorkflowRevisionNames;
        response = findMostRecentWorkflowRevisions(assertion.query);
        break;
    case WORKFLOW_REVISIONS:
        expected = assertion.expectedWorkflowRevisionsNames;
        response = findAllWorkflowRevisions(assertion.query, workflowA.id);
        break;
    case SECOND_BUCKET:
        expected = assertion.expectedWorkflowRevisionsNamesFromSecondBucket;
        response = findMostRecentWorkflowRevisionsFromBucket(assertion.query, secondBucket.id);
        break;
    default:
        fail();
    }

    // No recorded expectation means the query itself is invalid.
    if (expected == null) {
        response.assertThat().statusCode(HttpStatus.SC_BAD_REQUEST);
        return;
    }

    response.assertThat().statusCode(HttpStatus.SC_OK);

    // An empty expectation means the query is valid but matches nothing.
    if (expected.isEmpty()) {
        response.assertThat().body("page.totalElements", is(0));
        return;
    }

    ArrayList<HashMap<Object, Object>> revisions = response.extract().body().jsonPath()
            .get("_embedded.workflowMetadataList");

    Set<String> actualNames = revisions.stream()
            .map(revision -> (String) revision.get("name"))
            .collect(Collectors.toSet());

    // The symmetric difference is empty exactly when both sets hold the same names.
    Sets.SetView<String> mismatch = Sets.symmetricDifference(actualNames, expected);

    if (!mismatch.isEmpty()) {
        fail("Expected " + expected + " but received " + actualNames);
    }
}

From source file:com.wormsim.simulation.Walker.java

/**
 * Creates a new walker using the specified class of random number generator
 * and the provided seed. Note there is a requirement that the provided random
 * number generator is {@link java.io.Serializable}.
 *
 * @param cls                The random number generator class
 * @param seed               The seed
 * @param zoo                The animal zoo to use
 * @param fitness            The fitness tracker
 * @param tracked_quantities Quantities tracked in the simulation.
 *
 * @throws IllegalArgumentException If the provided generator class is not
 *                                  serializable, has no accessible
 *                                  zero-argument constructor, or its
 *                                  constructor fails.
 */
public Walker(Class<? extends RandomGenerator> cls, long seed, AnimalZoo2 zoo, TrackedCalculation fitness,
        ArrayList<TrackedCalculation> tracked_quantities) throws IllegalArgumentException {
    try {
        if (!Serializable.class.isAssignableFrom(cls)) {
            throw new IllegalArgumentException("The provided class must be serializable.");
        }
        // Class.newInstance() is deprecated: it silently rethrows any checked
        // exception the constructor throws. Invoke the zero-arg constructor
        // explicitly instead; all reflective failures are handled below.
        this.rng = cls.getDeclaredConstructor().newInstance();
        this.seed = seed;
        this.rng.setSeed(seed);
        this.zoo = zoo.generate(rng);
        this.fitness = fitness.generate(rng);
        // Generate per-walker instances of each tracked quantity from this rng,
        // then track the fitness calculation alongside them.
        this.tracked_quantities = tracked_quantities.stream().map((v) -> v.generate(rng))
                .collect(Collectors.toCollection(ArrayList::new));
        this.tracked_quantities.add(this.fitness);
    } catch (ReflectiveOperationException ex) {
        // Covers InstantiationException, IllegalAccessException,
        // NoSuchMethodException and InvocationTargetException.
        Logger.getLogger(Walker.class.getName()).log(Level.SEVERE, null, ex);
        throw new IllegalArgumentException(ex);
    }
}

From source file:be.dataminded.nifi.plugins.PutCloudWatchCountMetricAndAlarm.java

/**
 * Reads a merged JSON array of row-count records from the incoming FlowFile,
 * publishes two CloudWatch metrics — the summed record count and the permille
 * difference between that sum and the expected total — and (re)creates a
 * static CloudWatch alarm on the difference metric.
 */
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    long totalTableCount = 0;
    long sumCount = 0;
    String tableName = "";
    String schemaName = "";
    String source = "";
    String tenantName = "";

    try (InputStream inputStream = session.read(flowFile)) {

        StringWriter writer = new StringWriter();
        IOUtils.copy(inputStream, writer, "UTF-8");
        String flowFileContent = writer.toString();

        // The MergeContent controller will be configured to append the JSON content with commas.
        // We have to surround this list with square brackets to become a valid JSON Array.
        String jsonContent = "[" + flowFileContent + "]";

        JSONArray jsonArray = new JSONArray(jsonContent);

        Iterator iterator = jsonArray.iterator();

        ArrayList<Long> counts = new ArrayList<>();

        while (iterator.hasNext()) {
            JSONObject o = (JSONObject) iterator.next();
            counts.add(o.getLong(context.getProperty(NAME_ELEMENT_TO_SUM).getValue()));
        }
        sumCount = counts.stream().mapToLong(Long::longValue).sum();

        // The total count and the table/tenant metadata are repeated in every
        // element, so reading them from the first element is sufficient.
        JSONObject firstElement = (JSONObject) jsonArray.get(0);
        totalTableCount = firstElement.getLong(context.getProperty(NAME_ELEMENT_TOTAL_COUNT).getValue());
        tableName = firstElement.getString(TABLE_NAME);
        schemaName = firstElement.getString(SCHEMA_NAME);
        source = firstElement.getString(SOURCE_NAME);
        tenantName = firstElement.getString(TENANT_NAME);

    } catch (IOException e) {
        logger.error("Something went wrong when trying to read the flowFile body: " + e.getMessage());
    } catch (org.json.JSONException e) {
        // BUG FIX: the original message was missing "went wrong".
        logger.error("Something went wrong when trying to parse the JSON body of the flowFile: " + e.getMessage());
    } catch (Exception e) {
        logger.error("something else went wrong in body processing of this FlowFile: " + e.getMessage());
        session.transfer(flowFile, REL_FAILURE);
        // BUG FIX: without this return the FlowFile was transferred a second
        // time further down, which NiFi rejects at session commit.
        return;
    }

    try {

        String environment = context.getProperty(ENVIRONMENT).getValue();
        String alarmPrefix = context.getProperty(NAME_PREFIX_ALARM).getValue();

        Map<String, Long> metrics = new HashMap<>();
        // first metric: this is the total count of the records that were exported
        metrics.put("COUNT_", sumCount);
        // second metric: this is the difference between the records exported
        // and the total amount of records counted in the DB, should always be 0 !!!
        // we take a margin into account because we can't be sure there won't be any deletes
        // between counting and executing the queries
        long diff = Math.abs(totalTableCount - sumCount);
        // BUG FIX: (diff / totalTableCount) was long integer division, which
        // truncated the ratio to 0 (or 1) before scaling, so the permille
        // metric was useless. Use floating-point division, and guard against a
        // zero total (e.g. when reading the body failed above).
        double diffPermille = totalTableCount == 0 ? 0 : Math.round((diff * 1000.0) / totalTableCount);
        metrics.put("DIFF_", (long) diffPermille);

        ArrayList<Dimension> dimensions = new ArrayList<>();
        dimensions.add(new Dimension().withName("tableName").withValue(tableName));
        dimensions.add(new Dimension().withName("tenantName").withValue(tenantName));
        dimensions.add(new Dimension().withName("sourceName").withValue(source));
        dimensions.add(new Dimension().withName("schemaName").withValue(schemaName));
        dimensions.add(new Dimension().withName("environment").withValue(environment));

        // Publish each metric (COUNT_<table>, DIFF_<table>) with the shared dimensions.
        for (Map.Entry<String, Long> metric : metrics.entrySet()) {
            MetricDatum datum = new MetricDatum();
            datum.setMetricName(metric.getKey() + tableName);
            datum.setValue((double) metric.getValue());
            datum.setUnit("Count");
            datum.setDimensions(dimensions);

            final PutMetricDataRequest metricDataRequest = new PutMetricDataRequest().withNamespace("NIFI")
                    .withMetricData(datum);

            putMetricData(metricDataRequest);
        }

        // the alarm we create is a static one that will check if the diff is zero
        String comparisonOperator = context.getProperty(ALARM_COMPARISON_OPERATOR).getValue();
        String alarmStatistic = context.getProperty(ALARM_STATISTIC).getValue();
        String alarmPeriod = context.getProperty(ALARM_PERIOD).getValue();
        String alarmEvaluatePeriods = context.getProperty(ALARM_EVALUATE_PERIODS).getValue();
        String alarmAction = context.getProperty(ALARM_ACTION).getValue();

        PutMetricAlarmRequest putMetricAlarmRequest = new PutMetricAlarmRequest()
                .withMetricName("DIFF_" + tableName)
                .withAlarmName(environment + "_" + alarmPrefix + "_" + "DIFF_" + tableName)
                .withDimensions(dimensions).withComparisonOperator(comparisonOperator).withNamespace("NIFI")
                .withStatistic(alarmStatistic).withPeriod(Integer.parseInt(alarmPeriod))
                .withEvaluationPeriods(Integer.parseInt(alarmEvaluatePeriods)).withThreshold((double) 0)
                //.withTreatMissingData("notBreaching") // aws java SDK has to be upgraded for this
                .withAlarmDescription("The daily Count Alarm for table " + tableName).withActionsEnabled(true)
                .withAlarmActions(alarmAction);
        putAlarmData(putMetricAlarmRequest);

        session.transfer(flowFile, REL_SUCCESS);
        getLogger().info("Successfully published cloudwatch metric for {}", new Object[] { flowFile });
    } catch (final Exception e) {
        getLogger().error("Failed to publish cloudwatch metric for {} due to {}", new Object[] { flowFile, e });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    }

}

From source file:com.wso2.code.quality.matrices.ChangesFinder.java

/**
 * Parses the unified-diff hunk headers in each patch string and saves the
 * modified line ranges (old-file range and new-file range) for every changed
 * file into {@code lineRangesChanged}, one list of range strings per file.
 *
 * @param fileNames   Arraylist of file names that are affected by the relevant commit
 * @param patchString Array list having the patch string value for each of the files being changed
 */
public void saveRelaventEditLineNumbers(ArrayList<String> fileNames, ArrayList<String> patchString) {
    //filtering only the line ranges that are modified and saving to a string array

    // cannot use parallel streams here as the order of the line changes must be preserved
    patchString.stream().map(patch -> StringUtils.substringsBetween(patch, "@@ ", " @@"))
            .forEach(lineChanges -> {
                //filtering the line ranges that existed in the previous file and that exist in the new file, saving them into the same array
                IntStream.range(0, lineChanges.length).forEach(j -> {
                    //@@ -22,7 +22,7 @@ => -22,7 +22,7 => 22,28/22,28
                    String tempString = lineChanges[j];
                    String lineRangeInTheOldFileBeingModified = StringUtils.substringBetween(tempString, "-",
                            " +"); // for taking the authors and commit hashes of the previous lines
                    String lineRangeInTheNewFileResultedFromModification = StringUtils
                            .substringAfter(tempString, "+"); // for taking the parent commit

                    // hunk headers carry "start,count"; convert to inclusive "start,end"
                    int intialLineNoInOldFile = Integer
                            .parseInt(StringUtils.substringBefore(lineRangeInTheOldFileBeingModified, ","));
                    int tempEndLineNoInOldFile = Integer
                            .parseInt(StringUtils.substringAfter(lineRangeInTheOldFileBeingModified, ","));
                    int endLineNoOfOldFile;
                    if (intialLineNoInOldFile != 0) {
                        // to filter out the newly created files (whose old range starts at 0)
                        endLineNoOfOldFile = intialLineNoInOldFile + (tempEndLineNoInOldFile - 1);
                    } else {
                        endLineNoOfOldFile = tempEndLineNoInOldFile;
                    }
                    int intialLineNoInNewFile = Integer.parseInt(
                            StringUtils.substringBefore(lineRangeInTheNewFileResultedFromModification, ","));
                    int tempEndLineNoInNewFile = Integer.parseInt(
                            StringUtils.substringAfter(lineRangeInTheNewFileResultedFromModification, ","));
                    int endLineNoOfNewFile = intialLineNoInNewFile + (tempEndLineNoInNewFile - 1);
                    // storing the line ranges that are being modified in the same array by replacing values
                    lineChanges[j] = intialLineNoInOldFile + "," + endLineNoOfOldFile + "/"
                            + intialLineNoInNewFile + "," + endLineNoOfNewFile;
                });
                ArrayList<String> tempArrayList = new ArrayList<>(Arrays.asList(lineChanges));
                //adding to the array list which keeps track of the line ranges being changed
                lineRangesChanged.add(tempArrayList);
            });
    System.out.println("done saving file names and their relevant modification line ranges");
    System.out.println(fileNames);
    System.out.println(lineRangesChanged + "\n");
}

From source file:sbu.srl.rolextract.SpockDataReader.java

/**
 * Writes SEMAFOR-style frame-element annotations for every annotated sentence
 * that has at least one positive role, together with the matching raw sentence
 * text (one per line, same order) so line numbers in both output files align.
 *
 * @param frameElementsFileName output path for the frame.elements lines
 * @param rawSentencesFileName  output path for the raw sentence lines
 * @param offset                offset added to the running sentence counter
 * @throws FileNotFoundException if either output file cannot be opened
 */
public void generateSEMAFORFrameAnnotation(String frameElementsFileName, String rawSentencesFileName,
        int offset) throws FileNotFoundException {
    Set<String> roles = getRoleLabels(); // without NONE!
    // BUG FIX: the original never closed either writer when an exception was
    // thrown mid-loop (and leaked the first writer if the second constructor
    // failed); try-with-resources guarantees both are closed.
    try (PrintWriter writer = new PrintWriter(frameElementsFileName);
            PrintWriter sentWriter = new PrintWriter(rawSentencesFileName)) {
        int sentCounter = 0;
        for (int i = 0; i < sentences.size(); i++) {
            Sentence currentSentence = sentences.get(i);
            if (currentSentence.isAnnotated()) {
                // Is there a positive role?
                int cntRole = 0; // Number of roles with at least one annotated span
                String frame = currentSentence.getProcessName();
                String lexicalUnitFrames = currentSentence.getLexicalUnitFrame();
                String lexicalUnitIndexRange = currentSentence.getLexicalUnitFrameRange();
                String formLexicalUnitFrame = currentSentence.getLexicalUnitFormFrame();

                StringBuilder roleSpanStrBuilder = new StringBuilder();
                for (String role : roles) {
                    ArrayList<ArgumentSpan> spans = currentSentence.getMultiClassAnnotatedArgumentSpan(role, -1);
                    if (!spans.isEmpty()) {
                        cntRole++;
                        // If a role has several spans, keep only the longest one.
                        ArgumentSpan maxSpan = spans.stream()
                                .max(Comparator.comparing(arg -> arg.getEndIdx() - arg.getStartIdx() + 1)).get();
                        // Multi-token spans are written as "start:end" (0-based);
                        // single-token spans as a bare index.
                        if (maxSpan.getStartIdx() != maxSpan.getEndIdx()) {
                            roleSpanStrBuilder.append(role).append("\t")
                                    .append((maxSpan.getStartIdx() - 1) + ":" + (maxSpan.getEndIdx() - 1))
                                    .append("\t");
                        } else {
                            roleSpanStrBuilder.append(role).append("\t").append((maxSpan.getStartIdx() - 1))
                                    .append("\t");
                        }
                    }
                }
                if (cntRole > 0) {
                    // First column counts the frame target plus all role spans,
                    // hence cntRole + 1. (The original wrote "cntRole + +1" —
                    // a stray unary plus with the same value.)
                    StringBuilder frameElementsStrB = new StringBuilder();
                    frameElementsStrB.append(cntRole + 1).append("\t").append(frame).append("\t")
                            .append(lexicalUnitFrames).append("\t").append(lexicalUnitIndexRange).append("\t")
                            .append(formLexicalUnitFrame).append("\t").append((sentCounter + offset)).append("\t")
                            .append(roleSpanStrBuilder.toString().trim());
                    writer.println(frameElementsStrB.toString());
                    sentWriter.println(currentSentence.getRawText().trim());
                    sentCounter++;
                }
            }
        }
    }
}

From source file:chatbot.Chatbot.java

/** **************************************************************************************************
 * Remove stop words from a sentence./*from  w  w w.java  2 s  . c  o m*/
 * @return a string that is the sentence minus the stop words.
 */
public String removeStopWords(String sentence) {

    if (isNullOrEmpty(sentence))
        return "";
    String result = "";
    ArrayList<String> al = splitToArrayList(sentence);
    if (al == null)
        return "";
    return al.stream().filter(s -> !stopwords.contains(s.toLowerCase())).collect(Collectors.joining(" "));
}

From source file:org.springframework.cloud.gateway.handler.predicate.PathRoutePredicateFactory.java

/**
 * Builds a predicate that matches a request's raw path against the configured
 * patterns. The patterns are parsed once up front, under the shared parser's
 * lock; on a match the extracted URI template variables are stored on the
 * exchange.
 */
@Override
public Predicate<ServerWebExchange> apply(Config config) {
    final ArrayList<PathPattern> pathPatterns = new ArrayList<>();
    synchronized (this.pathPatternParser) {
        pathPatternParser.setMatchOptionalTrailingSeparator(config.isMatchOptionalTrailingSeparator());
        for (String pattern : config.getPatterns()) {
            pathPatterns.add(this.pathPatternParser.parse(pattern));
        }
    }
    return exchange -> {
        PathContainer path = parsePath(exchange.getRequest().getURI().getRawPath());

        Optional<PathPattern> firstMatch = pathPatterns.stream()
                .filter(candidate -> candidate.matches(path)).findFirst();

        if (!firstMatch.isPresent()) {
            traceMatch("Pattern", config.getPatterns(), path, false);
            return false;
        }
        PathPattern matched = firstMatch.get();
        traceMatch("Pattern", matched.getPatternString(), path, true);
        PathMatchInfo matchInfo = matched.matchAndExtract(path);
        putUriTemplateVariables(exchange, matchInfo.getUriVariables());
        return true;
    };
}

From source file:de.tolina.common.validation.AnnotationValidation.java

/**
 * Calls dependent on the type of the given Object:
 * <br> - {@link AnnotationUtils#getAnnotations(Method)} or
 * <br> - {@link AnnotationUtils#getAnnotations(java.lang.reflect.AnnotatedElement)}
 * <p>
 * For methods and classes the whole hierarchy (superclasses and all implemented
 * interfaces) is scanned, collecting each annotation only once.
 */
@Nullable
private Annotation[] getAllAnnotationsFor(@Nonnull final Object annotated) {
    if (annotated instanceof Field) {
        return getAnnotations((Field) annotated);
    }

    if (annotated instanceof Method) {
        final Method annotatedMethod = (Method) annotated;
        final Class<?> declaringClass = annotatedMethod.getDeclaringClass();
        // Scan the declaring class first, then every superclass.
        final List<Class<?>> typesToScan = new ArrayList<>();
        typesToScan.add(declaringClass);
        typesToScan.addAll(ClassUtils.getAllSuperclasses(declaringClass));

        final ArrayList<Annotation> collected = new ArrayList<>();

        for (final Class<?> type : typesToScan) {
            // Candidate methods: those declared on the type itself plus those
            // declared on any interface it implements.
            final ArrayList<Method> candidates = new ArrayList<>(Arrays.asList(type.getDeclaredMethods()));
            for (final Class<?> anInterface : ClassUtils.getAllInterfaces(type)) {
                candidates.addAll(Arrays.asList(anInterface.getDeclaredMethods()));
            }

            candidates.stream().filter(candidate -> isSameMethod(candidate, annotatedMethod))
                    .forEachOrdered(candidate -> addIfNotPresent(collected, getAnnotations(candidate)));
        }

        return collected.toArray(new Annotation[] {});
    }

    // Neither field nor method: the annotated element is a class.
    final Class<?> annotatedClass = (Class<?>) annotated;
    final List<Class<?>> typesToScan = new ArrayList<>();
    typesToScan.add(annotatedClass);
    typesToScan.addAll(ClassUtils.getAllSuperclasses(annotatedClass));

    final ArrayList<Annotation> collected = new ArrayList<>();

    for (final Class<?> type : typesToScan) {
        addIfNotPresent(collected, getAnnotations(type));
        for (final Class<?> anInterface : ClassUtils.getAllInterfaces(type)) {
            addIfNotPresent(collected, getAnnotations(anInterface));
        }
    }

    return collected.toArray(new Annotation[] {});
}