Example usage for java.util ArrayList stream

List of usage examples for java.util ArrayList stream

Introduction

On this page you can find example usage of java.util.ArrayList.stream().

Prototype

default Stream<E> stream() 

Source Link

Document

Returns a sequential Stream with this collection as its source.

Usage

From source file:sbu.srl.rolextract.SBURolePredict.java

/**
 * Predicts semantic-role labels for every annotated argument span of every
 * sentence in the serialized test file, then writes the sentences (with
 * predictions attached) to a sibling "predict" file.
 *
 * @param testingFileName path to a serialized ArrayList&lt;Sentence&gt;; the output
 *        file name is derived by replacing "gold" with "predict" (or "test."
 *        with "predict.") in this name
 * @throws IOException            if (de)serialization fails
 * @throws FileNotFoundException  if the input file does not exist
 * @throws ClassNotFoundException if the serialized classes are unavailable
 */
public void performPrediction(String testingFileName)
        throws IOException, FileNotFoundException, ClassNotFoundException {
    ArrayList<Sentence> sentences = (ArrayList<Sentence>) FileUtil.deserializeFromFile(testingFileName);
    for (int i = 0; i < sentences.size(); i++) {
        Sentence currentSentence = sentences.get(i);
        System.out.println("[Prediction] " + i + " / " + sentences.size());
        HashMap<String, String> argumentSpanThatHasAnnotation = currentSentence
                .getAllArgumentsThatHaveAnnotation();
        // De-duplicate spans: the annotation may contain repeated spans.
        ArrayList<ArgumentSpan> spans = (ArrayList<ArgumentSpan>) currentSentence
                .getAllAnnotatedArgumentSpan().stream().distinct().collect(toList());
        if (knownAnnotation) {
            // Keep only spans for which a gold annotation exists.
            spans = (ArrayList<ArgumentSpan>) spans.stream()
                    .filter(s -> argumentSpanThatHasAnnotation
                            .get(currentSentence.getId() + "_" + s.getStartIdx() + "_" + s.getEndIdx()) != null)
                    .collect(toList());
        }
        if (!knownAnnotation && spans.isEmpty()) {
            continue; // nothing to predict for this sentence
        }
        System.out.println(currentSentence.getRawText());
        HashMap<String, ArrayList<ArgumentSpan>> roleArgPrediction = new HashMap<>();
        for (ArgumentSpan currentSpan : spans) {
            if (isMulticlass) {
                predictSpanMulticlass(currentSentence, currentSpan);
            } else {
                predictSpanBinary(currentSentence, currentSpan);
            }
            // Group spans by their predicted role label.
            roleArgPrediction.computeIfAbsent(currentSpan.getRolePredicted(), k -> new ArrayList<>())
                    .add(currentSpan);
        }
        currentSentence.setRoleArgPrediction(roleArgPrediction);
    }

    // Persist predictions next to the input file.
    if (testingFileName.contains("gold")) {
        FileUtil.serializeToFile(sentences, testingFileName.replace("gold", "predict"));
    } else {
        FileUtil.serializeToFile(sentences, testingFileName.replace("test.", "predict."));
    }
}

/**
 * Scores a span with the single multi-class model and records a probability
 * for every role label on the span.
 */
private void predictSpanMulticlass(Sentence currentSentence, ArgumentSpan currentSpan) {
    String roleLabel = "Multi";
    FeatureExtractor fExtractor = fExtractors.get(roleLabel);
    DependencyTree depTree = StanfordDepParserSingleton.getInstance().parse(currentSentence.getRawText());
    // Use the common ancestor of the span's tokens as its syntactic head.
    DependencyNode headNode = depTree.getHeadNode(currentSpan.getRoleIdx());
    String rawVector = fExtractor.extractFeatureVectorValue(headNode.getId(), currentSentence, currentSpan,
            false, isMulticlass);
    Model m = models.get(roleLabel);
    FeatureNode[] x = LibLinearWrapper.toFeatureNode(rawVector, m);
    double[] probs = new double[fExtractor.multiClassLabel.size()];
    Linear.predictProbability(m, x, probs);
    // LibLinear reports probabilities in the model's label order; map each
    // role label's id to its position in that order.
    int[] labels = m.getLabels();
    HashMap<String, Double> roleProbPair = new HashMap<>();
    for (String label : fExtractor.multiClassLabel.keySet()) {
        int labelID = fExtractor.multiClassLabel.get(label);
        int probID = -1;
        for (int k = 0; k < labels.length; k++) {
            if (labels[k] == labelID) {
                probID = k;
                break;
            }
        }
        if (probID < 0) {
            // Fail fast with context instead of the bare
            // ArrayIndexOutOfBoundsException the old code produced.
            throw new IllegalStateException(
                    "Label " + label + " (id " + labelID + ") is unknown to the model");
        }
        roleProbPair.put(label, probs[probID]);
    }
    HashMap<String, String> roleVectorPair = new HashMap<>();
    roleVectorPair.put(roleLabel, rawVector);
    currentSpan.setRoleProbPair(roleProbPair);
    currentSpan.setRoleFeatureVector(roleVectorPair);
    currentSpan.predictRoleType(true);
}

/**
 * Scores a span against each per-role binary model and records the
 * positive-class probability for every role that has a trained extractor.
 */
private void predictSpanBinary(Sentence currentSentence, ArgumentSpan currentSpan) {
    HashMap<String, Double> roleProbPair = new HashMap<>();
    HashMap<String, String> roleVectorPair = new HashMap<>();
    for (String roleLabel : classLabels) {
        FeatureExtractor fExtractor = fExtractors.get(roleLabel);
        if (fExtractor == null) {
            continue; // no trained model for this role
        }
        DependencyTree depTree = StanfordDepParserSingleton.getInstance().parse(currentSentence.getRawText());
        // Use the common ancestor of the span's tokens as its syntactic head.
        DependencyNode headNode = depTree.getHeadNode(currentSpan.getRoleIdx());
        String rawVector = fExtractor.extractFeatureVectorValue(headNode.getId(), currentSentence,
                currentSpan, false, isMulticlass);
        Model m = models.get(roleLabel);
        FeatureNode[] x = LibLinearWrapper.toFeatureNode(rawVector, m);
        double[] probs = new double[2];
        Linear.predictProbability(m, x, probs);
        // Probabilities come back in the model's label order; pick the
        // position of the positive (label == 1) class.
        int[] labels = m.getLabels();
        int positiveIdx = labels[0] == 1 ? 0 : 1;
        roleProbPair.put(roleLabel, probs[positiveIdx]);
        roleVectorPair.put(roleLabel, rawVector);
    }
    currentSpan.setRoleProbPair(roleProbPair);
    currentSpan.setRoleFeatureVector(roleVectorPair);
    currentSpan.normalizeProbScore();
    currentSpan.predictRoleType(false);
}

From source file:chatbot.Chatbot.java

/****************************************************************************************************
 *
 * @param responses/*from w  w w .j a v  a2s  .  co  m*/
 * @param input
 * @return
 */
private ArrayList<String> rankResponsesOnSentiment(ArrayList<String> responses, String input) {

    if (DB.sentiment.keySet().size() < 1)
        DB.readSentimentArray();
    if (isExcludingNegativeSentiment)
        responses = responses.stream().filter(r -> DB.computeSentiment(r) >= 0)
                .collect(Collectors.toCollection(ArrayList::new));
    else if (isMatchingSentiment)
        responses = responses.stream()
                .filter(r -> compareSentiment(DB.computeSentiment(r), DB.computeSentiment(input)))
                .collect(Collectors.toCollection(ArrayList::new));

    return responses.size() > 0 ? responses : new ArrayList<>(Collections.singletonList("I don't know"));
}

From source file:org.apache.hadoop.hive.ql.parse.MergeSemanticAnalyzer.java

/**
 * Generates the INSERT leg of the multi-insert SQL that represents a
 * WHEN NOT MATCHED THEN INSERT clause of a MERGE statement.
 *
 * @param whenNotMatchedClause the WHEN NOT MATCHED AST node being rewritten
 * @param rewrittenQueryStr accumulator for the rewritten multi-insert SQL
 * @param target AST node of the MERGE target table reference
 * @param onClause AST node of the MERGE ON condition
 * @param targetTable metadata of the target table (used for partition columns)
 * @param targetTableNameInSourceQuery simple name/alias of the target table in the source query
 * @param onClauseAsString textual form of the ON clause, given to the analyzer
 * @param hintStr optional query hint appended after SELECT, or null
 * @throws SemanticException if the column list length does not match the number of values
 */
private void handleInsert(ASTNode whenNotMatchedClause, StringBuilder rewrittenQueryStr, ASTNode target,
        ASTNode onClause, Table targetTable, String targetTableNameInSourceQuery, String onClauseAsString,
        String hintStr) throws SemanticException {
    ASTNode whenClauseOperation = getWhenClauseOperation(whenNotMatchedClause);
    assert whenNotMatchedClause.getType() == HiveParser.TOK_NOT_MATCHED;
    assert whenClauseOperation.getType() == HiveParser.TOK_INSERT;

    // identify the node that contains the values to insert and the optional column list node;
    // a TOK_FUNCTION child is assumed to always be present (grammar-enforced — TODO confirm)
    ArrayList<Node> children = whenClauseOperation.getChildren();
    ASTNode valuesNode = (ASTNode) children.stream()
            .filter(n -> ((ASTNode) n).getType() == HiveParser.TOK_FUNCTION).findFirst().get();
    ASTNode columnListNode = (ASTNode) children.stream()
            .filter(n -> ((ASTNode) n).getType() == HiveParser.TOK_TABCOLNAME).findFirst().orElse(null);

    // if column list is specified, then it has to have the same number of elements as the values
    // valuesNode has a child for struct, the rest are the columns
    if (columnListNode != null && columnListNode.getChildCount() != (valuesNode.getChildCount() - 1)) {
        throw new SemanticException(
                String.format("Column schema must have the same length as values (%d vs %d)",
                        columnListNode.getChildCount(), valuesNode.getChildCount() - 1));
    }

    // INSERT INTO <table> [(col, ...)] plus any partition columns
    rewrittenQueryStr.append("INSERT INTO ").append(getFullTableNameForSQL(target));
    if (columnListNode != null) {
        rewrittenQueryStr.append(' ').append(getMatchedText(columnListNode));
    }
    addPartitionColsToInsert(targetTable.getPartCols(), rewrittenQueryStr);

    rewrittenQueryStr.append("    -- insert clause\n  SELECT ");
    if (hintStr != null) {
        rewrittenQueryStr.append(hintStr);
    }

    // analyze the ON clause to derive the "not matched" predicate for this leg
    OnClauseAnalyzer oca = new OnClauseAnalyzer(onClause, targetTable, targetTableNameInSourceQuery, conf,
            onClauseAsString);
    oca.analyze();

    String valuesClause = getMatchedText(valuesNode);
    valuesClause = valuesClause.substring(1, valuesClause.length() - 1); //strip '(' and ')'
    valuesClause = replaceDefaultKeywordForMerge(valuesClause, targetTable, columnListNode);
    rewrittenQueryStr.append(valuesClause).append("\n   WHERE ").append(oca.getPredicate());

    String extraPredicate = getWhenClausePredicate(whenNotMatchedClause);
    if (extraPredicate != null) {
        //we have WHEN NOT MATCHED AND <boolean expr> THEN INSERT
        rewrittenQueryStr.append(" AND ").append(getMatchedText(((ASTNode) whenNotMatchedClause.getChild(1))))
                .append('\n');
    }
}

From source file:com.joyent.manta.client.multipart.JobsMultipartManagerIT.java

/**
 * Uploads a binary object in {@code noOfParts} parts of {@code sizeInMb} MB
 * each, completes the multipart upload, and verifies the remote MD5 matches
 * the concatenation of the local parts.
 */
private void canUploadMultipartBinary(final long sizeInMb, final int noOfParts) throws IOException {
    final long partSizeBytes = sizeInMb * 1024L * 1024L;

    // Generate the local part files.
    final File[] parts = new File[noOfParts];
    for (int partIdx = 0; partIdx < noOfParts; partIdx++) {
        parts[partIdx] = createTemporaryDataFile(partSizeBytes, 1);
    }

    // The expected object is the concatenation of all parts.
    final File expectedFile = concatenateFiles(parts);
    final byte[] expectedMd5 = md5(expectedFile);

    final String objectName = uploadName("can-upload-5mb-multipart-binary");
    final String path = testPathPrefix + objectName;

    final JobsMultipartUpload upload = multipart.initiateUpload(path);

    // Upload each part; part numbers are 1-based.
    final ArrayList<MantaMultipartUploadTuple> uploadedParts = new ArrayList<>();
    int partNumber = 0;
    for (final File part : parts) {
        partNumber++;
        uploadedParts.add(multipart.uploadPart(upload, partNumber, part));
    }

    multipart.validateThatThereAreSequentialPartNumbers(upload);
    final Instant start = Instant.now();
    multipart.complete(upload, uploadedParts.stream());
    multipart.waitForCompletion(upload, (Function<UUID, Void>) uuid -> {
        fail("Completion operation didn't succeed within timeout");
        return null;
    });
    final Instant end = Instant.now();

    final MantaMultipartStatus status = multipart.getStatus(upload);
    assertEquals(status, MantaMultipartStatus.COMPLETED);

    final MantaObjectResponse head = mantaClient.head(path);
    final byte[] remoteMd5 = head.getMd5Bytes();

    if (!Arrays.equals(remoteMd5, expectedMd5)) {
        fail("MD5 values do not match - job id: " + multipart.findJob(upload));
    }

    final Duration totalCompletionTime = Duration.between(start, end);
    LOG.info("Concatenating {} parts took {} seconds", parts.length, totalCompletionTime.toMillis() / 1000);
}

From source file:com.joyent.manta.client.multipart.EncryptedJobsMultipartManagerIT.java

/**
 * Uploads a binary object in {@code noOfParts} parts of {@code sizeInMb} MB
 * each through the encrypted multipart manager, completes the upload, then
 * downloads the object to verify its decrypted MD5 matches the local parts.
 */
private void canUploadMultipartBinary(final long sizeInMb, final int noOfParts) throws IOException {
    final long partSizeBytes = sizeInMb * 1024L * 1024L;

    // Generate the local part files.
    final File[] parts = new File[noOfParts];
    for (int partIdx = 0; partIdx < noOfParts; partIdx++) {
        parts[partIdx] = createTemporaryDataFile(partSizeBytes, 1);
    }

    // The expected object is the concatenation of all parts.
    final File expectedFile = concatenateFiles(parts);
    final byte[] expectedMd5 = md5(expectedFile);

    final String objectName = uploadName("can-upload-5mb-multipart-binary");
    final String path = testPathPrefix + objectName;

    final EncryptedMultipartUpload<JobsMultipartUpload> upload = multipart.initiateUpload(path);

    // Upload each part; part numbers are 1-based.
    final ArrayList<MantaMultipartUploadTuple> uploadedParts = new ArrayList<>();
    int partNumber = 0;
    for (final File part : parts) {
        partNumber++;
        uploadedParts.add(multipart.uploadPart(upload, partNumber, part));
    }

    multipart.validateThatThereAreSequentialPartNumbers(upload);
    final Instant start = Instant.now();
    multipart.complete(upload, uploadedParts.stream());
    multipart.getWrapped().waitForCompletion(upload, (Function<UUID, Void>) uuid -> {
        fail("Completion operation didn't succeed within timeout");
        return null;
    });
    final Instant end = Instant.now();

    final MantaMultipartStatus status = multipart.getStatus(upload);
    assertEquals(status, MantaMultipartStatus.COMPLETED);

    // If we are using encryption the remote md5 is the md5 of the
    // cipher text.  To prove we uploaded the right bytes and can
    // get them back again, we need to download and calculate.
    final byte[] remoteMd5;
    try (MantaObjectInputStream gotObject = mantaClient.getAsInputStream(path)) {
        remoteMd5 = DigestUtils.md5(gotObject);
    }

    if (!Arrays.equals(remoteMd5, expectedMd5)) {
        fail("MD5 values do not match - job id: " + multipart.getWrapped().findJob(upload));
    }

    final Duration totalCompletionTime = Duration.between(start, end);
    LOG.info("Concatenating {} parts took {} seconds", parts.length, totalCompletionTime.toMillis() / 1000);
}

From source file:info.savestate.saveybot.JSONFileManipulator.java

/**
 * Builds a human-readable summary of the distinct savestate owner names.
 *
 * @param largeResponse when true, list every distinct name; when false,
 *        report only the count of distinct names
 * @return the formatted response string
 */
public String nameList(boolean largeResponse) {
    JSONArray json = getJSON();
    // Collect distinct names, preserving first-seen order.
    ArrayList<String> names = new ArrayList<>();
    for (int i = 0; i < json.length(); i++) {
        String name = json.getJSONObject(i).getString("name");
        if (!names.contains(name)) {
            names.add(name);
        }
    }
    if (largeResponse) {
        // String.join also handles an empty name list, which previously
        // crashed on substring(0, -2).
        return "SaveyBot's personal dongs!! :D :D :D/ : " + String.join(", ", names);
    }
    return "SaveyBot has " + names.size() + " personal dongs!! :D :D :D/";
}

From source file:pathwaynet.PathwayCalculator.java

/**
 * For each component (vertex) inside the given group, tests whether its graph
 * neighborhood is enriched for other in-group components, by comparing the
 * real in-group/out-group distance-proportion differences against those of
 * randomly permuted groups of the same size that are pinned to the component.
 *
 * @param componentsInGroup    the components forming the group under test
 * @param componentsConsidered the universe of components eligible for distances/permutations
 * @param onlyFromSource       whether distances are taken only in the source→target direction
 * @return per-component test results (p-value and estimated domain radius)
 */
private <E> HashMap<E, TestResultForEachVertex> testForEachComponent(Graph<E, String> graph,
        Collection<E> componentsInGroup, Collection<E> componentsConsidered, boolean onlyFromSource) {
    HashMap<E, TestResultForEachVertex> significance = new HashMap<>();

    // calculate and cache all pairwise shortest-path distances once up front
    DijkstraDistance<E, String> distances = new DijkstraDistance<>(graph);
    HashMap<E, Map<E, Number>> distancesMap = new HashMap<>();
    graph.getVertices().stream().forEach((component) -> {
        Map<E, Number> distancesFromThis = distances.getDistanceMap(component);
        distancesMap.put(component, distancesFromThis);
    });

    // calculate real in-group and out-group distances
    HashMap<E, Map<E, Number>> distancesInsideGroup = getDistancesWithGroup(distancesMap, componentsInGroup,
            componentsConsidered, onlyFromSource, true);
    HashMap<E, Map<E, Number>> distancesOutsideGroup = getDistancesWithGroup(distancesMap, componentsInGroup,
            componentsConsidered, onlyFromSource, false);

    if (distancesInsideGroup.isEmpty() || distancesOutsideGroup.isEmpty()) {
        System.err.println("WARNING: Please double check the enzyme list!");
    } else {
        // per-component differences of in-group vs out-group proportion at each distance
        HashMap<E, ArrayList<Double>> differencesProp = new HashMap<>();
        distancesInsideGroup.keySet().stream().forEach((component) -> {
            ArrayList<Double> diffIncreaseProp = estimateDifferenceOfProportionAtDistances(
                    distancesInsideGroup.get(component).values(),
                    distancesOutsideGroup.get(component).values());
            differencesProp.put(component, diffIncreaseProp);
            //System.err.println(enzyme.getID()+"\t"+diffIncreaseProp);
        });

        // for each enzyme in the given group, estimate its significance of neighbor enrichment of enzymes in the group
        //System.err.println();
        distancesInsideGroup.keySet().stream().forEach((component) -> {
            // do permutation (for numPermutations times) to generate random group with the same size and with this enzyme
            HashSet<E> allComponentsAvailable = new HashSet<>();
            allComponentsAvailable.addAll(graph.getVertices());
            allComponentsAvailable.retainAll(componentsConsidered);
            ArrayList<HashSet<E>> componentsInGroupPermutations = generatePermutatedGroupsWithFixedNode(
                    component, allComponentsAvailable, distancesInsideGroup.size());

            // for each permutation, calculate the differences of proportion between within-group and between-group path at each path length
            ArrayList<ArrayList<Double>> differencesPropPermutations = new ArrayList<>();
            componentsInGroupPermutations.stream().forEach((componentsInGroupThisPermutation) -> {
                HashSet<E> componentsOutGroupThisPermutation = new HashSet<>();
                componentsOutGroupThisPermutation.addAll(graph.getVertices());
                componentsOutGroupThisPermutation.removeAll(componentsInGroupThisPermutation);

                // split distances from the pinned component into in-permutation-group and out-group
                HashMap<E, Number> distancesInPermut = new HashMap<>();
                HashMap<E, Number> distancesOutPermut = new HashMap<>();
                allComponentsAvailable.forEach((component2) -> {
                    Number minDist = getShortestDistance(distancesMap, component, component2, onlyFromSource);

                    if (componentsInGroupThisPermutation.contains(component2) && (!component.equals(component2))
                            && minDist != null)
                        distancesInPermut.put(component2, minDist);
                    else if (componentsOutGroupThisPermutation.contains(component2) && minDist != null)
                        distancesOutPermut.put(component2, minDist);
                });
                differencesPropPermutations.add(estimateDifferenceOfProportionAtDistances(
                        distancesInPermut.values(), distancesOutPermut.values()));
            });

            // calculate the significance
            // P: based on Pearson's correlation between differences of proportions and distances
            // domain: based on the quantile of difference at each distance
            //System.err.println(component);
            double p = calculatePValue(differencesProp.get(component), differencesPropPermutations);
            int radius = estimateDomainRadius(differencesProp.get(component), differencesPropPermutations, 0.9);
            significance.put(component, new TestResultForEachVertex(p, radius));

            // NOTE(review): empty branch — caching appears unimplemented; confirm intent
            if (cache) {

            }
        });
    }

    return significance;
}

From source file:io.github.collaboratory.LauncherCWL.java

/**
 * Runs cwltool over the given CWL descriptor and parses its stdout (a
 * YAML/JSON document describing the produced outputs) into a map.
 *
 * @param cwlFile      path of the CWL descriptor to run
 * @param jsonSettings path of the JSON job-parameter file
 * @param outputDir    directory cwltool writes outputs to
 * @param workingDir   prefix for cwltool's temporary directories
 * @return the parsed output document from cwltool's stdout
 */
private Map<String, Object> runCWLCommand(String cwlFile, String jsonSettings, String outputDir,
        String workingDir) {
    // Get extra cwltool flags from the config file.
    ArrayList<String> extraFlags = (ArrayList<String>) config.getList("cwltool-extra-parameters");

    if (!extraFlags.isEmpty()) {
        System.out.println("########### WARNING ###########");
        System.out.println(
                "You are using extra flags for CWLtool which may not be supported. Use at your own risk.");
    }

    // Trim whitespace from each flag (and echo it for debugging).
    // toCollection(ArrayList::new) replaces an unsafe (ArrayList) cast of
    // Collectors.toList(), whose concrete List class is unspecified.
    extraFlags = extraFlags.stream().map(this::trimAndPrintInput)
            .collect(Collectors.toCollection(ArrayList::new));

    // Build the cwltool invocation, splicing the extra flags in right after
    // the command name.
    ArrayList<String> command = new ArrayList<>(Arrays.asList("cwltool", "--enable-dev", "--non-strict",
            "--enable-net", "--outdir", outputDir, "--tmpdir-prefix", workingDir, cwlFile, jsonSettings));
    command.addAll(1, extraFlags);

    final String joined = Joiner.on(" ").join(command);
    System.out.println("Executing: " + joined);
    final ImmutablePair<String, String> execute = Utilities.executeCommand(joined);

    // Indent stderr and stdout for the integration-output report.
    String stdout = execute.getLeft().replaceAll("(?m)^", "\t");
    String stderr = execute.getRight().replaceAll("(?m)^", "\t");

    final String cwltool = "cwltool";
    outputIntegrationOutput(outputDir, execute, stdout, stderr, cwltool);

    @SuppressWarnings("unchecked") // cwltool's stdout is expected to be a YAML mapping
    Map<String, Object> obj = (Map<String, Object>) yaml.load(execute.getLeft());
    return obj;
}

From source file:pathwaynet.PathwayCalculator.java

/**
 * Computes a permutation-based one-sided p-value for the real
 * difference-of-proportion profile: the fraction of permuted profiles whose
 * Pearson correlation (difference vs. distance threshold) is not greater
 * than the real profile's correlation.
 *
 * @param differencesPropReal   real differences of proportion, indexed by distance threshold - 1
 * @param differencesPropPermut one difference profile per permutation
 * @return p-value in [0, 1] (NaN if there are no permutations)
 */
private double calculatePValue(ArrayList<Double> differencesPropReal,
        ArrayList<ArrayList<Double>> differencesPropPermut) {
    double[] differencesPropVector = new double[differencesPropReal.size()];
    double[] distanceThreshold = new double[differencesPropReal.size()];
    for (int i = 0; i < differencesPropReal.size(); i++) {
        differencesPropVector[i] = differencesPropReal.get(i);
        distanceThreshold[i] = i + 1; // thresholds are 1-based distances
    }
    // correlation of the real profile against the distance thresholds
    Double corrReal = new PearsonsCorrelation().correlation(differencesPropVector, distanceThreshold);
    //System.err.println(corrReal);

    ArrayList<Double> corrPermut = new ArrayList<>();
    differencesPropPermut.stream().forEach((differencesPropThisPermut) -> {
        // pad shorter permuted profiles with 0 so all vectors match the real profile's length
        double[] differencesPropVectorPermut = new double[differencesPropReal.size()];
        for (int i = 0; i < differencesPropReal.size(); i++) {
            if (differencesPropThisPermut.size() > i)
                differencesPropVectorPermut[i] = differencesPropThisPermut.get(i);
            else
                differencesPropVectorPermut[i] = 0;
        }
        corrPermut.add(new PearsonsCorrelation().correlation(differencesPropVectorPermut, distanceThreshold));
    });

    // one-sided test: how many permuted correlations do not exceed the real one
    long numPermutNotLargerThanReal = corrPermut.stream().filter(notGreaterThan(corrReal)).count();
    double p = ((double) numPermutNotLargerThanReal) / differencesPropPermut.size();

    return p;
}

From source file:dictionary.GUI.GUI_Main.java

/**
 * Populates the dictionary entry list widget with the supplied words.
 *
 * @param list the entries to show, in display order
 */
private void initData(ArrayList<String> list) {
    // Diamond operator fixes the raw-type DefaultListModel construction.
    DefaultListModel<String> model = new DefaultListModel<>();
    System.out.println("\n" + list.size());
    for (String entry : list) {
        model.addElement(entry);
    }
    listEntries.setModel(model);
}