Example usage for java.util ArrayList stream

A list of usage examples for the java.util.ArrayList stream() method

Introduction

This page collects usage examples for the java.util.ArrayList stream() method.

Prototype

default Stream<E> stream() 

Document

Returns a sequential Stream with this collection as its source.
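
A minimal, self-contained sketch of the call before the full examples; the list contents are illustrative, not taken from the examples below:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class ArrayListStreamDemo {
    public static void main(String[] args) {
        ArrayList<String> names = new ArrayList<>(Arrays.asList("ada", "grace", "alan"));
        // stream() returns a sequential Stream backed by the list.
        List<String> upper = names.stream()
                .map(String::toUpperCase)
                .collect(Collectors.toList());
        System.out.println(upper); // prints [ADA, GRACE, ALAN]
    }
}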

Usage

From source file:Vista.CuentasCobrar.java

private void insertarCB(JComboBox seleccion, ArrayList<String> lista) {
    lista.stream().forEach((s) -> {
        if (s.contains(";")) {
            String[] temp = s.split(";");
            seleccion.addItem(temp[temp.length - 1]);
        } else {
            seleccion.addItem(s);
        }
    });
}
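
A hypothetical call, assuming it runs inside the same class (javax.swing.JComboBox and the item strings are illustrative); entries in "key;display" form keep only the last segment:

JComboBox<String> combo = new JComboBox<>();
ArrayList<String> items = new ArrayList<>(Arrays.asList("1;Cash", "2;Credit", "Other"));
insertarCB(combo, items); // adds "Cash", "Credit", "Other"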

From source file:it.tizianofagni.sparkboost.DataUtils.java

/**
 * Loads a data file in LibSVM format. Document IDs are assigned arbitrarily by Spark.
 *
 * @param sc               The Spark context.
 * @param dataFile         The data file.
 * @param labels0Based     True if the label IDs in the file are 0-based, false if 1-based.
 * @param binaryProblem    True if the problem is binary (one +1/-1 label per document).
 * @param minNumPartitions The minimum number of partitions to split data in "dataFile".
 * @return An RDD containing the read points.
 */
public static JavaRDD<MultilabelPoint> loadLibSvmFileFormatData(JavaSparkContext sc, String dataFile,
        boolean labels0Based, boolean binaryProblem, int minNumPartitions) {
    if (sc == null)
        throw new NullPointerException("The Spark Context is 'null'");
    if (dataFile == null || dataFile.isEmpty())
        throw new IllegalArgumentException("The dataFile is 'null'");
    JavaRDD<String> lines = sc.textFile(dataFile, minNumPartitions).cache();
    int localNumFeatures = computeNumFeatures(lines);
    Broadcast<Integer> distNumFeatures = sc.broadcast(localNumFeatures);
    JavaRDD<MultilabelPoint> docs = lines.filter(line -> !line.isEmpty()).zipWithIndex().map(item -> {
        int numFeatures = distNumFeatures.getValue();
        String line = item._1();
        long indexLong = item._2();
        int index = (int) indexLong;
        String[] fields = line.split("\\s+");
        String[] t = fields[0].split(",");
        int[] labels = new int[0];
        if (!binaryProblem) {
            labels = new int[t.length];
            for (int i = 0; i < t.length; i++) {
                String label = t[i];
                // Make the label 0-based (the file may store labels 0-based or 1-based).
                if (labels0Based)
                    labels[i] = (int) Double.parseDouble(label);
                else
                    labels[i] = (int) Double.parseDouble(label) - 1;
                if (labels[i] < 0)
                    throw new IllegalArgumentException(
                            "Got a negative label ID with the current configuration. Please check whether the "
                                    + "problem is binary or multiclass and whether the label IDs are 0-based or 1-based");
                assert (labels[i] >= 0);
            }
        } else {
            if (t.length > 1)
                throw new IllegalArgumentException(
                        "In a binary problem you can specify only one label ID (+1 or -1) per document");
            int label = (int) Double.parseDouble(t[0]);
            if (label > 0) {
                labels = new int[] { 0 };
            }
        }
        ArrayList<Integer> indexes = new ArrayList<Integer>();
        ArrayList<Double> values = new ArrayList<Double>();
        for (int j = 1; j < fields.length; j++) {
            String data = fields[j];
            if (data.startsWith("#"))
                // Beginning of a comment. Skip it.
                break;
            String[] featInfo = data.split(":");
            // Convert the feature ID to 0-based.
            int featID = Integer.parseInt(featInfo[0]) - 1;
            double value = Double.parseDouble(featInfo[1]);
            indexes.add(featID);
            values.add(value);
        }

        SparseVector v = (SparseVector) Vectors.sparse(numFeatures, indexes.stream().mapToInt(i -> i).toArray(),
                values.stream().mapToDouble(i -> i).toArray());
        return new MultilabelPoint(index, v, labels);
    });

    lines.unpersist();
    return docs;
}
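
For reference, each non-empty input line is expected in the usual LibSVM layout: a comma-separated list of label IDs followed by space-separated featureID:value pairs, with feature IDs 1-based in the file (the code above converts them to 0-based). A minimal call sketch, assuming a local Spark context; the app name, master URL, file path, and partition count are illustrative:

SparkConf conf = new SparkConf().setAppName("libsvm-load-demo").setMaster("local[*]");
JavaSparkContext sc = new JavaSparkContext(conf);
// labels0Based = false: label IDs in the file are 1-based; binaryProblem = false: multilabel data.
JavaRDD<MultilabelPoint> points =
        DataUtils.loadLibSvmFileFormatData(sc, "/tmp/train.libsvm", false, false, 4);
System.out.println("Read " + points.count() + " points");
sc.close();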

From source file:it.tizianofagni.sparkboost.DataUtils.java

/**
 * Loads a data file in LibSVM format. The document ID is given at the beginning of each
 * line, separated by a tab from the document data.
 *
 * @param sc               The Spark context.
 * @param dataFile         The data file.
 * @param labels0Based     True if the label IDs in the file are 0-based, false if 1-based.
 * @param binaryProblem    True if the problem is binary (one +1/-1 label per document).
 * @param minNumPartitions The minimum number of partitions to split data in "dataFile".
 * @return An RDD containing the read points.
 */
public static JavaRDD<MultilabelPoint> loadLibSvmFileFormatDataWithIDs(JavaSparkContext sc, String dataFile,
        boolean labels0Based, boolean binaryProblem, int minNumPartitions) {
    if (sc == null)
        throw new NullPointerException("The Spark Context is 'null'");
    if (dataFile == null || dataFile.isEmpty())
        throw new IllegalArgumentException("The dataFile is 'null'");
    JavaRDD<String> lines = sc.textFile(dataFile, minNumPartitions).cache();
    int localNumFeatures = computeNumFeatures(lines);
    Broadcast<Integer> distNumFeatures = sc.broadcast(localNumFeatures);
    JavaRDD<MultilabelPoint> docs = lines.filter(line -> !line.isEmpty()).map(entireRow -> {
        int numFeatures = distNumFeatures.getValue();
        String[] fields = entireRow.split("\t");
        String line = fields[1];
        int docID = Integer.parseInt(fields[0]);
        fields = line.split("\\s+");
        String[] t = fields[0].split(",");
        int[] labels = new int[0];
        if (!binaryProblem) {
            labels = new int[t.length];
            for (int i = 0; i < t.length; i++) {
                String label = t[i];
                // Make the label 0-based (the file may store labels 0-based or 1-based).
                if (labels0Based)
                    labels[i] = (int) Double.parseDouble(label);
                else
                    labels[i] = (int) Double.parseDouble(label) - 1;
                if (labels[i] < 0)
                    throw new IllegalArgumentException(
                            "Got a negative label ID with the current configuration. Please check whether the "
                                    + "problem is binary or multiclass and whether the label IDs are 0-based or 1-based");
                assert (labels[i] >= 0);
            }
        } else {
            if (t.length > 1)
                throw new IllegalArgumentException(
                        "In a binary problem you can specify only one label ID (+1 or -1) per document");
            int label = (int) Double.parseDouble(t[0]);
            if (label > 0) {
                labels = new int[] { 0 };
            }
        }
        ArrayList<Integer> indexes = new ArrayList<Integer>();
        ArrayList<Double> values = new ArrayList<Double>();
        for (int j = 1; j < fields.length; j++) {
            String data = fields[j];
            if (data.startsWith("#"))
                // Beginning of a comment. Skip it.
                break;
            String[] featInfo = data.split(":");
            // Convert the feature ID to 0-based.
            int featID = Integer.parseInt(featInfo[0]) - 1;
            double value = Double.parseDouble(featInfo[1]);
            indexes.add(featID);
            values.add(value);
        }

        SparseVector v = (SparseVector) Vectors.sparse(numFeatures, indexes.stream().mapToInt(i -> i).toArray(),
                values.stream().mapToDouble(i -> i).toArray());
        return new MultilabelPoint(docID, v, labels);
    });

    lines.unpersist();
    return docs;
}
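
The only difference from the previous method is the input layout: each line starts with an explicit document ID, separated by a tab from the LibSVM payload. An illustrative input line (all values made up):

42	1,3 7:0.5 12:1.0

Here 42 is the document ID, 1,3 are the label IDs, and 7:0.5 and 12:1.0 are 1-based featureID:value pairs.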

From source file:com.globocom.grou.report.ts.opentsdb.OpenTSDBClient.java

@SuppressWarnings("unchecked")
@Override
public Map<String, Double> makeReport(Test test) {
    final TreeMap<String, Double> mapOfResult = new TreeMap<>();
    ArrayList<HashMap<String, Object>> metrics = Optional.ofNullable(metrics(test)).orElse(new ArrayList<>());
    metrics.stream().filter(metric -> Objects.nonNull(metric.get("metric"))).forEach(metric -> {
        String key = (String) metric.get("metric");
        String aggr = (String) metric.get("aggr");
        int durationTimeMillis = test.getDurationTimeMillis();
        Map<String, Double> dps = Optional.ofNullable((Map<String, Double>) metric.get("dps"))
                .orElse(Collections.emptyMap());
        final AtomicDouble reduceSum = new AtomicDouble(0.0);
        final AtomicDouble reduceMax = new AtomicDouble(0.0);
        dps.entrySet().stream().mapToDouble(Map.Entry::getValue).forEach(delta -> {
            reduceSum.addAndGet(delta);
            if (reduceMax.get() < delta)
                reduceMax.set(delta);
        });
        double value = reduceSum.get();
        double max = reduceMax.get();
        if (!Double.isNaN(value)) {
            if ("sum".equals(aggr)) {
                int durationTimeSecs = durationTimeMillis / 1000;
                double avg = value / (double) durationTimeSecs;
                mapOfResult.put(key + " (total)", formatValue(value));
                mapOfResult.put(key + " (avg tps)", formatValue(avg));
                mapOfResult.put(key + " (max tps)",
                        formatValue(max / Math.max(1.0, (double) durationTimeSecs / (double) NUM_SAMPLES)));
            } else {
                value = value / (double) dps.size();
                mapOfResult.put(key, formatValue(value));
            }
        }
    });
    if (mapOfResult.isEmpty())
        LOGGER.error("Test {}.{}: makeReport returned an empty result", test.getProject(), test.getName());
    return mapOfResult;
}

From source file:org.apache.sysml.hops.rewrite.HopRewriteUtils.java

public static boolean containsSecondOrderBuiltin(ArrayList<Hop> roots) {
    Hop.resetVisitStatus(roots);
    return roots.stream().anyMatch(r -> containsSecondOrderBuiltin(r));
}

From source file:io.paradoxical.cassieq.ServiceConfigurator.java

public void configure() {
    ArrayList<BiConsumer<ServiceConfiguration, Environment>> run = new ArrayList<>();

    run.add(this::configureJson);
    run.add(this::configureFilters);
    run.add(this::configureSwaggerApi);
    run.add(this::configureLogging);
    run.add(this::configureAdmin);

    run.stream().forEach(configFunction -> configFunction.accept(config, env));
}

From source file:org.moe.gradle.tasks.Retrolambda.java

@Override
protected void run() {
    // Clean expanded classes dir
    try {
        FileUtils.deleteFileOrFolder(getExpandedClassesDir());
    } catch (IOException e) {
        throw new GradleException("Failed to delete directory " + getExpandedClassesDir().getAbsolutePath(), e);
    }

    // Clean output dir
    try {
        FileUtils.deleteFileOrFolder(getOutputDir());
    } catch (IOException e) {
        throw new GradleException("Failed to delete directory " + getOutputDir().getAbsolutePath(), e);
    }

    // Copy class files from jar
    getProject().copy(spec -> {
        getInputFiles().forEach(it -> spec.from(it.isDirectory() ? it : getProject().zipTree(it)));
        spec.include("**/*.class");
        spec.into(getExpandedClassesDir());
    });

    // Run Retrolambda
    javaexec(spec -> {
        spec.systemProperty("retrolambda.inputDir", getExpandedClassesDir().getAbsolutePath());
        spec.systemProperty("retrolambda.classpath", StringUtils.compose(() -> {
            final ArrayList<File> files = new ArrayList<>(getClasspathFiles().getFiles());
            files.add(0, getExpandedClassesDir()); // Add input dir to classpath
            final List<String> paths = files.stream().map(File::getAbsolutePath).collect(Collectors.toList());
            return org.apache.commons.lang3.StringUtils.join(paths, File.pathSeparator);
        }));
        spec.systemProperty("retrolambda.defaultMethods", getDefaultMethods());
        spec.systemProperty("retrolambda.natjSupport", getNatjSupport());
        spec.systemProperty("retrolambda.outputDir", getOutputDir().getAbsolutePath());
        spec.setMain("-jar");
        spec.args(getRetrolambdaJar().getAbsolutePath());
    });
}

From source file:qa.experiment.SRLToAligner.java

private ProcessFrame constructProcessFrame(ProcessFrame frame,
        HashMap<String, ArrayList<RoleSpan>> roleRoleSpanPair) {
    final Comparator<RoleSpan> comp = (r1, r2) -> Double.compare(r1.getRoleScore(), r2.getRoleScore());
    ProcessFrame res = new ProcessFrame();
    res.setProcessName(frame.getProcessName());
    res.setTokenizedText(frame.getTokenizedText());
    res.setRawText(frame.getRawText());

    for (String argLabel : GlobalV.labels) {
        if (roleRoleSpanPair.get(argLabel) != null) {
            ArrayList<RoleSpan> spans = roleRoleSpanPair.get(argLabel);
            RoleSpan maxSpan = spans.stream().max(comp).get();
            if (argLabel.equalsIgnoreCase("A0")) {
                res.setUnderGoer(maxSpan.getTextSpan());
                res.setScores(0, maxSpan.getScores());
            }
            if (argLabel.equalsIgnoreCase("A1")) {
                res.setEnabler(maxSpan.getTextSpan());
                res.setScores(1, maxSpan.getScores());
            }
            if (argLabel.equalsIgnoreCase("T")) {
                res.setTrigger(maxSpan.getTextSpan());
                res.setScores(2, maxSpan.getScores());
            }
            if (argLabel.equalsIgnoreCase("A2")) {
                res.setResult(maxSpan.getTextSpan());
                res.setScores(3, maxSpan.getScores());
            }
        }
    }
    return res;
}

From source file:org.apache.metron.indexing.dao.UpdateIntegrationTest.java

@Test
public void testAddCommentAndPatch() throws Exception {
    Map<String, Object> fields = new HashMap<>();
    fields.put("guid", "add_comment");
    fields.put("source.type", SENSOR_NAME);

    Document document = new Document(fields, "add_comment", SENSOR_NAME, 1526306463050L);
    getDao().update(document, Optional.of(SENSOR_NAME));
    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);

    addAlertComment("add_comment", "New Comment", "test_user", 1526306463050L);
    // Ensure we have the first comment
    ArrayList<AlertComment> comments = new ArrayList<>();
    comments.add(new AlertComment("New Comment", "test_user", 1526306463050L));
    document.getDocument().put(COMMENTS_FIELD,
            comments.stream().map(AlertComment::asMap).collect(Collectors.toList()));
    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);

    List<Map<String, Object>> patchList = new ArrayList<>();
    Map<String, Object> patch = new HashMap<>();
    patch.put("op", "add");
    patch.put("path", "/project");
    patch.put("value", "metron");
    patchList.add(patch);

    PatchRequest pr = new PatchRequest();
    pr.setGuid("add_comment");
    pr.setIndex(SENSOR_NAME);
    pr.setSensorType(SENSOR_NAME);
    pr.setPatch(patchList);
    getDao().patch(getDao(), pr, Optional.of(new Date().getTime()));

    document.getDocument().put("project", "metron");
    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);
}

From source file:org.apache.metron.indexing.dao.UpdateIntegrationTest.java

@Test
@SuppressWarnings("unchecked")
public void testRemoveComments() throws Exception {
    Map<String, Object> fields = new HashMap<>();
    fields.put("guid", "add_comment");
    fields.put("source.type", SENSOR_NAME);

    Document document = new Document(fields, "add_comment", SENSOR_NAME, 1526401584951L);
    getDao().update(document, Optional.of(SENSOR_NAME));
    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);

    addAlertComment("add_comment", "New Comment", "test_user", 1526401584951L);
    // Ensure we have the first comment
    ArrayList<AlertComment> comments = new ArrayList<>();
    comments.add(new AlertComment("New Comment", "test_user", 1526401584951L));
    document.getDocument().put(COMMENTS_FIELD,
            comments.stream().map(AlertComment::asMap).collect(Collectors.toList()));
    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);

    addAlertComment("add_comment", "New Comment 2", "test_user_2", 1526401584952L);
    // Ensure we have the second comment
    comments.add(new AlertComment("New Comment 2", "test_user_2", 1526401584952L));
    document.getDocument().put(COMMENTS_FIELD,
            comments.stream().map(AlertComment::asMap).collect(Collectors.toList()));
    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);

    removeAlertComment("add_comment", "New Comment 2", "test_user_2", 1526401584952L);
    // Ensure we only have the first comment
    comments = new ArrayList<>();
    comments.add(new AlertComment("New Comment", "test_user", 1526401584951L));
    document.getDocument().put(COMMENTS_FIELD,
            comments.stream().map(AlertComment::asMap).collect(Collectors.toList()));
    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);

    removeAlertComment("add_comment", "New Comment", "test_user", 1526401584951L);
    // Ensure we have no comments
    document.getDocument().remove(COMMENTS_FIELD);
    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);
}