Example usage for java.util List parallelStream

List of usage examples for java.util List parallelStream

Introduction

On this page you can find example usages of java.util.List.parallelStream().

Prototype

default Stream<E> parallelStream() 

Document

Returns a possibly parallel Stream with this collection as its source.
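
Before the project examples, here is a minimal standalone sketch of a direct call, assuming nothing beyond the JDK; it upper-cases a small list through a possibly parallel stream. Note that collect(toList()) preserves the encounter order of the source list even when the stream runs in parallel.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class ParallelStreamExample {
    public static void main(String[] args) {
        List<String> words = Arrays.asList("alpha", "beta", "gamma");
        // The work may be split across threads, but the collected result
        // still follows the encounter order of the source list.
        List<String> upper = words.parallelStream()
                .map(String::toUpperCase)
                .collect(Collectors.toList());
        System.out.println(upper); // [ALPHA, BETA, GAMMA]
    }
}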

Usage

From source file:com.hack23.cia.web.impl.ui.application.views.common.chartfactory.impl.DocumentChartDataManagerImpl.java

/**
 * Adds the document history by org data.
 *
 * @param dataSeries
 *            the data series
 * @param series
 *            the series
 * @param itemList
 *            the item list
 */
private static void addDocumentHistoryByOrgData(final DataSeries dataSeries, final Series series,
        final List<ViewRiksdagenOrgDocumentDailySummary> itemList) {
    final Map<String, List<ViewRiksdagenOrgDocumentDailySummary>> map = itemList.parallelStream()
            .filter(t -> t != null)
            .collect(Collectors.groupingBy(t -> StringUtils.defaultIfBlank(t.getDocumentType(), NO_INFO)));

    for (final Entry<String, List<ViewRiksdagenOrgDocumentDailySummary>> entry : map.entrySet()) {

        series.addSeries(new XYseries().setLabel(entry.getKey()));

        dataSeries.newSeries();
        if (entry.getValue() != null) {
            for (final ViewRiksdagenOrgDocumentDailySummary item : entry.getValue()) {
                if (item != null) {
                    dataSeries.add(item.getEmbeddedId().getPublicDate(), item.getTotal());
                }
            }
        } else {
            LOGGER.info(LOG_MSG_MISSING_DATA_FOR_KEY, entry);
        }

    }
}

From source file:com.github.aptd.simulation.datamodel.CXMLReader.java

/**
 * get an agent-reference name
 *
 * @param p_value value of the agent-reference
 * @param p_list object list
 * @return agent-reference name
 * @throws CNotFoundException thrown if the agent reference is not found
 */
@SuppressWarnings("unchecked")
private static <T> Pair<T, String> agentname(final T p_value, final List<Object> p_list) {
    return new ImmutablePair<T, String>(p_value,
            p_list.parallelStream().filter(a -> a instanceof AgentRef).findAny().map(a -> (AgentRef) a)
                    .map(AgentRef::getAgent).orElseThrow(() -> new CNotFoundException(
                            CCommon.languagestring(CXMLReader.class, "agentreferencenotfound"))));
}

From source file:com.kantenkugel.discordbot.jdocparser.JDoc.java

private static List<Documentation> getMethodDocs(JDocParser.ClassDocumentation classDoc, String methodName,
        String methodSig, boolean isFuzzy) {
    List<JDocParser.MethodDocumentation> docs = classDoc.methodDocs.get(methodName.toLowerCase()).stream()
            .sorted(Comparator.comparingInt(m -> m.argTypes.size())).collect(Collectors.toList());
    List<JDocParser.MethodDocumentation> filteredDocs = docs.parallelStream()
            .filter(doc -> doc.matches(methodSig, isFuzzy)).collect(Collectors.toList());
    switch (filteredDocs.size()) {
    case 1:
        return Collections.singletonList(filteredDocs.get(0));
    case 0:
        return Collections.unmodifiableList(docs);
    default:
        return Collections.unmodifiableList(filteredDocs);
    }
}

From source file:dhbw.clippinggorilla.userinterface.windows.NewSourceWindow.java

public static Map<String, Locale> getCountries() {
    List<String> countries = Arrays.asList(Locale.getISOCountries());
    HashMap<String, Locale> mapCountries = new HashMap<>();
    countries.parallelStream().forEach((country) -> {
        List<Locale> locales = LocaleUtils.languagesByCountry(country);
        if (!locales.isEmpty()) {
            mapCountries.put(country, new Locale(locales.get(0).getLanguage(), country));
        }
    });
    return mapCountries;
}
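
This example mutates a plain HashMap from forEach on a parallel stream, which is not thread-safe. A hedged alternative sketch (this getCountries is a hypothetical rewrite, still assuming Apache Commons Lang's LocaleUtils) lets a collector build the map instead:

import java.util.Arrays;
import java.util.Locale;
import java.util.Map;
import java.util.stream.Collectors;

import org.apache.commons.lang3.LocaleUtils;

// Hypothetical variant: collect into the map rather than mutating a
// shared HashMap from a parallel forEach.
public static Map<String, Locale> getCountries() {
    return Arrays.stream(Locale.getISOCountries())
            .parallel()
            .filter(country -> !LocaleUtils.languagesByCountry(country).isEmpty())
            .collect(Collectors.toMap(
                    country -> country,
                    country -> new Locale(
                            LocaleUtils.languagesByCountry(country).get(0).getLanguage(),
                            country)));
}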

From source file:de.bund.bfr.math.Evaluator.java

private static double[] getErrors(double[] valuesX, Map<String, Double> parserConstants,
        Map<String, Map<String, Double>> covariances, double extraVariance, int degreesOfFreedom,
        ParameterFunction f) throws ParseException {
    List<String> paramList = new ArrayList<>(covariances.keySet());
    Map<String, double[]> derivValues = new LinkedHashMap<>();
    Map<String, ParseException> exceptions = new LinkedHashMap<>();

    paramList.parallelStream().forEach(param -> {
        Map<String, Double> constantsMinus = new LinkedHashMap<>(parserConstants);
        Map<String, Double> constantsPlus = new LinkedHashMap<>(parserConstants);
        double value = parserConstants.get(param);

        constantsMinus.put(param, value - MathUtils.DERIV_EPSILON);
        constantsPlus.put(param, value + MathUtils.DERIV_EPSILON);

        double[] deriv = new double[valuesX.length];

        try {
            double[] valuesMinus = f.getValuesY(constantsMinus);
            double[] valuesPlus = f.getValuesY(constantsPlus);

            for (int i = 0; i < valuesX.length; i++) {
                deriv[i] = (valuesPlus[i] - valuesMinus[i]) / (2 * MathUtils.DERIV_EPSILON);
            }

            derivValues.put(param, deriv);
        } catch (ParseException e) {
            exceptions.put(param, e);
        }
    });

    if (!exceptions.isEmpty()) {
        throw exceptions.values().stream().findAny().get();
    }

    double[] valuesY = new double[valuesX.length];
    double conf95 = MathUtils.get95PercentConfidence(degreesOfFreedom);

    Arrays.fill(valuesY, Double.NaN);

    loop: for (int index = 0; index < valuesX.length; index++) {
        double variance = 0.0;
        int n = paramList.size();

        for (int i = 0; i < n; i++) {
            String param = paramList.get(i);
            double value = derivValues.get(param)[index];

            variance += value * value * covariances.get(param).get(param);

            if (!Double.isFinite(variance)) {
                continue loop;
            }
        }

        for (int i = 0; i < n - 1; i++) {
            for (int j = i + 1; j < n; j++) {
                String param1 = paramList.get(i);
                String param2 = paramList.get(j);

                variance += 2.0 * derivValues.get(param1)[index] * derivValues.get(param2)[index]
                        * covariances.get(param1).get(param2);

                if (!Double.isFinite(variance)) {
                    continue loop;
                }
            }
        }

        valuesY[index] = Math.sqrt(variance + extraVariance) * conf95;
    }

    return valuesY;
}
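
As in the getCountries example above, derivValues and exceptions are plain LinkedHashMaps written to from a parallel forEach. A minimal sketch of one way to make those writes safe, assuming the rest of the method stays unchanged, swaps in concurrent maps (at the cost of losing insertion order):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical adjustment: ConcurrentHashMap tolerates concurrent puts
// from the parallel stream; insertion order is no longer preserved.
Map<String, double[]> derivValues = new ConcurrentHashMap<>();
Map<String, ParseException> exceptions = new ConcurrentHashMap<>();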

From source file:eu.mihosoft.vrl.v3d.Edge.java

private static List<Edge> boundaryEdgesOfPlaneGroup(List<Polygon> planeGroup) {
    List<Edge> edges = new ArrayList<>();

    Stream<Polygon> pStream;

    if (planeGroup.size() > 200) {
        pStream = planeGroup.parallelStream();
    } else {
        pStream = planeGroup.stream();
    }

    pStream.map((p) -> Edge.fromPolygon(p)).forEach((pEdges) -> {
        edges.addAll(pEdges);
    });

    Stream<Edge> edgeStream;

    if (edges.size() > 200) {
        edgeStream = edges.parallelStream();
    } else {
        edgeStream = edges.stream();
    }

    // find potential boundary edges, i.e., edges that occur once (freq=1)
    List<Edge> potentialBoundaryEdges = new ArrayList<>();
    edgeStream.forEachOrdered((e) -> {
        int count = Collections.frequency(edges, e);
        if (count == 1) {
            potentialBoundaryEdges.add(e);
        }
    });

    // now find "false boundary" edges and remove them from the
    // boundary-edge-list
    //
    // thanks to Susanne Höllbacher for the idea :)
    Stream<Edge> bndEdgeStream;

    if (potentialBoundaryEdges.size() > 200) {
        bndEdgeStream = potentialBoundaryEdges.parallelStream();
    } else {
        bndEdgeStream = potentialBoundaryEdges.stream();
    }

    List<Edge> realBndEdges = bndEdgeStream
            .filter(be -> edges.stream().filter(e -> falseBoundaryEdgeSharedWithOtherEdge(be, e)).count() == 0)
            .collect(Collectors.toList());

    //
    //        System.out.println("#bnd-edges: " + realBndEdges.size()
    //                + ",#edges: " + edges.size()
    //                + ", #del-bnd-edges: " + (boundaryEdges.size() - realBndEdges.size()));
    return realBndEdges;
}
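
The method repeats the same size check three times to decide between a parallel and a sequential stream. A small sketch of a helper that expresses the choice once (streamFor is hypothetical, not part of the original source):

import java.util.Collection;
import java.util.stream.Stream;

// Hypothetical helper: parallelize only above a size threshold, since the
// overhead of splitting can outweigh the gain for small collections.
private static <T> Stream<T> streamFor(Collection<T> items, int parallelThreshold) {
    return items.size() > parallelThreshold ? items.parallelStream() : items.stream();
}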

From source file:oct.util.Util.java

public static List<LinePoint> findMaxAndMins(List<LinePoint> line) {
    //create list of all positive Y values to get peaks
    ArrayList<LinePoint> convList = new ArrayList<>(line.size());
    line.forEach(p -> {
        convList.add(new LinePoint(p.getX(), Math.abs(p.getY())));
    });
    //find X values of peaks
    List<LinePoint> peaks = getMaximums(convList);
    //collect peak points
    List<LinePoint> ret = line.parallelStream()
            .filter(p -> peaks.stream().anyMatch(pk -> pk.getX() == p.getX())).collect(Collectors.toList());
    //sort by X position
    ret.sort(Comparator.comparingInt(peak -> peak.getX()));
    return ret;
}
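
The filter above rescans the peaks list for every point of line. A hypothetical tweak, assuming LinePoint.getX() returns an int as the comparator suggests, precomputes the peak X positions into a Set for constant-time lookups:

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

// Hypothetical variant of the collection step: build a lookup set of peak
// X positions once instead of calling anyMatch per point.
Set<Integer> peakXs = peaks.stream().map(LinePoint::getX).collect(Collectors.toSet());
List<LinePoint> ret = line.parallelStream()
        .filter(p -> peakXs.contains(p.getX()))
        .collect(Collectors.toList());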

From source file:de.steilerdev.myVerein.server.model.Division.java

/**
 * This function takes a set of divisions and reduces it to the divisions closest to the root.
 * @param unoptimizedSetOfDivisions A set of divisions.
 * @return The list of optimized divisions.
 */
@JsonIgnore
@Transient
public static List<Division> getOptimizedSetOfDivisions(List<Division> unoptimizedSetOfDivisions) {
    if (unoptimizedSetOfDivisions == null || unoptimizedSetOfDivisions.isEmpty()) {
        logger.warn("Trying to optimize set of divisions, but unoptimized set is either null or empty");
        return null;
    } else if (unoptimizedSetOfDivisions.size() == 1) {
        return unoptimizedSetOfDivisions;
    } else {
        logger.debug("Optimizing division set");
        //Reducing the list to the divisions that are on the top of the tree, removing all unnecessary divisions.
        return unoptimizedSetOfDivisions.parallelStream() //Creating a stream of all divisions
                .filter(division -> unoptimizedSetOfDivisions.parallelStream().sorted() //filtering out all divisions that are already covered by a division closer to the root of the tree; a parallel, sorted stream increases the likelihood of an early match
                        .noneMatch(allDivisions -> division.getAncestors().contains(allDivisions))) //checking whether any division in the list is an ancestor of the current division; if there is a match, a closer division exists
                .collect(Collectors.toList()); // Converting the stream to a list
    }
}

From source file:oct.util.Util.java

public static List<LinePoint> findPeaksAndVallies(List<LinePoint> line) {
    //first find peaks
    List<LinePoint> peaks = getMaximums(line);
    //create inverse of line to find valleys
    ArrayList<LinePoint> convList = new ArrayList<>(line.size());
    line.forEach(p -> {
        convList.add(new LinePoint(p.getX(), 0D - p.getY()));
    });
    //find X values of valleys
    List<LinePoint> vallies = getMaximums(convList);
    //collect valley points
    List<LinePoint> ret = line.parallelStream()
            .filter(p -> vallies.stream().anyMatch(pk -> pk.getX() == p.getX())).collect(Collectors.toList());
    //sort by X position
    ret.addAll(peaks);
    ret.sort(Comparator.comparingInt(peak -> peak.getX()));
    return ret;
}

From source file:org.opendatakit.briefcase.export.SubmissionParser.java

/**
 * Returns a sorted {@link List} of {@link Path} instances pointing to all the
 * submissions of a form that belong to the given {@link DateRange}.
 * <p>
 * Each file gets briefly parsed to obtain its submission date, which is used
 * as the sorting criterion and for filtering.
 */
static List<Path> getListOfSubmissionFiles(FormDefinition formDef, DateRange dateRange,
        SubmissionExportErrorCallback onParsingError) {
    Path instancesDir = formDef.getFormDir().resolve("instances");
    if (!Files.exists(instancesDir) || !Files.isReadable(instancesDir))
        return Collections.emptyList();
    // TODO Migrate this code to Try<Pair<Path, Option<OffsetDate>>> to be able to filter failed parsing attempts
    List<Pair<Path, OffsetDateTime>> paths = new ArrayList<>();
    list(instancesDir).filter(UncheckedFiles::isInstanceDir).forEach(instanceDir -> {
        Path submissionFile = instanceDir.resolve("submission.xml");
        try {
            Optional<OffsetDateTime> submissionDate = readSubmissionDate(submissionFile, onParsingError);
            paths.add(Pair.of(submissionFile,
                    submissionDate.orElse(OffsetDateTime.of(1970, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))));
        } catch (Throwable t) {
            log.error("Can't read submission date", t);
            EventBus.publish(ExportEvent.failureSubmission(formDef, instanceDir.getFileName().toString(), t));
        }
    });
    return paths.parallelStream()
            // Filter out submissions outside the given date range
            .filter(pair -> dateRange.contains(pair.getRight())).map(Pair::getLeft).collect(toList());
}