Example usage for java.util LinkedHashMap values

Introduction

On this page you can find example usage of java.util.LinkedHashMap.values().

Prototype

public Collection<V> values() 

Document

Returns a Collection view of the values contained in this map.
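
As a quick, self-contained sketch (not taken from any of the projects below): the returned collection is a live, insertion-ordered view of the map, so changes to it write through to the map.

import java.util.Collection;
import java.util.LinkedHashMap;

public class ValuesViewDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, Integer> map = new LinkedHashMap<>();
        map.put("first", 1);
        map.put("second", 2);
        map.put("third", 3);

        // LinkedHashMap preserves insertion order, so values()
        // iterates in the order the entries were added.
        Collection<Integer> values = map.values();
        System.out.println(values); // [1, 2, 3]

        // The collection is a view backed by the map: removing an
        // element removes the corresponding entry from the map.
        values.remove(2);
        System.out.println(map); // {first=1, third=3}
    }
}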

Usage

From source file:com.simiacryptus.mindseye.applications.ObjectLocationBase.java

/**
 * Run.
 *
 * @param log the log
 */
public void run(@Nonnull final NotebookOutput log) {
    //    @Nonnull String logName = "cuda_" + log.getName() + ".log";
    //    log.p(log.file((String) null, logName, "GPU Log"));
    //    CudaSystem.addLog(new PrintStream(log.file(logName)));

    ImageClassifierBase classifier = getClassifierNetwork();
    Layer classifyNetwork = classifier.getNetwork();

    ImageClassifierBase locator = getLocatorNetwork();
    Layer locatorNetwork = locator.getNetwork();
    ArtistryUtil.setPrecision((DAGNetwork) classifyNetwork, Precision.Float);
    ArtistryUtil.setPrecision((DAGNetwork) locatorNetwork, Precision.Float);

    Tensor[][] inputData = loadImages_library();
    //    Tensor[][] inputData = loadImage_Caltech101(log);
    double alphaPower = 0.8;

    final AtomicInteger index = new AtomicInteger(0);
    Arrays.stream(inputData).limit(10).forEach(row -> {
        log.h3("Image " + index.getAndIncrement());
        final Tensor img = row[0];
        log.p(log.image(img.toImage(), ""));
        Result classifyResult = classifyNetwork.eval(new MutableResult(row));
        Result locationResult = locatorNetwork.eval(new MutableResult(row));
        Tensor classification = classifyResult.getData().get(0);
        List<CharSequence> categories = classifier.getCategories();
        int[] sortedIndices = IntStream.range(0, categories.size()).mapToObj(x -> x)
                .sorted(Comparator.comparing(i -> -classification.get(i))).mapToInt(x -> x).limit(10).toArray();
        logger.info(Arrays.stream(sortedIndices)
                .mapToObj(
                        i -> String.format("%s: %s = %s%%", i, categories.get(i), classification.get(i) * 100))
                .reduce((a, b) -> a + "\n" + b).orElse(""));
        LinkedHashMap<CharSequence, Tensor> vectors = new LinkedHashMap<>();
        List<CharSequence> predictionList = Arrays.stream(sortedIndices).mapToObj(categories::get)
                .collect(Collectors.toList());
        Arrays.stream(sortedIndices).limit(6).forEach(category -> {
            CharSequence name = categories.get(category);
            log.h3(name);
            Tensor alphaTensor = renderAlpha(alphaPower, img, locationResult, classification, category);
            log.p(log.image(img.toRgbImageAlphaMask(0, 1, 2, alphaTensor), ""));
            vectors.put(name, alphaTensor.unit());
        });

        Tensor avgDetection = vectors.values().stream().reduce((a, b) -> a.add(b)).get()
                .scale(1.0 / vectors.size());
        Array2DRowRealMatrix covarianceMatrix = new Array2DRowRealMatrix(predictionList.size(),
                predictionList.size());
        for (int x = 0; x < predictionList.size(); x++) {
            for (int y = 0; y < predictionList.size(); y++) {
                Tensor l = vectors.get(predictionList.get(x));
                Tensor r = vectors.get(predictionList.get(y));

                covarianceMatrix.setEntry(x, y,
                        null == l || null == r ? 0 : (l.minus(avgDetection)).dot(r.minus(avgDetection)));
            }
        }
        @Nonnull
        final EigenDecomposition decomposition = new EigenDecomposition(covarianceMatrix);

        for (int objectVector = 0; objectVector < 10; objectVector++) {
            log.h3("Eigenobject " + objectVector);
            double eigenvalue = decomposition.getRealEigenvalue(objectVector);
            RealVector eigenvector = decomposition.getEigenvector(objectVector);
            Tensor detectionRegion = IntStream.range(0, eigenvector.getDimension()).mapToObj(i -> {
                Tensor tensor = vectors.get(predictionList.get(i));
                return null == tensor ? null : tensor.scale(eigenvector.getEntry(i));
            }).filter(x -> null != x).reduce((a, b) -> a.add(b)).get();
            detectionRegion = detectionRegion.scale(255.0 / detectionRegion.rms());
            CharSequence categorization = IntStream.range(0, eigenvector.getDimension()).mapToObj(i -> {
                CharSequence category = predictionList.get(i);
                double component = eigenvector.getEntry(i);
                return String.format("<li>%s = %.4f</li>", category, component);
            }).reduce((a, b) -> a + "" + b).get();
            log.p(String.format("Object Detected: <ol>%s</ol>", categorization));
            log.p("Object Eigenvalue: " + eigenvalue);
            log.p("Object Region: " + log.image(img.toRgbImageAlphaMask(0, 1, 2, detectionRegion), ""));
            log.p("Object Region Compliment: "
                    + log.image(img.toRgbImageAlphaMask(0, 1, 2, detectionRegion.scale(-1)), ""));
        }

        //      final int[] orderedVectors = IntStream.range(0, 10).mapToObj(x -> x)
        //        .sorted(Comparator.comparing(x -> -decomposition.getRealEigenvalue(x))).mapToInt(x -> x).toArray();
        //      IntStream.range(0, orderedVectors.length)
        //        .mapToObj(i -> {
        //            //double realEigenvalue = decomposition.getRealEigenvalue(orderedVectors[i]);
        //            return decomposition.getEigenvector(orderedVectors[i]).toArray();
        //          }
        //        ).toArray(i -> new double[i][]);

        log.p(String.format(
                "<table><tr><th>Cosine Distance</th>%s</tr>%s</table>", Arrays.stream(sortedIndices).limit(10)
                        .mapToObj(col -> "<th>" + categories.get(col) + "</th>").reduce((a, b) -> a + b).get(),
                Arrays.stream(sortedIndices).limit(10).mapToObj(r -> {
                    return String.format("<tr><td>%s</td>%s</tr>", categories.get(r),
                            Arrays.stream(sortedIndices).limit(10).mapToObj(col -> {
                                Tensor l = vectors.get(categories.get(r));
                                Tensor r2 = vectors.get(categories.get(col));
                                return String.format("<td>%.4f</td>",
                                        (null == l || null == r2) ? 0 : Math.acos(l.dot(r2)));
                            }).reduce((a, b) -> a + b).get());
                }).reduce((a, b) -> a + b).orElse("")));
    });

    log.setFrontMatterProperty("status", "OK");
}

From source file:eionet.cr.web.action.admin.staging.AvailableFilesActionBean.java

/**
 * Event that is triggered when the user has chosen to extract a particular archive file.
 *
 * @return the resulting resolution
 * @throws IOException
 */
public Resolution extract() throws IOException {

    String fileName = getContext().getRequestParameter("extract");
    if (StringUtils.isBlank(fileName)) {
        addGlobalValidationError("No file selected!");
        // nothing to extract from, so bail out early
        return new RedirectResolution(getClass());
    }

    File file = new File(FileDownloader.FILES_DIR, fileName);
    if (!file.exists() || !file.isFile()) {
        addGlobalValidationError("Found no such file to extract from: " + fileName);
        return new RedirectResolution(getClass());
    }

    if (extractEntries == null || extractEntries.isEmpty()) {
        addSystemMessage("No entries were selected for extraction!");
    } else {
        LinkedHashMap<String, String> entryToNewName = new LinkedHashMap<String, String>();
        for (String entry : extractEntries) {
            String newName = getContext().getRequestParameter(DigestUtils.md5Hex(entry) + "_newName");
            if (StringUtils.isNotBlank(newName)) {
                entryToNewName.put(entry, newName);
            } else {
                entryToNewName.put(entry, entry);
            }
        }
        CompressUtil.extract(file, entryToNewName);

        String msg = "Selected entries were extracted to the following files:";
        for (String newName : entryToNewName.values()) {
            msg += "\n" + newName;
        }
        addSystemMessage(msg);
    }

    return new RedirectResolution(getClass());
}
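
Since entryToNewName.values() is a Collection<String>, the message-building loop above could equally use String.join (a hypothetical simplification, not part of the original bean):

// values() feeds straight into String.join via its Iterable view
String msg = "Selected entries were extracted to the following files:\n"
        + String.join("\n", entryToNewName.values());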

From source file:org.kutkaitis.timetable2.timetable.MonteCarlo.java

private boolean isOneGroupInOneClassroom(Group group, int lectureNumber,
        LinkedHashMap<String, LinkedHashMap> dayTimeTable) {
    boolean oneLectureInOneRoom = true;
    if (group != null) {
        ClassRoom groupsRoom = group.getClassRoom();
        String classRoomNumber = groupsRoom.getRoomNumber();
        //            System.out.println("Group to add: " + group.getGroupName());
        Collection<LinkedHashMap> teachersTimeTables = dayTimeTable.values();
        for (LinkedHashMap<String, String> teachersTimeTable : teachersTimeTables) {
            if (teachersTimeTable.isEmpty()) {
                oneLectureInOneRoom = true;
                continue;
            }
            String groupNameToSplit = teachersTimeTable.get(String.valueOf(lectureNumber));
            if (groupNameToSplit == null) {
                oneLectureInOneRoom = true;
                continue;
            }
            String[] splittedGroupNames = groupNameToSplit.split(":");
            String groupName = splittedGroupNames[1].trim();
            Group groupToCheck = studentsMockDataFiller.getGroups().get(groupName);
            boolean roomBusy = true;
            if (StringUtils.equals(groupName, "-----")) {
                roomBusy = false;
            }

            if (groupToCheck != null) {
                //                    System.out.println("Group to check: " + groupToCheck.getGroupName());
                roomBusy = StringUtils.equals(classRoomNumber, groupToCheck.getClassRoom().getRoomNumber());
                //                    System.out.println("freeRoom: " + roomBusy);
            }
            if (!roomBusy) {
                oneLectureInOneRoom = true;
            } else {
                oneLectureInOneRoom = false;
                return oneLectureInOneRoom;
            }
        }
    } else {
        oneLectureInOneRoom = false;

    }
    return oneLectureInOneRoom;
}

From source file:com.textocat.textokit.eval.GoldStandardBasedEvaluation.java

private void evaluate(CAS goldCas, CAS sysCas) {
    FSIterator<AnnotationFS> goldAnnoIter = annotationExtractor.extract(goldCas);
    Set<AnnotationFS> goldProcessed = new HashSet<AnnotationFS>();
    // system annotations that exactly match a gold one
    Set<AnnotationFS> sysMatched = newHashSet();
    // matches
    LinkedHashMap<AnnotationFS, MatchInfo> matchesMap = newLinkedHashMap();
    while (goldAnnoIter.hasNext()) {
        AnnotationFS goldAnno = goldAnnoIter.next();
        if (goldProcessed.contains(goldAnno)) {
            continue;
        }
        MatchInfo mi = new MatchInfo();
        matchesMap.put(goldAnno, mi);

        Set<AnnotationFS> candidates = newLinkedHashSet(matchingStrategy.searchCandidates(goldAnno));

        candidates.removeAll(sysMatched);
        AnnotationFS exactSys = matchingStrategy.searchExactMatch(goldAnno, candidates);
        if (exactSys != null) {
            // sanity check
            assert candidates.contains(exactSys);
            mi.exact = exactSys;
            sysMatched.add(exactSys);
        }
        mi.partialSet.addAll(candidates);

        goldProcessed.add(goldAnno);
    }

    // filter partials that match a next gold
    for (MatchInfo mi : matchesMap.values()) {
        mi.partialSet.removeAll(sysMatched);
    }

    // report for each gold anno
    for (AnnotationFS goldAnno : matchesMap.keySet()) {
        // assert order declared in EvaluationListener javadoc
        MatchInfo mi = matchesMap.get(goldAnno);
        boolean matchedExactly = mi.exact != null;
        if (matchedExactly) {
            evalCtx.reportExactMatch(goldAnno, mi.exact);
        }
        for (AnnotationFS partialSys : mi.partialSet) {
            evalCtx.reportPartialMatch(goldAnno, partialSys);
        }
        if (!matchedExactly) {
            evalCtx.reportMissing(goldAnno);
        }
    }

    // report spurious (false positives)
    FSIterator<AnnotationFS> sysAnnoIter = annotationExtractor.extract(sysCas);
    while (sysAnnoIter.hasNext()) {
        AnnotationFS sysAnno = sysAnnoIter.next();
        if (!sysMatched.contains(sysAnno)) {
            evalCtx.reportSpurious(sysAnno);
        }
    }
}

From source file:com.amalto.workbench.utils.XSDAnnotationsStructure.java

public String getFormatForeignKeyInfo() {
    LinkedHashMap<String, String> appInfos = getAppInfos("X_ForeignKeyInfoFormat");//$NON-NLS-1$

    Collection<String> values = appInfos.values();
    if (values.size() > 0) {
        return values.iterator().next();
    }

    return "";
}
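
The idiom above -- values().iterator().next() to read the first value -- is reliable precisely because LinkedHashMap iterates in insertion order. A minimal sketch with made-up sample data:

LinkedHashMap<String, String> appInfos = new LinkedHashMap<>();
appInfos.put("formatA", "first");  // hypothetical entries
appInfos.put("formatB", "second");

// values() follows insertion order, so iterator().next() is the
// value of the first entry ever put into the map.
String first = appInfos.isEmpty() ? "" : appInfos.values().iterator().next();
System.out.println(first); // first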

From source file:pt.lsts.neptus.util.logdownload.LogsDownloaderWorkerActions.java

private void orderAndFilterOutTheActiveLog(LinkedHashMap<FTPFile, String> retList) {
    if (retList.size() > 0) {
        String[] ordList = retList.values().toArray(new String[retList.size()]);
        Arrays.sort(ordList);
        String activeLogName = ordList[ordList.length - 1];
        for (FTPFile fFile : retList.keySet().toArray(new FTPFile[retList.size()])) {
            if (retList.get(fFile).equals(activeLogName)) {
                retList.remove(fFile);
                break;
            }
        }
    }
}
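
Note the defensive toArray copies: removing from the map while iterating a live keySet() or values() view would throw ConcurrentModificationException, so both views are snapshotted into arrays first. A minimal standalone sketch of the same idiom:

LinkedHashMap<String, String> logs = new LinkedHashMap<>();
logs.put("dirA", "20240101_log");
logs.put("dirB", "20240301_log"); // lexicographically last, i.e. "active"

String[] ordList = logs.values().toArray(new String[0]);
java.util.Arrays.sort(ordList);
String active = ordList[ordList.length - 1];

// Iterate over a snapshot of the keys so the removal is safe.
for (String key : logs.keySet().toArray(new String[0])) {
    if (logs.get(key).equals(active)) {
        logs.remove(key);
        break;
    }
}
System.out.println(logs); // {dirA=20240101_log}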

From source file:nzilbb.csv.CsvDeserializer.java

/**
 * Loads the serialized form of the graph, using the given set of named streams.
 * @param streams A list of named streams that contain all the
 *  transcription/annotation data required, and possibly (a) stream(s) for the media annotated.
 * @param schema The layer schema, defining layers and the way they interrelate.
 * @return A list of parameters that require setting before {@link IDeserializer#deserialize()}
 * can be invoked. This may be an empty list, and may include parameters with the value already
 * set to a workable default. If there are parameters, and user interaction is possible, then
 * the user may be presented with an interface for setting/confirming these parameters, before
 * they are then passed to {@link IDeserializer#setParameters(ParameterSet)}.
 * @throws SerializationException If the graph could not be loaded.
 * @throws IOException On IO error.
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
public ParameterSet load(NamedStream[] streams, Schema schema) throws SerializationException, IOException {
    // take the first stream, ignore all others.
    NamedStream csv = Utility.FindSingleStream(streams, ".csv", "text/csv");
    if (csv == null)
        throw new SerializationException("No CSV stream found");
    setName(csv.getName());

    setSchema(schema);

    // create a list of layers we need and possible matching layer names
    LinkedHashMap<Parameter, List<String>> layerToPossibilities = new LinkedHashMap<Parameter, List<String>>();
    HashMap<String, LinkedHashMap<String, Layer>> layerToCandidates = new HashMap<String, LinkedHashMap<String, Layer>>();

    LinkedHashMap<String, Layer> metadataLayers = new LinkedHashMap<String, Layer>();
    for (Layer layer : schema.getRoot().getChildren().values()) {
        if (layer.getAlignment() == Constants.ALIGNMENT_NONE) {
            metadataLayers.put(layer.getId(), layer);
        }
    } // next turn child layer

    // look for person attributes
    for (Layer layer : schema.getParticipantLayer().getChildren().values()) {
        if (layer.getAlignment() == Constants.ALIGNMENT_NONE) {
            metadataLayers.put(layer.getId(), layer);
        }
    } // next turn child layer
    LinkedHashMap<String, Layer> utteranceAndMetadataLayers = new LinkedHashMap<String, Layer>(metadataLayers);
    utteranceAndMetadataLayers.put(getUtteranceLayer().getId(), getUtteranceLayer());
    LinkedHashMap<String, Layer> whoAndMetadataLayers = new LinkedHashMap<String, Layer>(metadataLayers);
    whoAndMetadataLayers.put(getParticipantLayer().getId(), getParticipantLayer());

    // read the header line

    setParser(CSVParser.parse(csv.getStream(), java.nio.charset.Charset.forName("UTF-8"),
            CSVFormat.EXCEL.withHeader()));
    setHeaderMap(parser.getHeaderMap());
    Vector<String> possibleIDHeaders = new Vector<String>();
    Vector<String> possibleUtteranceHeaders = new Vector<String>();
    Vector<String> possibleParticipantHeaders = new Vector<String>();
    for (String header : getHeaderMap().keySet()) {
        if (header.trim().length() == 0)
            continue;
        Vector<String> possibleMatches = new Vector<String>();
        possibleMatches.add("transcript" + header);
        possibleMatches.add("participant" + header);
        possibleMatches.add("speaker" + header);
        possibleMatches.add(header);

        // special cases
        if (header.equalsIgnoreCase("id") || header.equalsIgnoreCase("transcript")) {
            possibleIDHeaders.add(header);
        } else if (header.equalsIgnoreCase("text") || header.equalsIgnoreCase("document")) {
            possibleUtteranceHeaders.add(header);
        } else if (header.equalsIgnoreCase("name") || header.equalsIgnoreCase("participant")
                || header.equalsIgnoreCase("participantid")) {
            possibleParticipantHeaders.add(header);
        }

        layerToPossibilities.put(new Parameter("header_" + getHeaderMap().get(header), Layer.class, header),
                possibleMatches);
        layerToCandidates.put("header_" + getHeaderMap().get(header), metadataLayers);
    } // next header

    ParameterSet parameters = new ParameterSet();

    // add utterance/participant parameters
    int defaultUtterancePossibilityIndex = 0;

    // if there are no obvious ID column possibilities...
    Parameter idColumn = new Parameter("id", String.class, "ID Column", "Column containing the ID of the text.",
            false);
    if (possibleIDHeaders.size() == 0) { // ...include all columns
        possibleIDHeaders.addAll(getHeaderMap().keySet());
    } else {
        idColumn.setValue(possibleIDHeaders.firstElement());
    }
    idColumn.setPossibleValues(possibleIDHeaders);
    parameters.addParameter(idColumn);

    // if there are no obvious participant column possibilities...      
    if (possibleParticipantHeaders.size() == 0) { // ...include all columns
        possibleParticipantHeaders.addAll(getHeaderMap().keySet());
        // default participant column will be the first column,
        // so default utterance should be the second (if we didn't find obvious possible text column)
        if (possibleParticipantHeaders.size() > 1) // but only if there's more than one column
        {
            defaultUtterancePossibilityIndex = 1;
        }
    }
    Parameter participantColumn = new Parameter("who", "Participant Column",
            "Column containing the ID of the author of the text.", true,
            possibleParticipantHeaders.firstElement());
    participantColumn.setPossibleValues(possibleParticipantHeaders);
    parameters.addParameter(participantColumn);

    // if there are no obvious text column possibilities...
    if (possibleUtteranceHeaders.size() == 0) { // ...include all columns
        possibleUtteranceHeaders.addAll(getHeaderMap().keySet());
    } else {
        // we found a possible text column, so run with it regardless of whether we also found
        // a possible participant column
        defaultUtterancePossibilityIndex = 0;
    }
    Parameter utteranceColumn = new Parameter("text", "Text Column", "Column containing the transcript text.",
            true, possibleUtteranceHeaders.elementAt(defaultUtterancePossibilityIndex));
    utteranceColumn.setPossibleValues(possibleUtteranceHeaders);
    parameters.addParameter(utteranceColumn);

    // add column-mapping parameters, and set possible/default values
    for (Parameter p : layerToPossibilities.keySet()) {
        List<String> possibleNames = layerToPossibilities.get(p);
        LinkedHashMap<String, Layer> candidateLayers = layerToCandidates.get(p.getName());
        parameters.addParameter(p);
        if (p.getValue() == null && candidateLayers != null && possibleNames != null) {
            p.setValue(Utility.FindLayerById(candidateLayers, possibleNames));
        }
        if (p.getPossibleValues() == null && candidateLayers != null) {
            p.setPossibleValues(candidateLayers.values());
        }
    }
    return parameters;
}

From source file:gate.util.reporting.PRTimeReporter.java

/**
 * Sorts a LinkedHashMap by its values (natural descending order), keeping
 * duplicates as they are.
 *
 * @param passedMap
 *          An Object of type LinkedHashMap to be sorted by its values.
 *
 * @return An Object containing the sorted LinkedHashMap.
 */
private LinkedHashMap<String, String> sortHashMapByValues(LinkedHashMap<String, String> passedMap) {
    List<String> mapKeys = new ArrayList<String>(passedMap.keySet());
    List<String> mapValues = new ArrayList<String>(passedMap.values());

    Collections.sort(mapValues, new ValueComparator());
    Collections.sort(mapKeys);
    Collections.reverse(mapValues);
    LinkedHashMap<String, String> sortedMap = new LinkedHashMap<String, String>();

    Iterator<String> valueIt = mapValues.iterator();
    while (valueIt.hasNext()) {
        String val = valueIt.next();
        Iterator<String> keyIt = mapKeys.iterator();
        while (keyIt.hasNext()) {
            String key = keyIt.next();
            // both values are already Strings; compare them directly
            if (passedMap.get(key).equals(val)) {
                passedMap.remove(key);
                mapKeys.remove(key);
                sortedMap.put(key, val);
                break;
            }
        }
    }
    return sortedMap;
}
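
A hypothetical call, to make the contract concrete (assuming ValueComparator orders these numeric strings ascending before the reverse):

LinkedHashMap<String, String> timings = new LinkedHashMap<>();
timings.put("tokeniser", "12");
timings.put("gazetteer", "45");
timings.put("splitter", "12");

// sortHashMapByValues(timings) would return
// {gazetteer=45, splitter=12, tokeniser=12}
// -- descending by value, with ties resolved in sorted key order.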

From source file:com.opengamma.analytics.financial.interestrate.MultipleYieldCurveFinderDataBundle.java

/**
 * Create a MultipleYieldCurveFinderDataBundle where the number of nodes and the list of curve names correspond to all the curves (known curves and curves still to be calibrated).
 * This constructor is used to compute the extended Jacobian matrix when curves are calibrated in several blocks.
 * @param derivatives The list of instruments used in the calibration.
 * @param marketValues The market value of the instruments.
 * @param knownCurves The curves already calibrated.
 * @param unknownCurveNodePoints The node points of the new curves to calibrate.
 * @param unknownCurveInterpolators The interpolators of the new curves to calibrate.
 * @param useFiniteDifferenceByDefault Flag for using the finite difference computation of the Jacobian.
 * @param fxMatrix The FX Matrix with the required exchange rates.
 * @return The data bundle.
 */
public static MultipleYieldCurveFinderDataBundle withAllCurves(final List<InstrumentDerivative> derivatives,
        final double[] marketValues, final YieldCurveBundle knownCurves,
        final LinkedHashMap<String, double[]> unknownCurveNodePoints,
        final LinkedHashMap<String, Interpolator1D> unknownCurveInterpolators,
        final boolean useFiniteDifferenceByDefault, final FXMatrix fxMatrix) {
    // Argument checker: start
    ArgumentChecker.notNull(derivatives, "derivatives");
    ArgumentChecker.noNulls(derivatives, "derivatives");
    ArgumentChecker.notNull(marketValues, "market values null");
    ArgumentChecker.notNull(unknownCurveNodePoints, "unknown curve node points");
    ArgumentChecker.notNull(unknownCurveInterpolators, "unknown curve interpolators");
    ArgumentChecker.notEmpty(unknownCurveNodePoints, "unknown curve node points");
    ArgumentChecker.notEmpty(unknownCurveInterpolators, "unknown curve interpolators");
    ArgumentChecker.isTrue(derivatives.size() == marketValues.length,
            "marketValues wrong length; must be one par rate per derivative (have {} values for {} derivatives",
            marketValues.length, derivatives.size());
    ArgumentChecker.notNull(fxMatrix, "FX matrix");
    if (knownCurves != null) {
        for (final String name : knownCurves.getAllNames()) {
            if (unknownCurveInterpolators.containsKey(name)) {
                throw new IllegalArgumentException("Curve name in known set matches one to be solved for");
            }
        }
    }
    if (unknownCurveNodePoints.size() != unknownCurveInterpolators.size()) {
        throw new IllegalArgumentException("Number of unknown curves not the same as curve interpolators");
    }
    // Argument checker: end
    int nbNodes = 0;
    if (knownCurves != null) {
        for (final String name : knownCurves.getAllNames()) {
            nbNodes += knownCurves.getCurve(name).getNumberOfParameters();
        }
    }
    for (final double[] nodes : unknownCurveNodePoints.values()) { // Nodes from new curves
        nbNodes += nodes.length;
    }
    final List<String> names = new ArrayList<>();
    if (knownCurves != null) {
        names.addAll(knownCurves.getAllNames()); // Names from existing curves
    }
    final Iterator<Entry<String, double[]>> nodePointsIterator = unknownCurveNodePoints.entrySet().iterator();
    final Iterator<Entry<String, Interpolator1D>> unknownCurvesIterator = unknownCurveInterpolators.entrySet()
            .iterator();
    while (nodePointsIterator.hasNext()) { // Names from new curves
        final Entry<String, double[]> entry1 = nodePointsIterator.next();
        final Entry<String, Interpolator1D> entry2 = unknownCurvesIterator.next();
        final String name1 = entry1.getKey();
        if (!name1.equals(entry2.getKey())) {
            throw new IllegalArgumentException("Names must be the same");
        }
        ArgumentChecker.notNull(entry1.getValue(), "curve node points for " + name1);
        ArgumentChecker.notNull(entry2.getValue(), "interpolator for " + name1);
        names.add(name1);
    }
    return new MultipleYieldCurveFinderDataBundle(derivatives, marketValues, knownCurves,
            unknownCurveNodePoints, unknownCurveInterpolators, useFiniteDifferenceByDefault, fxMatrix, nbNodes,
            names);
}

From source file:com.codesourcery.internal.installer.InstallManager.java

/**
 * Returns the wizard pages from all install modules.  This method ensures
 * that wizard pages with the same name are not returned.
 *
 * @return Wizard pages
 */
protected IInstallWizardPage[] getModulePages() {
    // Filter duplicated named pages, maintain order
    LinkedHashMap<String, IInstallWizardPage> pages = new LinkedHashMap<String, IInstallWizardPage>();
    for (IInstallModule module : getModules()) {
        IInstallWizardPage[] modulePages = module.getInstallPages(getInstallMode());
        if (modulePages != null) {
            for (IInstallWizardPage modulePage : modulePages) {
                pages.put(modulePage.getName(), modulePage);
            }
        }
    }
    return pages.values().toArray(new IInstallWizardPage[pages.size()]);
}
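
The same last-write-wins, order-preserving de-duplication works for any keyed items; a small self-contained sketch:

import java.util.LinkedHashMap;

public class DedupeDemo {
    public static void main(String[] args) {
        String[] pageNames = { "welcome", "license", "install", "license" };
        LinkedHashMap<String, String> pages = new LinkedHashMap<>();
        for (String name : pageNames) {
            // put() on an existing key replaces the value but keeps the
            // key's original position, so first-seen order is preserved.
            pages.put(name, name.toUpperCase());
        }
        // values() yields one element per distinct name, in first-seen order.
        System.out.println(pages.values()); // [WELCOME, LICENSE, INSTALL]
    }
}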