Example usage for java.util Map getOrDefault

List of usage examples for java.util Map getOrDefault

Introduction

On this page you can find example usages of java.util.Map#getOrDefault.

Prototype

default V getOrDefault(Object key, V defaultValue) 

Document

Returns the value to which the specified key is mapped, or defaultValue if this map contains no mapping for the key.
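
For illustration only, here is a minimal, self-contained sketch of that contract (the class name and map contents are invented for this example). Note that the default value is returned only when the key has no mapping at all; a key explicitly mapped to null still returns null, and the map itself is never modified.

import java.util.HashMap;
import java.util.Map;

public class GetOrDefaultDemo {
    public static void main(String[] args) {
        Map<String, Integer> counts = new HashMap<>();
        counts.put("apple", 3);
        counts.put("pear", null); // an explicit null mapping

        System.out.println(counts.getOrDefault("apple", 0));  // 3    (mapping exists)
        System.out.println(counts.getOrDefault("banana", 0)); // 0    (no mapping, default returned)
        System.out.println(counts.getOrDefault("pear", 0));   // null (key is mapped, even though to null)

        // getOrDefault never inserts the default into the map
        System.out.println(counts.containsKey("banana"));     // false
    }
}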

Usage

From source file:org.apache.nifi.lookup.RestLookupService.java

@Override
public Optional<Record> lookup(Map<String, Object> coordinates, Map<String, String> context)
        throws LookupFailureException {
    final String endpoint = determineEndpoint(coordinates);
    final String mimeType = (String) coordinates.get(MIME_TYPE_KEY);
    final String method = ((String) coordinates.getOrDefault(METHOD_KEY, "get")).trim().toLowerCase();
    final String body = (String) coordinates.get(BODY_KEY);

    validateVerb(method);

    if (StringUtils.isBlank(body)) {
        if (method.equals("post") || method.equals("put")) {
            throw new LookupFailureException(String.format(
                    "Used HTTP verb %s without specifying the %s key to provide a payload.", method, BODY_KEY));
        }
    } else {
        if (StringUtils.isBlank(mimeType)) {
            throw new LookupFailureException(
                    String.format("Request body is specified without its %s.", MIME_TYPE_KEY));
        }
    }

    Request request = buildRequest(mimeType, method, body, endpoint);
    try {
        Response response = executeRequest(request);

        if (getLogger().isDebugEnabled()) {
            getLogger().debug("Response code {} was returned for coordinate {}",
                    new Object[] { response.code(), coordinates });
        }

        final ResponseBody responseBody = response.body();
        if (responseBody == null) {
            return Optional.empty();
        }

        InputStream is = responseBody.byteStream();
        Record record = handleResponse(is, context);

        return Optional.ofNullable(record);
    } catch (Exception e) {
        getLogger().error("Could not execute lookup.", e);
        throw new LookupFailureException(e);
    }
}

From source file:org.apache.gobblin.compaction.verify.CompactionAuditCountVerifier.java

/**
 * Compare record count between {@link CompactionAuditCountVerifier#gobblinTier} and {@link CompactionAuditCountVerifier#referenceTiers}.
 * @param datasetName the name of the dataset
 * @param countsByTier the tier-to-count mapping retrieved by {@link AuditCountClient#fetch(String, long, long)}
 * @param referenceTier the tier we want to compare against
 * @return true if (gobblin/reference) >= threshold, otherwise false
 */
private Result passed(String datasetName, Map<String, Long> countsByTier, String referenceTier) {
    if (!countsByTier.containsKey(this.gobblinTier)) {
        log.info("Missing entry for dataset: " + datasetName + " in gobblin tier: " + this.gobblinTier
                + "; setting count to 0.");
    }
    if (!countsByTier.containsKey(referenceTier)) {
        log.info("Missing entry for dataset: " + datasetName + " in reference tier: " + referenceTier
                + "; setting count to 0.");
    }

    long refCount = countsByTier.getOrDefault(referenceTier, 0L);
    long gobblinCount = countsByTier.getOrDefault(this.gobblinTier, 0L);

    if (refCount == 0) {
        return new Result(true, "");
    }

    if ((double) gobblinCount / (double) refCount < this.threshold) {
        return new Result(false,
                String.format("%s failed for %s : gobblin count = %d, %s count = %d (%f < threshold %f)",
                        this.getName(), datasetName, gobblinCount, referenceTier, refCount,
                        (double) gobblinCount / (double) refCount, this.threshold));
    }
    return new Result(true, "");
}

From source file:com.netflix.spinnaker.halyard.deploy.spinnaker.v1.service.distributed.kubernetes.v2.KubernetesV2Service.java

default String buildContainer(String name, AccountDeploymentDetails<KubernetesAccount> details,
        ServiceSettings settings, List<ConfigSource> configSources, Map<String, String> env) {
    List<String> volumeMounts = configSources.stream().map(c -> {
        TemplatedResource volume = new JinjaJarResource("/kubernetes/manifests/volumeMount.yml");
        volume.addBinding("name", c.getId());
        volume.addBinding("mountPath", c.getMountPath());
        return volume.toString();
    }).collect(Collectors.toList());

    volumeMounts.addAll(settings.getKubernetes().getVolumes().stream().map(c -> {
        TemplatedResource volume = new JinjaJarResource("/kubernetes/manifests/volumeMount.yml");
        volume.addBinding("name", c.getId());
        volume.addBinding("mountPath", c.getMountPath());
        return volume.toString();
    }).collect(Collectors.toList()));

    TemplatedResource probe;
    if (StringUtils.isNotEmpty(settings.getHealthEndpoint())) {
        probe = new JinjaJarResource("/kubernetes/manifests/execReadinessProbe.yml");
        probe.addBinding("command", getReadinessExecCommand(settings));
    } else {
        probe = new JinjaJarResource("/kubernetes/manifests/tcpSocketReadinessProbe.yml");
        probe.addBinding("port", settings.getPort());
    }

    String lifecycle = "{}";
    List<String> preStopCommand = getPreStopCommand(settings);
    if (!preStopCommand.isEmpty()) {
        TemplatedResource lifecycleResource = new JinjaJarResource("/kubernetes/manifests/lifecycle.yml");
        lifecycleResource.addBinding("command", getPreStopCommand(settings));
        lifecycle = lifecycleResource.toString();
    }

    CustomSizing customSizing = details.getDeploymentConfiguration().getDeploymentEnvironment()
            .getCustomSizing();
    TemplatedResource resources = new JinjaJarResource("/kubernetes/manifests/resources.yml");
    if (customSizing != null) {
        // Look for container specific sizing otherwise fall back to service sizing
        Map componentSizing = customSizing.getOrDefault(name,
                customSizing.getOrDefault(getService().getServiceName(), new HashMap()));
        resources.addBinding("requests", componentSizing.getOrDefault("requests", new HashMap()));
        resources.addBinding("limits", componentSizing.getOrDefault("limits", new HashMap()));
    }

    TemplatedResource container = new JinjaJarResource("/kubernetes/manifests/container.yml");
    container.addBinding("name", name);
    container.addBinding("imageId", settings.getArtifactId());
    TemplatedResource port = new JinjaJarResource("/kubernetes/manifests/port.yml");
    port.addBinding("port", settings.getPort());
    container.addBinding("port", port.toString());
    container.addBinding("volumeMounts", volumeMounts);
    container.addBinding("probe", probe.toString());
    container.addBinding("lifecycle", lifecycle);
    container.addBinding("env", env);
    container.addBinding("resources", resources.toString());

    return container.toString();
}

From source file:org.apache.storm.loadgen.CaptureLoad.java

static TopologyLoadConf captureTopology(Nimbus.Iface client, TopologySummary topologySummary) throws Exception {
    String topologyName = topologySummary.get_name();
    LOG.info("Capturing {}...", topologyName);
    String topologyId = topologySummary.get_id();
    TopologyInfo info = client.getTopologyInfo(topologyId);
    TopologyPageInfo tpinfo = client.getTopologyPageInfo(topologyId, ":all-time", false);
    @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance")
    StormTopology topo = client.getUserTopology(topologyId);
    //Done capturing topology information...

    Map<String, Object> savedTopoConf = new HashMap<>();
    Map<String, Object> topoConf = (Map<String, Object>) JSONValue.parse(client.getTopologyConf(topologyId));
    for (String key : TopologyLoadConf.IMPORTANT_CONF_KEYS) {
        Object o = topoConf.get(key);
        if (o != null) {
            savedTopoConf.put(key, o);
            LOG.info("with config {}: {}", key, o);
        }
    }
    //Let's use the number of actually scheduled workers as a way to bridge RAS and non-RAS
    int numWorkers = tpinfo.get_num_workers();
    if (savedTopoConf.containsKey(Config.TOPOLOGY_WORKERS)) {
        numWorkers = Math.max(numWorkers, ((Number) savedTopoConf.get(Config.TOPOLOGY_WORKERS)).intValue());
    }
    savedTopoConf.put(Config.TOPOLOGY_WORKERS, numWorkers);

    Map<String, LoadCompConf.Builder> boltBuilders = new HashMap<>();
    Map<String, LoadCompConf.Builder> spoutBuilders = new HashMap<>();
    List<InputStream.Builder> inputStreams = new ArrayList<>();
    Map<GlobalStreamId, OutputStream.Builder> outStreams = new HashMap<>();

    //Bolts
    if (topo.get_bolts() != null) {
        for (Map.Entry<String, Bolt> boltSpec : topo.get_bolts().entrySet()) {
            String boltComp = boltSpec.getKey();
            LOG.info("Found bolt {}...", boltComp);
            Bolt bolt = boltSpec.getValue();
            ComponentCommon common = bolt.get_common();
            Map<GlobalStreamId, Grouping> inputs = common.get_inputs();
            if (inputs != null) {
                for (Map.Entry<GlobalStreamId, Grouping> input : inputs.entrySet()) {
                    GlobalStreamId id = input.getKey();
                    LOG.info("with input {}...", id);
                    Grouping grouping = input.getValue();
                    InputStream.Builder builder = new InputStream.Builder().withId(id.get_streamId())
                            .withFromComponent(id.get_componentId()).withToComponent(boltComp)
                            .withGroupingType(grouping);
                    inputStreams.add(builder);
                }
            }
            Map<String, StreamInfo> outputs = common.get_streams();
            if (outputs != null) {
                for (String name : outputs.keySet()) {
                    GlobalStreamId id = new GlobalStreamId(boltComp, name);
                    LOG.info("and output {}...", id);
                    OutputStream.Builder builder = new OutputStream.Builder().withId(name);
                    outStreams.put(id, builder);
                }
            }
            LoadCompConf.Builder builder = new LoadCompConf.Builder()
                    .withParallelism(common.get_parallelism_hint()).withId(boltComp);
            boltBuilders.put(boltComp, builder);
        }

        Map<String, Map<String, Double>> boltResources = getBoltsResources(topo, topoConf);
        for (Map.Entry<String, Map<String, Double>> entry : boltResources.entrySet()) {
            LoadCompConf.Builder bd = boltBuilders.get(entry.getKey());
            if (bd != null) {
                Map<String, Double> resources = entry.getValue();
                Double cpu = resources.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT);
                if (cpu != null) {
                    bd.withCpuLoad(cpu);
                }
                Double mem = resources.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB);
                if (mem != null) {
                    bd.withMemoryLoad(mem);
                }
            }
        }
    }

    //Spouts
    if (topo.get_spouts() != null) {
        for (Map.Entry<String, SpoutSpec> spoutSpec : topo.get_spouts().entrySet()) {
            String spoutComp = spoutSpec.getKey();
            LOG.info("Found Spout {}...", spoutComp);
            SpoutSpec spout = spoutSpec.getValue();
            ComponentCommon common = spout.get_common();

            Map<String, StreamInfo> outputs = common.get_streams();
            if (outputs != null) {
                for (String name : outputs.keySet()) {
                    GlobalStreamId id = new GlobalStreamId(spoutComp, name);
                    LOG.info("with output {}...", id);
                    OutputStream.Builder builder = new OutputStream.Builder().withId(name);
                    outStreams.put(id, builder);
                }
            }
            LoadCompConf.Builder builder = new LoadCompConf.Builder()
                    .withParallelism(common.get_parallelism_hint()).withId(spoutComp);
            spoutBuilders.put(spoutComp, builder);
        }

        Map<String, Map<String, Double>> spoutResources = getSpoutsResources(topo, topoConf);
        for (Map.Entry<String, Map<String, Double>> entry : spoutResources.entrySet()) {
            LoadCompConf.Builder sd = spoutBuilders.get(entry.getKey());
            if (sd != null) {
                Map<String, Double> resources = entry.getValue();
                Double cpu = resources.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT);
                if (cpu != null) {
                    sd.withCpuLoad(cpu);
                }
                Double mem = resources.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB);
                if (mem != null) {
                    sd.withMemoryLoad(mem);
                }
            }
        }
    }

    //Stats...
    Map<String, List<ExecutorSummary>> byComponent = new HashMap<>();
    for (ExecutorSummary executor : info.get_executors()) {
        String component = executor.get_component_id();
        List<ExecutorSummary> list = byComponent.get(component);
        if (list == null) {
            list = new ArrayList<>();
            byComponent.put(component, list);
        }
        list.add(executor);
    }

    List<InputStream> streams = new ArrayList<>(inputStreams.size());
    //Compute the stats for the different input streams
    for (InputStream.Builder builder : inputStreams) {
        GlobalStreamId streamId = new GlobalStreamId(builder.getFromComponent(), builder.getId());
        List<ExecutorSummary> summaries = byComponent.get(builder.getToComponent());
        //Execute and process latency...
        builder.withProcessTime(
                new NormalDistStats(extractBoltValues(summaries, streamId, BoltStats::get_process_ms_avg)));
        builder.withExecTime(
                new NormalDistStats(extractBoltValues(summaries, streamId, BoltStats::get_execute_ms_avg)));
        //InputStream is done
        streams.add(builder.build());
    }

    //There is a bug in some versions that returns 0 for the uptime.
    // To work around it, we get the uptime an alternative (working) way.
    Map<String, Integer> workerToUptime = new HashMap<>();
    for (WorkerSummary ws : tpinfo.get_workers()) {
        workerToUptime.put(ws.get_supervisor_id() + ":" + ws.get_port(), ws.get_uptime_secs());
    }
    LOG.debug("WORKER TO UPTIME {}", workerToUptime);

    for (Map.Entry<GlobalStreamId, OutputStream.Builder> entry : outStreams.entrySet()) {
        OutputStream.Builder builder = entry.getValue();
        GlobalStreamId id = entry.getKey();
        List<Double> emittedRate = new ArrayList<>();
        List<ExecutorSummary> summaries = byComponent.get(id.get_componentId());
        if (summaries != null) {
            for (ExecutorSummary summary : summaries) {
                if (summary.is_set_stats()) {
                    int uptime = summary.get_uptime_secs();
                    LOG.debug("UPTIME {}", uptime);
                    if (uptime <= 0) {
                        //Likely it is because of a bug, so try to get it another way
                        String key = summary.get_host() + ":" + summary.get_port();
                        uptime = workerToUptime.getOrDefault(key, 1);
                        LOG.debug("Getting uptime for worker {}, {}", key, uptime);
                    }
                    for (Map.Entry<String, Map<String, Long>> statEntry : summary.get_stats().get_emitted()
                            .entrySet()) {
                        String timeWindow = statEntry.getKey();
                        long timeSecs = uptime;
                        try {
                            timeSecs = Long.valueOf(timeWindow);
                        } catch (NumberFormatException e) {
                            //Ignored...
                        }
                        timeSecs = Math.min(timeSecs, uptime);
                        Long count = statEntry.getValue().get(id.get_streamId());
                        if (count != null) {
                            LOG.debug("{} emitted {} for {} secs or {} tuples/sec", id, count, timeSecs,
                                    count.doubleValue() / timeSecs);
                            emittedRate.add(count.doubleValue() / timeSecs);
                        }
                    }
                }
            }
        }
        builder.withRate(new NormalDistStats(emittedRate));

        //The OutputStream is done
        LoadCompConf.Builder comp = boltBuilders.get(id.get_componentId());
        if (comp == null) {
            comp = spoutBuilders.get(id.get_componentId());
        }
        comp.withStream(builder.build());
    }

    List<LoadCompConf> spouts = spoutBuilders.values().stream().map((b) -> b.build())
            .collect(Collectors.toList());

    List<LoadCompConf> bolts = boltBuilders.values().stream().map((b) -> b.build())
            .collect(Collectors.toList());

    return new TopologyLoadConf(topologyName, savedTopoConf, spouts, bolts, streams);
}

From source file:io.anserini.rerank.lib.AxiomReranker.java

/**
 * Calculate the scores (weights) of each term that occurred in the reranking pool.
 * The process:
 * 1. For each query term, calculate its score for each term in the reranking pool. The score
 * is calculated as
 * <pre>
 * P(both occurs)*log{P(both occurs)/P(t1 occurs)/P(t2 occurs)}
 * + P(both not occurs)*log{P(both not occurs)/P(t1 not occurs)/P(t2 not occurs)}
 * + P(t1 occurs t2 not occurs)*log{P(t1 occurs t2 not occurs)/P(t1 occurs)/P(t2 not occurs)}
 * + P(t1 not occurs t2 occurs)*log{P(t1 not occurs t2 occurs)/P(t1 not occurs)/P(t2 occurs)}
 * </pre>
 * 2. For each query term the scores of every other term in the reranking pool are stored in a
 * PriorityQueue; only the top {@code K} are kept.
 * 3. Add the scores of the same term together and pick the top {@code M} ones.
 *
 * @param termInvertedList A Map of <term -> Set<docId>> where the Set of docIds is where the term occurs
 * @param context An instance of RerankerContext
 * @return Map<String, Double> Top terms and their weight scores in a HashMap
 */
private Map<String, Double> computeTermScore(Map<String, Set<Integer>> termInvertedList,
        RerankerContext<T> context) throws IOException {
    class ScoreComparator implements Comparator<Pair<String, Double>> {
        public int compare(Pair<String, Double> a, Pair<String, Double> b) {
            int cmp = Double.compare(b.getRight(), a.getRight());
            if (cmp == 0) {
                return a.getLeft().compareToIgnoreCase(b.getLeft());
            } else {
                return cmp;
            }
        }
    }

    // get collection statistics so that we can get idf later on.
    IndexReader reader;
    if (this.externalIndexPath != null) {
        Path indexPath = Paths.get(this.externalIndexPath);
        if (!Files.exists(indexPath) || !Files.isDirectory(indexPath) || !Files.isReadable(indexPath)) {
            throw new IllegalArgumentException(
                    this.externalIndexPath + " does not exist or is not a directory.");
        }
        reader = DirectoryReader.open(FSDirectory.open(indexPath));
    } else {
        IndexSearcher searcher = context.getIndexSearcher();
        reader = searcher.getIndexReader();
    }
    final long docCount = reader.numDocs() == -1 ? reader.maxDoc() : reader.numDocs();

    //calculate the Mutual Information between term with each query term
    List<String> queryTerms = context.getQueryTokens();
    Map<String, Integer> queryTermsCounts = new HashMap<>();
    for (String qt : queryTerms) {
        queryTermsCounts.put(qt, queryTermsCounts.getOrDefault(qt, 0) + 1);
    }

    Set<Integer> allDocIds = new HashSet<>();
    for (Set<Integer> s : termInvertedList.values()) {
        allDocIds.addAll(s);
    }
    int docIdsCount = allDocIds.size();

    // Each priority queue corresponds to a query term: The p-queue itself stores all terms
    // in the reranking pool and their reranking scores to the query term.
    List<PriorityQueue<Pair<String, Double>>> allTermScoresPQ = new ArrayList<>();
    for (Map.Entry<String, Integer> q : queryTermsCounts.entrySet()) {
        String queryTerm = q.getKey();
        long df = reader.docFreq(new Term(LuceneDocumentGenerator.FIELD_BODY, queryTerm));
        if (df == 0L) {
            continue;
        }
        float idf = (float) Math.log((1 + docCount) / df);
        int qtf = q.getValue();
        if (termInvertedList.containsKey(queryTerm)) {
            PriorityQueue<Pair<String, Double>> termScorePQ = new PriorityQueue<>(new ScoreComparator());
            double selfMI = computeMutualInformation(termInvertedList.get(queryTerm),
                    termInvertedList.get(queryTerm), docIdsCount);
            for (Map.Entry<String, Set<Integer>> termEntry : termInvertedList.entrySet()) {
                double score;
                if (termEntry.getKey().equals(queryTerm)) { // The mutual information to itself will always be 1
                    score = idf * qtf;
                } else {
                    double crossMI = computeMutualInformation(termInvertedList.get(queryTerm),
                            termEntry.getValue(), docIdsCount);
                    score = idf * beta * qtf * crossMI / selfMI;
                }
                termScorePQ.add(Pair.of(termEntry.getKey(), score));
            }
            allTermScoresPQ.add(termScorePQ);
        }
    }

    Map<String, Double> aggTermScores = new HashMap<>();
    for (PriorityQueue<Pair<String, Double>> termScores : allTermScoresPQ) {
        for (int i = 0; i < Math.min(termScores.size(), this.K); i++) {
            Pair<String, Double> termScore = termScores.poll();
            String term = termScore.getLeft();
            Double score = termScore.getRight();
            if (score - 0.0 > 1e-8) {
                aggTermScores.put(term, aggTermScores.getOrDefault(term, 0.0) + score);
            }
        }
    }
    PriorityQueue<Pair<String, Double>> termScoresPQ = new PriorityQueue<>(new ScoreComparator());
    for (Map.Entry<String, Double> termScore : aggTermScores.entrySet()) {
        termScoresPQ.add(Pair.of(termScore.getKey(), termScore.getValue() / queryTerms.size()));
    }
    Map<String, Double> resultTermScores = new HashMap<>();
    for (int i = 0; i < Math.min(termScoresPQ.size(), this.M); i++) {
        Pair<String, Double> termScore = termScoresPQ.poll();
        String term = termScore.getKey();
        double score = termScore.getValue();
        resultTermScores.put(term, score);
    }

    return resultTermScores;
}
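
The four-term formula quoted in the Javadoc above can be made concrete with a short sketch. This is not Anserini's actual computeMutualInformation (that method is not shown on this page); it is only an assumed illustration, estimating the probabilities from the two document-id sets over a reranking pool of docIdsCount documents and skipping zero-probability cells.

// Hypothetical sketch of the mutual-information formula from the Javadoc above;
// AxiomReranker's real computeMutualInformation(...) is not shown here and may differ.
private static double mutualInformationSketch(Set<Integer> docsT1, Set<Integer> docsT2, int docIdsCount) {
    long both = docsT1.stream().filter(docsT2::contains).count(); // t1 and t2 co-occur
    long onlyT1 = docsT1.size() - both;                           // t1 occurs, t2 does not
    long onlyT2 = docsT2.size() - both;                           // t2 occurs, t1 does not
    long neither = docIdsCount - docsT1.size() - docsT2.size() + both;

    double n = docIdsCount;
    double pT1 = docsT1.size() / n;
    double pT2 = docsT2.size() / n;

    return term(both / n, pT1 * pT2)
            + term(neither / n, (1 - pT1) * (1 - pT2))
            + term(onlyT1 / n, pT1 * (1 - pT2))
            + term(onlyT2 / n, (1 - pT1) * pT2);
}

// p * log(p / q), treated as 0 when either probability vanishes
private static double term(double p, double q) {
    return (p > 0 && q > 0) ? p * Math.log(p / q) : 0.0;
}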

From source file:com.linkedin.pinot.core.indexsegment.mutable.MutableSegmentImplAggregateMetricsTest.java

@Test
public void testAggregateMetrics() {
    String[] stringValues = new String[10];
    for (int i = 0; i < stringValues.length; i++) {
        stringValues[i] = RandomStringUtils.random(10);
    }

    Map<String, Long> expectedValues = new HashMap<>();
    Random random = new Random();
    for (int i = 0; i < NUM_ROWS; i++) {
        GenericRow row = new GenericRow();
        row.putField(DIMENSION_1, random.nextInt(10));
        row.putField(DIMENSION_2, stringValues[random.nextInt(stringValues.length)]);
        // Generate random int to prevent overflow
        long metricValue = random.nextInt();
        row.putField(METRIC, metricValue);

        _mutableSegmentImpl.index(row);

        // Update expected values
        String key = buildKey(row);
        expectedValues.put(key, expectedValues.getOrDefault(key, 0L) + metricValue);
    }

    int numDocsIndexed = _mutableSegmentImpl.getNumDocsIndexed();
    Assert.assertEquals(numDocsIndexed, expectedValues.size());

    GenericRow reuse = new GenericRow();
    for (int docId = 0; docId < numDocsIndexed; docId++) {
        GenericRow row = _mutableSegmentImpl.getRecord(docId, reuse);
        String key = buildKey(row);
        Assert.assertEquals(row.getValue(METRIC), expectedValues.get(key));
    }
}

From source file:com.epam.dlab.backendapi.service.impl.LibraryServiceImpl.java

private void populateModel(String exploratoryName, Document document, Map<LibKey, List<LibraryStatus>> model,
        String resourceType) {
    String name = document.getString(ExploratoryLibDAO.LIB_NAME);
    String version = document.getString(ExploratoryLibDAO.LIB_VERSION);
    String group = document.getString(ExploratoryLibDAO.LIB_GROUP);
    String status = document.getString(ExploratoryLibDAO.STATUS);
    String error = document.getString(ExploratoryLibDAO.ERROR_MESSAGE);

    LibKey libKey = new LibKey(name, version, group);
    List<LibraryStatus> statuses = model.getOrDefault(libKey, new ArrayList<>());

    if (statuses.isEmpty()) {
        model.put(libKey, statuses);
    }

    statuses.add(new LibraryStatus(exploratoryName, resourceType, status, error));
}

From source file:org.jpos.qi.eeuser.ConsumersView.java

public void saveEntity() {
    // TODO: BBB maybe the logic of creating a consumer and its secret should be
    // abstracted away inside ConsumerManager and not in UI code?
    Consumer c = getInstance();
    c.setUser(this.selectedUser);
    Map<String, String> smap = new HashMap<>();
    try {
        smap.put("S", Base64.toBase64String(generateKey().getEncoded()));
        SecureData sd = getCryptoService().aesEncrypt(Serializer.serialize(smap));
        c.setKid(sd.getId().toString());
        c.setSecureData(sd.getEncoded());
    } catch (Exception e) {
        getApp().getLog().error(e);
    }

    getApp().addWindow(new ConfirmDialog(getApp().getMessage("secretTitle"),
            getApp().getMessage("secretDescription", smap.getOrDefault("S", "?")),
            getApp().getMessage("secretConfirm"), getApp().getMessage("cancel"), confirm -> {
                if (confirm) {
                    super.saveEntity();
                }
            }));
}

From source file:org.silverpeas.components.formsonline.control.FormsOnlineSessionController.java

public ExportSummary export() throws FormsOnlineDatabaseException, SilverpeasException {
    List<StringBuilder> csvRows = new ArrayList<>();
    StringBuilder csvHeader = new StringBuilder();

    // adding columns relative to request metadata
    List<String> csvCols = new ArrayList<>();
    csvCols.add("id");
    addCSVValue(csvHeader, "Id");
    csvCols.add("status");
    addCSVValue(csvHeader, getString("GML.status"));
    csvCols.add("creationDate");
    addCSVValue(csvHeader, getString("formsOnline.sendDate"));
    csvCols.add("requester");
    addCSVValue(csvHeader, getString("formsOnline.sender"));
    csvCols.add("processDate");
    addCSVValue(csvHeader, getString("formsOnline.request.process.date"));
    csvCols.add("validator");
    addCSVValue(csvHeader, getString("formsOnline.request.process.user"));
    csvCols.add("comment");
    addCSVValue(csvHeader, getString("GML.comments"));

    int nbMetaDataCols = csvCols.size();

    // adding columns relative to request content
    RecordSet recordSet;
    FieldTemplate[] fields;
    try {
        PublicationTemplate template = getCurrentPublicationTemplate(false);
        recordSet = template.getRecordSet();
        fields = template.getRecordTemplate().getFieldTemplates();
    } catch (Exception e) {
        throw new SilverpeasException("Can't load form '" + getCurrentForm().getXmlFormName() + "'", e);
    }
    for (FieldTemplate field : fields) {
        csvCols.add(field.getFieldName());
        addCSVValue(csvHeader, field.getLabel(getLanguage()));
    }
    csvRows.add(csvHeader);

    // getting rows
    RequestsByStatus requestsByStatus = getAllValidatorRequests();
    List<FormInstance> requests = requestsByStatus.getAll();
    for (FormInstance request : requests) {
        StringBuilder csvRow = new StringBuilder();

        addCSVValue(csvRow, request.getId());
        addCSVValue(csvRow, statusLabels.get(request.getState()));
        addCSVValue(csvRow, DateUtil.getOutputDate(request.getCreationDate(), getLanguage()));
        User creator = request.getCreator();
        addCSVValue(csvRow, creator.getLastName() + " " + creator.getFirstName());
        addCSVValue(csvRow, DateUtil.getOutputDate(request.getValidationDate(), getLanguage()));
        User validator = request.getValidator();
        if (validator != null) {
            addCSVValue(csvRow, validator.getLastName() + " " + validator.getFirstName());
        } else {
            addCSVValue(csvRow, "");
        }
        addCSVValue(csvRow, request.getComments());

        DataRecord data = null;
        try {
            data = recordSet.getRecord(request.getId());
        } catch (Exception e) {
            SilverLogger.getLogger(this).error("RequestId = " + request.getId(), e);
        }
        if (data != null) {
            Map<String, String> values = data.getValues(getLanguage());

            for (int i = nbMetaDataCols; i < csvCols.size(); i++) {
                String value = values.getOrDefault(csvCols.get(i), "");
                // removing all HTML
                value = new Source(value).getTextExtractor().toString();
                addCSVValue(csvRow, value);
            }
        }
        csvRows.add(csvRow);
    }

    String exportFileName = writeCSVFile(csvRows);
    return new ExportSummary(exportFileName, csvRows.size() - 1);
}

From source file:org.springframework.cloud.dataflow.server.stream.AppDeployerStreamDeployer.java

@Override
public Map<StreamDefinition, DeploymentState> streamsStates(List<StreamDefinition> streamDefinitions) {
    Map<StreamDefinition, List<String>> deploymentIdsPerStream = streamDefinitions.stream()
            .collect(Collectors.toMap(Function.identity(),
                    sd -> sd.getAppDefinitions().stream().map(
                            sad -> deploymentIdRepository.findOne(DeploymentKey.forStreamAppDefinition(sad)))
                            .collect(Collectors.toList())));

    // Map from app deployment id to DeploymentState
    Map<String, DeploymentState> statePerApp = gatherDeploymentStates(deploymentIdsPerStream.values().stream()
            .flatMap(Collection::stream).filter(Objects::nonNull).toArray(String[]::new));

    // Map from SCDF Stream to aggregate streamsStates
    return deploymentIdsPerStream.entrySet().stream().map(kv -> new AbstractMap.SimpleImmutableEntry<>(
            kv.getKey(),
            StreamDeployerUtil.aggregateState(kv.getValue().stream()
                    .map(deploymentId -> statePerApp.getOrDefault(deploymentId, DeploymentState.unknown))
                    .collect(Collectors.toSet()))))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}