Example usage for com.google.common.collect Maps newLinkedHashMap

Introduction

On this page you can find example usages of com.google.common.collect.Maps#newLinkedHashMap, collected from open-source projects.

Prototype

public static <K, V> LinkedHashMap<K, V> newLinkedHashMap() 

Document

Creates a mutable, empty, insertion-ordered LinkedHashMap instance.
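
Before the project samples below, here is a minimal, self-contained sketch of the factory on its own (the class name, keys, and values are illustrative):

import com.google.common.collect.Maps;

import java.util.LinkedHashMap;

public class NewLinkedHashMapDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, Integer> map = Maps.newLinkedHashMap();
        map.put("first", 1);
        map.put("second", 2);
        map.put("third", 3);
        // Iteration follows insertion order: first=1, second=2, third=3
        map.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}

Note that on Java 7 and later the factory adds nothing over the constructor: new LinkedHashMap<>() with the diamond operator is equivalent, and Guava's own documentation treats this method as effectively deprecated.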

Usage

From source file: org.apache.brooklyn.util.core.task.AbstractExecutionContext.java

/** @see #submit(Map, Runnable) */
@Override
public <T> Task<T> submit(TaskAdaptable<T> task) {
    return submitInternal(Maps.newLinkedHashMap(), task.asTask());
}

From source file: io.appform.nautilus.funnel.funnel.ESFunnelCalculator.java

@Override
public Funnel calculate(String tenant, Context context, FunnelRequest funnelRequest) throws Exception {

    SearchRequestBuilder request = context.getEsConnection().client()
            .prepareSearch(ESUtils.getAllIndicesForTenant(tenant)).setQuery(ESUtils.query(funnelRequest))
            .setTypes(TypeUtils.typeName(Session.class)).setFetchSource(false).setSize(0)
            .setIndicesOptions(IndicesOptions.lenientExpandOpen()).addAggregation(
                    AggregationBuilders.terms("paths").field(Constants.NORMALIZED_PATH_FIELD_NAME).size(0));
    log.debug("Generated query for filter request: {}", request);
    SearchResponse response = request.execute().actionGet();
    Aggregations aggregations = response.getAggregations();
    Terms terms = aggregations.get("paths");
    Map<String, Long> funnelStages = Maps.newLinkedHashMap();
    funnelRequest.getStates().stream().forEach(stage -> funnelStages.put(stage, 0L));
    Map<String, List<String>> regexes = RegexUtils.separateRegexes(funnelRequest.getStates());
    for (Terms.Bucket buckets : terms.getBuckets()) {
        final String flatPath = buckets.getKey().toString();
        final long count = buckets.getDocCount();
        Set<String> tracker = Sets.newHashSet();
        regexes.entrySet().stream().filter(entry -> flatPath.matches(entry.getKey())).forEach(entry -> {
            final List<String> stage = regexes.get(entry.getKey());
            stage.stream().filter(key -> funnelStages.containsKey(key) && !tracker.contains(key))
                    .forEach(key -> {
                        funnelStages.put(key, funnelStages.get(key) + count);
                        tracker.add(key);
                    });
        });
    }
    return Funnel.builder().stages(funnelStages).build();
}

From source file: co.jirm.core.util.ObjectMapUtils.java

@SuppressWarnings("unchecked")
public static void pushPath(final Map<String, Object> m, List<String> names, Object value) {
    Map<String, Object> current = m;
    int j = 0;
    for (String n : names) {
        j++;
        if (j == names.size()) {
            current.put(n, value);
            break;
        }
        Object o = current.get(n);
        if (o == null) {
            Map<String, Object> sub = Maps.newLinkedHashMap();
            current.put(n, sub);
            current = sub;
        } else if (o instanceof Map) {
            current = (Map<String, Object>) o;
        } else {
            throw new IllegalArgumentException("Cannot set value to " + names);
        }
    }
}
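
A quick illustration of how this helper behaves; the call below is hypothetical and assumes the enclosing class ObjectMapUtils from the source above:

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Maps;

import java.util.Map;

public class PushPathDemo {
    public static void main(String[] args) {
        Map<String, Object> root = Maps.newLinkedHashMap();
        // Intermediate names that do not exist yet become nested LinkedHashMaps.
        ObjectMapUtils.pushPath(root, ImmutableList.of("server", "http", "port"), 8080);
        System.out.println(root); // prints {server={http={port=8080}}}
    }
}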

From source file: org.jfrog.hudson.release.maven.PomTransformer.java

/**
 * Performs the transformation.
 *
 * @return True if the file was modified.
 */
public Boolean invoke(File pomFile, VirtualChannel channel) throws IOException, InterruptedException {

    org.jfrog.build.extractor.maven.reader.ModuleName current = new org.jfrog.build.extractor.maven.reader.ModuleName(
            currentModule.groupId, currentModule.artifactId);

    Map<org.jfrog.build.extractor.maven.reader.ModuleName, String> modules = Maps.newLinkedHashMap();
    for (Map.Entry<ModuleName, String> entry : versionsByModule.entrySet()) {
        modules.put(new org.jfrog.build.extractor.maven.reader.ModuleName(entry.getKey().groupId,
                entry.getKey().artifactId), entry.getValue());
    }

    org.jfrog.build.extractor.maven.transformer.PomTransformer transformer = new org.jfrog.build.extractor.maven.transformer.PomTransformer(
            current, modules, scmUrl, failOnSnapshot);

    return transformer.transform(pomFile);
}

From source file: org.openehr.adl.parser.tree.AdlTreeDAdlParser.java

Map<String, Tree> parseAdlMap(Tree tAdlMap) {
    if (tAdlMap.getType() == AdlParser.AST_NULL)
        return Collections.emptyMap();

    assertTokenType(tAdlMap, AdlParser.AST_ADL_MAP);
    Map<String, Tree> properties = Maps.newLinkedHashMap();
    for (Tree tAdlMapEntry : children(tAdlMap)) {
        assertTokenType(tAdlMapEntry, AdlParser.AST_ADL_MAP_ENTRY);

        String name = unescapeString(tAdlMapEntry.getChild(0).getText());
        properties.put(name, tAdlMapEntry.getChild(1));
    }

    return properties;
}

From source file: org.eclipse.osee.orcs.db.internal.sql.join.JoinCleanerCallable.java

private static <K, V> ListMultimap<K, V> newListMultimap() {
    Map<K, Collection<V>> map = Maps.newLinkedHashMap();
    return Multimaps.newListMultimap(map, new Supplier<List<V>>() {
        @Override
        public List<V> get() {
            return Lists.newArrayList();
        }
    });
}
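
As an aside, Guava 16 and later ship MultimapBuilder, which produces the same insertion-ordered ListMultimap without hand-wiring the backing map and value supplier; a minimal sketch:

import com.google.common.collect.ListMultimap;
import com.google.common.collect.MultimapBuilder;

public class MultimapDemo {
    public static void main(String[] args) {
        // Keys backed by a LinkedHashMap, values by ArrayLists, as above.
        ListMultimap<String, String> multimap =
                MultimapBuilder.linkedHashKeys().arrayListValues().build();
        multimap.put("a", "1");
        multimap.put("a", "2");
        System.out.println(multimap); // prints {a=[1, 2]}
    }
}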

From source file: exec.validate_evaluation.stats.UsageToMicroCommitRatioCalculator.java

public void run() throws IOException {

    System.out.println("\nreading all available MicroCommits...");
    for (String zip : io.findZips()) {
        for (MicroCommit mc : io.read(zip)) {
            List<MicroCommit> mcs = allMicroCommits.get(mc.getType());
            if (mcs == null) {
                mcs = Lists.newLinkedList();
                allMicroCommits.put(mc.getType(), mcs);
            }
            mcs.add(mc);
        }
    }

    Map<String, Double> usageToHistoryRatio = Maps.newLinkedHashMap();

    int numTypesTotal = 0;
    int numCommitsTotal = 0;
    int numUsagesTotal = 0;

    int numTypesDATEV = 0;
    int numCommitsDATEV = 0;
    int numUsagesDATEV = 0;

    int numTypesWith = 0;
    int numCommitsWith = 0;
    int numUsagesWith = 0;
    int numTypesWithout = 0;
    int numCommitsWithout = 0;
    int numUsagesWithout = 0;

    for (ICoReTypeName t : allMicroCommits.keySet()) {

        List<MicroCommit> commits = allMicroCommits.get(t);
        List<Usage> usages = dirUsages.readAllZips(t, Usage.class);

        int numCommits = commits.size();
        int numUsages = usages.size();
        System.out.printf("%s: %d commits, %d usages\n", t, numCommits, numUsages);

        // if (numUsages > 0 && !isDatev(t)) {
        if (!isDatev(t)) {
            double ratio = (0.000001 + numUsages) / (1.0 * numCommits);
            String key = String.format("%s (%d/%d)", t, numUsages, numCommits);
            usageToHistoryRatio.put(key, ratio);
        }

        numTypesTotal++;
        numCommitsTotal += numCommits;
        numUsagesTotal += numUsages;

        if (numCommits > 0 && numUsages > 0) {
            numTypesWith++;
            numCommitsWith += numCommits;
            numUsagesWith += numUsages;
        } else {
            numTypesWithout++;
            numCommitsWithout += numCommits;
            numUsagesWithout += numUsages;

            if (isDatev(t)) {
                numTypesDATEV++;
                numCommitsDATEV += numCommits;
                numUsagesDATEV += numUsages;
            }
        }
    }

    System.out.printf("\n\nsummary:\n");
    System.out.printf("we have a total of %d commits and %d usages for %d different types\n", numCommitsTotal,
            numUsagesTotal, numTypesTotal);
    System.out.printf("currently, we have both commits and usages for %d types (%d commits, %d usages)\n",
            numTypesWith, numCommitsWith, numUsagesWith);
    System.out.printf("we have commits, but no usages for %d types (%d commits, %d usages)\n", numTypesWithout,
            numCommitsWithout, numUsagesWithout);
    System.out.printf("out of these, %d types (%d commits, %d usages) are related to DATEV\n", numTypesDATEV,
            numCommitsDATEV, numUsagesDATEV);

    System.out.printf("\n\nratios (usages/histories):\n");
    Map<String, Double> sortedRatios = MapSorter.sort(usageToHistoryRatio);
    for (String key : sortedRatios.keySet()) {
        double ratio = sortedRatios.get(key);
        System.out.printf("%3.2f - %s\n", ratio, key);
    }

}

From source file: org.sonar.batch.profiling.ModuleProfiling.java

public void dump(Properties props) {
    double percent = this.totalTime() / 100.0;
    Map<Object, AbstractTimeProfiling> categories = Maps.newLinkedHashMap();
    categories.putAll(profilingPerPhase);
    categories.putAll(profilingPerBatchStep);

    for (Map.Entry<Object, AbstractTimeProfiling> batchStep : categories.entrySet()) {
        props.setProperty(batchStep.getKey().toString(), Long.toString(batchStep.getValue().totalTime()));
    }

    for (Map.Entry<Object, AbstractTimeProfiling> batchStep : sortByDescendingTotalTime(categories)
            .entrySet()) {
        println(" * " + batchStep.getKey() + " execution time: ", percent, batchStep.getValue());
    }
    // Breakdown per phase
    for (Phase phase : Phase.values()) {
        if (profilingPerPhase.containsKey(phase) && getProfilingPerPhase(phase).hasItems()) {
            println("");
            println(" * " + phase + " execution time breakdown: ", getProfilingPerPhase(phase));
            getProfilingPerPhase(phase).dump(props);
        }
    }
}

From source file: edu.umn.msi.tropix.proteomics.itraqquantitation.impl.ReportSummary.java

public ReportSummary(final Iterable<ITraqMatch> iTraqMatchs, final Iterable<ITraqLabel> labels,
        GroupType groupType) {
    final Multimap<String, ITraqMatch> groupMap = LinkedHashMultimap.create();
    groupLabelProtein = Maps.newLinkedHashMap();

    for (final ITraqMatch iTraqMatch : iTraqMatchs) {
        final ProteinInformation proteinInformation = iTraqMatch.getProteinInformation();
        List<String> groupLabels = Lists.newArrayList();
        if (groupType == GroupType.PROTEIN) {
            groupLabels.add(proteinInformation.getProteinAccession());
        } else if (groupType == GroupType.PEPTIDE) {
            groupLabels.add(iTraqMatch.getPeptideSequence());
        } else if (groupType == GroupType.PEPTIDE_WITH_MODIFICATIONS) {
            groupLabels.add(iTraqMatch.getModifiedPeptideSequence().toString());
        } else if (groupType == GroupType.PEPTIDE_WITH_UNIQUE_MODIFICATION) {
            SequenceWithModifications seqWithMods = iTraqMatch.getModifiedPeptideSequence();
            if (!(seqWithMods instanceof CanSplitModifications)) {
                throw new IllegalArgumentException(
                        "Attempt to split modifications when operation unavailable for data source.");
            }
            final CanSplitModifications splittableSeqWithMods = (CanSplitModifications) seqWithMods;
            for (SequenceWithModifications seqWithOneMod : splittableSeqWithMods.splitupModifications()) {
                groupLabels.add(seqWithOneMod.toString());
            }
        } else {
            throw new IllegalArgumentException("Unknown group type " + groupType);
        }
        for (final String groupLabel : groupLabels) {
            groupMap.put(groupLabel, iTraqMatch);
            if (!groupLabelProtein.containsKey(groupLabel)) {
                groupLabelProtein.put(groupLabel, proteinInformation);
            }
        }
    }

    int numGroups = 0;
    groupSummariesByLabel = Maps.newLinkedHashMap();
    for (final String groupLabel : groupMap.keySet()) {
        final Iterable<ITraqMatch> groupDataEntries = groupMap.get(groupLabel);
        groupSummariesByLabel.put(groupLabel, new GroupSummary(groupDataEntries, labels));
        numGroups++;
    }

    this.numGroups = numGroups;
    this.groupType = groupType;
}

From source file: brooklyn.entity.drivers.downloads.DownloadProducerFromUrlAttribute.java

public DownloadProducerFromUrlAttribute() {
    super(new Function<DownloadRequirement, String>() {
        @Override
        public String apply(DownloadRequirement input) {
            if (input.getAddonName() == null) {
                return input.getEntityDriver().getEntity().getAttribute(Attributes.DOWNLOAD_URL);
            } else {
                String addon = input.getAddonName();
                Map<String, String> addonUrls = input.getEntityDriver().getEntity()
                        .getAttribute(Attributes.DOWNLOAD_ADDON_URLS);
                return (addonUrls != null) ? addonUrls.get(addon) : null;
            }
        }
    }, new Function<DownloadRequirement, Map<String, ?>>() {
        @Override
        public Map<String, ?> apply(DownloadRequirement input) {
            Map<String, Object> result = Maps.newLinkedHashMap();
            if (input.getAddonName() == null) {
                result.putAll(DownloadSubstituters.getBasicEntitySubstitutions(input.getEntityDriver()));
            } else {
                result.putAll(DownloadSubstituters.getBasicAddonSubstitutions(input.getEntityDriver(),
                        input.getAddonName()));
            }
            result.putAll(input.getProperties());
            return result;
        }
    });
}