Example usage for com.google.common.collect Multimap entries

Introduction

This page collects real-world usage examples of the com.google.common.collect Multimap method entries().

Prototype

Collection<Map.Entry<K, V>> entries();

Document

Returns a view collection of all key-value pairs contained in this multimap, as Map.Entry instances.
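Before the full examples below, here is a minimal self-contained sketch (the class name and sample data are illustrative assumptions, not taken from any of the sources below). It shows the two properties the examples rely on: entries() yields every key-value pair as a Map.Entry, and the returned collection is a live view, so removing through it updates the underlying multimap.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

import java.util.Map;

public class MultimapEntriesExample {
    public static void main(String[] args) {
        Multimap<String, Integer> scores = HashMultimap.create();
        scores.put("alice", 1);
        scores.put("alice", 2);
        scores.put("bob", 3);

        // entries() exposes every key-value pair as a Map.Entry
        for (Map.Entry<String, Integer> e : scores.entries()) {
            System.out.println(e.getKey() + " -> " + e.getValue());
        }

        // entries() is a view: removing through it mutates the multimap itself
        scores.entries().removeIf(e -> e.getValue() > 2);
        System.out.println(scores); // bob's pair is gone; alice's values remain
    }
}

Note that for a HashMultimap the iteration order of entries() is undefined; use a LinkedHashMultimap or TreeMultimap when order matters.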

Usage

From source file:uk.ac.ebi.mdk.apps.tool.SummariseReferences.java

public void summarise(File in, File out) throws ClassNotFoundException, IOException {

    boolean chemicalOnly = has("c");

    Reconstruction reconstruction = ReconstructionIOHelper.read(in);

    File summaryFile = new File(new File(out, "summary"), reconstruction.getAccession() + ".tsv");
    File chebiFile = new File(new File(out, "chebi"), reconstruction.getAccession() + ".tsv");
    File nonChEBIFile = new File(new File(out, "other"), reconstruction.getAccession() + ".tsv");
    File noneFile = new File(new File(out, "unreferenced"), reconstruction.getAccession() + ".tsv");

    // number of reactions/metabolites
    int n = reconstruction.getMetabolome().size();
    int m = reconstruction.getReactome().size();

    Multimap<Identifier, Identifier> chebi = HashMultimap.create();
    Multimap<Identifier, Identifier> nonChebi = HashMultimap.create();
    Map<Identifier, String> unreferenced = new HashMap<Identifier, String>();
    Set<String> types = new HashSet<String>();

    for (Metabolite metabolite : reconstruction.getMetabolome()) {

        Collection<CrossReference> xrefs = metabolite.getAnnotationsExtending(CrossReference.class);

        for (CrossReference xref : xrefs) {

            Identifier id = xref.getIdentifier();

            types.add(id.getShortDescription());

            if (!chemicalOnly || id instanceof ChemicalIdentifier) {
                if (id instanceof ChEBIIdentifier) {
                    chebi.put(metabolite.getIdentifier(), id);
                } else {
                    nonChebi.put(metabolite.getIdentifier(), id);
                }
            }

        }

        if (!nonChebi.containsKey(metabolite.getIdentifier())
                && !chebi.containsKey(metabolite.getIdentifier())) {
            unreferenced.put(metabolite.getIdentifier(), metabolite.getName());
        }

    }

    // output
    out.mkdirs();
    summaryFile.getParentFile().mkdirs();
    chebiFile.getParentFile().mkdirs();
    nonChEBIFile.getParentFile().mkdirs();
    noneFile.getParentFile().mkdirs();

    // summary

    CSVWriter summaryTSV = new CSVWriter(new FileWriter(summaryFile), '\t', '\0');

    summaryTSV.writeNext(new String[] { reconstruction.getAccession(), Integer.toString(n), Integer.toString(m),
            Integer.toString(chebi.keySet().size()), Integer.toString(nonChebi.keySet().size()),
            Integer.toString(unreferenced.size()), Joiner.on(", ").join(types) });

    summaryTSV.close();

    // chebi references

    CSVWriter chebiTSV = new CSVWriter(new FileWriter(chebiFile), '\t', '\0');
    chebiTSV.writeNext(new String[] { "query.accession", "xref.accession", "xref.resource", "xref.mir" });
    for (Map.Entry<Identifier, Identifier> e : chebi.entries()) {
        chebiTSV.writeNext(new String[] { e.getKey().getAccession(), e.getValue().getAccession(),
                e.getValue().getResource().getName(), ((MIRIAMEntry) e.getValue().getResource()).getId() });
    }
    chebiTSV.close();

    // all chemical id references

    CSVWriter chemicalTSV = new CSVWriter(new FileWriter(nonChEBIFile), '\t', '\0');
    chemicalTSV.writeNext(
            new String[] { "query.accession", "query.name", "xref.accession", "xref.resource", "xref.mir" });
    for (Map.Entry<Identifier, Identifier> e : nonChebi.entries()) {
        chemicalTSV.writeNext(new String[] { e.getKey().getAccession(),
                reconstruction.getMetabolome().ofIdentifier(e.getKey()).iterator().next().getName(),
                e.getValue().getAccession(), e.getValue().getResource().getName(),
                ((MIRIAMEntry) e.getValue().getResource()).getId() });
    }
    chemicalTSV.close();

    // no references

    CSVWriter noneTSV = new CSVWriter(new FileWriter(noneFile), '\t', '\0');
    for (Map.Entry<Identifier, String> e : unreferenced.entrySet()) {
        noneTSV.writeNext(new String[] { e.getKey().getAccession(), e.getValue(), });
    }
    noneTSV.close();

}

From source file:org.apache.cassandra.service.StorageService.java

/**
 * Called when an endpoint is removed from the ring. This function checks
 * whether this node becomes responsible for new ranges as a
 * consequence and streams data if needed.
 *
 * This is rather inefficient, but it does not matter much
 * since this is called very seldom.
 *
 * @param endpoint the node that left
 */
private void restoreReplicaCount(InetAddress endpoint, final InetAddress notifyEndpoint) {
    final Multimap<InetAddress, String> fetchSources = HashMultimap.create();
    Multimap<String, Map.Entry<InetAddress, Collection<Range>>> rangesToFetch = HashMultimap.create();

    final InetAddress myAddress = FBUtilities.getLocalAddress();

    for (String table : DatabaseDescriptor.getNonSystemTables()) {
        Multimap<Range, InetAddress> changedRanges = getChangedRangesForLeaving(table, endpoint);
        Set<Range> myNewRanges = new HashSet<Range>();
        for (Map.Entry<Range, InetAddress> entry : changedRanges.entries()) {
            if (entry.getValue().equals(myAddress))
                myNewRanges.add(entry.getKey());
        }
        Multimap<InetAddress, Range> sourceRanges = getNewSourceRanges(table, myNewRanges);
        for (Map.Entry<InetAddress, Collection<Range>> entry : sourceRanges.asMap().entrySet()) {
            fetchSources.put(entry.getKey(), table);
            rangesToFetch.put(table, entry);
        }
    }

    for (final String table : rangesToFetch.keySet()) {
        for (Map.Entry<InetAddress, Collection<Range>> entry : rangesToFetch.get(table)) {
            final InetAddress source = entry.getKey();
            Collection<Range> ranges = entry.getValue();
            final Runnable callback = new Runnable() {
                public void run() {
                    synchronized (fetchSources) {
                        fetchSources.remove(source, table);
                        if (fetchSources.isEmpty())
                            sendReplicationNotification(myAddress, notifyEndpoint);
                    }
                }
            };
            if (logger_.isDebugEnabled())
                logger_.debug("Requesting from " + source + " ranges " + StringUtils.join(ranges, ", "));
            StreamIn.requestRanges(source, table, ranges, callback, OperationType.RESTORE_REPLICA_COUNT);
        }
    }
}

From source file:org.apache.accumulo.examples.wikisearch.parser.RangeCalculator.java

/**
 * @param c
 * @param auths
 * @param indexedTerms
 * @param terms
 * @param query
 * @param logic
 * @param typeFilter
 * @throws ParseException
 */
public void execute(Connector c, Authorizations auths, Multimap<String, Normalizer> indexedTerms,
        Multimap<String, QueryTerm> terms, String query, AbstractQueryLogic logic, Set<String> typeFilter)
        throws ParseException {
    super.execute(query);
    this.c = c;
    this.auths = auths;
    this.indexedTerms = indexedTerms;
    this.termsCopy.putAll(terms);
    this.indexTableName = logic.getIndexTableName();
    this.reverseIndexTableName = logic.getReverseIndexTableName();
    this.queryThreads = logic.getQueryThreads();

    Map<MapKey, Set<Range>> indexRanges = new HashMap<MapKey, Set<Range>>();
    Map<MapKey, Set<Range>> trailingWildcardRanges = new HashMap<MapKey, Set<Range>>();
    Map<MapKey, Set<Range>> leadingWildcardRanges = new HashMap<MapKey, Set<Range>>();
    Map<Text, RangeBounds> rangeMap = new HashMap<Text, RangeBounds>();

    // Here we iterate over all of the terms in the query to determine if they are an equivalence,
    // wildcard, or range type operator
    for (Entry<String, QueryTerm> entry : terms.entries()) {
        if (entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTEQNode.class))
                || entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTERNode.class))
                || entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTLTNode.class))
                || entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTLENode.class))
                || entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTGTNode.class))
                || entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTGENode.class))) {
            // If this term is not in the set of indexed terms, then bail
            if (!indexedTerms.containsKey(entry.getKey())) {
                termCardinalities.put(entry.getKey().toUpperCase(), 0L);
                continue;
            }
            // In the case of function calls, the query term could be null. Don't query the index for it.
            if (null == entry.getValue()) {
                termCardinalities.put(entry.getKey().toUpperCase(), 0L);
                continue;
            }
            // If we are looking for 'null', skip this term.
            if (null == entry.getValue().getValue() || ((String) entry.getValue().getValue()).equals("null")) {
                termCardinalities.put(entry.getKey().toUpperCase(), 0L);
                continue;
            }

            // Remove the begin and end ' marks
            String value = null;
            if (((String) entry.getValue().getValue()).startsWith("'")
                    && ((String) entry.getValue().getValue()).endsWith("'"))
                value = ((String) entry.getValue().getValue()).substring(1,
                        ((String) entry.getValue().getValue()).length() - 1);
            else
                value = (String) entry.getValue().getValue();
            // The entries in the index are normalized
            for (Normalizer normalizer : indexedTerms.get(entry.getKey())) {
                String normalizedFieldValue = normalizer.normalizeFieldValue(null, value);
                Text fieldValue = new Text(normalizedFieldValue);
                Text fieldName = new Text(entry.getKey().toUpperCase());

                // EQUALS
                if (entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTEQNode.class))) {
                    Key startRange = new Key(fieldValue, fieldName);
                    Range r = new Range(startRange, true, startRange.followingKey(PartialKey.ROW), true);

                    MapKey key = new MapKey(fieldName.toString(), fieldValue.toString());
                    key.setOriginalQueryValue(value);
                    this.originalQueryValues.put(value, key);
                    if (!indexRanges.containsKey(key))
                        indexRanges.put(key, new HashSet<Range>());
                    indexRanges.get(key).add(r);
                    // WILDCARD
                } else if (entry.getValue().getOperator()
                        .equals(JexlOperatorConstants.getOperator(ASTERNode.class))) {
                    // This is a wildcard query using regex. We can only support leading and trailing wildcards at this time. Leading
                    // wildcards will need to be reversed and sent to the global reverse index. Trailing wildcard queries will be sent to the
                    // global index. In all cases, the range for the wildcard will be the range of possible UNICODE codepoints, hex 0 to 10FFFF.
                    int loc = normalizedFieldValue.indexOf(WILDCARD);
                    if (-1 == loc)
                        loc = normalizedFieldValue.indexOf(SINGLE_WILDCARD);
                    if (-1 == loc) {
                        // Then no wildcard in the query? Treat like the equals case above.
                        Key startRange = new Key(fieldValue, fieldName);
                        Range r = new Range(startRange, true, startRange.followingKey(PartialKey.ROW), true);

                        MapKey key = new MapKey(fieldName.toString(), fieldValue.toString());
                        key.setOriginalQueryValue(value);
                        this.originalQueryValues.put(value, key);
                        if (!indexRanges.containsKey(key))
                            indexRanges.put(key, new HashSet<Range>());
                        indexRanges.get(key).add(r);
                    } else {
                        if (loc == 0) {
                            // Then we have a leading wildcard, reverse the term and use the global reverse index.
                            StringBuilder buf = new StringBuilder(normalizedFieldValue.substring(2));
                            normalizedFieldValue = buf.reverse().toString();
                            Key startRange = new Key(new Text(normalizedFieldValue + "\u0000"), fieldName);
                            Key endRange = new Key(new Text(normalizedFieldValue + "\u10FFFF"), fieldName);
                            Range r = new Range(startRange, true, endRange, true);

                            MapKey key = new MapKey(fieldName.toString(), normalizedFieldValue);
                            key.setOriginalQueryValue(value);
                            this.originalQueryValues.put(value, key);
                            if (!leadingWildcardRanges.containsKey(key))
                                leadingWildcardRanges.put(key, new HashSet<Range>());
                            leadingWildcardRanges.get(key).add(r);
                        } else if (loc == (normalizedFieldValue.length() - 2)) {
                            normalizedFieldValue = normalizedFieldValue.substring(0, loc);
                            // Then we have a trailing wildcard character.
                            Key startRange = new Key(new Text(normalizedFieldValue + "\u0000"), fieldName);
                            Key endRange = new Key(new Text(normalizedFieldValue + "\u10FFFF"), fieldName);
                            Range r = new Range(startRange, true, endRange, true);

                            MapKey key = new MapKey(fieldName.toString(), normalizedFieldValue);
                            key.setOriginalQueryValue(value);
                            this.originalQueryValues.put(value, key);
                            if (!trailingWildcardRanges.containsKey(key))
                                trailingWildcardRanges.put(key, new HashSet<Range>());
                            trailingWildcardRanges.get(key).add(r);
                        } else {
                            // throw new RuntimeException("Unsupported wildcard location. Only trailing or leading wildcards are supported: " + normalizedFieldValue);
                            // Don't throw an exception: there is a wildcard in the query, so we'll treat it as a filter on the
                            // results since it is neither leading nor trailing.
                        }
                    }
                    // RANGES
                } else if (entry.getValue().getOperator()
                        .equals(JexlOperatorConstants.getOperator(ASTGTNode.class))
                        || entry.getValue().getOperator()
                                .equals(JexlOperatorConstants.getOperator(ASTGENode.class))) {
                    // Then we have a lower bound to a range query
                    if (!rangeMap.containsKey(fieldName))
                        rangeMap.put(fieldName, new RangeBounds());
                    rangeMap.get(fieldName).setLower(fieldValue);
                    rangeMap.get(fieldName).setOriginalLower(value);
                } else if (entry.getValue().getOperator()
                        .equals(JexlOperatorConstants.getOperator(ASTLTNode.class))
                        || entry.getValue().getOperator()
                                .equals(JexlOperatorConstants.getOperator(ASTLENode.class))) {
                    // Then we have an upper bound to a range query
                    if (!rangeMap.containsKey(fieldName))
                        rangeMap.put(fieldName, new RangeBounds());
                    rangeMap.get(fieldName).setUpper(fieldValue);
                    rangeMap.get(fieldName).setOriginalUpper(value);
                }
            }
        }
    }

    // INDEX RANGE QUERY
    // Now that we have figured out the range bounds, create the index ranges.
    for (Entry<Text, RangeBounds> entry : rangeMap.entrySet()) {
        if (entry.getValue().getLower() != null && entry.getValue().getUpper() != null) {
            // Figure out the key order
            Key lk = new Key(entry.getValue().getLower());
            Key up = new Key(entry.getValue().getUpper());
            Text lower = lk.getRow();
            Text upper = up.getRow();
            // Switch the order if needed.
            if (lk.compareTo(up) > 0) {
                lower = up.getRow();
                upper = lk.getRow();
            }
            Key startRange = new Key(lower, entry.getKey());
            Key endRange = new Key(upper, entry.getKey());
            Range r = new Range(startRange, true, endRange, true);
            // For the range queries we need to query the global index and then handle the results a little differently.
            Map<MapKey, Set<Range>> ranges = new HashMap<MapKey, Set<Range>>();
            MapKey key = new MapKey(entry.getKey().toString(), entry.getValue().getLower().toString());
            key.setOriginalQueryValue(entry.getValue().getOriginalLower().toString());
            this.originalQueryValues.put(entry.getValue().getOriginalLower().toString(), key);
            ranges.put(key, new HashSet<Range>());
            ranges.get(key).add(r);

            // Now query the global index and override the field value used in the results map
            try {
                Map<MapKey, TermRange> lowerResults = queryGlobalIndex(ranges, entry.getKey().toString(),
                        this.indexTableName, false, key, typeFilter);
                // Add the results to the global index results for both the upper and lower field values.
                Map<MapKey, TermRange> upperResults = new HashMap<MapKey, TermRange>();
                for (Entry<MapKey, TermRange> e : lowerResults.entrySet()) {
                    MapKey key2 = new MapKey(e.getKey().getFieldName(), entry.getValue().getUpper().toString());
                    key2.setOriginalQueryValue(entry.getValue().getOriginalUpper().toString());
                    upperResults.put(key2, e.getValue());
                    this.originalQueryValues.put(entry.getValue().getOriginalUpper(), key2);

                }

                this.globalIndexResults.putAll(lowerResults);
                this.globalIndexResults.putAll(upperResults);

            } catch (TableNotFoundException e) {
                log.error("index table not found", e);
                throw new RuntimeException(" index table not found", e);
            }
        } else {
            log.warn("Unbounded range detected, not querying index for it. Field  " + entry.getKey().toString()
                    + " in query: " + query);
        }
    }
    // Now that we have calculated all of the ranges, query the global index.
    try {

        // Query for the trailing wildcards if we have any
        for (Entry<MapKey, Set<Range>> trailing : trailingWildcardRanges.entrySet()) {
            Map<MapKey, Set<Range>> m = new HashMap<MapKey, Set<Range>>();
            m.put(trailing.getKey(), trailing.getValue());
            if (log.isDebugEnabled())
                log.debug("Ranges for Wildcard Global Index query: " + m.toString());
            this.globalIndexResults.putAll(queryGlobalIndex(m, trailing.getKey().getFieldName(),
                    this.indexTableName, false, trailing.getKey(), typeFilter));
        }

        // Query for the leading wildcards if we have any
        for (Entry<MapKey, Set<Range>> leading : leadingWildcardRanges.entrySet()) {
            Map<MapKey, Set<Range>> m = new HashMap<MapKey, Set<Range>>();
            m.put(leading.getKey(), leading.getValue());
            if (log.isDebugEnabled())
                log.debug("Ranges for Wildcard Global Reverse Index query: " + m.toString());
            this.globalIndexResults.putAll(queryGlobalIndex(m, leading.getKey().getFieldName(),
                    this.reverseIndexTableName, true, leading.getKey(), typeFilter));
        }

        // Query for the equals case
        for (Entry<MapKey, Set<Range>> equals : indexRanges.entrySet()) {
            Map<MapKey, Set<Range>> m = new HashMap<MapKey, Set<Range>>();
            m.put(equals.getKey(), equals.getValue());
            if (log.isDebugEnabled())
                log.debug("Ranges for Global Index query: " + m.toString());
            this.globalIndexResults.putAll(queryGlobalIndex(m, equals.getKey().getFieldName(),
                    this.indexTableName, false, equals.getKey(), typeFilter));
        }
    } catch (TableNotFoundException e) {
        log.error("index table not found", e);
        throw new RuntimeException(" index table not found", e);
    }

    if (log.isDebugEnabled())
        log.debug("Ranges from Global Index query: " + globalIndexResults.toString());

    // Now traverse the AST
    EvaluationContext ctx = new EvaluationContext();
    this.getAST().childrenAccept(this, ctx);

    if (ctx.lastRange.getRanges().size() == 0) {
        log.debug("No resulting range set");
    } else {
        if (log.isDebugEnabled())
            log.debug("Setting range results to: " + ctx.lastRange.getRanges().toString());
        this.result = ctx.lastRange.getRanges();
    }
}

From source file:com.yahoo.pulsar.broker.loadbalance.impl.SimpleLoadManagerImpl.java

/**
 * Assign an owner for the specified ServiceUnit from the given candidates, following these principles: 1) Optimum
 * distribution: fill up one broker until its load reaches the optimum level (defined by the underload threshold)
 * before pulling another idle broker in; 2) Even distribution: once all brokers' loads are above the optimum level,
 * keep all brokers evenly loaded; 3) Set the underload threshold to a small value (like 1) for pure even
 * distribution, and to a high value (like 80) for pure optimum distribution.
 *
 * Strategy to select a broker: 1) The first choice is the least loaded broker that is underloaded but not idle; 2) The
 * second choice is an idle broker (if there is any); 3) Otherwise simply select the least loaded broker if it is NOT
 * overloaded; 4) If all brokers are overloaded, select the broker with the maximum available capacity (since
 * brokers may have different hardware configurations, this usually means selecting the broker with more hardware
 * resources).
 *
 * Broker's load level: 1) Load ranking (triggered by a LoadReport update) estimates the load level according to the
 * resource usage and the namespace bundles already loaded by each broker; 2) When the leader broker decides the owner
 * for a new namespace bundle, it may take time for the real owner to actually load the bundle and refresh its
 * LoadReport, so the leader broker stores the bundle in a list called preAllocatedBundles, stores the quota of all
 * preAllocatedBundles in preAllocatedQuotas, and re-estimates the broker's load level by putting the
 * preAllocatedQuota into the calculation; 3) Everything (preAllocatedBundles and preAllocatedQuotas) gets reset
 * during load ranking.
 */
private ResourceUnit findBrokerForPlacement(Multimap<Long, ResourceUnit> candidates,
        ServiceUnitId serviceUnit) {
    long underloadThreshold = this.getLoadBalancerBrokerUnderloadedThresholdPercentage();
    long overloadThreshold = this.getLoadBalancerBrokerOverloadedThresholdPercentage();
    ResourceQuota defaultQuota = pulsar.getLocalZkCacheService().getResourceQuotaCache().getDefaultQuota();

    double minLoadPercentage = 101.0;
    long maxAvailability = -1;
    ResourceUnit idleRU = null;
    ResourceUnit maxAvailableRU = null;
    ResourceUnit randomRU = null;

    ResourceUnit selectedRU = null;
    ResourceUnitRanking selectedRanking = null;
    String serviceUnitId = serviceUnit.toString();
    synchronized (resourceUnitRankings) {
        long randomBrokerIndex = (candidates.size() > 0) ? (this.brokerRotationCursor % candidates.size()) : 0;
        // find the least loaded & not-idle broker
        for (Map.Entry<Long, ResourceUnit> candidateOwner : candidates.entries()) {
            ResourceUnit candidate = candidateOwner.getValue();
            randomBrokerIndex--;

            // skip brokers that are not ranked; this should never happen except in unit tests
            if (!resourceUnitRankings.containsKey(candidate)) {
                continue;
            }

            // check if this ServiceUnit is already pre-allocated
            String resourceUnitId = candidate.getResourceId();
            ResourceUnitRanking ranking = resourceUnitRankings.get(candidate);
            if (ranking.isServiceUnitPreAllocated(serviceUnitId)) {
                return candidate;
            }

            // check if this ServiceUnit is already loaded
            if (ranking.isServiceUnitLoaded(serviceUnitId)) {
                ranking.removeLoadedServiceUnit(serviceUnitId, this.getResourceQuota(serviceUnitId));
            }

            // record a random broker
            if (randomBrokerIndex < 0 && randomRU == null) {
                randomRU = candidate;
            }

            // check the available capacity
            double loadPercentage = ranking.getEstimatedLoadPercentage();
            double availablePercentage = Math.max(0, (100 - loadPercentage) / 100);
            long availability = (long) (ranking.estimateMaxCapacity(defaultQuota) * availablePercentage);
            if (availability > maxAvailability) {
                maxAvailability = availability;
                maxAvailableRU = candidate;
            }

            // check the load percentage
            if (ranking.isIdle()) {
                if (idleRU == null) {
                    idleRU = candidate;
                }
            } else {
                if (selectedRU == null) {
                    selectedRU = candidate;
                    selectedRanking = ranking;
                    minLoadPercentage = loadPercentage;
                } else {
                    if (ranking.compareTo(selectedRanking) < 0) {
                        minLoadPercentage = loadPercentage;
                        selectedRU = candidate;
                        selectedRanking = ranking;
                    }
                }
            }
        }

        if ((minLoadPercentage > underloadThreshold && idleRU != null) || selectedRU == null) {
            // assign to an idle broker if the least loaded broker already has optimum load (which means it is NOT
            // underloaded), or if all brokers are idle
            selectedRU = idleRU;
        } else if (minLoadPercentage >= 100.0 && randomRU != null) {
            // all brokers are full, assign to a random one
            selectedRU = randomRU;
        } else if (minLoadPercentage > overloadThreshold) {
            // assign to the broker with maximum available capacity if all brokers are overloaded
            selectedRU = maxAvailableRU;
        }

        // re-calculate load level for selected broker
        if (selectedRU != null) {
            this.brokerRotationCursor = (this.brokerRotationCursor + 1) % 1000000;
            ResourceUnitRanking ranking = resourceUnitRankings.get(selectedRU);
            String loadPercentageDesc = ranking.getEstimatedLoadPercentageString();
            log.info("Assign {} to {} with ({}).", serviceUnitId, selectedRU.getResourceId(),
                    loadPercentageDesc);
            if (!ranking.isServiceUnitPreAllocated(serviceUnitId)) {
                ResourceQuota quota = this.getResourceQuota(serviceUnitId);
                ranking.addPreAllocatedServiceUnit(serviceUnitId, quota);
            }
        }
    }
    return selectedRU;
}

From source file:org.apache.cassandra.service.StorageService.java

/**
 * Seed data to the endpoints that will be responsible for it in the future
 *
 * @param rangesToStreamByTable tables and data ranges with endpoints included for each
 * @return latch to count down
 */
private CountDownLatch streamRanges(final Map<String, Multimap<Range, InetAddress>> rangesToStreamByTable) {
    final CountDownLatch latch = new CountDownLatch(rangesToStreamByTable.keySet().size());
    for (final String table : rangesToStreamByTable.keySet()) {
        Multimap<Range, InetAddress> rangesWithEndpoints = rangesToStreamByTable.get(table);

        if (rangesWithEndpoints.isEmpty()) {
            latch.countDown();
            continue;
        }

        final Set<Map.Entry<Range, InetAddress>> pending = new HashSet<Map.Entry<Range, InetAddress>>(
                rangesWithEndpoints.entries());

        for (final Map.Entry<Range, InetAddress> entry : rangesWithEndpoints.entries()) {
            final Range range = entry.getKey();
            final InetAddress newEndpoint = entry.getValue();

            final Runnable callback = new Runnable() {
                public void run() {
                    synchronized (pending) {
                        pending.remove(entry);

                        if (pending.isEmpty())
                            latch.countDown();
                    }
                }
            };

            StageManager.getStage(Stage.STREAM).execute(new Runnable() {
                public void run() {
                    // TODO each call to transferRanges re-flushes, this is potentially a lot of waste
                    StreamOut.transferRanges(newEndpoint, table, Arrays.asList(range), callback,
                            OperationType.UNBOOTSTRAP);
                }
            });
        }
    }
    return latch;
}

From source file:com.facebook.presto.operator.PipelineContext.java

public PipelineStats getPipelineStats() {
    List<DriverContext> driverContexts = ImmutableList.copyOf(this.drivers);

    int totalDrivers = completedDrivers.get() + driverContexts.size();
    int queuedDrivers = 0;
    int queuedPartitionedDrivers = 0;
    int runningDrivers = 0;
    int runningPartitionedDrivers = 0;
    int completedDrivers = this.completedDrivers.get();

    Distribution queuedTime = new Distribution(this.queuedTime);
    Distribution elapsedTime = new Distribution(this.elapsedTime);

    long totalScheduledTime = this.totalScheduledTime.get();
    long totalCpuTime = this.totalCpuTime.get();
    long totalUserTime = this.totalUserTime.get();
    long totalBlockedTime = this.totalBlockedTime.get();

    long rawInputDataSize = this.rawInputDataSize.getTotalCount();
    long rawInputPositions = this.rawInputPositions.getTotalCount();

    long processedInputDataSize = this.processedInputDataSize.getTotalCount();
    long processedInputPositions = this.processedInputPositions.getTotalCount();

    long outputDataSize = this.outputDataSize.getTotalCount();
    long outputPositions = this.outputPositions.getTotalCount();

    List<DriverStats> drivers = new ArrayList<>();

    Multimap<Integer, OperatorStats> runningOperators = ArrayListMultimap.create();
    for (DriverContext driverContext : driverContexts) {
        DriverStats driverStats = driverContext.getDriverStats();
        drivers.add(driverStats);

        if (driverStats.getStartTime() == null) {
            queuedDrivers++;
            if (driverContext.isPartitioned()) {
                queuedPartitionedDrivers++;
            }
        } else {
            runningDrivers++;
            if (driverContext.isPartitioned()) {
                runningPartitionedDrivers++;
            }
        }

        queuedTime.add(driverStats.getQueuedTime().roundTo(NANOSECONDS));
        elapsedTime.add(driverStats.getElapsedTime().roundTo(NANOSECONDS));

        totalScheduledTime += driverStats.getTotalScheduledTime().roundTo(NANOSECONDS);
        totalCpuTime += driverStats.getTotalCpuTime().roundTo(NANOSECONDS);
        totalUserTime += driverStats.getTotalUserTime().roundTo(NANOSECONDS);
        totalBlockedTime += driverStats.getTotalBlockedTime().roundTo(NANOSECONDS);

        List<OperatorStats> operators = ImmutableList
                .copyOf(transform(driverContext.getOperatorContexts(), OperatorContext::getOperatorStats));
        for (OperatorStats operator : operators) {
            runningOperators.put(operator.getOperatorId(), operator);
        }

        rawInputDataSize += driverStats.getRawInputDataSize().toBytes();
        rawInputPositions += driverStats.getRawInputPositions();

        processedInputDataSize += driverStats.getProcessedInputDataSize().toBytes();
        processedInputPositions += driverStats.getProcessedInputPositions();

        outputDataSize += driverStats.getOutputDataSize().toBytes();
        outputPositions += driverStats.getOutputPositions();
    }

    // merge the running operator stats into the operator summary
    TreeMap<Integer, OperatorStats> operatorSummaries = new TreeMap<>(this.operatorSummaries);
    for (Entry<Integer, OperatorStats> entry : runningOperators.entries()) {
        OperatorStats current = operatorSummaries.get(entry.getKey());
        if (current == null) {
            current = entry.getValue();
        } else {
            current = current.add(entry.getValue());
        }
        operatorSummaries.put(entry.getKey(), current);
    }

    ImmutableSet<BlockedReason> blockedReasons = drivers.stream()
            .filter(driver -> driver.getEndTime() == null && driver.getStartTime() != null)
            .flatMap(driver -> driver.getBlockedReasons().stream())
            .collect(ImmutableCollectors.toImmutableSet());
    boolean fullyBlocked = drivers.stream()
            .filter(driver -> driver.getEndTime() == null && driver.getStartTime() != null)
            .allMatch(DriverStats::isFullyBlocked);
    return new PipelineStats(inputPipeline, outputPipeline,

            totalDrivers, queuedDrivers, queuedPartitionedDrivers, runningDrivers, runningPartitionedDrivers,
            completedDrivers,

            new DataSize(memoryReservation.get(), BYTE).convertToMostSuccinctDataSize(),
            new DataSize(systemMemoryReservation.get(), BYTE).convertToMostSuccinctDataSize(),

            queuedTime.snapshot(), elapsedTime.snapshot(),

            new Duration(totalScheduledTime, NANOSECONDS).convertToMostSuccinctTimeUnit(),
            new Duration(totalCpuTime, NANOSECONDS).convertToMostSuccinctTimeUnit(),
            new Duration(totalUserTime, NANOSECONDS).convertToMostSuccinctTimeUnit(),
            new Duration(totalBlockedTime, NANOSECONDS).convertToMostSuccinctTimeUnit(),
            fullyBlocked && (runningDrivers > 0 || runningPartitionedDrivers > 0), blockedReasons,

            new DataSize(rawInputDataSize, BYTE).convertToMostSuccinctDataSize(), rawInputPositions,

            new DataSize(processedInputDataSize, BYTE).convertToMostSuccinctDataSize(), processedInputPositions,

            new DataSize(outputDataSize, BYTE).convertToMostSuccinctDataSize(), outputPositions,

            ImmutableList.copyOf(operatorSummaries.values()), drivers);
}

From source file:org.openscience.cdk.app.DepictController.java

private void abbreviate(IReaction rxn, String mode, String annotate) {
    Multimap<IAtomContainer, Sgroup> sgroupmap = ArrayListMultimap.create();
    switch (mode.toLowerCase()) {
    case "true":
    case "on":
    case "yes":
        for (IAtomContainer mol : rxn.getReactants().atomContainers()) {
            contractHydrates(mol);
            Set<IAtom> atoms = new HashSet<>();
            List<Sgroup> newSgroups = new ArrayList<>();
            for (Sgroup sgroup : abbreviations.generate(mol)) {
                if (add(atoms, sgroup.getAtoms()))
                    newSgroups.add(sgroup);
            }
            sgroupmap.putAll(mol, newSgroups);
        }
        for (IAtomContainer mol : rxn.getProducts().atomContainers()) {
            contractHydrates(mol);
            Set<IAtom> atoms = new HashSet<>();
            List<Sgroup> newSgroups = new ArrayList<>();
            for (Sgroup sgroup : abbreviations.generate(mol)) {
                if (add(atoms, sgroup.getAtoms()))
                    newSgroups.add(sgroup);
            }
            sgroupmap.putAll(mol, newSgroups);
        }
        for (IAtomContainer mol : rxn.getAgents().atomContainers()) {
            contractHydrates(mol);
            reagents.apply(mol);
            abbreviations.apply(mol);
        }
        break;
    case "groups":
        for (IAtomContainer mol : rxn.getAgents().atomContainers()) {
            contractHydrates(mol);
            abbreviations.apply(mol);
        }
        break;
    case "reagents":
        for (IAtomContainer mol : rxn.getAgents().atomContainers()) {
            contractHydrates(mol);
            reagents.apply(mol);
        }
        break;
    }

    Set<String> include = new HashSet<>();
    for (Map.Entry<IAtomContainer, Sgroup> e : sgroupmap.entries()) {
        final IAtomContainer mol = e.getKey();
        final Sgroup abbrv = e.getValue();
        int numAtoms = mol.getAtomCount();
        if (abbrv.getBonds().isEmpty()) {
            include.add(abbrv.getSubscript());
        } else {
            int numAbbr = abbrv.getAtoms().size();
            double f = numAbbr / (double) numAtoms;
            if (numAtoms - numAbbr > 1 && f <= 0.4) {
                include.add(abbrv.getSubscript());
            }
        }
    }

    for (Map.Entry<IAtomContainer, Collection<Sgroup>> e : sgroupmap.asMap().entrySet()) {
        final IAtomContainer mol = e.getKey();

        List<Sgroup> sgroups = mol.getProperty(CDKConstants.CTAB_SGROUPS);
        if (sgroups == null)
            sgroups = new ArrayList<>();
        else
            sgroups = new ArrayList<>(sgroups);
        mol.setProperty(CDKConstants.CTAB_SGROUPS, sgroups);

        for (Sgroup abbrv : e.getValue()) {
            if (include.contains(abbrv.getSubscript()))
                sgroups.add(abbrv);
        }
    }
}

From source file:io.prestosql.operator.PipelineContext.java

public PipelineStats getPipelineStats() {
    // check for end state to avoid callback ordering problems
    if (taskContext.getState().isDone()) {
        DateTime now = DateTime.now();
        executionStartTime.compareAndSet(null, now);
        lastExecutionStartTime.compareAndSet(null, now);
        lastExecutionEndTime.compareAndSet(null, now);
    }

    int completedDrivers = this.completedDrivers.get();
    List<DriverContext> driverContexts = ImmutableList.copyOf(this.drivers);
    int totalSplits = this.totalSplits.get();
    PipelineStatus pipelineStatus = getPipelineStatus(driverContexts.iterator(), totalSplits, completedDrivers,
            partitioned);

    int totalDrivers = completedDrivers + driverContexts.size();

    Distribution queuedTime = new Distribution(this.queuedTime);
    Distribution elapsedTime = new Distribution(this.elapsedTime);

    long totalScheduledTime = this.totalScheduledTime.get();
    long totalCpuTime = this.totalCpuTime.get();
    long totalBlockedTime = this.totalBlockedTime.get();

    long physicalInputDataSize = this.physicalInputDataSize.getTotalCount();
    long physicalInputPositions = this.physicalInputPositions.getTotalCount();

    long internalNetworkInputDataSize = this.internalNetworkInputDataSize.getTotalCount();
    long internalNetworkInputPositions = this.internalNetworkInputPositions.getTotalCount();

    long rawInputDataSize = this.rawInputDataSize.getTotalCount();
    long rawInputPositions = this.rawInputPositions.getTotalCount();

    long processedInputDataSize = this.processedInputDataSize.getTotalCount();
    long processedInputPositions = this.processedInputPositions.getTotalCount();

    long outputDataSize = this.outputDataSize.getTotalCount();
    long outputPositions = this.outputPositions.getTotalCount();

    long physicalWrittenDataSize = this.physicalWrittenDataSize.get();

    List<DriverStats> drivers = new ArrayList<>();

    Multimap<Integer, OperatorStats> runningOperators = ArrayListMultimap.create();
    for (DriverContext driverContext : driverContexts) {
        DriverStats driverStats = driverContext.getDriverStats();
        drivers.add(driverStats);

        queuedTime.add(driverStats.getQueuedTime().roundTo(NANOSECONDS));
        elapsedTime.add(driverStats.getElapsedTime().roundTo(NANOSECONDS));

        totalScheduledTime += driverStats.getTotalScheduledTime().roundTo(NANOSECONDS);
        totalCpuTime += driverStats.getTotalCpuTime().roundTo(NANOSECONDS);
        totalBlockedTime += driverStats.getTotalBlockedTime().roundTo(NANOSECONDS);

        List<OperatorStats> operators = ImmutableList
                .copyOf(transform(driverContext.getOperatorContexts(), OperatorContext::getOperatorStats));
        for (OperatorStats operator : operators) {
            runningOperators.put(operator.getOperatorId(), operator);
        }

        physicalInputDataSize += driverStats.getPhysicalInputDataSize().toBytes();
        physicalInputPositions += driverStats.getPhysicalInputPositions();

        internalNetworkInputDataSize += driverStats.getInternalNetworkInputDataSize().toBytes();
        internalNetworkInputPositions += driverStats.getInternalNetworkInputPositions();

        rawInputDataSize += driverStats.getRawInputDataSize().toBytes();
        rawInputPositions += driverStats.getRawInputPositions();

        processedInputDataSize += driverStats.getProcessedInputDataSize().toBytes();
        processedInputPositions += driverStats.getProcessedInputPositions();

        outputDataSize += driverStats.getOutputDataSize().toBytes();
        outputPositions += driverStats.getOutputPositions();

        physicalWrittenDataSize += driverStats.getPhysicalWrittenDataSize().toBytes();
    }

    // merge the running operator stats into the operator summary
    TreeMap<Integer, OperatorStats> operatorSummaries = new TreeMap<>(this.operatorSummaries);
    for (Entry<Integer, OperatorStats> entry : runningOperators.entries()) {
        OperatorStats current = operatorSummaries.get(entry.getKey());
        if (current == null) {
            current = entry.getValue();
        } else {
            current = current.add(entry.getValue());
        }
        operatorSummaries.put(entry.getKey(), current);
    }

    Set<DriverStats> runningDriverStats = drivers.stream()
            .filter(driver -> driver.getEndTime() == null && driver.getStartTime() != null)
            .collect(toImmutableSet());
    ImmutableSet<BlockedReason> blockedReasons = runningDriverStats.stream()
            .flatMap(driver -> driver.getBlockedReasons().stream()).collect(toImmutableSet());

    boolean fullyBlocked = !runningDriverStats.isEmpty()
            && runningDriverStats.stream().allMatch(DriverStats::isFullyBlocked);

    return new PipelineStats(pipelineId,

            executionStartTime.get(), lastExecutionStartTime.get(), lastExecutionEndTime.get(),

            inputPipeline, outputPipeline,

            totalDrivers, pipelineStatus.getQueuedDrivers(), pipelineStatus.getQueuedPartitionedDrivers(),
            pipelineStatus.getRunningDrivers(), pipelineStatus.getRunningPartitionedDrivers(),
            pipelineStatus.getBlockedDrivers(), completedDrivers,

            succinctBytes(pipelineMemoryContext.getUserMemory()),
            succinctBytes(pipelineMemoryContext.getRevocableMemory()),
            succinctBytes(pipelineMemoryContext.getSystemMemory()),

            queuedTime.snapshot(), elapsedTime.snapshot(),

            new Duration(totalScheduledTime, NANOSECONDS).convertToMostSuccinctTimeUnit(),
            new Duration(totalCpuTime, NANOSECONDS).convertToMostSuccinctTimeUnit(),
            new Duration(totalBlockedTime, NANOSECONDS).convertToMostSuccinctTimeUnit(), fullyBlocked,
            blockedReasons,

            succinctBytes(physicalInputDataSize), physicalInputPositions,

            succinctBytes(internalNetworkInputDataSize), internalNetworkInputPositions,

            succinctBytes(rawInputDataSize), rawInputPositions,

            succinctBytes(processedInputDataSize), processedInputPositions,

            succinctBytes(outputDataSize), outputPositions,

            succinctBytes(physicalWrittenDataSize),

            ImmutableList.copyOf(operatorSummaries.values()), drivers);
}

From source file:org.terasology.crafting.ui.workstation.StationAvailableRecipesWidget.java

public void loadRecipes() {
    availableRecipes.clear();
    displayedOpenCategories.clear();

    displayedOpenCategories.addAll(openCategories);

    Multimap<String, CraftingStationRecipe.CraftingStationResult> withoutCategory = LinkedHashMultimap.create();
    Multimap<String, String> categoryRelationships = TreeMultimap.create(Ordering.natural(),
            Ordering.natural());
    Set<String> topLevelCategories = new TreeSet<>();

    Map<String, Multimap<String, CraftingStationRecipe.CraftingStationResult>> categoryRecipesMap = Maps
            .newHashMap();

    WorkstationComponent workstation = station.getComponent(WorkstationComponent.class);
    for (WorkstationProcess workstationProcess : registry
            .getWorkstationProcesses(workstation.supportedProcessTypes.keySet())) {
        if (workstationProcess instanceof CraftingWorkstationProcess) {
            String recipeId = workstationProcess.getId();
            List<? extends CraftingStationRecipe.CraftingStationResult> results = ((CraftingWorkstationProcess) workstationProcess)
                    .getCraftingWorkstationRecipe().getMatchingRecipeResultsForDisplay(station);
            if (results != null) {
                for (CraftingStationRecipe.CraftingStationResult result : results) {
                    availableRecipes.put(recipeId, result.getResultParameters());

                    String category = getCategory(recipeId);
                    if (category == null) {
                        withoutCategory.put(recipeId, result);
                    } else {
                        Multimap<String, CraftingStationRecipe.CraftingStationResult> categoryRecipes = categoryRecipesMap
                                .get(category);
                        if (categoryRecipes == null) {
                            categoryRecipes = LinkedHashMultimap.create();
                            categoryRecipesMap.put(category, categoryRecipes);
                        }
                        categoryRecipes.put(recipeId, result);
                        String topLevel = fillRelationships(categoryRelationships, category);
                        topLevelCategories.add(topLevel);
                    }
                }
            }
        }
    }

    for (String topLevelCategory : topLevelCategories) {
        int level = 0;

        appendCategory(categoryRelationships, categoryRecipesMap, topLevelCategory, level);
    }

    appendRecipes(0, withoutCategory.entries());
}

From source file:org.lanternpowered.server.command.DefaultCommandsCollection.java

public void load() {
    final Multimap<PluginContainer, CommandProvider> commandProviders = HashMultimap.create();
    // Minecraft Commands
    commandProviders.put(this.minecraft, new CommandBan());
    commandProviders.put(this.minecraft, new CommandBanIp());
    commandProviders.put(this.minecraft, new CommandBorder());
    commandProviders.put(this.minecraft, new CommandDeop());
    commandProviders.put(this.minecraft, new CommandDifficulty());
    commandProviders.put(this.minecraft, new CommandGameMode());
    commandProviders.put(this.minecraft, new CommandGameRule());
    commandProviders.put(this.minecraft, new CommandHelp());
    commandProviders.put(this.minecraft, new CommandKick());
    commandProviders.put(this.minecraft, new CommandListBans());
    commandProviders.put(this.minecraft, new CommandListPlayers());
    commandProviders.put(this.minecraft, new CommandMe());
    commandProviders.put(this.minecraft, new CommandOp());
    commandProviders.put(this.minecraft, new CommandPardon());
    commandProviders.put(this.minecraft, new CommandPardonIp());
    commandProviders.put(this.minecraft, new CommandParticle());
    commandProviders.put(this.implementation, new CommandParticleEffect());
    commandProviders.put(this.minecraft, new CommandPlaySound());
    commandProviders.put(this.minecraft, new CommandSay());
    commandProviders.put(this.minecraft, new CommandScoreboard());
    commandProviders.put(this.implementation, new CommandSetData());
    commandProviders.put(this.minecraft, new CommandSetIdleTimeout());
    commandProviders.put(this.minecraft, new CommandSetSpawn());
    commandProviders.put(this.minecraft, new CommandStop());
    commandProviders.put(this.minecraft, new CommandStopSound());
    commandProviders.put(this.minecraft, new CommandTeleport());
    commandProviders.put(this.minecraft, new CommandTell());
    commandProviders.put(this.minecraft, new CommandTime());
    commandProviders.put(this.minecraft, new CommandTitle());
    commandProviders.put(this.minecraft, new CommandToggleDownfall());
    commandProviders.put(this.minecraft, new CommandTp());
    commandProviders.put(this.implementation, new CommandVersion());
    commandProviders.put(this.minecraft, new CommandWeather());
    commandProviders.put(this.minecraft, new CommandWhitelist());
    // Testing Commands
    commandProviders.put(this.implementation, new CommandOpenTestContainer());

    for (Map.Entry<PluginContainer, CommandProvider> entry : commandProviders.entries()) {
        final PluginContainer plugin = entry.getKey();
        this.commandManager.register(plugin, entry.getValue().buildSpecFor(plugin),
                entry.getValue().getAliases());
    }

    final PermissionService permissionService = this.permissionService.get();
    if (permissionService instanceof LanternPermissionService) {
        final LanternPermissionService lanternPermissionService = (LanternPermissionService) permissionService;
        //noinspection Convert2streamapi
        for (Map.Entry<PluginContainer, CommandProvider> entry : commandProviders.entries()) {
            entry.getValue().getOpPermissionLevel()
                    .ifPresent(level -> lanternPermissionService.getGroupForOpLevel(level).getSubjectData()
                            .setPermission(SubjectData.GLOBAL_CONTEXT,
                                    entry.getValue().getPermissionFor(entry.getKey()), Tristate.TRUE));
        }
    } else {
        //noinspection Convert2streamapi
        for (Map.Entry<PluginContainer, CommandProvider> entry : commandProviders.entries()) {
            if (entry.getValue().getOpPermissionLevel().orElse(0) == 0) {
                permissionService.getDefaults().getTransientSubjectData().setPermission(
                        SubjectData.GLOBAL_CONTEXT, entry.getValue().getPermissionFor(entry.getKey()),
                        Tristate.TRUE);
            }
        }
    }
}