Example usage for com.google.common.collect Multimap entries

List of usage examples for com.google.common.collect Multimap entries

Introduction

On this page you can find example usage for com.google.common.collect Multimap entries.

Prototype

Collection<Map.Entry<K, V>> entries();

Document

Returns a view collection of all key-value pairs contained in this multimap, as Map.Entry instances.
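A minimal, self-contained sketch of the behavior described above; the class name and values are illustrative only and do not come from any of the projects listed under Usage:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

import java.util.Map;

public class MultimapEntriesExample {
    public static void main(String[] args) {
        Multimap<String, Integer> scores = HashMultimap.create();
        scores.put("alice", 10);
        scores.put("alice", 20);
        scores.put("bob", 5);

        // entries() exposes every key-value pair, so a key with several values appears once per value
        for (Map.Entry<String, Integer> entry : scores.entries()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }

        // The returned collection is a view of the multimap, so its size matches the number of mappings
        System.out.println(scores.entries().size()); // prints 3
    }
}

Because entries() is a view backed by the multimap, removing an entry through the view's iterator also removes the mapping from the multimap; several of the examples below rely on that behavior.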

Usage

From source file:co.cask.cdap.etl.planner.ControlDag.java

private void flattenFrom(String node) {
    Set<String> outputs = outgoingConnections.get(node);
    if (outputs.isEmpty()) {
        return;
    }

    if (outputs.size() == 1) {
        flattenFrom(outputs.iterator().next());
        return;
    }

    Multimap<String, String> branchEndpointOutputs = HashMultimap.create();
    // can't just use branchEndpointOutputs.keySet(),
    // because that won't track branch endpoints that had no output (sinks)
    Set<String> branchEndpoints = new HashSet<>();
    for (String output : outputs) {
        String branchEndpoint = findBranchEnd(output);
        branchEndpoints.add(branchEndpoint);
        branchEndpointOutputs.putAll(branchEndpoint, outgoingConnections.get(branchEndpoint));
    }

    // if all the branch endpoints connect to a single node, there is no need to add a join node
    Set<String> endpointOutputs = new HashSet<>(branchEndpointOutputs.values());
    if (endpointOutputs.size() == 1) {
        flattenFrom(endpointOutputs.iterator().next());
        return;
    }

    // add a connection from each branch endpoint to a newly added join node
    // then move all outgoing connections from each branch endpoint so that they are coming out of the new join node
    String newJoinNode = generateJoinNodeName(branchEndpoints);
    addNode(newJoinNode, branchEndpoints, endpointOutputs);
    // remove the outgoing connections from endpoints that aren't going to our new join node
    for (Map.Entry<String, String> endpointEntry : branchEndpointOutputs.entries()) {
        removeConnection(endpointEntry.getKey(), endpointEntry.getValue());
    }
    /*
       have to trim again due to reshuffling of nodes. For example, if we have:

                     |--> n3
           |--> n2 --|
           |         |--> n4
      n1 --|              |
           |              v
           |--> n5 -----> n6

       after we insert the new join node we'll have:

           |--> n2 --|           |--> n3
           |         |           |
      n1 --|         |--> join --|--> n4
           |         |           |    |
           |--> n5 --|           |    v
                                 |--> n6

       and we need to remove the connection from join -> n6, otherwise the algorithm will get messed up
     */
    trim();

    // then keep flattening from the new join node
    flattenFrom(newJoinNode);
}

From source file:com.flexive.core.search.genericSQL.GenericSQLDataFilter.java

/**
 * Builds an 'OR' condition.
 *
 * @param sb the string buffer to use
 * @param br the brace
 * @throws FxSqlSearchException if the build failed
 */
private void buildOr(StringBuilder sb, Brace br) throws FxSqlSearchException {
    // Start OR
    boolean conditionStarted = false;
    final Multimap<String, ConditionTableInfo> tables = getUsedContentTables(br, true);
    if (br.getElements().size() > 1) {
        final Map.Entry<String, ConditionTableInfo> singleTable = tables.keySet().size() == 1
                ? tables.entries().iterator().next()
                : null;

        // for "OR" we can always optimize flat storage queries,
        // as long as only one flat storage table is used and we don't have a nested 'and',
        // and the brace does not contain an IS NULL condition
        if (singleTable != null && singleTable.getValue().isFlatStorage() && !br.containsAnd()
                && !containsIsNullCondition(br)) {
            sb.append(getOptimizedFlatStorageSubquery(br, tables.keySet().iterator().next(), false));
            return;
        }

        if (singleTable != null && singleTable.getKey().equals(DatabaseConst.TBL_CONTENT)) {
            // combine main table selects into a single one
            sb.append("(SELECT id,ver," + getEmptyLanguage() + " as lang FROM " + DatabaseConst.TBL_CONTENT
                    + " cd" + " WHERE " + getOptimizedMainTableConditions(br, "cd") + ")");
            return;
        }

        // check if there are two or more queries on the same flat storage that can be grouped
        try {
            final Brace grouped = br.groupConditions(new Brace.GroupFunction() {
                @Override
                public Object apply(Condition cond) {
                    try {
                        final Pair<String, ConditionTableInfo> pi = getPropertyInfo(cond);
                        return pi.getSecond().isFlatStorage()
                                // flat storage entries can be grouped as long as they're on the same table
                                ? pi.getFirst()
                                // generate a unique ID, the statement will be ignored for grouping
                                : "condition" + cond.getId();
                    } catch (FxSqlSearchException e) {
                        throw e.asRuntimeException();
                    }
                }
            });
            if (grouped != br) {
                // reorg happened - process new version
                if (LOG.isTraceEnabled()) {
                    LOG.trace("OR statement reorganized, new statement: " + grouped);
                }
                buildOr(sb, grouped);
                return;
            }
        } catch (SqlParserException e) {
            throw new FxSqlSearchException(e);
        }
    }

    if (tables.containsKey(DatabaseConst.TBL_CONTENT_DATA)) {
        // combine content data "OR" queries into a single select
        final List<Condition> simpleConditions = getSimpleContentDataSelects(br);
        if (simpleConditions.size() > 1) {
            // avoid UNION of identically structured subqueries,
            // use "SELECT id, ver, ... FROM FX_CONTENT_DATA WHERE (cond1 or cond2 or ...) AND (filters)" instead

            final String cdSelect = simpleContentDataUnion(simpleConditions);

            // don't process conditions any further
            br.getElements().removeAll(simpleConditions);

            if (br.size() == 0) {
                // no more conditions
                sb.append(cdSelect);
                return;
            }

            // more conditions follow
            conditionStarted = true;
            sb.append('(').append(cdSelect).append("\nUNION\n");
        }
    }

    if (!conditionStarted) {
        sb.append("(");
    }

    int pos = 0;
    for (BraceElement be : br.getElements()) {
        if (pos > 0) {
            sb.append("\nUNION\n");
        }
        if (be instanceof Condition) {
            sb.append(getConditionSubQuery(br.getStatement(), (Condition) be));
        } else if (be instanceof Brace) {
            build(sb, (Brace) be);
        } else {
            throw new FxSqlSearchException(LOG, "ex.sqlSearch.filter.invalidBrace", be);
        }
        pos++;
    }
    sb.append(")");
}

From source file:com.google.devtools.build.lib.query2.SkyQueryEnvironment.java

/**
 * Returns FileValue keys for which there may be relevant (from the perspective of {@link
 * #getRBuildFiles}) FileValues in the graph corresponding to the given {@code pathFragments},
 * which are assumed to be file paths.
 *
 * <p>To do this, we emulate the {@link ContainingPackageLookupFunction} logic: for each given
 * file path, we look for the nearest ancestor directory (starting with its parent directory), if
 * any, that has a package. The {@link PackageLookupValue} for this package tells us the package
 * root that we should use for the {@link RootedPath} for the {@link FileValue} key.
 *
 * <p>Note that there may not be nodes in the graph corresponding to the returned SkyKeys.
 */
Collection<SkyKey> getSkyKeysForFileFragments(Iterable<PathFragment> pathFragments)
        throws InterruptedException {
    Set<SkyKey> result = new HashSet<>();
    Multimap<PathFragment, PathFragment> currentToOriginal = ArrayListMultimap.create();
    for (PathFragment pathFragment : pathFragments) {
        currentToOriginal.put(pathFragment, pathFragment);
    }
    while (!currentToOriginal.isEmpty()) {
        Multimap<SkyKey, PathFragment> packageLookupKeysToOriginal = ArrayListMultimap.create();
        Multimap<SkyKey, PathFragment> packageLookupKeysToCurrent = ArrayListMultimap.create();
        for (Entry<PathFragment, PathFragment> entry : currentToOriginal.entries()) {
            PathFragment current = entry.getKey();
            PathFragment original = entry.getValue();
            for (SkyKey packageLookupKey : getPkgLookupKeysForFile(original, current)) {
                packageLookupKeysToOriginal.put(packageLookupKey, original);
                packageLookupKeysToCurrent.put(packageLookupKey, current);
            }
        }
        Map<SkyKey, SkyValue> lookupValues = graph.getSuccessfulValues(packageLookupKeysToOriginal.keySet());
        for (Map.Entry<SkyKey, SkyValue> entry : lookupValues.entrySet()) {
            SkyKey packageLookupKey = entry.getKey();
            PackageLookupValue packageLookupValue = (PackageLookupValue) entry.getValue();
            if (packageLookupValue.packageExists()) {
                Collection<PathFragment> originalFiles = packageLookupKeysToOriginal.get(packageLookupKey);
                Preconditions.checkState(!originalFiles.isEmpty(), entry);
                for (PathFragment fileName : originalFiles) {
                    result.add(FileValue.key(RootedPath.toRootedPath(packageLookupValue.getRoot(), fileName)));
                }
                for (PathFragment current : packageLookupKeysToCurrent.get(packageLookupKey)) {
                    currentToOriginal.removeAll(current);
                }
            }
        }
        Multimap<PathFragment, PathFragment> newCurrentToOriginal = ArrayListMultimap.create();
        for (PathFragment pathFragment : currentToOriginal.keySet()) {
            PathFragment parent = pathFragment.getParentDirectory();
            if (parent != null) {
                newCurrentToOriginal.putAll(parent, currentToOriginal.get(pathFragment));
            }
        }
        currentToOriginal = newCurrentToOriginal;
    }
    return result;
}

From source file:com.palantir.atlasdb.keyvalue.cassandra.CassandraKeyValueService.java

@Override
public void putWithTimestamps(String tableName, Multimap<Cell, Value> values) {
    try {
        putInternal(tableName, values.entries());
    } catch (Exception e) {
        throw Throwables.throwUncheckedException(e);
    }
}

From source file:de.hzi.helmholtz.Compare.PathwayComparisonUsingModules.java

public void pathwayComparisonGlobalBestGreedy() {
    Multimap<Integer, Multimap<Double, String>> forward = pcompare(source, target); // key: qgeneId, value: {score=tgenecombination;...}
    Multimap<Integer, Multimap<Double, String>> reverse = pcompare(target, source);

    /* Create global list of matchings ordered by score by joining forward and reverse lists
     * key: querygene -> targetgenes
     * value: score
     */
    TreeMultimap<Double, String> globalMap = TreeMultimap.create(Ordering.natural().reverse(),
            Ordering.natural());
    for (Map.Entry<Integer, Multimap<Double, String>> e : forward.entries()) {
        int fgene = e.getKey();
        Multimap<Double, String> geneAndScore = e.getValue();
        for (Map.Entry<Double, String> scoreEntry : geneAndScore.entries()) {
            double score = scoreEntry.getKey();
            String matchingGeneString = scoreEntry.getValue();
            String[] multipleMatchingGenes = matchingGeneString.split(",");
            for (String matchingGene : multipleMatchingGenes) {
                String newKey = fgene + "->" + matchingGene;
                globalMap.put(score, newKey);
            }
        }
    }
    for (Map.Entry<Integer, Multimap<Double, String>> e : reverse.entries()) {
        int rgene = e.getKey();
        Multimap<Double, String> geneAndScore = e.getValue();
        for (Map.Entry<Double, String> scoreEntry : geneAndScore.entries()) {
            double score = scoreEntry.getKey();
            String matchingGeneString = scoreEntry.getValue();
            String[] multipleMatchingGenes = matchingGeneString.split(",");
            for (String matchingGene : multipleMatchingGenes) {
                String newKey = matchingGene + "->" + rgene;
                globalMap.put(score, newKey);
            }
        }
    }

    // create alignment
    //  //////System.out.println(globalMap);
    bestResultMapping = new TreeMap<String, Map<String, Double>>();
    Map<String, Double> matchingInTarget;
    Set<String> queryGenesCovered = new HashSet<String>();
    Set<String> targetGenesCovered = new HashSet<String>();

    for (Map.Entry<Double, String> entry : globalMap.entries()) {
        double score = entry.getKey();
        //score=[alignment1, aligment2, ..]
        String alignment = entry.getValue();

        String bestScoreAlignment = alignment.split(",")[0];
        // start->end
        String start = bestScoreAlignment.split("->")[0];
        String end = bestScoreAlignment.split("->")[1];

        // start and end can be combination of genes
        Set<String> s = new HashSet<String>(Arrays.asList((start + "+").split("\\+")));
        Set<String> t = new HashSet<String>(Arrays.asList((end + "+").split("\\+")));

        // add to result mapping
        Set<String> sIntersection = new HashSet<String>();
        sIntersection.addAll(queryGenesCovered);
        sIntersection.retainAll(s);

        Set<String> tIntersection = new HashSet<String>();
        tIntersection.addAll(targetGenesCovered);
        tIntersection.retainAll(t);

        if (sIntersection.isEmpty() && tIntersection.isEmpty()) {
            matchingInTarget = new HashMap<String, Double>();
            matchingInTarget.put(reconstructWithGeneId(end, tgtGeneIdToPositionMap), score);
            bestResultMapping.put(reconstructWithGeneId(start, srcGeneIdToPositionMap), matchingInTarget);
            queryGenesCovered.addAll(s);
            targetGenesCovered.addAll(t);
        }
    }
    String[] SourcePathwaysID = source.getPathwayId().split("_");
    String[] TargetPathwaysID = target.getPathwayId().split("_");
    //////System.out.println(bestResultMapping);
    Double Bitscore = Calculations4BitScore(bestResultMapping);
    int m = SimpleCompareUsingModules.SizeofTargetPathwaysInDatabase;
    int n = SimpleCompareUsingModules.SizeofQueryPathway;

    Double modifiedBitscore = Bitscore / 5;
    NumberFormat formatter;
    Double value = m * n * kscore * Math.exp((-(lambda * modifiedBitscore)));
    formatter = new DecimalFormat("0.##E0");
    String eval = formatter.format(value);
    System.out.println(eval + "           " + Bitscore);
    maxbitscore = Bitscore;
    // write result to database for further use
    try {
        if (self != 1) {
            todatabase.WriteToDatabase(UniqueJobID, SourcePathwaysID[0].trim(), SourcePathwaysID[1].trim(),
                    TargetPathwaysID[0].trim(), TargetPathwaysID[1].trim(), bestResultMapping.toString(),
                    Bitscore, eval);
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    //     JOptionPane.showMessageDialog(null, bestResultMapping,new Throwable().getStackTrace()[0].getLineNumber() + " sdsdsd321321", JOptionPane.INFORMATION_MESSAGE);
    // //////System.out.println(bestResultMapping);
}

From source file:org.kiji.schema.impl.hbase.HBaseKijiTableReader.java

/**
 * Creates a new <code>HBaseKijiTableReader</code> instance that sends read requests directly to
 * HBase.
 *
 * @param table Kiji table from which to read.
 * @param onDecoderCacheMiss behavior to use when a {@link ColumnReaderSpec} override
 *     specified in a {@link KijiDataRequest} cannot be found in the prebuilt cache of cell
 *     decoders.
 * @param overrides mapping from columns to overriding read behavior for those columns.
 * @param alternatives mapping from columns to reader spec alternatives which the
 *     KijiTableReader will accept as overrides in data requests.
 * @throws IOException on I/O error.
 */
private HBaseKijiTableReader(final HBaseKijiTable table, final OnDecoderCacheMiss onDecoderCacheMiss,
        final Map<KijiColumnName, ColumnReaderSpec> overrides,
        final Multimap<KijiColumnName, ColumnReaderSpec> alternatives) throws IOException {
    mTable = table;
    mOnDecoderCacheMiss = onDecoderCacheMiss;

    final KijiTableLayout layout = mTable.getLayout();
    final Set<KijiColumnName> layoutColumns = layout.getColumnNames();
    final Map<KijiColumnName, BoundColumnReaderSpec> boundOverrides = Maps.newHashMap();
    for (Map.Entry<KijiColumnName, ColumnReaderSpec> override : overrides.entrySet()) {
        final KijiColumnName column = override.getKey();
        if (!layoutColumns.contains(column)
                && !layoutColumns.contains(KijiColumnName.create(column.getFamily()))) {
            throw new NoSuchColumnException(
                    String.format("KijiTableLayout: %s does not contain column: %s", layout, column));
        } else {
            boundOverrides.put(column, BoundColumnReaderSpec.create(override.getValue(), column));
        }
    }
    mOverrides = boundOverrides;
    final Collection<BoundColumnReaderSpec> boundAlternatives = Sets.newHashSet();
    for (Map.Entry<KijiColumnName, ColumnReaderSpec> altsEntry : alternatives.entries()) {
        final KijiColumnName column = altsEntry.getKey();
        if (!layoutColumns.contains(column)
                && !layoutColumns.contains(KijiColumnName.create(column.getFamily()))) {
            throw new NoSuchColumnException(
                    String.format("KijiTableLayout: %s does not contain column: %s", layout, column));
        } else {
            boundAlternatives.add(BoundColumnReaderSpec.create(altsEntry.getValue(), altsEntry.getKey()));
        }
    }
    mAlternatives = boundAlternatives;
    mCellSpecOverrides = null;

    mLayoutConsumerRegistration = mTable.registerLayoutConsumer(new InnerLayoutUpdater());
    Preconditions.checkState(mReaderLayoutCapsule != null,
            "KijiTableReader for table: %s failed to initialize.", mTable.getURI());

    // Retain the table only when everything succeeds.
    mTable.retain();
    final State oldState = mState.getAndSet(State.OPEN);
    Preconditions.checkState(oldState == State.UNINITIALIZED,
            "Cannot open KijiTableReader instance in state %s.", oldState);
    DebugResourceTracker.get().registerResource(this);
}

From source file:org.spongepowered.api.util.reflect.AccessorFirstStrategy.java

@Override
public ImmutableSet<? extends Property> findProperties(final Class<?> type) {
    checkNotNull(type, "type");

    final Multimap<String, Method> accessors = HashMultimap.create();
    final Multimap<String, Method> mutators = HashMultimap.create();
    final Queue<Class<?>> queue = new NonNullUniqueQueue<Class<?>>();
    final Map<String, Method> accessorHierarchyBottoms = new HashMap<String, Method>();

    queue.add(type); // Start off with our target type

    Class<?> scannedType;
    while ((scannedType = queue.poll()) != null) {
        for (Method method : scannedType.getMethods()) {
            String name;

            Method leastSpecificMethod;
            if ((name = getAccessorName(method)) != null
                    && ((leastSpecificMethod = accessorHierarchyBottoms.get(name)) == null
                            || leastSpecificMethod.getReturnType() != method.getReturnType())) {
                accessors.put(name, method);
                if (accessorHierarchyBottoms.get(name) == null || method.getReturnType()
                        .isAssignableFrom(accessorHierarchyBottoms.get(name).getReturnType())) {
                    accessorHierarchyBottoms.put(name, method);
                }
            } else if ((name = getMutatorName(method)) != null) {
                mutators.put(name, method);
            }
        }

        for (Class<?> implInterfaces : scannedType.getInterfaces()) {
            queue.offer(implInterfaces);
        }
        queue.offer(scannedType.getSuperclass());
    }

    final ImmutableSet.Builder<Property> result = ImmutableSet.builder();

    for (Map.Entry<String, Method> entry : accessors.entries()) {
        Method accessor = entry.getValue();
        @Nullable
        Method mutator = findMutator(entry.getValue(), mutators.get(entry.getKey()));
        result.add(new Property(entry.getKey(), accessor.getReturnType(),
                accessorHierarchyBottoms.get(entry.getKey()), accessor, mutator));
    }

    return result.build();
}

From source file:org.lanternpowered.server.world.chunk.LanternChunkManager.java

public void loadTickets() throws IOException {
    final Multimap<String, LanternLoadingTicket> tickets = LanternLoadingTicketIO.load(this.worldFolder, this,
            this.chunkLoadService);
    final Iterator<Entry<String, LanternLoadingTicket>> it = tickets.entries().iterator();
    while (it.hasNext()) {
        final LanternLoadingTicket ticket = it.next().getValue();
        if (ticket instanceof LanternEntityLoadingTicket) {
            final LanternEntityLoadingTicket ticket0 = (LanternEntityLoadingTicket) ticket;
            final EntityReference ref = ticket0.getEntityReference().orElse(null);
            if (ref != null) {
                final LanternChunk chunk = getOrCreateChunk(ref.getChunkCoords(),
                        () -> Cause.source(ticket0).owner(this.world).build(), true, true);
                final Entity entity = chunk.getEntity(ref.getUniqueId()).orElse(null);
                if (entity != null) {
                    ticket0.bindToEntity(entity);
                } else {
                    // The entity is gone?
                    it.remove();
                }
            } else {
                // The entity is gone?
                it.remove();
            }
        }
    }
    for (Entry<String, Collection<LanternLoadingTicket>> entry : tickets.asMap().entrySet()) {
        final Collection<ChunkTicketManager.Callback> callbacks = this.chunkLoadService.getCallbacks()
                .get(entry.getKey());

        // These maps will be loaded lazily
        ImmutableListMultimap<UUID, LoadingTicket> playerLoadedTickets = null;
        ImmutableList<LoadingTicket> nonPlayerLoadedTickets = null;

        final Set<LoadingTicket> resultPlayerLoadedTickets = entry.getValue().stream()
                .filter(ticket -> ticket instanceof PlayerLoadingTicket).collect(Collectors.toSet());
        final Set<LoadingTicket> resultNonPlayerLoadedTickets = entry.getValue().stream()
                .filter(ticket -> !(ticket instanceof PlayerLoadingTicket)).collect(Collectors.toSet());

        final int maxTickets = this.chunkLoadService.getMaxTicketsById(entry.getKey());

        for (ChunkTicketManager.Callback callback : callbacks) {
            if (callback instanceof ChunkTicketManager.OrderedCallback) {
                if (nonPlayerLoadedTickets == null) {
                    nonPlayerLoadedTickets = ImmutableList.copyOf(resultNonPlayerLoadedTickets);
                    resultNonPlayerLoadedTickets.clear();
                }
                final List<LoadingTicket> result = ((ChunkTicketManager.OrderedCallback) callback)
                        .onLoaded(nonPlayerLoadedTickets, this.world, maxTickets);
                checkNotNull(result,
                        "The OrderedCallback#onLoaded method may not return null, "
                                + "error caused by (plugin=%s, clazz=%s)",
                        entry.getKey(), callback.getClass().getName());
                resultNonPlayerLoadedTickets.addAll(result);
            }
            if (callback instanceof ChunkTicketManager.PlayerOrderedCallback) {
                if (playerLoadedTickets == null) {
                    final ImmutableListMultimap.Builder<UUID, LoadingTicket> mapBuilder = ImmutableListMultimap
                            .builder();
                    resultPlayerLoadedTickets.forEach(ticket -> mapBuilder
                            .put(((PlayerLoadingTicket) ticket).getPlayerUniqueId(), ticket));
                    resultPlayerLoadedTickets.clear();
                    playerLoadedTickets = mapBuilder.build();
                }
                final ListMultimap<UUID, LoadingTicket> result = ((ChunkTicketManager.PlayerOrderedCallback) callback)
                        .onPlayerLoaded(playerLoadedTickets, this.world);
                checkNotNull(result,
                        "The PlayerOrderedCallback#onPlayerLoaded method may not return null, "
                                + "error caused by (plugin=%s, clazz=%s)",
                        entry.getKey(), callback.getClass().getName());
                resultPlayerLoadedTickets.addAll(result.values());
            }
        }

        final List<LoadingTicket> resultLoadedTickets = new ArrayList<>();
        resultLoadedTickets.addAll(resultPlayerLoadedTickets);
        resultLoadedTickets.addAll(resultNonPlayerLoadedTickets);

        // Let's see how many plugins attempted to add loading tickets
        final int sizeA = resultLoadedTickets.size();
        resultLoadedTickets.retainAll(entry.getValue());
        final int sizeB = resultLoadedTickets.size();

        if (sizeA != sizeB) {
            Lantern.getLogger().warn(
                    "The plugin {} attempted to add LoadingTicket's that were previously not present.",
                    entry.getKey());
        }

        // Remove all the tickets that are already released
        resultLoadedTickets.removeIf(ticket -> ((ChunkLoadingTicket) ticket).isReleased());

        if (resultLoadedTickets.size() > maxTickets) {
            Lantern.getLogger().warn(
                    "The plugin {} has too many open chunk loading tickets {}. " + "Excess will be dropped",
                    entry.getKey(), resultLoadedTickets.size());
            resultLoadedTickets.subList(maxTickets, resultLoadedTickets.size()).clear();
        }

        // Release all the tickets that were no longer usable
        final List<LoadingTicket> removedTickets = new ArrayList<>(entry.getValue());
        removedTickets.removeAll(resultLoadedTickets);
        removedTickets.forEach(LoadingTicket::release);

        final ImmutableList<LoadingTicket> loadedTickets = ImmutableList.copyOf(resultLoadedTickets);
        for (ChunkTicketManager.Callback callback : callbacks) {
            callback.onLoaded(loadedTickets, this.world);
        }
    }
}

From source file:com.moz.fiji.schema.impl.hbase.HBaseFijiTableReader.java

/**
 * Creates a new <code>HBaseFijiTableReader</code> instance that sends read requests directly to
 * HBase.
 *
 * @param table Fiji table from which to read.
 * @param onDecoderCacheMiss behavior to use when a {@link ColumnReaderSpec} override
 *     specified in a {@link FijiDataRequest} cannot be found in the prebuilt cache of cell
 *     decoders.
 * @param overrides mapping from columns to overriding read behavior for those columns.
 * @param alternatives mapping from columns to reader spec alternatives which the
 *     FijiTableReader will accept as overrides in data requests.
 * @throws IOException on I/O error.
 */
private HBaseFijiTableReader(final HBaseFijiTable table, final OnDecoderCacheMiss onDecoderCacheMiss,
        final Map<FijiColumnName, ColumnReaderSpec> overrides,
        final Multimap<FijiColumnName, ColumnReaderSpec> alternatives) throws IOException {
    mTable = table;
    mOnDecoderCacheMiss = onDecoderCacheMiss;

    final FijiTableLayout layout = mTable.getLayout();
    final Set<FijiColumnName> layoutColumns = layout.getColumnNames();
    final Map<FijiColumnName, BoundColumnReaderSpec> boundOverrides = Maps.newHashMap();
    for (Map.Entry<FijiColumnName, ColumnReaderSpec> override : overrides.entrySet()) {
        final FijiColumnName column = override.getKey();
        if (!layoutColumns.contains(column)
                && !layoutColumns.contains(FijiColumnName.create(column.getFamily()))) {
            throw new NoSuchColumnException(
                    String.format("FijiTableLayout: %s does not contain column: %s", layout, column));
        } else {
            boundOverrides.put(column, BoundColumnReaderSpec.create(override.getValue(), column));
        }
    }
    mOverrides = boundOverrides;
    final Collection<BoundColumnReaderSpec> boundAlternatives = Sets.newHashSet();
    for (Map.Entry<FijiColumnName, ColumnReaderSpec> altsEntry : alternatives.entries()) {
        final FijiColumnName column = altsEntry.getKey();
        if (!layoutColumns.contains(column)
                && !layoutColumns.contains(FijiColumnName.create(column.getFamily()))) {
            throw new NoSuchColumnException(
                    String.format("FijiTableLayout: %s does not contain column: %s", layout, column));
        } else {
            boundAlternatives.add(BoundColumnReaderSpec.create(altsEntry.getValue(), altsEntry.getKey()));
        }
    }
    mAlternatives = boundAlternatives;
    mCellSpecOverrides = null;

    mLayoutConsumerRegistration = mTable.registerLayoutConsumer(new InnerLayoutUpdater());
    Preconditions.checkState(mReaderLayoutCapsule != null,
            "FijiTableReader for table: %s failed to initialize.", mTable.getURI());

    // Retain the table only when everything succeeds.
    mTable.retain();
    final State oldState = mState.getAndSet(State.OPEN);
    Preconditions.checkState(oldState == State.UNINITIALIZED,
            "Cannot open FijiTableReader instance in state %s.", oldState);
    ResourceTracker.get().registerResource(this);
}

From source file:com.yahoo.pulsar.broker.loadbalance.impl.SimpleLoadManagerImpl.java

private ResourceUnit getLeastLoadedBroker(ServiceUnitId serviceUnit,
        Map<Long, Set<ResourceUnit>> availableBrokers) {
    ResourceUnit selectedBroker = null;
    Multimap<Long, ResourceUnit> finalCandidates = getFinalCandidates(serviceUnit, availableBrokers);
    // Remove candidates that point to inactive brokers
    Set<String> activeBrokers = Collections.emptySet();
    try {
        activeBrokers = availableActiveBrokers.get();
        // Need to use an explicit Iterator object to prevent concurrent modification exceptions
        Iterator<Map.Entry<Long, ResourceUnit>> candidateIterator = finalCandidates.entries().iterator();
        while (candidateIterator.hasNext()) {
            Map.Entry<Long, ResourceUnit> candidate = candidateIterator.next();
            String candidateBrokerName = candidate.getValue().getResourceId().replace("http://", "");
            if (!activeBrokers.contains(candidateBrokerName)) {
                candidateIterator.remove(); // Current candidate points to an inactive broker, so remove it
            }
        }
    } catch (Exception e) {
        log.warn("Error during attempt to remove inactive brokers while searching for least active broker", e);
    }

    if (finalCandidates.size() > 0) {
        if (this.getLoadBalancerPlacementStrategy().equals(LOADBALANCER_STRATEGY_LLS)) {
            selectedBroker = findBrokerForPlacement(finalCandidates, serviceUnit);
        } else {
            selectedBroker = placementStrategy.findBrokerForPlacement(finalCandidates);
        }
        log.debug("Selected : [{}] for ServiceUnit : [{}]", selectedBroker.getResourceId(),
                serviceUnit.getNamespaceObject().toString());
        return selectedBroker;
    } else {
        // No available broker found
        log.warn("No broker available to acquire service unit: [{}]", serviceUnit);
        return null;
    }
}
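
The Lantern and Pulsar examples above both drop candidates through the iterator of the entries() view rather than modifying the multimap directly while iterating. A minimal sketch of that pattern, with a hypothetical "inactive broker" check standing in for the real conditions and illustrative keys and values:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

import java.util.Iterator;
import java.util.Map;

public class EntriesViewRemovalExample {
    public static void main(String[] args) {
        Multimap<Long, String> candidates = ArrayListMultimap.create();
        candidates.put(1L, "broker-a");
        candidates.put(1L, "broker-b");
        candidates.put(2L, "broker-c");

        // Removing through the entries() iterator also removes the mapping from the
        // multimap and avoids a ConcurrentModificationException during iteration.
        Iterator<Map.Entry<Long, String>> it = candidates.entries().iterator();
        while (it.hasNext()) {
            Map.Entry<Long, String> entry = it.next();
            if ("broker-b".equals(entry.getValue())) { // hypothetical "inactive broker" check
                it.remove();
            }
        }

        System.out.println(candidates); // {1=[broker-a], 2=[broker-c]}
    }
}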