Example usage for com.google.common.collect ImmutableMap get

Introduction

This page lists example usages of com.google.common.collect.ImmutableMap#get, drawn from open-source projects.

Prototype

V get(Object key);

Document

Returns the value to which the specified key is mapped, or null if this map contains no mapping for the key.
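
Because ImmutableMap permits neither null keys nor null values, a null return from get always means the key is absent. The sketch below illustrates the contract; the map contents are chosen purely for illustration.

import com.google.common.collect.ImmutableMap;

public class ImmutableMapGetExample {
    public static void main(String[] args) {
        ImmutableMap<String, Integer> ports = ImmutableMap.of("http", 80, "https", 443);

        // Present key: get returns the mapped value.
        Integer http = ports.get("http"); // 80

        // Absent key: get returns null, so callers must null-check
        // (or use getOrDefault, inherited from java.util.Map).
        Integer ftp = ports.get("ftp"); // null

        System.out.println(http + ", " + ftp);
    }
}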

Usage

From source file: com.pinterest.pinlater.backends.mysql.PinLaterMySQLBackend.java

@Override
protected List<PinLaterJobInfo> scanJobsFromShard(final String queueName, final String shardName,
        final Set<Integer> priorities, final PinLaterJobState jobState, final boolean scanFutureJobs,
        final String continuation, final int limit, final String bodyRegexTomatch) throws Exception {
    final String scanQuery = scanFutureJobs ? MySQLQueries.SCAN_FUTURE_JOBS : MySQLQueries.SCAN_CURRENT_JOBS;
    Connection conn = null;
    try {
        ImmutableMap<String, MySQLDataSources> shardMap = shardMapRef.get();
        conn = shardMap.get(shardName).getGeneralDataSource().getConnection();

        // First scan some jobs for the specified priorities.
        List<List<PinLaterJobInfo>> jobsPerPriority = Lists.newArrayListWithCapacity(priorities.size());
        for (final int priority : priorities) {
            jobsPerPriority.add(JdbcUtils.select(conn,
                    String.format(scanQuery,
                            MySQLBackendUtils.constructJobsTableName(queueName, shardName, priority),
                            getBodyRegexClause(bodyRegexTomatch)),
                    new RowProcessor<PinLaterJobInfo>() {
                        @Override
                        public PinLaterJobInfo process(ResultSet rs) throws IOException, SQLException {
                            PinLaterJobInfo ji = new PinLaterJobInfo();
                            ji.setJobDescriptor(
                                    new PinLaterJobDescriptor(queueName, shardName, priority, rs.getLong(1))
                                            .toString());
                            String claimDescriptor = rs.getString(2);
                            if (claimDescriptor != null) {
                                ji.setClaimDescriptor(claimDescriptor);
                            }
                            ji.setAttemptsAllowed(rs.getInt(3));
                            ji.setAttemptsRemaining(rs.getInt(4));
                            ji.setCustomStatus(Strings.nullToEmpty(rs.getString(5)));
                            ji.setCreatedAtTimestampMillis(rs.getTimestamp(6).getTime());
                            ji.setRunAfterTimestampMillis(rs.getTimestamp(7).getTime());
                            ji.setUpdatedAtTimestampMillis(rs.getTimestamp(8).getTime());
                            ji.setJobState(jobState);
                            return ji;
                        }
                    }, jobState.getValue(), limit));
        }

        // Merge jobsPerPriority and return the merged result.
        return PinLaterBackendUtils.mergeIntoList(jobsPerPriority,
                PinLaterBackendUtils.JobInfoComparator.getInstance());
    } finally {
        JdbcUtils.closeConnection(conn);
    }
}

From source file: com.pinterest.pinlater.backends.mysql.PinLaterMySQLBackend.java

@Override
protected void ackSingleJob(final String queueName, boolean succeeded, PinLaterJobAckInfo jobAckInfo,
        int numAutoRetries) throws SQLException, PinLaterException {
    PinLaterJobDescriptor jobDesc = new PinLaterJobDescriptor(jobAckInfo.getJobDescriptor());
    String jobsTableName = MySQLBackendUtils.constructJobsTableName(queueName, jobDesc.getShardName(),
            jobDesc.getPriority());
    Connection conn = null;
    try {
        ImmutableMap<String, MySQLDataSources> shardMap = shardMapRef.get();
        conn = shardMap.get(jobDesc.getShardName()).getGeneralDataSource().getConnection();

        if (succeeded) {
            // Handle succeeded job: change state to succeeded and append custom status.
            JdbcUtils.executeUpdate(conn, String.format(MySQLQueries.ACK_SUCCEEDED_UPDATE, jobsTableName),
                    PinLaterJobState.SUCCEEDED.getValue(), jobAckInfo.getAppendCustomStatus(),
                    jobDesc.getLocalId());
        } else {
            // Handle failed job. Depending on whether the job has attempts remaining, we need
            // to either move it to pending or failed, and append custom status either way.
            // We do this by running two queries, first the 'failed done' one and then the
            // 'failed retry', so that the appropriate status change happens.
            JdbcUtils.executeUpdate(conn, String.format(MySQLQueries.ACK_FAILED_DONE_UPDATE, jobsTableName),
                    PinLaterJobState.FAILED.getValue(), jobAckInfo.getAppendCustomStatus(),
                    jobDesc.getLocalId(), PinLaterJobState.IN_PROGRESS.getValue());
            JdbcUtils.executeUpdate(conn, String.format(MySQLQueries.ACK_FAILED_RETRY_UPDATE, jobsTableName),
                    PinLaterJobState.PENDING.getValue(), jobAckInfo.getAppendCustomStatus(),
                    new Timestamp(System.currentTimeMillis() + jobAckInfo.getRetryDelayMillis()),
                    jobDesc.getLocalId(), PinLaterJobState.IN_PROGRESS.getValue());
        }
    } catch (SQLException e) {
        boolean shouldRetry = checkExceptionIsRetriable(e, jobDesc.getShardName(), "ack");
        if (shouldRetry && numAutoRetries > 0) {
            // Retry on the same shard.
            Stats.incr("ack-failures-retry");
            ackSingleJob(queueName, succeeded, jobAckInfo, numAutoRetries - 1);
            return;
        }
        // Out of retries, throw the exception. Wrap it into a PinLaterException if the exception
        // is recognized and return the appropriate error code.
        if (MySQLBackendUtils.isDatabaseDoesNotExistException(e)) {
            throw new PinLaterException(ErrorCode.QUEUE_NOT_FOUND, "Queue not found: " + queueName);
        }
        throw e;
    } finally {
        JdbcUtils.closeConnection(conn);
    }
}

From source file: com.facebook.buck.config.Config.java

public boolean equalsIgnoring(Config other, ImmutableMap<String, ImmutableSet<String>> ignoredFields) {
    if (this == other) {
        return true;
    }
    ImmutableMap<String, ImmutableMap<String, String>> left = this.getSectionToEntries();
    ImmutableMap<String, ImmutableMap<String, String>> right = other.getSectionToEntries();
    Sets.SetView<String> sections = Sets.union(left.keySet(), right.keySet());
    for (String section : sections) {
        ImmutableMap<String, String> leftFields = left.get(section);
        ImmutableMap<String, String> rightFields = right.get(section);
        if (leftFields == null || rightFields == null) {
            return false;
        }
        Sets.SetView<String> fields = Sets.difference(Sets.union(leftFields.keySet(), rightFields.keySet()),
                Optional.ofNullable(ignoredFields.get(section)).orElse(ImmutableSet.of()));
        for (String field : fields) {
            String leftValue = leftFields.get(field);
            String rightValue = rightFields.get(field);
            if (leftValue == null || rightValue == null || !leftValue.equals(rightValue)) {
                return false;
            }
        }
    }
    return true;
}
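
The equalsIgnoring example above shows two common idioms for handling get's null return: an explicit null check when a missing section should fail the comparison, and Optional.ofNullable(...).orElse(...) to substitute a default. A minimal sketch of the Optional idiom, with illustrative names:

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import java.util.Optional;

public class IgnoredFieldsLookup {
    public static void main(String[] args) {
        ImmutableMap<String, ImmutableSet<String>> ignoredFields = ImmutableMap.of("cache",
                ImmutableSet.of("dir"));

        // Substitute an empty set when the section has no ignored fields,
        // so downstream set operations need no null check.
        ImmutableSet<String> ignored = Optional.ofNullable(ignoredFields.get("parser"))
                .orElse(ImmutableSet.of());

        System.out.println(ignored.isEmpty()); // true: "parser" has no overrides
    }
}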

From source file: dagger2.internal.codegen.ProducerFactoryGenerator.java

private ImmutableList<Snippet> getParameterSnippets(ProductionBinding binding,
        ImmutableMap<BindingKey, FrameworkField> fields, String listArgName) {
    int argIndex = 0;
    ImmutableList.Builder<Snippet> snippets = ImmutableList.builder();
    for (DependencyRequest dependency : binding.dependencies()) {
        if (isAsyncDependency(dependency)) {
            snippets.add(
                    Snippet.format("(%s) %s.get(%s)", asyncDependencyType(dependency), listArgName, argIndex));
            argIndex++;
        } else {
            snippets.add(frameworkTypeUsageStatement(Snippet.format(fields.get(dependency.bindingKey()).name()),
                    dependency.kind()));
        }
    }
    return snippets.build();
}

From source file: com.pinterest.pinlater.backends.mysql.PinLaterMySQLBackend.java

@Override
protected PinLaterJobInfo lookupJobFromShard(final String queueName, final String shardName, final int priority,
        final long localId, final boolean isIncludeBody) throws Exception {
    final String mySQLQuery = isIncludeBody ? MySQLQueries.LOOKUP_JOB_WITH_BODY : MySQLQueries.LOOKUP_JOB;
    String jobsTableName = MySQLBackendUtils.constructJobsTableName(queueName, shardName, priority);
    Connection conn = null;
    ImmutableMap<String, MySQLDataSources> shardMap = shardMapRef.get();
    try {
        conn = shardMap.get(shardName).getGeneralDataSource().getConnection();
        PinLaterJobInfo jobInfo = JdbcUtils.selectOne(conn, String.format(mySQLQuery, jobsTableName),
                new RowProcessor<PinLaterJobInfo>() {
                    @Override
                    public PinLaterJobInfo process(ResultSet rs) throws IOException, SQLException {
                        PinLaterJobInfo ji = new PinLaterJobInfo();
                        ji.setJobDescriptor(
                                new PinLaterJobDescriptor(queueName, shardName, priority, rs.getLong(1))
                                        .toString());
                        ji.setJobState(PinLaterJobState.findByValue(rs.getInt(2)));
                        ji.setAttemptsAllowed(rs.getInt(3));
                        ji.setAttemptsRemaining(rs.getInt(4));
                        ji.setCreatedAtTimestampMillis(rs.getTimestamp(5).getTime());
                        ji.setRunAfterTimestampMillis(rs.getTimestamp(6).getTime());
                        ji.setUpdatedAtTimestampMillis(rs.getTimestamp(7).getTime());
                        String claimDescriptor = rs.getString(8);
                        if (claimDescriptor != null) {
                            ji.setClaimDescriptor(claimDescriptor);
                        }
                        ji.setCustomStatus(Strings.nullToEmpty(rs.getString(9)));
                        if (isIncludeBody) {
                            ji.setBody(rs.getBytes(10));
                        }
                        return ji;
                    }
                }, localId);
        return jobInfo;
    } finally {
        JdbcUtils.closeConnection(conn);
    }
}

From source file: paperparcel.PaperParcelWriter.java

private FieldSpec initModel(final ClassName className, final UniqueNameSet readNames,
        final ImmutableMap<String, FieldSpec> fieldMap) {

    ImmutableList<FieldDescriptor> constructorFields = descriptor.constructorFields();
    CodeBlock constructorParameterList = CodeBlocks
            .join(FluentIterable.from(constructorFields).transform(new Function<FieldDescriptor, CodeBlock>() {
                @Override
                public CodeBlock apply(FieldDescriptor field) {
                    return CodeBlock.of("$N", fieldMap.get(field.name()));
                }
            }), ", ");

    CodeBlock initializer;
    if (descriptor.isConstructorVisible()) {
        initializer = CodeBlock.of("new $T($L)", className, constructorParameterList);
    } else {
        // Constructor is private, init via reflection
        CodeBlock constructorArgClassList = CodeBlocks.join(
                FluentIterable.from(constructorFields).transform(new Function<FieldDescriptor, CodeBlock>() {
                    @Override
                    public CodeBlock apply(FieldDescriptor field) {
                        return CodeBlock.of("$T.class", rawTypeFrom(field.type().get()));
                    }
                }), ", ");
        initializer = CodeBlock.of("$T.init($T.class, new Class[] { $L }, new Object[] { $L })", UTILS,
                className, constructorArgClassList, constructorParameterList);
    }

    return FieldSpec.builder(className, readNames.getUniqueName("data")).initializer(initializer).build();
}

From source file: com.pinterest.pinlater.backends.mysql.PinLaterMySQLBackend.java

@Override
protected PinLaterDequeueResponse dequeueJobsFromShard(final String queueName, final String shardName,
        final int priority, String claimDescriptor, int jobsNeeded, int numAutoRetries, boolean dryRun)
        throws IOException, SQLException, PinLaterException {
    String jobsTableName = MySQLBackendUtils.constructJobsTableName(queueName, shardName, priority);
    Connection conn = null;
    PinLaterDequeueResponse shardResponse = new PinLaterDequeueResponse();
    final long currentTimeMillis = System.currentTimeMillis();

    if (!mySQLHealthMonitor.isHealthy(shardName)) {
        LOG.debug("Skipping unhealthy shard on dequeue: " + shardName);
        Stats.incr("mysql-unhealthy-shard-dequeue");
        return shardResponse;
    }

    try {
        ImmutableMap<String, MySQLDataSources> shardMap = shardMapRef.get();
        conn = shardMap.get(shardName).getGeneralDataSource().getConnection();
        RowProcessor<Tuple6<String, Integer, Integer, Timestamp, Timestamp, ByteBuffer>> dequeueRowProcessor = constructDequeueRowProcessor(
                queueName, shardName, priority);

        if (dryRun) {
            // If this is a dry run, just retrieve the relevant pending jobs.
            List<Tuple6<String, Integer, Integer, Timestamp, Timestamp, ByteBuffer>> resultRows = JdbcUtils
                    .select(conn, String.format(MySQLQueries.DEQUEUE_DRY_RUN_SELECT, jobsTableName),
                            dequeueRowProcessor, PinLaterJobState.PENDING.getValue(), jobsNeeded);
            shardResponse = convertResultsIntoDequeueResponse(resultRows);
        } else {
            // If not a dry run, then we'll want to actually update the job state and claimDescriptor.
            int rowsUpdated = JdbcUtils.executeUpdate(conn,
                    String.format(MySQLQueries.DEQUEUE_UPDATE, jobsTableName), claimDescriptor,
                    PinLaterJobState.IN_PROGRESS.getValue(), PinLaterJobState.PENDING.getValue(), jobsNeeded);

            if (rowsUpdated > 0) {
                List<Tuple6<String, Integer, Integer, Timestamp, Timestamp, ByteBuffer>> resultRows = JdbcUtils
                        .select(conn, String.format(MySQLQueries.DEQUEUE_SELECT, jobsTableName),
                                dequeueRowProcessor, claimDescriptor);
                for (Tuple6<String, Integer, Integer, Timestamp, Timestamp, ByteBuffer> tuple : resultRows) {
                    int attemptsAllowed = tuple._2();
                    int attemptsRemaining = tuple._3();
                    long updatedAtMillis = tuple._4().getTime();
                    long createdAtMillis = tuple._5().getTime();
                    if (attemptsAllowed == attemptsRemaining) {
                        Stats.addMetric(String.format("%s_first_dequeue_delay_ms", queueName),
                                (int) (currentTimeMillis - createdAtMillis));
                    }
                    Stats.addMetric(String.format("%s_dequeue_delay_ms", queueName),
                            (int) (currentTimeMillis - updatedAtMillis));
                }
                shardResponse = convertResultsIntoDequeueResponse(resultRows);
            }
        }

        mySQLHealthMonitor.recordSample(shardName, true);
    } catch (SQLException e) {
        mySQLHealthMonitor.recordSample(shardName, false);
        boolean shouldRetry = checkExceptionIsRetriable(e, shardName, "dequeue");
        if (shouldRetry && numAutoRetries > 0) {
            // Retry on the same shard.
            Stats.incr("dequeue-failures-retry");
            return dequeueJobsFromShard(queueName, shardName, priority, claimDescriptor, jobsNeeded,
                    numAutoRetries - 1, dryRun);
        }
        // Out of retries, throw the exception. Wrap it into a PinLaterException if the exception
        // is recognized and return the appropriate error code.
        if (MySQLBackendUtils.isDatabaseDoesNotExistException(e)) {
            throw new PinLaterException(ErrorCode.QUEUE_NOT_FOUND, "Queue not found: " + queueName);
        }
        throw e;
    } finally {
        JdbcUtils.closeConnection(conn);
    }

    return shardResponse;
}

From source file: grakn.core.graql.gremlin.TraversalPlanner.java

/**
 * Create a plan using Edmonds' algorithm with a greedy approach to execute a single conjunction
 *
 * @param query the conjunction query to find a traversal plan
 * @return a semi-optimal traversal plan to execute the given conjunction
 */
private static List<Fragment> planForConjunction(ConjunctionQuery query, TransactionOLTP tx) {
    // a query plan is an ordered list of fragments
    final List<Fragment> plan = new ArrayList<>();

    // flatten all the possible fragments from the conjunction query (these become edges in the query graph)
    final Set<Fragment> allFragments = query.getEquivalentFragmentSets().stream()
            .flatMap(EquivalentFragmentSet::stream).collect(Collectors.toSet());

    // if role players' types are known, we can infer the types of the relation, adding label & isa fragments
    Set<Fragment> inferredFragments = inferRelationTypes(tx, allFragments);
    allFragments.addAll(inferredFragments);

    // convert fragments into nodes - some fragments create virtual middle nodes to ensure the Janus edge is traversed
    ImmutableMap<NodeId, Node> queryGraphNodes = buildNodesWithDependencies(allFragments);

    // it's possible that some (or all) fragments are disconnected, e.g. $x isa person; $y isa dog;
    Collection<Set<Fragment>> connectedFragmentSets = getConnectedFragmentSets(allFragments);

    // build a query plan for each query subgraph separately
    for (Set<Fragment> connectedFragments : connectedFragmentSets) {
        // one of two cases - either we have a connected graph with more than one node, which is used to compute an MST, or exactly one node
        Arborescence<Node> subgraphArborescence = computeArborescence(connectedFragments, queryGraphNodes, tx);
        if (subgraphArborescence != null) {
            // collect the mapping from directed edge back to fragments -- inverse operation of creating virtual middle nodes
            Map<Node, Map<Node, Fragment>> middleNodeFragmentMapping = virtualMiddleNodeToFragmentMapping(
                    connectedFragments, queryGraphNodes);
            List<Fragment> subplan = GreedyTreeTraversal.greedyTraversal(subgraphArborescence, queryGraphNodes,
                    middleNodeFragmentMapping);
            plan.addAll(subplan);
        } else {
            // find and include all the nodes not touched in the MST in the plan
            Set<Node> unhandledNodes = connectedFragments.stream()
                    .flatMap(fragment -> fragment.getNodes().stream())
                    .map(node -> queryGraphNodes.get(node.getNodeId())).collect(Collectors.toSet());
            if (unhandledNodes.size() != 1) {
                throw GraknServerException
                        .create("Query planner exception - expected one unhandled node, found "
                                + unhandledNodes.size());
            }
            plan.addAll(nodeVisitedDependenciesFragments(Iterators.getOnlyElement(unhandledNodes.iterator()),
                    queryGraphNodes));
        }
    }

    // this shouldn't be necessary, but we keep it just in case of an edge case that we haven't thought of
    List<Fragment> remainingFragments = fragmentsForUnvisitedNodes(queryGraphNodes, queryGraphNodes.values());
    if (remainingFragments.size() > 0) {
        LOG.warn("Expected all fragments to be handled, but found these: " + remainingFragments);
        plan.addAll(remainingFragments);
    }

    LOG.trace("Greedy Plan = {}", plan);
    return plan;
}

From source file: edu.mit.streamjit.impl.compiler2.WorkerActor.java

/**
 * Sets up Actor connections based on the worker's predecessor/successor
 * relationships, creating TokenActors and Storages as required.  This
 * method depends on all Storage objects initially being single-input,
 * single-output, and all Tokens being single-input, single-output (which
 * they should always be by their nature).
 * @param workers an immutable map of Workers to their Actors; workers not
 * in the map are not in this blob
 * @param tokens a map of Tokens to their Actors, being constructed as we go
 * @param storage a table of (upstream, downstream) Actor to the Storage
 * that connects them, being constructed as we go
 * @param inputTokenId a single-element int array (an int passed by reference) holding the
 * next input TokenActor id, to be incremented after use
 * @param outputTokenId a single-element int array (an int passed by reference) holding the
 * next output TokenActor id, to be decremented after use
 */
public void connect(ImmutableMap<Worker<?, ?>, WorkerActor> workers, Map<Token, TokenActor> tokens,
        Table<Actor, Actor, Storage> storage, int[] inputTokenId, int[] outputTokenId) {
    List<? extends Worker<?, ?>> predecessors = Workers.getPredecessors(worker);
    if (predecessors.isEmpty()) {
        Blob.Token t = Blob.Token.createOverallInputToken(worker);
        TokenActor ta = new TokenActor(t, inputTokenId[0]++);
        tokens.put(t, ta);
        Storage s = new Storage(ta, this);
        inputs().add(s);
        ta.outputs().add(s);
        storage.put(ta, this, s);
    }
    for (Worker<?, ?> w : predecessors) {
        Actor pred = workers.get(w);
        if (pred == null) {
            Token t = new Blob.Token(w, worker());
            pred = new TokenActor(t, inputTokenId[0]++);
            tokens.put(t, (TokenActor) pred);
        }
        Storage s = storage.get(pred, this);
        if (s == null) {
            s = new Storage(pred, this);
            storage.put(pred, this, s);
        }
        inputs().add(s);
        if (pred instanceof TokenActor)
            pred.outputs().add(s);
    }

    List<? extends Worker<?, ?>> successors = Workers.getSuccessors(worker);
    if (successors.isEmpty()) {
        Blob.Token t = Blob.Token.createOverallOutputToken(worker);
        TokenActor ta = new TokenActor(t, outputTokenId[0]--);
        tokens.put(t, ta);
        Storage s = new Storage(this, ta);
        outputs().add(s);
        ta.inputs().add(s);
        storage.put(this, ta, s);
    }
    for (Worker<?, ?> w : successors) {
        Actor succ = workers.get(w);
        if (succ == null) {
            Token t = new Blob.Token(worker(), w);
            succ = new TokenActor(t, outputTokenId[0]--);
            tokens.put(t, (TokenActor) succ);
        }
        Storage s = storage.get(this, succ);
        if (s == null) {
            s = new Storage(this, succ);
            storage.put(this, succ, s);
        }
        outputs().add(s);
        if (succ instanceof TokenActor)
            succ.inputs().add(s);
    }

    inputIndexFunctions().addAll(Collections.nCopies(inputs().size(), IndexFunction.identity()));
    outputIndexFunctions().addAll(Collections.nCopies(outputs().size(), IndexFunction.identity()));
}

From source file: com.facebook.buck.core.cell.AbstractCellConfig.java

/**
 * Translates the 'cell name'->override map into a 'Path'->override map.
 *
 * @param pathMapping a map containing paths to all of the cells we want to query.
 * @return 'Path'->override map
 */
public ImmutableMap<Path, RawConfig> getOverridesByPath(ImmutableMap<CellName, Path> pathMapping)
        throws InvalidCellOverrideException {

    ImmutableSet<CellName> relativeNamesOfCellsWithOverrides = FluentIterable.from(getValues().keySet())
            .filter(Predicates.not(CellName.ALL_CELLS_SPECIAL_NAME::equals)).toSet();
    ImmutableSet.Builder<Path> pathsWithOverrides = ImmutableSet.builder();
    for (CellName cellWithOverride : relativeNamesOfCellsWithOverrides) {
        if (!pathMapping.containsKey(cellWithOverride)) {
            throw new InvalidCellOverrideException(
                    String.format("Trying to override settings for unknown cell %s", cellWithOverride));
        }
        pathsWithOverrides.add(pathMapping.get(cellWithOverride));
    }

    ImmutableMultimap<Path, CellName> pathToRelativeName = Multimaps.index(pathMapping.keySet(),
            Functions.forMap(pathMapping));

    for (Path pathWithOverrides : pathsWithOverrides.build()) {
        ImmutableList<CellName> namesForPath = RichStream.from(pathToRelativeName.get(pathWithOverrides))
                .filter(name -> name.getLegacyName().isPresent()).toImmutableList();
        if (namesForPath.size() > 1) {
            throw new InvalidCellOverrideException(
                    String.format("Configuration override is ambiguous: cell rooted at %s is reachable "
                            + "as [%s]. Please override the config by placing a .buckconfig.local file in the "
                            + "cell's root folder.", pathWithOverrides, Joiner.on(',').join(namesForPath)));
        }
    }

    Map<Path, RawConfig> overridesByPath = new HashMap<>();
    for (Map.Entry<CellName, Path> entry : pathMapping.entrySet()) {
        CellName cellRelativeName = entry.getKey();
        Path cellPath = entry.getValue();
        RawConfig configFromOtherRelativeName = overridesByPath.get(cellPath);
        RawConfig config = getForCell(cellRelativeName);
        if (configFromOtherRelativeName != null) {
            // Merge configs
            RawConfig mergedConfig = RawConfig.builder().putAll(configFromOtherRelativeName).putAll(config)
                    .build();
            overridesByPath.put(cellPath, mergedConfig);
        } else {
            overridesByPath.put(cellPath, config);
        }
    }

    return ImmutableMap.copyOf(overridesByPath);
}