Example usage for com.google.common.collect Maps filterKeys

Introduction

This page collects example usage of com.google.common.collect.Maps.filterKeys from real-world projects.

Prototype

@CheckReturnValue
public static <K, V> Map<K, V> filterKeys(Map<K, V> unfiltered, final Predicate<? super K> keyPredicate)

Document

Returns a map containing the mappings in unfiltered whose keys satisfy a predicate.
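As a quick orientation before the real-world examples below, here is a minimal, self-contained sketch of the Map overload (class and variable names are illustrative, not taken from any of the projects below). Note that filterKeys returns a live view of the backing map rather than a copy, so wrap the result in ImmutableMap.copyOf when a snapshot is needed.

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;

import java.util.Map;

public class FilterKeysExample {
    public static void main(String[] args) {
        Map<String, Integer> ports = ImmutableMap.of("http", 80, "https", 443, "ftp", 21);

        // Keep only the entries whose keys appear in the allowed set.
        Map<String, Integer> filtered = Maps.filterKeys(ports,
                Predicates.in(ImmutableSet.of("http", "https")));

        System.out.println(filtered); // prints {http=80, https=443}
    }
}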

Usage

From source file:com.facebook.buck.cxx.CxxLinkableEnhancer.java

/**
 * Construct a {@link CxxLink} rule that builds a native linkable from top-level input objects
 * and a dependency tree of {@link NativeLinkable} dependencies.
 *
 * @param params base params used to build the rule.  Target and deps will be overridden.
 * @param nativeLinkableDeps library dependencies that the linkable links in
 * @param immediateLinkableInput framework and libraries of the linkable itself
 */
public static CxxLink createCxxLinkableBuildRule(CxxBuckConfig cxxBuckConfig, CxxPlatform cxxPlatform,
        BuildRuleParams params, BuildRuleResolver ruleResolver, final SourcePathResolver resolver,
        SourcePathRuleFinder ruleFinder, BuildTarget target, Linker.LinkType linkType, Optional<String> soname,
        Path output, Linker.LinkableDepType depType, Iterable<? extends NativeLinkable> nativeLinkableDeps,
        Optional<Linker.CxxRuntimeType> cxxRuntimeType, Optional<SourcePath> bundleLoader,
        ImmutableSet<BuildTarget> blacklist, NativeLinkableInput immediateLinkableInput)
        throws NoSuchBuildTargetException {

    // Soname should only ever be set when linking a "shared" library.
    Preconditions.checkState(!soname.isPresent() || SONAME_REQUIRED_LINK_TYPES.contains(linkType));

    // Bundle loaders are only supported for Mach-O bundle libraries
    Preconditions.checkState(!bundleLoader.isPresent() || linkType == Linker.LinkType.MACH_O_BUNDLE);

    // Collect and topologically sort our deps that contribute to the link.
    ImmutableList.Builder<NativeLinkableInput> nativeLinkableInputs = ImmutableList.builder();
    nativeLinkableInputs.add(immediateLinkableInput);
    for (NativeLinkable nativeLinkable : Maps
            .filterKeys(NativeLinkables.getNativeLinkables(cxxPlatform, nativeLinkableDeps, depType),
                    Predicates.not(blacklist::contains))
            .values()) {
        NativeLinkableInput input = NativeLinkables.getNativeLinkableInput(cxxPlatform, depType,
                nativeLinkable);
        LOG.verbose("Native linkable %s returned input %s", nativeLinkable, input);
        nativeLinkableInputs.add(input);
    }
    NativeLinkableInput linkableInput = NativeLinkableInput.concat(nativeLinkableInputs.build());

    // Build up the arguments to pass to the linker.
    ImmutableList.Builder<Arg> argsBuilder = ImmutableList.builder();

    // If we're doing a shared build, pass the necessary flags to the linker, including setting
    // the soname.
    if (linkType == Linker.LinkType.SHARED) {
        argsBuilder.addAll(cxxPlatform.getLd().resolve(ruleResolver).getSharedLibFlag());
    } else if (linkType == Linker.LinkType.MACH_O_BUNDLE) {
        argsBuilder.add(new StringArg("-bundle"));
        // It's possible to build a Mach-O bundle without a bundle loader (logic tests, for example).
        if (bundleLoader.isPresent()) {
            argsBuilder.add(new StringArg("-bundle_loader"), new SourcePathArg(resolver, bundleLoader.get()));
        }
    }
    if (soname.isPresent()) {
        argsBuilder.addAll(StringArg.from(cxxPlatform.getLd().resolve(ruleResolver).soname(soname.get())));
    }

    // Add all arguments from our dependencies.
    argsBuilder.addAll(linkableInput.getArgs());

    // Add all shared libraries
    addSharedLibrariesLinkerArgs(cxxPlatform, resolver, ImmutableSortedSet.copyOf(linkableInput.getLibraries()),
            argsBuilder);

    // Add framework args
    addFrameworkLinkerArgs(cxxPlatform, resolver, ImmutableSortedSet.copyOf(linkableInput.getFrameworks()),
            argsBuilder);

    final ImmutableList<Arg> allArgs = argsBuilder.build();

    return createCxxLinkableBuildRule(cxxBuckConfig, cxxPlatform, params, ruleResolver, resolver, ruleFinder,
            target, output, allArgs, depType, cxxRuntimeType);
}

From source file:gobblin.broker.SharedResourcesBrokerImpl.java

NonExtendableBrokerView<S> getScopedView(final S scope) throws NoSuchScopeException {
    return new NonExtendableBrokerView<>(this.brokerCache, getWrappedScope(scope), this.scopedConfigs,
            Maps.filterKeys(this.ancestorScopesByType, new Predicate<S>() {
                @Override
                public boolean apply(@Nullable S input) {
                    return SharedResourcesBrokerUtils.isScopeTypeAncestor(scope, input);
                }
            }));
}

From source file:com.google.jenkins.flakyTestHandler.plugin.HistoryAggregatedFlakyTestResultAction.java

/**
 * Aggregate flaky runs of one previous build and put the results into a map from test name
 * to a map from SCM revision to the aggregated flaky stats for that revision.
 *
 * @param build the build to be aggregated
 */
public void aggregateOneBuild(AbstractBuild<?, ?> build) {
    FlakyTestResultAction action = build.getAction(FlakyTestResultAction.class);
    if (action == null) {
        return;
    }

    FlakyRunStats runStats = action.getFlakyRunStats();

    if (runStats == null) {
        return;
    }

    Map<String, SingleTestFlakyStatsWithRevision> testFlakyStatsMap = runStats
            .getTestFlakyStatsWithRevisionMap();

    if (testFlakyStatsMap == null) {
        // Skip old build which doesn't have the map
        return;
    }

    if (build.getCause(DeflakeCause.class) == null) {
        // This is a non-deflake build, update allTests
        allTests = testFlakyStatsMap.keySet();
    }

    for (Map.Entry<String, SingleTestFlakyStatsWithRevision> testFlakyStat : testFlakyStatsMap.entrySet()) {
        String testName = testFlakyStat.getKey();
        String revision = testFlakyStat.getValue().getRevision();
        SingleTestFlakyStats stats = testFlakyStat.getValue().getStats();

        if (aggregatedTestFlakyStatsWithRevision.containsKey(testName)) {
            Map<String, SingleTestFlakyStats> testFlakyStatMap = aggregatedTestFlakyStatsWithRevision
                    .get(testName);

            if (testFlakyStatMap.containsKey(revision)) {
                // Merge flaky stats with the same test and the same revision
                testFlakyStatMap.get(revision).merge(stats);
            } else {
                // First specific revision flaky stat for a given test
                testFlakyStatMap.put(revision, new SingleTestFlakyStats(stats));
            }
        } else {
            // The first test entry
            Map<String, SingleTestFlakyStats> testFlakyStatMap = new LinkedHashMap<String, SingleTestFlakyStats>();
            testFlakyStatMap.put(revision, new SingleTestFlakyStats(stats));
            aggregatedTestFlakyStatsWithRevision.put(testName, testFlakyStatMap);
        }
    }

    aggregatedFlakyStats = Maps.filterKeys(
            Maps.transformValues(aggregatedTestFlakyStatsWithRevision, REVISION_STATS_MAP_TO_AGGREGATED_STATS),
            Predicates.in(allTests));
}

From source file:com.facebook.presto.cassandra.CassandraSplitManager.java

@Override
public ConnectorPartitionResult getPartitions(ConnectorSession session, ConnectorTableHandle tableHandle,
        TupleDomain<ColumnHandle> tupleDomain) {
    CassandraTableHandle cassandraTableHandle = checkType(tableHandle, CassandraTableHandle.class,
            "tableHandle");
    requireNonNull(tupleDomain, "tupleDomain is null");
    CassandraTable table = schemaProvider.getTable(cassandraTableHandle);
    List<CassandraColumnHandle> partitionKeys = table.getPartitionKeyColumns();

    // fetch the partitions
    List<CassandraPartition> allPartitions = getCassandraPartitions(table, tupleDomain);
    log.debug("%s.%s #partitions: %d", cassandraTableHandle.getSchemaName(),
            cassandraTableHandle.getTableName(), allPartitions.size());

    // do a final pass to filter based on fields that could not be used to build the prefix
    List<ConnectorPartition> partitions = allPartitions.stream()
            .filter(partition -> tupleDomain.overlaps(partition.getTupleDomain())).collect(toList());

    // All partition key domains will be fully evaluated, so we don't need to include those
    TupleDomain<ColumnHandle> remainingTupleDomain = TupleDomain.none();
    if (!tupleDomain.isNone()) {
        if (partitions.size() == 1 && ((CassandraPartition) partitions.get(0)).isUnpartitioned()) {
            remainingTupleDomain = tupleDomain;
        } else {
            @SuppressWarnings({ "rawtypes", "unchecked" })
            List<ColumnHandle> partitionColumns = (List) partitionKeys;
            remainingTupleDomain = TupleDomain.withColumnDomains(
                    Maps.filterKeys(tupleDomain.getDomains().get(), not(in(partitionColumns))));
        }
    }

    // push down indexed column fixed value predicates only for unpartitioned partition which uses token range query
    if (partitions.size() == 1 && ((CassandraPartition) partitions.get(0)).isUnpartitioned()) {
        Map<ColumnHandle, Domain> domains = tupleDomain.getDomains().get();
        List<ColumnHandle> indexedColumns = new ArrayList<>();
        // compose partitionId by using indexed column
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<ColumnHandle, Domain> entry : domains.entrySet()) {
            CassandraColumnHandle column = (CassandraColumnHandle) entry.getKey();
            Domain domain = entry.getValue();
            if (column.isIndexed() && domain.isSingleValue()) {
                sb.append(CassandraCqlUtils.validColumnName(column.getName())).append(" = ")
                        .append(CassandraCqlUtils.cqlValue(
                                toCQLCompatibleString(entry.getValue().getSingleValue()),
                                column.getCassandraType()));
                indexedColumns.add(column);
                // Only one indexed column predicate can be pushed down.
                break;
            }
        }
        if (sb.length() > 0) {
            CassandraPartition partition = (CassandraPartition) partitions.get(0);
            TupleDomain<ColumnHandle> filterIndexedColumn = TupleDomain.withColumnDomains(
                    Maps.filterKeys(remainingTupleDomain.getDomains().get(), not(in(indexedColumns))));
            partitions = new ArrayList<>();
            partitions
                    .add(new CassandraPartition(partition.getKey(), sb.toString(), filterIndexedColumn, true));
            return new ConnectorPartitionResult(partitions, filterIndexedColumn);
        }
    }
    return new ConnectorPartitionResult(partitions, remainingTupleDomain);
}

From source file:com.facebook.presto.orc.StripeReader.java

public Stripe readStripe(StripeInformation stripe) throws IOException {
    // read the stripe footer
    StripeFooter stripeFooter = readStripeFooter(stripe);
    List<ColumnEncoding> columnEncodings = stripeFooter.getColumnEncodings();

    // get streams for selected columns
    Map<StreamId, Stream> streams = new HashMap<>();
    boolean hasRowGroupDictionary = false;
    for (Stream stream : stripeFooter.getStreams()) {
        if (includedOrcColumns.contains(stream.getColumn())) {
            streams.put(new StreamId(stream), stream);

            ColumnEncodingKind columnEncoding = columnEncodings.get(stream.getColumn()).getColumnEncodingKind();
            if (columnEncoding == DICTIONARY && stream.getStreamKind() == StreamKind.IN_DICTIONARY) {
                hasRowGroupDictionary = true;
            }
        }
    }

    // handle stripes with more than one row group or a dictionary
    if ((stripe.getNumberOfRows() > rowsInRowGroup) || hasRowGroupDictionary) {
        // determine ranges of the stripe to read
        Map<StreamId, DiskRange> diskRanges = getDiskRanges(stripeFooter.getStreams());
        diskRanges = Maps.filterKeys(diskRanges, Predicates.in(streams.keySet()));

        // read the file regions
        Map<StreamId, OrcInputStream> streamsData = readDiskRanges(stripe.getOffset(), diskRanges);

        // read the row index for each column
        Map<Integer, List<RowGroupIndex>> columnIndexes = readColumnIndexes(streams, streamsData);

        // select the row groups matching the tuple domain
        Set<Integer> selectedRowGroups = selectRowGroups(stripe, columnIndexes);

        // if all row groups are skipped, return null
        if (selectedRowGroups.isEmpty()) {
            return null;
        }

        // value streams
        Map<StreamId, ValueStream<?>> valueStreams = createValueStreams(streams, streamsData, columnEncodings);

        // build the dictionary streams
        StreamSources dictionaryStreamSources = createDictionaryStreamSources(streams, valueStreams,
                columnEncodings);

        // build the row groups
        try {
            List<RowGroup> rowGroups = createRowGroups(stripe.getNumberOfRows(), streams, valueStreams,
                    columnIndexes, selectedRowGroups, columnEncodings);

            return new Stripe(stripe.getNumberOfRows(), columnEncodings, rowGroups, dictionaryStreamSources);
        } catch (InvalidCheckpointException e) {
            // The ORC file contains a corrupt checkpoint stream
            // If the file does not have a row group dictionary, treat the stripe as a single row group. Otherwise,
            // we must fail because the length of the row group dictionary is contained in the checkpoint stream.
            if (hasRowGroupDictionary) {
                throw new OrcCorruptionException(e, "ORC file %s has corrupt checkpoints", orcDataSource);
            }
        }
    }

    // stripe only has one row group and no dictionary
    ImmutableMap.Builder<StreamId, DiskRange> diskRangesBuilder = ImmutableMap.builder();
    for (Entry<StreamId, DiskRange> entry : getDiskRanges(stripeFooter.getStreams()).entrySet()) {
        StreamId streamId = entry.getKey();
        if (streamId.getStreamKind() != ROW_INDEX && streams.keySet().contains(streamId)) {
            diskRangesBuilder.put(entry);
        }
    }
    ImmutableMap<StreamId, DiskRange> diskRanges = diskRangesBuilder.build();

    // read the file regions
    Map<StreamId, OrcInputStream> streamsData = readDiskRanges(stripe.getOffset(), diskRanges);

    // value streams
    Map<StreamId, ValueStream<?>> valueStreams = createValueStreams(streams, streamsData, columnEncodings);

    // build the dictionary streams
    StreamSources dictionaryStreamSources = createDictionaryStreamSources(streams, valueStreams,
            columnEncodings);

    // build the row group
    ImmutableMap.Builder<StreamId, StreamSource<?>> builder = ImmutableMap.builder();
    for (Entry<StreamId, ValueStream<?>> entry : valueStreams.entrySet()) {
        builder.put(entry.getKey(), new ValueStreamSource<>(entry.getValue()));
    }
    RowGroup rowGroup = new RowGroup(0, 0, stripe.getNumberOfRows(), new StreamSources(builder.build()));

    return new Stripe(stripe.getNumberOfRows(), columnEncodings, ImmutableList.of(rowGroup),
            dictionaryStreamSources);
}

From source file:org.apache.aurora.scheduler.updater.JobDiff.java

/**
 * Calculates the diff necessary to change the current state of a job to the proposed state.
 *
 * @param taskStore Store to fetch the job's current state from.
 * @param job Job being diffed.
 * @param proposedState Proposed state to move the job towards.
 * @param scope Instances to limit the diff to.
 * @return A diff of the current state compared with {@code proposedState}, within {@code scope}.
 */
public static JobDiff compute(TaskStore taskStore, IJobKey job, Map<Integer, ITaskConfig> proposedState,
        Set<IRange> scope) {

    Map<Integer, ITaskConfig> currentState = ImmutableMap.copyOf(Maps.transformValues(
            Maps.uniqueIndex(taskStore.fetchTasks(Query.jobScoped(job).active()), Tasks::getInstanceId),
            Tasks::getConfig));

    JobDiff diff = computeUnscoped(currentState, job, proposedState);
    if (scope.isEmpty()) {
        return diff;
    } else {
        Set<Integer> limit = Numbers.rangesToInstanceIds(scope);
        Map<Integer, ITaskConfig> replaced = ImmutableMap
                .copyOf(Maps.filterKeys(diff.getReplacedInstances(), Predicates.in(limit)));
        Set<Integer> replacements = ImmutableSet
                .copyOf(Sets.intersection(diff.getReplacementInstances(), limit));

        Set<Integer> unchangedIds = ImmutableSet.copyOf(Sets.difference(
                ImmutableSet.copyOf(Sets.difference(currentState.keySet(), replaced.keySet())), replacements));
        Map<Integer, ITaskConfig> unchanged = ImmutableMap
                .copyOf(Maps.filterKeys(currentState, Predicates.in(unchangedIds)));

        return new JobDiff(replaced, replacements, unchanged);
    }
}

From source file:iterator.util.Config.java

public void load(Map<String, String> data) {
    putAll(Maps.filterKeys(data, EXPLORER_KEYS));
}

From source file:controllers.ModuleController.java

private static List<ModuleModel> getNextModules(String input) {
    // get all the supplied view models.
    List<ViewModel> suppliedViewModels = Lists.newArrayList();
    JsonNode inputJson = Json.parse(input);

    // convert json nodes to view models.
    if (inputJson != null && inputJson.isArray()) {
        suppliedViewModels = Lists
                .newArrayList(Iterators.transform(inputJson.getElements(), new Function<JsonNode, ViewModel>() {
                    @Override
                    @Nullable
                    public ViewModel apply(@Nullable JsonNode input) {
                        if (!input.isTextual()) {
                            return null;
                        }
                        return createViewModelQuietly(
                                fetchResource(UuidUtils.create(input.asText()), PersistentObject.class), null);

                    }
                }));
    } else if (inputJson != null && inputJson.isObject()) {
        suppliedViewModels.add(createViewModelQuietly(inputJson, null));
    }

    suppliedViewModels = Lists.newArrayList(Iterables.filter(suppliedViewModels, Predicates.notNull()));

    // get all the modules that can use these inputs.
    Map<Module, Double> nullModulesMap = Maps.newHashMap();
    Map<Module, Double> modulesMap = Maps.newHashMap();
    Reflections reflections = new Reflections("controllers.modules", Play.application().classloader());
    for (Class<? extends Module> moduleClass : reflections.getSubTypesOf(Module.class)) {
        // we're not interested in abstract classes.
        if (Modifier.isAbstract(moduleClass.getModifiers())) {
            continue;
        }

        // get the Module.Requires/Requireses annotation for each module class.
        // the requirements within each Module.Require are ANDed.
        // the requirements across multiple Module.Require annotations are ORed.
        List<Module.Requires> requireds = Lists.newArrayList();
        if (moduleClass.isAnnotationPresent(Module.Requires.class)) {
            requireds.add(moduleClass.getAnnotation(Module.Requires.class));
        }
        if (moduleClass.isAnnotationPresent(Module.Requireses.class)) {
            Collections.addAll(requireds, moduleClass.getAnnotation(Module.Requireses.class).value());
        }

        if (requireds.size() == 0) {
            requireds.add(null);
        }

        for (Module.Requires required : requireds) {
            final Set<Class<? extends ViewModel>> requiredViewModelClasses = Sets.newHashSet();
            if (required != null) {
                Collections.addAll(requiredViewModelClasses, required.value());
            }

            // get all the supplied view modules that are relevant to this module.
            List<ViewModel> usefulViewModels = Lists
                    .newArrayList(Iterables.filter(suppliedViewModels, new Predicate<ViewModel>() {
                        @Override
                        public boolean apply(@Nullable ViewModel input) {
                            // if this class is required, then return true.
                            if (requiredViewModelClasses.contains(input.getClass())) {
                                return true;
                            }

                            // if any of its super classes are required, that also works.
                            for (Class<?> superClass : ClassUtils.getAllSuperclasses(input.getClass())) {
                                if (requiredViewModelClasses.contains(superClass)) {
                                    return true;
                                }
                            }

                            return false;
                        }
                    }));

            // if all the requirements were satisfied.
            if (usefulViewModels.size() >= requiredViewModelClasses.size()) {
                // try to create an instance of the module.
                Module module = null;
                try {
                    module = moduleClass.newInstance();
                    module.setViewModels(usefulViewModels);
                } catch (InstantiationException | IllegalAccessException | IllegalArgumentException e) {
                    module = null;
                } finally {
                    // if no module was created, just ignore.
                    if (module == null) {
                        continue;
                    }
                }

                // let's not divide by zero!
                double relevancyScore = suppliedViewModels.size() != 0
                        ? usefulViewModels.size() / (double) suppliedViewModels.size()
                        : 1.0;

                // keep null modules separate.
                Map<Module, Double> targetModulesMap = null;
                if (requiredViewModelClasses.size() > 0) {
                    // if a module of this type does not exist, add it.
                    if (Maps.filterKeys(modulesMap, Predicates.instanceOf(moduleClass)).size() == 0) {
                        targetModulesMap = modulesMap;
                    }
                } else {
                    targetModulesMap = nullModulesMap;
                }
                if (targetModulesMap != null) {
                    targetModulesMap.put(module, relevancyScore);
                }
            }
        }
    }

    // use null modules only if there are no regular ones.
    if (modulesMap.size() == 0) {
        modulesMap = nullModulesMap;
    }

    // convert to view models.
    Set<ModuleModel> moduleViewModels = Sets.newHashSet(
            Iterables.transform(modulesMap.entrySet(), new Function<Entry<Module, Double>, ModuleModel>() {
                @Override
                @Nullable
                public ModuleModel apply(@Nullable Entry<Module, Double> input) {
                    return new ModuleModel(input.getKey()).setRelevancyScore(input.getValue());
                }
            }));

    // order first by relevance and then by name.
    return Ordering.from(new Comparator<ModuleModel>() {
        @Override
        public int compare(ModuleModel o1, ModuleModel o2) {
            int relDiff = (int) Math.round((o2.relevancyScore - o1.relevancyScore) * 1000);
            if (relDiff == 0) {
                return o1.name.compareTo(o2.name);
            }

            return relDiff;
        }
    }).sortedCopy(moduleViewModels);
}

From source file:io.fabric8.process.manager.support.ProcessUtils.java

public static Map<String, String> getProcessLayout(Profile profile, String layoutPath) {
    return ByteToStringValues.INSTANCE.apply(
            Maps.filterKeys(profile.getOverlay().getFileConfigurations(), new LayOutPredicate(layoutPath)));
}

From source file:com.facebook.presto.hive.HivePartitionManager.java

public HivePartitionResult getPartitions(ConnectorSession session, HiveMetastore metastore,
        ConnectorTableHandle tableHandle, TupleDomain<ColumnHandle> effectivePredicate) {
    HiveTableHandle hiveTableHandle = checkType(tableHandle, HiveTableHandle.class, "tableHandle");
    requireNonNull(effectivePredicate, "effectivePredicate is null");

    if (effectivePredicate.isNone()) {
        return new HivePartitionResult(ImmutableList.of(), TupleDomain.none(), TupleDomain.none());
    }

    SchemaTableName tableName = hiveTableHandle.getSchemaTableName();
    Table table = getTable(metastore, tableName);
    Optional<HiveBucketing.HiveBucket> bucket = getHiveBucket(table,
            TupleDomain.extractFixedValues(effectivePredicate).get());

    TupleDomain<HiveColumnHandle> compactEffectivePredicate = toCompactTupleDomain(effectivePredicate,
            domainCompactionThreshold);

    if (table.getPartitionKeys().isEmpty()) {
        return new HivePartitionResult(
                ImmutableList.of(new HivePartition(tableName, compactEffectivePredicate, bucket)),
                effectivePredicate, TupleDomain.none());
    }

    List<HiveColumnHandle> partitionColumns = getPartitionKeyColumnHandles(connectorId, table);
    List<String> partitionNames = getFilteredPartitionNames(metastore, tableName, partitionColumns,
            effectivePredicate);

    // do a final pass to filter based on fields that could not be used to filter the partitions
    ImmutableList.Builder<HivePartition> partitions = ImmutableList.builder();
    for (String partitionName : partitionNames) {
        Optional<Map<ColumnHandle, NullableValue>> values = parseValuesAndFilterPartition(partitionName,
                partitionColumns, effectivePredicate);

        if (values.isPresent()) {
            partitions.add(new HivePartition(tableName, compactEffectivePredicate, partitionName, values.get(),
                    bucket));
        }
    }

    // All partition key domains will be fully evaluated, so we don't need to include those
    TupleDomain<ColumnHandle> remainingTupleDomain = TupleDomain.withColumnDomains(
            Maps.filterKeys(effectivePredicate.getDomains().get(), not(Predicates.in(partitionColumns))));
    TupleDomain<ColumnHandle> enforcedTupleDomain = TupleDomain.withColumnDomains(
            Maps.filterKeys(effectivePredicate.getDomains().get(), Predicates.in(partitionColumns)));
    return new HivePartitionResult(partitions.build(), remainingTupleDomain, enforcedTupleDomain);
}