Example usage for com.google.common.collect Maps difference

Introduction

On this page you can find example usages of com.google.common.collect Maps difference, drawn from the open-source projects listed below.

Prototype

public static <K, V> SortedMapDifference<K, V> difference(SortedMap<K, ? extends V> left,
        Map<? extends K, ? extends V> right) 

Document

Computes the difference between two sorted maps, using the comparator of the left map, or Ordering.natural() if the left map uses the natural ordering of its elements.
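
For reference, a minimal, self-contained sketch of the method's return value (the class name and map contents below are invented for illustration, not taken from any of the projects that follow):

import com.google.common.collect.ImmutableSortedMap;
import com.google.common.collect.Maps;
import com.google.common.collect.SortedMapDifference;

import java.util.SortedMap;

public class MapsDifferenceDemo {
    public static void main(String[] args) {
        SortedMap<String, Integer> left = ImmutableSortedMap.of("a", 1, "b", 2, "c", 3);
        SortedMap<String, Integer> right = ImmutableSortedMap.of("b", 2, "c", 30, "d", 4);

        // The SortedMap overload returns a SortedMapDifference whose views are ordered
        // by the left map's comparator (natural ordering here).
        SortedMapDifference<String, Integer> diff = Maps.difference(left, right);

        System.out.println(diff.areEqual());           // false
        System.out.println(diff.entriesOnlyOnLeft());  // {a=1}
        System.out.println(diff.entriesOnlyOnRight()); // {d=4}
        System.out.println(diff.entriesInCommon());    // {b=2}
        // Keys present in both maps but mapped to different values:
        diff.entriesDiffering().forEach((key, valueDifference) -> System.out
                .println(key + ": " + valueDifference.leftValue() + " -> " + valueDifference.rightValue())); // c: 3 -> 30
    }
}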

Usage

From source file: org.apache.drill.exec.store.parquet.AbstractParquetScanBatchCreator.java

protected ScanBatch getBatch(ExecutorFragmentContext context, AbstractParquetRowGroupScan rowGroupScan,
        OperatorContext oContext) throws ExecutionSetupException {
    final ColumnExplorer columnExplorer = new ColumnExplorer(context.getOptions(), rowGroupScan.getColumns());

    if (!columnExplorer.isStarQuery()) {
        rowGroupScan = rowGroupScan.copy(columnExplorer.getTableColumns());
        rowGroupScan.setOperatorId(rowGroupScan.getOperatorId());
    }

    AbstractDrillFileSystemManager fsManager = getDrillFileSystemCreator(oContext, context.getOptions());

    // keep footers in a map to avoid re-reading them
    Map<String, ParquetMetadata> footers = new HashMap<>();
    List<RecordReader> readers = new LinkedList<>();
    List<Map<String, String>> implicitColumns = new ArrayList<>();
    Map<String, String> mapWithMaxColumns = new LinkedHashMap<>();
    for (RowGroupReadEntry rowGroup : rowGroupScan.getRowGroupReadEntries()) {
        /*
        Here we could store a map from file names to footers, to prevent re-reading the footer for each row group in a file
        TODO - to prevent reading the footer again in the parquet record reader (it is read earlier in the ParquetStorageEngine)
        we should add more information to the RowGroupInfo that will be populated upon the first read to
        provide the reader with all of the file metadata it needs
        These fields will be added to the constructor below
        */
        try {
            Stopwatch timer = logger.isTraceEnabled() ? Stopwatch.createUnstarted() : null;
            DrillFileSystem fs = fsManager.get(rowGroupScan.getFsConf(rowGroup), rowGroup.getPath());
            if (!footers.containsKey(rowGroup.getPath())) {
                if (timer != null) {
                    timer.start();
                }

                ParquetMetadata footer = readFooter(fs.getConf(), rowGroup.getPath());
                if (timer != null) {
                    long timeToRead = timer.elapsed(TimeUnit.MICROSECONDS);
                    logger.trace("ParquetTrace,Read Footer,{},{},{},{},{},{},{}", "", rowGroup.getPath(), "", 0,
                            0, 0, timeToRead);
                }
                footers.put(rowGroup.getPath(), footer);
            }
            ParquetMetadata footer = footers.get(rowGroup.getPath());

            boolean autoCorrectCorruptDates = rowGroupScan.areCorruptDatesAutoCorrected();
            ParquetReaderUtility.DateCorruptionStatus containsCorruptDates = ParquetReaderUtility
                    .detectCorruptDates(footer, rowGroupScan.getColumns(), autoCorrectCorruptDates);
            logger.debug("Contains corrupt dates: {}", containsCorruptDates);

            if (!context.getOptions().getBoolean(ExecConstants.PARQUET_NEW_RECORD_READER)
                    && !isComplex(footer)) {
                readers.add(new ParquetRecordReader(context, rowGroup.getPath(), rowGroup.getRowGroupIndex(),
                        rowGroup.getNumRecordsToRead(), fs,
                        CodecFactory.createDirectCodecFactory(fs.getConf(),
                                new ParquetDirectByteBufferAllocator(oContext.getAllocator()), 0),
                        footer, rowGroupScan.getColumns(), containsCorruptDates));
            } else {
                readers.add(new DrillParquetReader(context, footer, rowGroup, columnExplorer.getTableColumns(),
                        fs, containsCorruptDates));
            }

            List<String> partitionValues = rowGroupScan.getPartitionValues(rowGroup);
            Map<String, String> implicitValues = columnExplorer.populateImplicitColumns(rowGroup.getPath(),
                    partitionValues, rowGroupScan.supportsFileImplicitColumns());
            implicitColumns.add(implicitValues);
            if (implicitValues.size() > mapWithMaxColumns.size()) {
                mapWithMaxColumns = implicitValues;
            }

        } catch (IOException e) {
            throw new ExecutionSetupException(e);
        }
    }

    // all readers should have the same number of implicit columns, add missing ones with value null
    Map<String, String> diff = Maps.transformValues(mapWithMaxColumns, Functions.constant((String) null));
    for (Map<String, String> map : implicitColumns) {
        map.putAll(Maps.difference(map, diff).entriesOnlyOnRight());
    }

    return new ScanBatch(context, oContext, readers, implicitColumns);
}
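
The final loop in the snippet above is a common padding idiom: take the widest implicit-column map, turn it into an all-null template with Maps.transformValues, and use Maps.difference(...).entriesOnlyOnRight() to copy only the keys each map is missing. A standalone sketch of that idiom follows, with made-up map contents (the class name and values are hypothetical, not part of Drill):

import com.google.common.base.Functions;
import com.google.common.collect.Maps;

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ImplicitColumnPadding {
    public static void main(String[] args) {
        // Hypothetical per-reader implicit columns; the second reader is missing "dir1".
        Map<String, String> readerA = new LinkedHashMap<>();
        readerA.put("dir0", "2023");
        readerA.put("dir1", "01");
        Map<String, String> readerB = new LinkedHashMap<>();
        readerB.put("dir0", "2024");

        List<Map<String, String>> implicitColumns = new ArrayList<>();
        implicitColumns.add(readerA);
        implicitColumns.add(readerB);
        Map<String, String> mapWithMaxColumns = readerA; // the widest map seen so far

        // All-null template built from the widest map; entriesOnlyOnRight() yields exactly
        // the keys a given map lacks, so putAll() fills them in with null values.
        Map<String, String> template =
                Maps.transformValues(mapWithMaxColumns, Functions.constant((String) null));
        for (Map<String, String> map : implicitColumns) {
            map.putAll(Maps.difference(map, template).entriesOnlyOnRight());
        }

        System.out.println(implicitColumns); // [{dir0=2023, dir1=01}, {dir0=2024, dir1=null}]
    }
}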

From source file: org.apache.drill.exec.store.hive.HiveDrillNativeScanBatchCreator.java

@Override
public ScanBatch getBatch(FragmentContext context, HiveDrillNativeParquetSubScan config,
        List<RecordBatch> children) throws ExecutionSetupException {
    final Table table = config.getTable();
    final List<InputSplit> splits = config.getInputSplits();
    final List<Partition> partitions = config.getPartitions();
    final List<SchemaPath> columns = config.getColumns();
    final String partitionDesignator = context.getOptions()
            .getOption(ExecConstants.FILESYSTEM_PARTITION_COLUMN_LABEL).string_val;
    List<Map<String, String>> implicitColumns = Lists.newLinkedList();
    boolean selectAllQuery = AbstractRecordReader.isStarQuery(columns);

    final boolean hasPartitions = (partitions != null && partitions.size() > 0);

    final List<String[]> partitionColumns = Lists.newArrayList();
    final List<Integer> selectedPartitionColumns = Lists.newArrayList();
    List<SchemaPath> newColumns = columns;
    if (!selectAllQuery) {
        // Separate out the partition and non-partition columns. Non-partition columns are passed directly to the
        // ParquetRecordReader. Partition columns are passed to ScanBatch.
        newColumns = Lists.newArrayList();
        Pattern pattern = Pattern.compile(String.format("%s[0-9]+", partitionDesignator));
        for (SchemaPath column : columns) {
            Matcher m = pattern.matcher(column.getAsUnescapedPath());
            if (m.matches()) {
                selectedPartitionColumns.add(
                        Integer.parseInt(column.getAsUnescapedPath().substring(partitionDesignator.length())));
            } else {
                newColumns.add(column);
            }
        }
    }

    final OperatorContext oContext = context.newOperatorContext(config);

    int currentPartitionIndex = 0;
    final List<RecordReader> readers = Lists.newArrayList();

    final HiveConf conf = config.getHiveConf();

    // TODO: In future we can get this cache from Metadata cached on filesystem.
    final Map<String, ParquetMetadata> footerCache = Maps.newHashMap();

    Map<String, String> mapWithMaxColumns = Maps.newLinkedHashMap();
    try {
        for (InputSplit split : splits) {
            final FileSplit fileSplit = (FileSplit) split;
            final Path finalPath = fileSplit.getPath();
            final JobConf cloneJob = new ProjectionPusher().pushProjectionsAndFilters(new JobConf(conf),
                    finalPath.getParent());
            final FileSystem fs = finalPath.getFileSystem(cloneJob);

            ParquetMetadata parquetMetadata = footerCache.get(finalPath.toString());
            if (parquetMetadata == null) {
                parquetMetadata = ParquetFileReader.readFooter(cloneJob, finalPath);
                footerCache.put(finalPath.toString(), parquetMetadata);
            }
            final List<Integer> rowGroupNums = getRowGroupNumbersFromFileSplit(fileSplit, parquetMetadata);

            for (int rowGroupNum : rowGroupNums) {
                readers.add(new ParquetRecordReader(context,
                        Path.getPathWithoutSchemeAndAuthority(finalPath).toString(), rowGroupNum, fs,
                        CodecFactory.createDirectCodecFactory(fs.getConf(),
                                new ParquetDirectByteBufferAllocator(oContext.getAllocator()), 0),
                        parquetMetadata, newColumns));
                Map<String, String> implicitValues = Maps.newLinkedHashMap();

                if (hasPartitions) {
                    List<String> values = partitions.get(currentPartitionIndex).getValues();
                    for (int i = 0; i < values.size(); i++) {
                        if (selectAllQuery || selectedPartitionColumns.contains(i)) {
                            implicitValues.put(partitionDesignator + i, values.get(i));
                        }
                    }
                }
                implicitColumns.add(implicitValues);
                if (implicitValues.size() > mapWithMaxColumns.size()) {
                    mapWithMaxColumns = implicitValues;
                }
            }
            currentPartitionIndex++;
        }
    } catch (final IOException | RuntimeException e) {
        AutoCloseables.close(e, readers);
        throw new ExecutionSetupException("Failed to create RecordReaders. " + e.getMessage(), e);
    }

    // all readers should have the same number of implicit columns, add missing ones with value null
    mapWithMaxColumns = Maps.transformValues(mapWithMaxColumns, Functions.constant((String) null));
    for (Map<String, String> map : implicitColumns) {
        map.putAll(Maps.difference(map, mapWithMaxColumns).entriesOnlyOnRight());
    }

    // If there are no readers created (which is possible when the table is empty or no row groups are matched),
    // create an empty RecordReader to output the schema
    if (readers.size() == 0) {
        readers.add(new HiveRecordReader(table, null, null, columns, context, conf,
                ImpersonationUtil.createProxyUgi(config.getUserName(), context.getQueryUserName())));
    }

    return new ScanBatch(config, context, oContext, readers.iterator(), implicitColumns);
}

From source file: org.cloudbees.literate.api.v1.ExecutionEnvironment.java

/**
 * Tests if this environment satisfies the requirements of the specified environment.
 *
 * @param environment the specified environment.
 * @return {@code true} if and only if all the labels required by the specified environment are provided by this
 *         environment.
 */
public boolean isMatchFor(ExecutionEnvironment environment) {
    return getLabels().containsAll(environment.getLabels())
            && Maps.difference(getVariables(), environment.getVariables()).entriesOnlyOnRight().isEmpty();
}
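
A hypothetical helper (not part of the project's API; names and values are invented) illustrating what the entriesOnlyOnRight().isEmpty() part of this check accepts and rejects. Note that a key present in both maps with different values lands in entriesDiffering(), so it still passes:

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

import java.util.Map;

public class EnvironmentMatchSketch {
    // Every variable key demanded by "required" must also exist in "provided";
    // differing values are reported under entriesDiffering(), not entriesOnlyOnRight().
    static boolean variablesSatisfy(Map<String, String> provided, Map<String, String> required) {
        return Maps.difference(provided, required).entriesOnlyOnRight().isEmpty();
    }

    public static void main(String[] args) {
        Map<String, String> provided = ImmutableMap.of("JDK", "8", "OS", "linux");
        System.out.println(variablesSatisfy(provided, ImmutableMap.of("JDK", "8")));     // true
        System.out.println(variablesSatisfy(provided, ImmutableMap.of("JDK", "11")));    // true (key exists, value differs)
        System.out.println(variablesSatisfy(provided, ImmutableMap.of("MAVEN", "3.9"))); // false (key missing)
    }
}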

From source file: com.github.sevntu.checkstyle.domain.BaseCheckTestSupport.java

protected void verify(Checker checker, File[] processedFiles, Map<String, List<String>> expectedViolations)
        throws Exception {
    stream.flush();
    final List<File> theFiles = Lists.newArrayList();
    Collections.addAll(theFiles, processedFiles);
    final int errs = checker.process(theFiles);

    // process each of the lines
    final Map<String, List<String>> actualViolations = getActualViolations(errs);
    final Map<String, List<String>> realExpectedViolations = Maps.filterValues(expectedViolations,
            new Predicate<List<String>>() {
                @Override
                public boolean apply(List<String> input) {
                    return !input.isEmpty();
                }
            });
    final MapDifference<String, List<String>> violationDifferences = Maps.difference(realExpectedViolations,
            actualViolations);

    final Map<String, List<String>> missingViolations = violationDifferences.entriesOnlyOnLeft();
    final Map<String, List<String>> unexpectedViolations = violationDifferences.entriesOnlyOnRight();
    final Map<String, ValueDifference<List<String>>> differingViolations = violationDifferences
            .entriesDiffering();

    final StringBuilder message = new StringBuilder();
    if (!missingViolations.isEmpty()) {
        message.append("missing violations: ").append(missingViolations);
    }
    if (!unexpectedViolations.isEmpty()) {
        if (message.length() > 0) {
            message.append('\n');
        }
        message.append("unexpected violations: ").append(unexpectedViolations);
    }
    if (!differingViolations.isEmpty()) {
        if (message.length() > 0) {
            message.append('\n');
        }
        message.append("differing violations: ").append(differingViolations);
    }

    assertTrue(message.toString(),
            missingViolations.isEmpty() && unexpectedViolations.isEmpty() && differingViolations.isEmpty());

    checker.destroy();
}
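
The same three difference views, isolated in a small expected-vs-actual sketch (file names and messages are fabricated):

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;

import java.util.List;
import java.util.Map;

public class ExpectedVsActualSketch {
    public static void main(String[] args) {
        Map<String, List<String>> expected = ImmutableMap.<String, List<String>>of(
                "A.java", ImmutableList.of("3: unused import"),
                "B.java", ImmutableList.of("10: magic number"));
        Map<String, List<String>> actual = ImmutableMap.<String, List<String>>of(
                "A.java", ImmutableList.of("3: unused import"),
                "C.java", ImmutableList.of("1: missing header"));

        // List values are compared with equals(), so identical message lists count as "in common".
        MapDifference<String, List<String>> diff = Maps.difference(expected, actual);
        System.out.println("missing:    " + diff.entriesOnlyOnLeft());  // {B.java=[10: magic number]}
        System.out.println("unexpected: " + diff.entriesOnlyOnRight()); // {C.java=[1: missing header]}
        System.out.println("differing:  " + diff.entriesDiffering());   // {}
    }
}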

From source file: org.wso2.carbon.governance.comparator.wsdl.WSDLOperationComparator.java

private boolean isDifferent(Map<String, Fault> left, Map<String, Fault> right) {
    if (left != null && right != null && left.size() != right.size()) {
        return true;
    } else {
        MapDifference<String, Fault> mapDiff = Maps.difference(left, right);
        if (!mapDiff.areEqual()) {
            return true;
        } else {
            for (String name : mapDiff.entriesInCommon().keySet()) {
                if (isDifferent(left.get(name), right.get(name))) {
                    return true;
                }
            }
        }
    }
    return false;
}

From source file: org.apache.cassandra.db.DefsTables.java

private static Set<String> mergeKeyspaces(Map<DecoratedKey, ColumnFamily> before,
        Map<DecoratedKey, ColumnFamily> after) {
    List<Row> created = new ArrayList<>();
    List<String> altered = new ArrayList<>();
    Set<String> dropped = new HashSet<>();

    /*
     * - we don't care about entriesOnlyOnLeft() or entriesInCommon(), because only the changes are of interest to us
     * - of all entriesOnlyOnRight(), we only care about ones that have live columns; it's possible to have a ColumnFamily
     *   there that only has the top-level deletion, if:
     *      a) a pushed DROP KEYSPACE change for a keyspace hadn't ever made it to this node in the first place
     *      b) a pulled dropped keyspace that got dropped before it could find a way to this node
     * - of entriesDiffering(), we don't care about the scenario where both pre and post-values have zero live columns:
     *   that means that a keyspace had been recreated and dropped, and the recreated keyspace had never found a way
     *   to this node
     */
    MapDifference<DecoratedKey, ColumnFamily> diff = Maps.difference(before, after);

    for (Map.Entry<DecoratedKey, ColumnFamily> entry : diff.entriesOnlyOnRight().entrySet())
        if (entry.getValue().getColumnCount() > 0)
            created.add(new Row(entry.getKey(), entry.getValue()));

    for (Map.Entry<DecoratedKey, MapDifference.ValueDifference<ColumnFamily>> entry : diff.entriesDiffering()
            .entrySet()) {
        String keyspaceName = AsciiType.instance.compose(entry.getKey().key);

        ColumnFamily pre = entry.getValue().leftValue();
        ColumnFamily post = entry.getValue().rightValue();

        if (pre.getColumnCount() > 0 && post.getColumnCount() > 0)
            altered.add(keyspaceName);
        else if (pre.getColumnCount() > 0)
            dropped.add(keyspaceName);
        else if (post.getColumnCount() > 0) // a (re)created keyspace
            created.add(new Row(entry.getKey(), post));
    }

    for (Row row : created)
        addKeyspace(KSMetaData.fromSchema(row, Collections.<CFMetaData>emptyList()));
    for (String name : altered)
        updateKeyspace(name);
    return dropped;
}

From source file: org.locationtech.geogig.storage.TransactionRefDatabase.java

/**
 * The names of the refs that either have changed from their original value or didn't exist at
 * the time this method is called
 */
public ImmutableSet<String> getChangedRefs() {
    Map<String, String> externalOriginals;
    Map<String, String> externalChanged;
    {
        Map<String, String> originals = refDb.getAll(this.txOrigNamespace);
        Map<String, String> changed = refDb.getAll(this.txNamespace);

        externalOriginals = toExternal(originals);
        externalChanged = toExternal(changed);
    }
    MapDifference<String, String> difference;
    difference = Maps.difference(externalOriginals, externalChanged);

    Map<String, String> changes = new HashMap<>();
    // include all new refs
    changes.putAll(difference.entriesOnlyOnRight());

    // include all changed refs, with the new values
    for (Map.Entry<String, ValueDifference<String>> e : difference.entriesDiffering().entrySet()) {
        String name = e.getKey();
        ValueDifference<String> valueDifference = e.getValue();
        String newValue = valueDifference.rightValue();
        changes.put(name, newValue);
    }
    return ImmutableSet.copyOf(changes.keySet());
}
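
A condensed sketch of the "new plus updated keys" pattern used above, with invented ref names and object ids (the class name is hypothetical):

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.MapDifference;
import com.google.common.collect.MapDifference.ValueDifference;
import com.google.common.collect.Maps;

import java.util.HashMap;
import java.util.Map;

public class ChangedRefsSketch {
    public static void main(String[] args) {
        // Original vs. current ref -> object id views of a transaction.
        Map<String, String> originals = ImmutableMap.of("refs/heads/master", "a1", "refs/heads/dev", "b2");
        Map<String, String> changed = ImmutableMap.of("refs/heads/master", "a1", "refs/heads/dev", "b3",
                "refs/heads/feature", "c4");

        MapDifference<String, String> difference = Maps.difference(originals, changed);

        Map<String, String> changes = new HashMap<>();
        changes.putAll(difference.entriesOnlyOnRight()); // brand-new refs
        for (Map.Entry<String, ValueDifference<String>> e : difference.entriesDiffering().entrySet()) {
            changes.put(e.getKey(), e.getValue().rightValue()); // updated refs, keeping the new value
        }

        System.out.println(ImmutableSet.copyOf(changes.keySet())); // [refs/heads/feature, refs/heads/dev] (order may vary)
    }
}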

From source file: org.esco.grouperui.web.tag.renderer.EscoHtmlTableRenderer.java

/**
 * Allows outputting a log at the end of the process. It will compare the
 * requested parameters and the obtained parameters.
 *
 * @param theGroupDb
 *            the parameter source.
 */
private void verifyAndLogParameter(final ParameterGroup theGroupDb) {
    // The obtained parameters
    Map<String, Parameter> reqParameter = (Map<String, Parameter>) FacesContext.getCurrentInstance()
            .getExternalContext().getRequestMap().get(EscoHtmlTableRenderer.PARAMETER);

    // The requested parameters.
    Map<String, Parameter> groupParam = new HashMap<String, Parameter>();
    for (Parameter param : theGroupDb.getParameters()) {
        groupParam.put(param.getKey(), param);
    }

    if (reqParameter != null) {
        // The difference between the two map.
        MapDifference<String, Parameter> mapDiffs = Maps.difference(reqParameter, groupParam);

        this.logDifferences(mapDiffs.entriesOnlyOnLeft(), mapDiffs.entriesOnlyOnRight());
    }
}

From source file: com.streamsets.pipeline.lib.jdbc.multithread.CDCJdbcRunnable.java

private boolean getDiff(String captureInstanceName, Map<String, Integer> sourceTableColumnInfo,
        Map<String, Integer> cdcTableColumnInfo) {
    MapDifference<String, Integer> diff = Maps.difference(sourceTableColumnInfo, cdcTableColumnInfo);

    if (!diff.areEqual()) {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Detected drift for table {} - new columns: {}, drop columns: {}", captureInstanceName,
                    StringUtils.join(diff.entriesOnlyOnLeft().keySet(), ","),
                    StringUtils.join(diff.entriesOnlyOnRight().keySet(), ","));
        }
        return true;
    }

    return false;
}
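
A standalone sketch of the same drift check, with invented column-name-to-JDBC-type maps (class and variable names are hypothetical):

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;

import java.util.Map;

public class SchemaDriftSketch {
    public static void main(String[] args) {
        Map<String, Integer> sourceTableColumns = ImmutableMap.of("id", 4, "name", 12, "created_at", 93);
        Map<String, Integer> cdcTableColumns = ImmutableMap.of("id", 4, "name", 12);

        MapDifference<String, Integer> diff = Maps.difference(sourceTableColumns, cdcTableColumns);
        if (!diff.areEqual()) {
            // Columns only in the source table vs. columns only in the CDC capture table.
            System.out.println("new columns:  " + diff.entriesOnlyOnLeft().keySet());  // [created_at]
            System.out.println("drop columns: " + diff.entriesOnlyOnRight().keySet()); // []
        }
    }
}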

From source file: org.locationtech.geogig.storage.impl.TransactionRefDatabase.java

/**
 * The names of the refs that either have changed from their original value or didn't exist at
 * the time this method is called
 */
public ImmutableSet<String> getChangedRefs() {
    Map<String, String> externalOriginals;
    Map<String, String> externalChanged;
    {
        Map<String, String> originals = refDb.getAll(this.txOrigNamespace);
        Map<String, String> changed = refDb.getAll(this.txChangedNamespace);

        externalOriginals = toExternal(originals);
        externalChanged = toExternal(changed);
    }
    MapDifference<String, String> difference;
    difference = Maps.difference(externalOriginals, externalChanged);

    Map<String, String> changes = new HashMap<>();
    // include all new refs
    changes.putAll(difference.entriesOnlyOnRight());

    // include all changed refs, with the new values
    for (Map.Entry<String, ValueDifference<String>> e : difference.entriesDiffering().entrySet()) {
        String name = e.getKey();
        ValueDifference<String> valueDifference = e.getValue();
        String newValue = valueDifference.rightValue();
        changes.put(name, newValue);
    }
    return ImmutableSet.copyOf(changes.keySet());
}