Example usage for com.google.common.collect Maps transformValues

List of usage examples for com.google.common.collect Maps transformValues

Introduction

On this page you can find example usage for com.google.common.collect Maps transformValues.

Prototype

@GwtIncompatible("NavigableMap")
public static <K, V1, V2> NavigableMap<K, V2> transformValues(NavigableMap<K, V1> fromMap,
        Function<? super V1, V2> function) 

Document

Returns a view of a navigable map where each value is transformed by a function.
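
For orientation, here is a minimal, self-contained sketch of this overload (the map contents are illustrative only). The returned map is a view backed by the input map: the function is applied lazily on access, and later changes to the input map show through.

import com.google.common.collect.Maps;

import java.util.NavigableMap;
import java.util.TreeMap;

public class TransformValuesSketch {
    public static void main(String[] args) {
        NavigableMap<String, Integer> prices = new TreeMap<>();
        prices.put("apple", 3);
        prices.put("banana", 2);

        // Lazy view: the function runs on access, not at creation time.
        NavigableMap<String, String> labels =
                Maps.transformValues(prices, v -> v + " EUR");

        System.out.println(labels);               // {apple=3 EUR, banana=2 EUR}
        prices.put("cherry", 5);
        System.out.println(labels.get("cherry")); // 5 EUR
    }
}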

Usage

From source file:org.janusgraph.graphdb.olap.computer.FulgoraVertexMemory.java

public Map<Long, EntryList> retrievePartitionAggregates() {
    for (PartitionVertexAggregate agg : partitionVertices.values())
        agg.completeIteration();
    return Maps.transformValues(partitionVertices, s -> s.getLoadedProperties());
}

From source file:org.sosy_lab.cpachecker.util.cwriter.CExpressionInvariantExporter.java

/**
 * @return Mapping from line numbers to states associated with the given line.
 */
private Map<Integer, BooleanFormula> getInvariantsForFile(ReachedSet pReachedSet, String filename) {

    // One formula per reported state.
    Multimap<Integer, BooleanFormula> byState = HashMultimap.create();

    for (AbstractState state : pReachedSet) {

        CFANode loc = AbstractStates.extractLocation(state);
        if (loc != null && loc.getNumEnteringEdges() > 0) {
            CFAEdge edge = loc.getEnteringEdge(0);
            FileLocation location = edge.getFileLocation();
            FluentIterable<FormulaReportingState> reporting = AbstractStates.asIterable(state)
                    .filter(FormulaReportingState.class);

            if (location.getFileName().equals(filename) && !reporting.isEmpty()) {
                BooleanFormula reported = bfmgr
                        .and(reporting.transform(s -> s.getFormulaApproximation(fmgr)).toList());
                byState.put(location.getStartingLineInOrigin(), reported);
            }
        }
    }
    return Maps.transformValues(byState.asMap(), invariants -> bfmgr.or(invariants));
}

From source file:com.isotrol.impe3.core.modules.ModuleDefinition.java

/**
 * If this is a component module, returns its component provisions; otherwise, throws an IllegalStateException.
 * @return Map of component provisions.
 */
public final Map<String, ComponentProvision> getComponentProvisions() {
    Preconditions.checkState(getModuleType() == ModuleType.COMPONENT);
    return Maps.transformValues(getProvisions(), TO_COMPONENT_PROVISION);
}

From source file:org.apache.drill.exec.store.parquet.AbstractParquetScanBatchCreator.java

protected ScanBatch getBatch(ExecutorFragmentContext context, AbstractParquetRowGroupScan rowGroupScan,
        OperatorContext oContext) throws ExecutionSetupException {
    final ColumnExplorer columnExplorer = new ColumnExplorer(context.getOptions(), rowGroupScan.getColumns());

    if (!columnExplorer.isStarQuery()) {
        rowGroupScan = rowGroupScan.copy(columnExplorer.getTableColumns());
        rowGroupScan.setOperatorId(rowGroupScan.getOperatorId());
    }

    AbstractDrillFileSystemManager fsManager = getDrillFileSystemCreator(oContext, context.getOptions());

    // keep footers in a map to avoid re-reading them
    Map<String, ParquetMetadata> footers = new HashMap<>();
    List<RecordReader> readers = new LinkedList<>();
    List<Map<String, String>> implicitColumns = new ArrayList<>();
    Map<String, String> mapWithMaxColumns = new LinkedHashMap<>();
    for (RowGroupReadEntry rowGroup : rowGroupScan.getRowGroupReadEntries()) {
        /*
        Here we could store a map from file names to footers, to prevent re-reading the footer for each row group in a file
        TODO - to prevent reading the footer again in the parquet record reader (it is read earlier in the ParquetStorageEngine)
        we should add more information to the RowGroupInfo that will be populated upon the first read to
        provide the reader with all of the file meta-data it needs
        These fields will be added to the constructor below
        */
        try {
            Stopwatch timer = logger.isTraceEnabled() ? Stopwatch.createUnstarted() : null;
            DrillFileSystem fs = fsManager.get(rowGroupScan.getFsConf(rowGroup), rowGroup.getPath());
            if (!footers.containsKey(rowGroup.getPath())) {
                if (timer != null) {
                    timer.start();
                }

                ParquetMetadata footer = readFooter(fs.getConf(), rowGroup.getPath());
                if (timer != null) {
                    long timeToRead = timer.elapsed(TimeUnit.MICROSECONDS);
                    logger.trace("ParquetTrace,Read Footer,{},{},{},{},{},{},{}", "", rowGroup.getPath(), "", 0,
                            0, 0, timeToRead);
                }
                footers.put(rowGroup.getPath(), footer);
            }
            ParquetMetadata footer = footers.get(rowGroup.getPath());

            boolean autoCorrectCorruptDates = rowGroupScan.areCorruptDatesAutoCorrected();
            ParquetReaderUtility.DateCorruptionStatus containsCorruptDates = ParquetReaderUtility
                    .detectCorruptDates(footer, rowGroupScan.getColumns(), autoCorrectCorruptDates);
            logger.debug("Contains corrupt dates: {}", containsCorruptDates);

            if (!context.getOptions().getBoolean(ExecConstants.PARQUET_NEW_RECORD_READER)
                    && !isComplex(footer)) {
                readers.add(new ParquetRecordReader(context, rowGroup.getPath(), rowGroup.getRowGroupIndex(),
                        rowGroup.getNumRecordsToRead(), fs,
                        CodecFactory.createDirectCodecFactory(fs.getConf(),
                                new ParquetDirectByteBufferAllocator(oContext.getAllocator()), 0),
                        footer, rowGroupScan.getColumns(), containsCorruptDates));
            } else {
                readers.add(new DrillParquetReader(context, footer, rowGroup, columnExplorer.getTableColumns(),
                        fs, containsCorruptDates));
            }

            List<String> partitionValues = rowGroupScan.getPartitionValues(rowGroup);
            Map<String, String> implicitValues = columnExplorer.populateImplicitColumns(rowGroup.getPath(),
                    partitionValues, rowGroupScan.supportsFileImplicitColumns());
            implicitColumns.add(implicitValues);
            if (implicitValues.size() > mapWithMaxColumns.size()) {
                mapWithMaxColumns = implicitValues;
            }

        } catch (IOException e) {
            throw new ExecutionSetupException(e);
        }
    }

    // all readers should have the same number of implicit columns, add missing ones with value null
    Map<String, String> diff = Maps.transformValues(mapWithMaxColumns, Functions.constant((String) null));
    for (Map<String, String> map : implicitColumns) {
        map.putAll(Maps.difference(map, diff).entriesOnlyOnRight());
    }

    return new ScanBatch(context, oContext, readers, implicitColumns);
}
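
The last few lines above use a compact idiom: Maps.transformValues with Functions.constant((String) null) turns the widest implicit-column map into a template that has every key but only null values, and Maps.difference(map, template).entriesOnlyOnRight() yields exactly the keys a given reader is missing. A minimal sketch of the same idiom, with made-up column names:

import com.google.common.base.Functions;
import com.google.common.collect.Maps;

import java.util.LinkedHashMap;
import java.util.Map;

public class PadMissingColumnsSketch {
    public static void main(String[] args) {
        // The widest map seen so far; it defines the full set of implicit columns.
        Map<String, String> maxColumns = new LinkedHashMap<>();
        maxColumns.put("filename", "a.parquet");
        maxColumns.put("dir0", "2018");

        // A reader that produced only one of those columns.
        Map<String, String> readerColumns = new LinkedHashMap<>();
        readerColumns.put("filename", "b.parquet");

        // Same keys as maxColumns, every value replaced with null.
        Map<String, String> nullTemplate =
                Maps.transformValues(maxColumns, Functions.constant((String) null));

        // Add only the keys missing from readerColumns, with null values.
        readerColumns.putAll(Maps.difference(readerColumns, nullTemplate).entriesOnlyOnRight());

        System.out.println(readerColumns); // {filename=b.parquet, dir0=null}
    }
}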

From source file:no.ssb.jsonstat.v2.deser.DimensionDeserializer.java

private ImmutableMap<String, String> parseIndexAsMap(JsonParser p) throws IOException {
    ImmutableMap<String, String> index;

    Map<String, Integer> mapIndex = p.readValueAs(INDEX_MAP);

    // Even though the keys are Strings, the sorting actually uses the
    // original integer index thanks to the forMap function.
    Ordering<String> byValue = Ordering.natural().onResultOf(Functions.forMap(mapIndex));

    index = ImmutableSortedMap.copyOf(Maps.transformValues(mapIndex, Object::toString), byValue);
    return index;
}
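
The interplay above is worth isolating: Maps.transformValues exposes the integer positions as Strings, while Ordering.natural().onResultOf(Functions.forMap(...)) keeps the keys sorted by their original integer positions rather than alphabetically. A minimal sketch with a made-up index map:

import com.google.common.base.Functions;
import com.google.common.collect.ImmutableSortedMap;
import com.google.common.collect.Maps;
import com.google.common.collect.Ordering;

import java.util.HashMap;
import java.util.Map;

public class IndexOrderingSketch {
    public static void main(String[] args) {
        // Category -> position, as it might arrive from a parser.
        Map<String, Integer> index = new HashMap<>();
        index.put("total", 0);
        index.put("male", 1);
        index.put("female", 2);

        // Compare keys by their integer position, not alphabetically.
        Ordering<String> byPosition = Ordering.natural().onResultOf(Functions.forMap(index));

        // Keep that order while exposing the positions as Strings.
        ImmutableSortedMap<String, String> sorted =
                ImmutableSortedMap.copyOf(Maps.transformValues(index, Object::toString), byPosition);

        System.out.println(sorted); // {total=0, male=1, female=2}
    }
}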

From source file:org.diqube.execution.steps.GroupStep.java

@Override
protected void execute() {
    if (columnBuiltConsumer.getNumberOfTimesWired() > 0 && !allColumnsBuilt.get())
        // we wait until our columns are all built.
        return;

    if (headGrouper == null)
        // Create the groupers only now, since at this point we know that all columns are really available.
        headGrouper = createGroupers(colNamesToGroupBy, 0).get();

    List<Long> activeRowIds = new ArrayList<>();
    Long newRowId;
    while ((newRowId = rowIds.poll()) != null)
        activeRowIds.add(newRowId);

    if (activeRowIds.size() > 0) {
        // use headGrouper to group the new RowIDs, collect the new groupings in a new map.
        Map<Long, List<Long>> changesGroups = new HashMap<>();
        headGrouper.groupRowIds(activeRowIds, changesGroups);

        logger.trace("Grouped new rowIds (limit each): {}",
                Maps.transformValues(changesGroups, lst -> Iterables.limit(lst, 50)));

        Set<Long> newGroupIds = Sets.difference(changesGroups.keySet(), groups.keySet());

        if (!newGroupIds.isEmpty()) {
            // If we started new groups, we need to resolve the values of the group-by fields (e.g. if they are selected).
            // As each groupID is in fact a rowID (of one arbitrary row that is inside the group), we find those new row IDs
            // and send them to RowID consumers.
            Long[] newRowIdsArray = newGroupIds.stream().toArray(l -> new Long[l]);
            logger.trace("New group IDs (limit): {}", Iterables.limit(Arrays.asList(newRowIdsArray), 100));

            forEachOutputConsumerOfType(RowIdConsumer.class, c -> c.consume(newRowIdsArray));
        }

        for (Long groupId : changesGroups.keySet()) {
            if (!groups.containsKey(groupId))
                groups.put(groupId, new ArrayList<>(changesGroups.get(groupId)));
            else
                groups.get(groupId).addAll(changesGroups.get(groupId));
        }

        forEachOutputConsumerOfType(GroupDeltaConsumer.class, c -> c.consumeGroupDeltas(changesGroups));
        forEachOutputConsumerOfType(GroupConsumer.class, c -> c.consumeGroups(groups));
    }
    if (sourceIsEmpty.get() && rowIds.isEmpty()) {
        forEachOutputConsumerOfType(GenericConsumer.class, c -> c.sourceIsDone());
        doneProcessing();
    }
}

From source file:org.apache.drill.exec.store.hive.HiveDrillNativeScanBatchCreator.java

@Override
public ScanBatch getBatch(FragmentContext context, HiveDrillNativeParquetSubScan config,
        List<RecordBatch> children) throws ExecutionSetupException {
    final Table table = config.getTable();
    final List<InputSplit> splits = config.getInputSplits();
    final List<Partition> partitions = config.getPartitions();
    final List<SchemaPath> columns = config.getColumns();
    final String partitionDesignator = context.getOptions()
            .getOption(ExecConstants.FILESYSTEM_PARTITION_COLUMN_LABEL).string_val;
    List<Map<String, String>> implicitColumns = Lists.newLinkedList();
    boolean selectAllQuery = AbstractRecordReader.isStarQuery(columns);

    final boolean hasPartitions = (partitions != null && partitions.size() > 0);

    final List<String[]> partitionColumns = Lists.newArrayList();
    final List<Integer> selectedPartitionColumns = Lists.newArrayList();
    List<SchemaPath> newColumns = columns;
    if (!selectAllQuery) {
        // Separate out the partition and non-partition columns. Non-partition columns are passed directly to the
        // ParquetRecordReader. Partition columns are passed to ScanBatch.
        newColumns = Lists.newArrayList();
        Pattern pattern = Pattern.compile(String.format("%s[0-9]+", partitionDesignator));
        for (SchemaPath column : columns) {
            Matcher m = pattern.matcher(column.getAsUnescapedPath());
            if (m.matches()) {
                selectedPartitionColumns.add(
                        Integer.parseInt(column.getAsUnescapedPath().substring(partitionDesignator.length())));
            } else {
                newColumns.add(column);
            }
        }
    }

    final OperatorContext oContext = context.newOperatorContext(config);

    int currentPartitionIndex = 0;
    final List<RecordReader> readers = Lists.newArrayList();

    final HiveConf conf = config.getHiveConf();

    // TODO: In future we can get this cache from Metadata cached on filesystem.
    final Map<String, ParquetMetadata> footerCache = Maps.newHashMap();

    Map<String, String> mapWithMaxColumns = Maps.newLinkedHashMap();
    try {
        for (InputSplit split : splits) {
            final FileSplit fileSplit = (FileSplit) split;
            final Path finalPath = fileSplit.getPath();
            final JobConf cloneJob = new ProjectionPusher().pushProjectionsAndFilters(new JobConf(conf),
                    finalPath.getParent());
            final FileSystem fs = finalPath.getFileSystem(cloneJob);

            ParquetMetadata parquetMetadata = footerCache.get(finalPath.toString());
            if (parquetMetadata == null) {
                parquetMetadata = ParquetFileReader.readFooter(cloneJob, finalPath);
                footerCache.put(finalPath.toString(), parquetMetadata);
            }
            final List<Integer> rowGroupNums = getRowGroupNumbersFromFileSplit(fileSplit, parquetMetadata);

            for (int rowGroupNum : rowGroupNums) {
                readers.add(new ParquetRecordReader(context,
                        Path.getPathWithoutSchemeAndAuthority(finalPath).toString(), rowGroupNum, fs,
                        CodecFactory.createDirectCodecFactory(fs.getConf(),
                                new ParquetDirectByteBufferAllocator(oContext.getAllocator()), 0),
                        parquetMetadata, newColumns));
                Map<String, String> implicitValues = Maps.newLinkedHashMap();

                if (hasPartitions) {
                    List<String> values = partitions.get(currentPartitionIndex).getValues();
                    for (int i = 0; i < values.size(); i++) {
                        if (selectAllQuery || selectedPartitionColumns.contains(i)) {
                            implicitValues.put(partitionDesignator + i, values.get(i));
                        }
                    }
                }
                implicitColumns.add(implicitValues);
                if (implicitValues.size() > mapWithMaxColumns.size()) {
                    mapWithMaxColumns = implicitValues;
                }
            }
            currentPartitionIndex++;
        }
    } catch (final IOException | RuntimeException e) {
        AutoCloseables.close(e, readers);
        throw new ExecutionSetupException("Failed to create RecordReaders. " + e.getMessage(), e);
    }

    // all readers should have the same number of implicit columns, add missing ones with value null
    mapWithMaxColumns = Maps.transformValues(mapWithMaxColumns, Functions.constant((String) null));
    for (Map<String, String> map : implicitColumns) {
        map.putAll(Maps.difference(map, mapWithMaxColumns).entriesOnlyOnRight());
    }

    // If there are no readers created (which is possible when the table is empty or no row groups are matched),
    // create an empty RecordReader to output the schema
    if (readers.size() == 0) {
        readers.add(new HiveRecordReader(table, null, null, columns, context, conf,
                ImpersonationUtil.createProxyUgi(config.getUserName(), context.getQueryUserName())));
    }

    return new ScanBatch(config, context, oContext, readers.iterator(), implicitColumns);
}

From source file:com.romeikat.datamessie.core.base.ui.panel.AbstractStatisticsPanel.java

private Map<Long, String> getSourcesIdsNames() {
    final Function<SourceOverviewDto, String> sourceToNameFunction = new Function<SourceOverviewDto, String>() {
        @Override
        public String apply(final SourceOverviewDto source) {
            return source.getName();
        }
    };
    final Map<Long, String> sourcesIdsNames = Maps.transformValues(sourcesModel.getObject(),
            sourceToNameFunction);
    return sourcesIdsNames;
}
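
Because Maps.transformValues returns a lazy view rather than a copy, the name function above runs each time the returned map is read, and later changes to the model's underlying map show through. When a detached snapshot is preferable, copying the view is enough. A minimal sketch with a simplified String-valued source map:

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

import java.util.HashMap;
import java.util.Map;

public class ViewVersusSnapshotSketch {
    public static void main(String[] args) {
        Map<Long, String> sources = new HashMap<>();
        sources.put(1L, "source-a");

        // View: reflects later changes to the underlying map.
        Map<Long, String> view = Maps.transformValues(sources, name -> name.toUpperCase());

        // Snapshot: evaluates the function once per entry and detaches from the source.
        Map<Long, String> snapshot = ImmutableMap.copyOf(view);

        sources.put(2L, "source-b");
        System.out.println(view.size());     // 2
        System.out.println(snapshot.size()); // 1
    }
}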

From source file:com.isotrol.impe3.pms.core.support.Mappers.java

/**
 * Copies the default and localized names from a DTO to the model object.
 * @param value Value.
 * @param dto DTO.
 * @throws NullPointerException if any of the names is null.
 * @throws IllegalArgumentException if any of the locales is invalid.
 */
public static void dto2localizedName(WithLocalizedNameDTO dto, WithLocalizedName value) {
    // Validation
    final Map<String, NameDTO> dtoMap = dto.getLocalizedNames();
    Preconditions.checkArgument(Iterables.all(dtoMap.keySet(), MoreLocales.VALID));
    // Copy
    value.setName(DTO2NAME.apply(dto.getDefaultName()));
    Map<String, NameValue> map = value.getLocalizedNames();
    map.clear();
    map.putAll(Maps.transformValues(dtoMap, DTO2NAME));
}

From source file:ninja.leaping.permissionsex.backend.memory.MemorySubjectData.java

@Override
public Map<Set<Entry<String, String>>, Map<String, String>> getAllOptions() {
    return Maps.filterValues(
            Maps.transformValues(contexts, dataEntry -> dataEntry == null ? null : dataEntry.options),
            el -> el != null);
}