Example usage for com.google.common.collect ImmutableMap.Builder putAll

Introduction

On this page you can find example usage for com.google.common.collect ImmutableMap.Builder putAll.

Prototype

public Builder<K, V> putAll(Map<? extends K, ? extends V> map)
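
putAll adds every entry of the given map to the builder and returns the builder itself, so it can be chained with put(...) and build(). A minimal sketch of typical usage (the class name and map contents below are illustrative, not taken from any of the sources on this page):

import com.google.common.collect.ImmutableMap;

import java.util.HashMap;
import java.util.Map;

public class PutAllExample {
    public static void main(String[] args) {
        Map<String, Integer> defaults = new HashMap<>();
        defaults.put("timeout", 30);
        defaults.put("retries", 3);

        // putAll copies every entry of the given map into the builder and
        // returns the builder, so it chains with put(...) and build().
        ImmutableMap<String, Integer> config = ImmutableMap.<String, Integer>builder()
                .putAll(defaults)
                .put("poolSize", 8)
                .build();

        System.out.println(config); // prints all three entries
    }
}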

Usage

From source file:org.elasticsearch.snapshots.SnapshotsService.java

/**
 * Checks if any new shards should be snapshotted on this node
 *
 * @param snapshotMetaData snapshot metadata to be processed
 */
private void processIndexShardSnapshots(SnapshotMetaData snapshotMetaData) {
    Map<SnapshotId, SnapshotShards> survivors = newHashMap();
    // First, remove snapshots that are no longer there
    for (Map.Entry<SnapshotId, SnapshotShards> entry : shardSnapshots.entrySet()) {
        if (snapshotMetaData != null && snapshotMetaData.snapshot(entry.getKey()) != null) {
            survivors.put(entry.getKey(), entry.getValue());
        }
    }

    // For now we will be mostly dealing with a single snapshot at a time but might have multiple simultaneously running
    // snapshots in the future
    Map<SnapshotId, Map<ShardId, IndexShardSnapshotStatus>> newSnapshots = newHashMap();
    // Now go through all snapshots and update existing or create missing
    final String localNodeId = clusterService.localNode().id();
    for (SnapshotMetaData.Entry entry : snapshotMetaData.entries()) {
        if (entry.state() == State.STARTED) {
            Map<ShardId, IndexShardSnapshotStatus> startedShards = newHashMap();
            SnapshotShards snapshotShards = shardSnapshots.get(entry.snapshotId());
            for (Map.Entry<ShardId, SnapshotMetaData.ShardSnapshotStatus> shard : entry.shards().entrySet()) {
                // Add all new shards to start processing on
                if (localNodeId.equals(shard.getValue().nodeId())) {
                    if (shard.getValue().state() == State.INIT
                            && (snapshotShards == null || !snapshotShards.shards.containsKey(shard.getKey()))) {
                        logger.trace("[{}] - Adding shard to the queue", shard.getKey());
                        startedShards.put(shard.getKey(), new IndexShardSnapshotStatus());
                    }
                }
            }
            if (!startedShards.isEmpty()) {
                newSnapshots.put(entry.snapshotId(), startedShards);
                if (snapshotShards != null) {
                    // We already saw this snapshot but we need to add more started shards
                    ImmutableMap.Builder<ShardId, IndexShardSnapshotStatus> shards = ImmutableMap.builder();
                    // Put all shards that were already running on this node
                    shards.putAll(snapshotShards.shards);
                    // Put all newly started shards
                    shards.putAll(startedShards);
                    survivors.put(entry.snapshotId(), new SnapshotShards(shards.build()));
                } else {
                    // Brand new snapshot that we haven't seen before
                    survivors.put(entry.snapshotId(), new SnapshotShards(ImmutableMap.copyOf(startedShards)));
                }
            }
        } else if (entry.state() == State.ABORTED) {
            // Abort all running shards for this snapshot
            SnapshotShards snapshotShards = shardSnapshots.get(entry.snapshotId());
            if (snapshotShards != null) {
                for (Map.Entry<ShardId, SnapshotMetaData.ShardSnapshotStatus> shard : entry.shards()
                        .entrySet()) {
                    IndexShardSnapshotStatus snapshotStatus = snapshotShards.shards.get(shard.getKey());
                    if (snapshotStatus != null) {
                        snapshotStatus.abort();
                    }
                }
            }
        }
    }

    // Update the list of snapshots that we saw and tried to start
    // If startup of these shards fails later, we don't want to try starting these shards again
    shutdownLock.lock();
    try {
        shardSnapshots = ImmutableMap.copyOf(survivors);
        if (shardSnapshots.isEmpty()) {
            // Notify all waiting threads that there are no more snapshots
            shutdownCondition.signalAll();
        }
    } finally {
        shutdownLock.unlock();
    }

    // We have new shards to start
    if (!newSnapshots.isEmpty()) {
        for (final Map.Entry<SnapshotId, Map<ShardId, IndexShardSnapshotStatus>> entry : newSnapshots
                .entrySet()) {
            for (final Map.Entry<ShardId, IndexShardSnapshotStatus> shardEntry : entry.getValue().entrySet()) {
                try {
                    final IndexShardSnapshotAndRestoreService shardSnapshotService = indicesService
                            .indexServiceSafe(shardEntry.getKey().getIndex())
                            .shardInjectorSafe(shardEntry.getKey().id())
                            .getInstance(IndexShardSnapshotAndRestoreService.class);
                    threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new Runnable() {
                        @Override
                        public void run() {
                            try {
                                shardSnapshotService.snapshot(entry.getKey(), shardEntry.getValue());
                                updateIndexShardSnapshotStatus(new UpdateIndexShardSnapshotStatusRequest(
                                        entry.getKey(), shardEntry.getKey(),
                                        new ShardSnapshotStatus(localNodeId, SnapshotMetaData.State.SUCCESS)));
                            } catch (Throwable t) {
                                logger.warn("[{}] [{}] failed to create snapshot", t, shardEntry.getKey(),
                                        entry.getKey());
                                updateIndexShardSnapshotStatus(new UpdateIndexShardSnapshotStatusRequest(
                                        entry.getKey(), shardEntry.getKey(),
                                        new ShardSnapshotStatus(localNodeId, SnapshotMetaData.State.FAILED,
                                                ExceptionsHelper.detailedMessage(t))));
                            }
                        }
                    });
                } catch (Throwable t) {
                    updateIndexShardSnapshotStatus(new UpdateIndexShardSnapshotStatusRequest(entry.getKey(),
                            shardEntry.getKey(), new ShardSnapshotStatus(localNodeId,
                                    SnapshotMetaData.State.FAILED, ExceptionsHelper.detailedMessage(t))));
                }
            }
        }
    }
}

From source file:org.grycap.gpf4med.DocumentFetcher.java

public void fecth(final ImmutableList<URL> urls) {
    checkArgument(urls != null, "Uninitialized URLs");
    final ImmutableMap.Builder<URI, File> pendingBuilder = new ImmutableMap.Builder<URI, File>();
    try {
        final File cacheDir = new File(ConfigurationManager.INSTANCE.getLocalCacheDir(), "reports");
        final ImmutableMap.Builder<URI, File> requestBuilder = new ImmutableMap.Builder<URI, File>();
        for (final URL url : urls) {
            try {
                if (URLUtils.isRemoteProtocol(url)) {
                    final URI source = url.toURI().normalize();
                    final File destination = new File(cacheDir,
                            NamingUtils.genSafeFilename(new String[] { source.toString() }, null, ".xml"));
                    requestBuilder.put(source, destination);
                } else if (URLUtils.isFileProtocol(url)) {
                    FileQueue.INSTANCE.add(FileUtils.toFile(url));
                } else {
                    FileQueue.INSTANCE.failed(1);
                    LOGGER.warn("Ignoring unsupported URL: " + url.toString());
                }
            } catch (Exception e2) {
                FileQueue.INSTANCE.failed();
            }
        }
        final DownloadConfiguration downloadConfig = new DownloadConfiguration(CONNECTION_TIMEOUT_MILLIS,
                READ_TIMEOUT_MILLIS, RETRIES, TIMEOUT_INCREMENT_PERCENTAGE);
        final ImmutableMap<URI, File> pending = new DownloadService().download(requestBuilder.build(), null,
                downloadConfig, ConfigurationManager.INSTANCE.getFileEncryptionProvider(),
                new PostProcessTask<File>() {
                    @Override
                    public void apply(final File object) {
                        FileQueue.INSTANCE.add(object);
                    }
                });
        if (pending != null) {
            pendingBuilder.putAll(pending);
        }
    } catch (Exception e) {
        final ImmutableMap<URI, File> pending = pendingBuilder.build();
        if (pending != null && pending.size() > 0) {
            FileQueue.INSTANCE.failed(pending.size());
        }
    }
}
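
This example obtains its builders with the public constructor new ImmutableMap.Builder<URI, File>() rather than the ImmutableMap.builder() factory used in the other snippets; the two are equivalent. A minimal sketch of the same copy-into-immutable-map step (the class and method names are illustrative):

import com.google.common.collect.ImmutableMap;

import java.io.File;
import java.net.URI;
import java.util.Map;

public class BuilderConstructionSketch {
    // Copies the pending downloads into an immutable snapshot.
    static ImmutableMap<URI, File> snapshot(Map<URI, File> pending) {
        // Equivalent to ImmutableMap.<URI, File>builder().
        ImmutableMap.Builder<URI, File> builder = new ImmutableMap.Builder<>();
        builder.putAll(pending);
        return builder.build();
    }
}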

From source file:com.google.devtools.build.lib.rules.android.AndroidSkylarkData.java

@SkylarkCallable(name = "process_library_data", mandatoryPositionals = 2, // ctx and libraryClassJar are required
        parameters = {
                @Param(name = "manifest", positional = false, type = Artifact.class, defaultValue = "None", named = true, noneable = true, doc = "If passed, the manifest to use for this target. Otherwise, a dummy manifest will"
                        + " be generated."),
                @Param(name = "resources", positional = false, defaultValue = "None", type = SkylarkList.class, generic1 = FileProvider.class, named = true, noneable = true, doc = "Providers of this target's resources"),
                @Param(name = "assets", positional = false, defaultValue = "None", type = SkylarkList.class, generic1 = ConfiguredTarget.class, noneable = true, named = true, doc = "Targets containing raw assets for this target. If passed, 'assets_dir' must also"
                        + " be passed."),
                @Param(name = "assets_dir", positional = false, defaultValue = "None", type = String.class, noneable = true, named = true, doc = "Directory the assets are contained in. Must be passed if and only if 'assets' is"
                        + " passed. This path will be split off of the asset paths on the device."),
                @Param(name = "exports_manifest", positional = false, defaultValue = "None", type = Boolean.class, named = true, noneable = true, doc = "Defaults to False. If passed as True, this manifest will be exported to and"
                        + " eventually merged into targets that depend on it. Otherwise, it won't be"
                        + " inherited."),
                @Param(name = "custom_package", positional = false, defaultValue = "None", type = String.class, noneable = true, named = true, doc = "The Android application package to stamp the manifest with. If not provided, the"
                        + " current Java package, derived from the location of this target's BUILD"
                        + " file, will be used. For example, given a BUILD file in"
                        + " 'java/com/foo/bar/BUILD', the package would be 'com.foo.bar'."),
                @Param(name = "neverlink", positional = false, defaultValue = "False", type = Boolean.class, named = true, doc = "Defaults to False. If passed as True, these resources and assets will not be"
                        + " inherited by targets that depend on this one."),
                @Param(name = "enable_data_binding", positional = false, defaultValue = "False", type = Boolean.class, named = true, doc = "Defaults to False. If True, processes data binding expressions in layout"
                        + " resources."),
                @Param(name = "proguard_specs", type = SkylarkList.class, generic1 = ConfiguredTarget.class, defaultValue = "[]", positional = false, named = true, doc = "Files to be used as Proguard specification for this target, which will be"
                        + " inherited in the top-level target"),
                @Param(name = "deps", positional = false, defaultValue = "[]", type = SkylarkList.class, generic1 = AndroidAssetsInfo.class, named = true, doc = "Dependency targets. Providers will be extracted from these dependencies for each"
                        + " type of data."), }, useLocation = true, useEnvironment = true, doc = "Performs full processing of data for android_library or similar rules. Returns a dict"
                                + " from provider type to providers for the target.")
public SkylarkDict<Provider, NativeInfo> processLibraryData(AndroidDataContext ctx, Artifact libraryClassJar,
        Object manifest, Object resources, Object assets, Object assetsDir, Object exportsManifest,
        Object customPackage, boolean neverlink, boolean enableDataBinding,
        SkylarkList<ConfiguredTarget> proguardSpecs, SkylarkList<ConfiguredTarget> deps, Location location,
        Environment env) throws InterruptedException, EvalException {

    SkylarkList<AndroidResourcesInfo> resourceDeps = getProviders(deps, AndroidResourcesInfo.PROVIDER);
    SkylarkList<AndroidAssetsInfo> assetDeps = getProviders(deps, AndroidAssetsInfo.PROVIDER);

    ImmutableMap.Builder<Provider, NativeInfo> infoBuilder = ImmutableMap.builder();

    AndroidResourcesInfo resourcesInfo;
    AndroidAssetsInfo assetsInfo;
    if (isNone(manifest) && isNone(resources) && isNone(assets) && isNone(assetsDir)
            && isNone(exportsManifest)) {

        // If none of these parameters were specified, for backwards compatibility, do not trigger
        // data processing.
        resourcesInfo = resourcesFromDeps(ctx, resourceDeps, neverlink, customPackage, location, env);
        assetsInfo = assetsFromDeps(assetDeps, neverlink, env);

        infoBuilder.put(AndroidResourcesInfo.PROVIDER, resourcesInfo);
    } else {

        AndroidManifestInfo baseManifest = stampAndroidManifest(ctx, manifest, customPackage,
                fromNoneableOrDefault(exportsManifest, Boolean.class, false), location, env);

        SkylarkDict<Provider, NativeInfo> resourceOutput = mergeResources(ctx, baseManifest,
                listFromNoneableOrEmpty(resources, ConfiguredTarget.class), resourceDeps, neverlink,
                enableDataBinding, location, env);

        resourcesInfo = (AndroidResourcesInfo) resourceOutput.get(AndroidResourcesInfo.PROVIDER);
        assetsInfo = mergeAssets(ctx, assets, assetsDir, assetDeps, neverlink, location, env);

        infoBuilder.putAll(resourceOutput);
    }

    AndroidLibraryAarInfo aarInfo = makeAar(ctx, resourcesInfo, assetsInfo, libraryClassJar, proguardSpecs,
            getProviders(deps, AndroidLibraryAarInfo.PROVIDER), neverlink);

    // Only expose the aar provider in non-neverlinked actions
    if (!neverlink) {
        infoBuilder.put(AndroidLibraryAarInfo.PROVIDER, aarInfo);
    }

    // Expose the updated manifest that was changed by resource processing
    // TODO(b/30817309): Use the base manifest once manifests are no longer changed in resource
    // processing
    AndroidManifestInfo manifestInfo = resourcesInfo.getManifest().toProvider();

    return SkylarkDict.copyOf(/* env = */ null, infoBuilder.put(AndroidAssetsInfo.PROVIDER, assetsInfo)
            .put(AndroidManifestInfo.PROVIDER, manifestInfo).build());
}

From source file:org.sosy_lab.java_smt.utils.UfElimination.java

/**
 * Applies the Ackermann transformation to the given {@link Formula} with respect to the {@link
 * Result} of another formula. Quantified formulas are not supported.
 *
 * @param pF the {@link Formula} to remove all Ufs from
 * @param pOtherResult result of eliminating Ufs in another {@link BooleanFormula}
 * @return the {@link Result} of the Ackermannization
 */
public Result eliminateUfs(BooleanFormula pF, Result pOtherResult) {
    checkArgument(!isQuantified(pF));
    BooleanFormula f;
    if (!pOtherResult.getSubstitution().isEmpty()) {
        f = fmgr.substitute(pF, pOtherResult.getSubstitution());
    } else {
        f = pF;
    }

    int depth = getNestingDepthOfUfs(f);
    Multimap<FunctionDeclaration<?>, UninterpretedFunctionApplication> ufs = findUFs(f);
    ufs = merge(ufs, pOtherResult);

    ImmutableMap.Builder<Formula, Formula> substitutionsBuilder = ImmutableMap.builder();
    List<BooleanFormula> extraConstraints = new ArrayList<>();

    for (FunctionDeclaration<?> function : ufs.keySet()) {
        List<UninterpretedFunctionApplication> applications = new ArrayList<>(ufs.get(function));
        for (int idx1 = 0; idx1 < applications.size(); idx1++) {
            UninterpretedFunctionApplication application = applications.get(idx1);

            Formula uf = application.getFormula();
            List<Formula> args = application.getArguments();

            Formula substitution = application.getSubstitution();
            substitutionsBuilder.put(uf, substitution);

            for (int idx2 = idx1 + 1; idx2 < applications.size(); idx2++) {
                UninterpretedFunctionApplication application2 = applications.get(idx2);
                List<Formula> otherArgs = application2.getArguments();

                /*
                 * Add constraints to enforce functional consistency.
                 */
                Verify.verify(args.size() == otherArgs.size());
                Collection<BooleanFormula> argumentEquality = new ArrayList<>(args.size());
                for (int i = 0; i < args.size(); i++) {
                    Formula arg1 = args.get(i);
                    Formula arg2 = otherArgs.get(i);
                    argumentEquality.add(makeEqual(arg1, arg2));
                }

                BooleanFormula functionEquality = makeEqual(substitution, application2.getSubstitution());
                extraConstraints.add(bfmgr.implication(bfmgr.and(argumentEquality), functionEquality));
            }
        }
    }

    // Get rid of UFs.
    ImmutableMap<Formula, Formula> substitutions = substitutionsBuilder.build();
    BooleanFormula formulaWithoutUFs = fmgr.substitute(f, substitutions);

    // substitute all UFs in the additional constraints,
    // required if UFs are arguments of UFs, e.g. uf(uf(1, 2), 2)
    for (int i = 0; i < depth; i++) {
        extraConstraints = extraConstraints.stream().map(c -> fmgr.substitute(c, substitutions))
                .collect(Collectors.toList());
    }

    Map<Formula, Formula> otherSubstitution = difference(pOtherResult.getSubstitution(), substitutions)
            .entriesOnlyOnLeft();
    substitutionsBuilder.putAll(otherSubstitution);
    ImmutableMap<Formula, Formula> allSubstitutions = substitutionsBuilder.build();
    BooleanFormula constraints = bfmgr.and(extraConstraints);
    return new Result(formulaWithoutUFs, constraints, allSubstitutions, copyOf(ufs));
}
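
Note how eliminateUfs keeps using substitutionsBuilder after the first build() call: Guava's immutable-collection builders may be reused, and a later build() returns a map containing everything added so far. A minimal sketch of that pattern (the keys and values are illustrative):

import com.google.common.collect.ImmutableMap;

public class BuilderReuseSketch {
    public static void main(String[] args) {
        ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
        builder.put("a", "1");
        ImmutableMap<String, String> first = builder.build();   // {a=1}

        // The builder can keep accumulating entries after build(); a later
        // build() returns a superset of the earlier map, as in eliminateUfs above.
        builder.putAll(ImmutableMap.of("b", "2"));
        ImmutableMap<String, String> second = builder.build();  // {a=1, b=2}

        System.out.println(first);
        System.out.println(second);
    }
}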

From source file:com.facebook.buck.features.apple.project.ProjectGenerator.java

/** @return a map of all exported platform headers without matching a specific platform. */
public static ImmutableMap<Path, SourcePath> parseAllPlatformHeaders(BuildTarget buildTarget,
        SourcePathResolver sourcePathResolver, ImmutableList<SourceSortedSet> platformHeaders, boolean export,
        CxxLibraryDescription.CommonArg args) {
    ImmutableMap.Builder<String, SourcePath> parsed = ImmutableMap.builder();

    String parameterName = (export) ? "exported_platform_headers" : "platform_headers";

    // Include all platform specific headers.
    for (SourceSortedSet sourceList : platformHeaders) {
        parsed.putAll(sourceList.toNameMap(buildTarget, sourcePathResolver, parameterName, path -> true,
                path -> path));
    }
    return CxxPreprocessables.resolveHeaderMap(
            args.getHeaderNamespace().map(Paths::get).orElse(buildTarget.getBasePath()), parsed.build());
}

From source file:com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore.java

public synchronized Optional<Map<String, Map<String, HiveColumnStatistics>>> getPartitionColumnStatistics(
        String databaseName, String tableName, Set<String> partitionNames, Set<String> columnNames) {
    checkReadable();
    Optional<Table> table = getTable(databaseName, tableName);
    if (!table.isPresent()) {
        return Optional.empty();
    }
    TableSource tableSource = getTableSource(databaseName, tableName);
    Map<List<String>, Action<PartitionAndMore>> partitionActionsOfTable = partitionActions
            .computeIfAbsent(new SchemaTableName(databaseName, tableName), k -> new HashMap<>());
    ImmutableSet.Builder<String> partitionNamesToQuery = ImmutableSet.builder();
    ImmutableMap.Builder<String, Map<String, HiveColumnStatistics>> resultBuilder = ImmutableMap.builder();
    for (String partitionName : partitionNames) {
        List<String> partitionValues = toPartitionValues(partitionName);
        Action<PartitionAndMore> partitionAction = partitionActionsOfTable.get(partitionValues);
        if (partitionAction == null) {
            switch (tableSource) {
            case PRE_EXISTING_TABLE:
                partitionNamesToQuery.add(partitionName);
                break;
            case CREATED_IN_THIS_TRANSACTION:
                resultBuilder.put(partitionName, ImmutableMap.of());
                break;
            default:
                throw new UnsupportedOperationException("unknown table source");
            }
        } else {
            resultBuilder.put(partitionName, ImmutableMap.of());
        }
    }

    Optional<Map<String, Map<String, HiveColumnStatistics>>> delegateResult = delegate
            .getPartitionColumnStatistics(databaseName, tableName, partitionNamesToQuery.build(), columnNames);
    if (delegateResult.isPresent()) {
        resultBuilder.putAll(delegateResult.get());
    } else {
            partitionNamesToQuery.build().forEach(partitionName -> resultBuilder.put(partitionName, ImmutableMap.of()));
    }
    return Optional.of(resultBuilder.build());
}

From source file:io.prestosql.plugin.hive.metastore.SemiTransactionalHiveMetastore.java

public synchronized Map<String, Optional<Partition>> getPartitionsByNames(String databaseName, String tableName,
        List<String> partitionNames) {
    checkReadable();
    TableSource tableSource = getTableSource(databaseName, tableName);
    Map<List<String>, Action<PartitionAndMore>> partitionActionsOfTable = partitionActions
            .computeIfAbsent(new SchemaTableName(databaseName, tableName), k -> new HashMap<>());
    ImmutableList.Builder<String> partitionNamesToQuery = ImmutableList.builder();
    ImmutableMap.Builder<String, Optional<Partition>> resultBuilder = ImmutableMap.builder();
    for (String partitionName : partitionNames) {
        List<String> partitionValues = toPartitionValues(partitionName);
        Action<PartitionAndMore> partitionAction = partitionActionsOfTable.get(partitionValues);
        if (partitionAction == null) {
            switch (tableSource) {
            case PRE_EXISTING_TABLE:
                partitionNamesToQuery.add(partitionName);
                break;
            case CREATED_IN_THIS_TRANSACTION:
                resultBuilder.put(partitionName, Optional.empty());
                break;
            default:
                throw new UnsupportedOperationException("unknown table source");
            }
        } else {
            resultBuilder.put(partitionName, getPartitionFromPartitionAction(partitionAction));
        }
    }
    Map<String, Optional<Partition>> delegateResult = delegate.getPartitionsByNames(databaseName, tableName,
            partitionNamesToQuery.build());
    resultBuilder.putAll(delegateResult);
    return resultBuilder.build();
}

From source file:io.prestosql.plugin.hive.metastore.SemiTransactionalHiveMetastore.java

public synchronized Map<String, PartitionStatistics> getPartitionStatistics(String databaseName,
        String tableName, Set<String> partitionNames) {
    checkReadable();
    Optional<Table> table = getTable(databaseName, tableName);
    if (!table.isPresent()) {
        return ImmutableMap.of();
    }
    TableSource tableSource = getTableSource(databaseName, tableName);
    Map<List<String>, Action<PartitionAndMore>> partitionActionsOfTable = partitionActions
            .computeIfAbsent(new SchemaTableName(databaseName, tableName), k -> new HashMap<>());
    ImmutableSet.Builder<String> partitionNamesToQuery = ImmutableSet.builder();
    ImmutableMap.Builder<String, PartitionStatistics> resultBuilder = ImmutableMap.builder();
    for (String partitionName : partitionNames) {
        List<String> partitionValues = toPartitionValues(partitionName);
        Action<PartitionAndMore> partitionAction = partitionActionsOfTable.get(partitionValues);
        if (partitionAction == null) {
            switch (tableSource) {
            case PRE_EXISTING_TABLE:
                partitionNamesToQuery.add(partitionName);
                break;
            case CREATED_IN_THIS_TRANSACTION:
                resultBuilder.put(partitionName, PartitionStatistics.empty());
                break;
            default:
                throw new UnsupportedOperationException("unknown table source");
            }
        } else {
            resultBuilder.put(partitionName, partitionAction.getData().getStatistics());
        }
    }

    Map<String, PartitionStatistics> delegateResult = delegate.getPartitionStatistics(databaseName, tableName,
            partitionNamesToQuery.build());
    if (!delegateResult.isEmpty()) {
        resultBuilder.putAll(delegateResult);
    } else {
        partitionNamesToQuery.build()
                .forEach(partitionName -> resultBuilder.put(partitionName, PartitionStatistics.empty()));
    }
    return resultBuilder.build();
}

From source file:io.prestosql.plugin.hive.HiveMetadata.java

private ConnectorTableMetadata getTableMetadata(SchemaTableName tableName) {
    Optional<Table> table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName());
    if (!table.isPresent() || table.get().getTableType().equals(TableType.VIRTUAL_VIEW.name())) {
        throw new TableNotFoundException(tableName);
    }

    Function<HiveColumnHandle, ColumnMetadata> metadataGetter = columnMetadataGetter(table.get(), typeManager);
    ImmutableList.Builder<ColumnMetadata> columns = ImmutableList.builder();
    for (HiveColumnHandle columnHandle : hiveColumnHandles(table.get())) {
        columns.add(metadataGetter.apply(columnHandle));
    }

    // External location property
    ImmutableMap.Builder<String, Object> properties = ImmutableMap.builder();
    if (table.get().getTableType().equals(EXTERNAL_TABLE.name())) {
        properties.put(EXTERNAL_LOCATION_PROPERTY, table.get().getStorage().getLocation());
    }

    // Storage format property
    try {
        HiveStorageFormat format = extractHiveStorageFormat(table.get());
        properties.put(STORAGE_FORMAT_PROPERTY, format);
    } catch (PrestoException ignored) {
        // todo fail if format is not known
    }

    // Partitioning property
    List<String> partitionedBy = table.get().getPartitionColumns().stream().map(Column::getName)
            .collect(toList());
    if (!partitionedBy.isEmpty()) {
        properties.put(PARTITIONED_BY_PROPERTY, partitionedBy);
    }

    // Bucket properties
    Optional<HiveBucketProperty> bucketProperty = table.get().getStorage().getBucketProperty();
    if (bucketProperty.isPresent()) {
        properties.put(BUCKET_COUNT_PROPERTY, bucketProperty.get().getBucketCount());
        properties.put(BUCKETED_BY_PROPERTY, bucketProperty.get().getBucketedBy());
        properties.put(SORTED_BY_PROPERTY, bucketProperty.get().getSortedBy());
    }

    // ORC format specific properties
    String orcBloomFilterColumns = table.get().getParameters().get(ORC_BLOOM_FILTER_COLUMNS_KEY);
    if (orcBloomFilterColumns != null) {
        properties.put(ORC_BLOOM_FILTER_COLUMNS,
                Splitter.on(',').trimResults().omitEmptyStrings().splitToList(orcBloomFilterColumns));
    }
    String orcBloomFilterFfp = table.get().getParameters().get(ORC_BLOOM_FILTER_FPP_KEY);
    if (orcBloomFilterFfp != null) {
        properties.put(ORC_BLOOM_FILTER_FPP, Double.parseDouble(orcBloomFilterFfp));
    }

    // Avro specific property
    String avroSchemaUrl = table.get().getParameters().get(AVRO_SCHEMA_URL_KEY);
    if (avroSchemaUrl != null) {
        properties.put(AVRO_SCHEMA_URL, avroSchemaUrl);
    }

    // Hook point for extended versions of the Hive Plugin
    properties.putAll(tableParameterCodec.decode(table.get().getParameters()));

    Optional<String> comment = Optional.ofNullable(table.get().getParameters().get(TABLE_COMMENT));

    return new ConnectorTableMetadata(tableName, columns.build(), properties.build(), comment);
}

From source file:com.google.devtools.build.lib.skyframe.SkyframeExecutor.java

private ImmutableMap<SkyFunctionName, SkyFunction> skyFunctions(PackageFactory pkgFactory,
        Predicate<PathFragment> allowedMissingInputs) {
    ConfiguredRuleClassProvider ruleClassProvider = (ConfiguredRuleClassProvider) pkgFactory
            .getRuleClassProvider();
    // TODO(janakr): use this semaphore to bound memory usage for SkyFunctions besides
    // ConfiguredTargetFunction that may have a large temporary memory blow-up.
    Semaphore cpuBoundSemaphore = new Semaphore(ResourceUsage.getAvailableProcessors());
    // We use an immutable map builder for the nice side effect that it throws if a duplicate key
    // is inserted.
    ImmutableMap.Builder<SkyFunctionName, SkyFunction> map = ImmutableMap.builder();
    map.put(SkyFunctions.PRECOMPUTED, new PrecomputedFunction());
    map.put(SkyFunctions.CLIENT_ENVIRONMENT_VARIABLE, new ClientEnvironmentFunction());
    map.put(SkyFunctions.FILE_STATE, new FileStateFunction(tsgm, externalFilesHelper));
    map.put(SkyFunctions.DIRECTORY_LISTING_STATE, new DirectoryListingStateFunction(externalFilesHelper));
    map.put(SkyFunctions.FILE_SYMLINK_CYCLE_UNIQUENESS, new FileSymlinkCycleUniquenessFunction());
    map.put(SkyFunctions.FILE_SYMLINK_INFINITE_EXPANSION_UNIQUENESS,
            new FileSymlinkInfiniteExpansionUniquenessFunction());
    map.put(SkyFunctions.FILE, new FileFunction(pkgLocator));
    map.put(SkyFunctions.DIRECTORY_LISTING, new DirectoryListingFunction());
    map.put(SkyFunctions.PACKAGE_LOOKUP, new PackageLookupFunction(deletedPackages,
            crossRepositoryLabelViolationStrategy, buildFilesByPriority));
    map.put(SkyFunctions.CONTAINING_PACKAGE_LOOKUP, new ContainingPackageLookupFunction());
    map.put(SkyFunctions.AST_FILE_LOOKUP, new ASTFileLookupFunction(ruleClassProvider));
    map.put(SkyFunctions.SKYLARK_IMPORTS_LOOKUP, newSkylarkImportLookupFunction(ruleClassProvider, pkgFactory));
    map.put(SkyFunctions.GLOB, newGlobFunction());
    map.put(SkyFunctions.TARGET_PATTERN, new TargetPatternFunction());
    map.put(SkyFunctions.PREPARE_DEPS_OF_PATTERNS, new PrepareDepsOfPatternsFunction());
    map.put(SkyFunctions.PREPARE_DEPS_OF_PATTERN, new PrepareDepsOfPatternFunction(pkgLocator));
    map.put(SkyFunctions.PREPARE_DEPS_OF_TARGETS_UNDER_DIRECTORY,
            new PrepareDepsOfTargetsUnderDirectoryFunction(directories));
    map.put(SkyFunctions.COLLECT_TARGETS_IN_PACKAGE, new CollectTargetsInPackageFunction());
    map.put(SkyFunctions.COLLECT_PACKAGES_UNDER_DIRECTORY,
            new CollectPackagesUnderDirectoryFunction(directories));
    map.put(SkyFunctions.BLACKLISTED_PACKAGE_PREFIXES, new BlacklistedPackagePrefixesFunction());
    map.put(SkyFunctions.TESTS_IN_SUITE, new TestsInSuiteFunction());
    map.put(SkyFunctions.TEST_SUITE_EXPANSION, new TestSuiteExpansionFunction());
    map.put(SkyFunctions.TARGET_PATTERN_PHASE, new TargetPatternPhaseFunction());
    map.put(SkyFunctions.RECURSIVE_PKG, new RecursivePkgFunction(directories));
    map.put(SkyFunctions.PACKAGE, newPackageFunction(pkgFactory, packageManager, showLoadingProgress,
            packageFunctionCache, astCache, numPackagesLoaded, ruleClassProvider, packageProgress));
    map.put(SkyFunctions.PACKAGE_ERROR, new PackageErrorFunction());
    map.put(SkyFunctions.TARGET_MARKER, new TargetMarkerFunction());
    map.put(SkyFunctions.TRANSITIVE_TARGET, new TransitiveTargetFunction(ruleClassProvider));
    map.put(SkyFunctions.TRANSITIVE_TRAVERSAL, new TransitiveTraversalFunction());
    map.put(SkyFunctions.CONFIGURED_TARGET,
            new ConfiguredTargetFunction(new BuildViewProvider(), ruleClassProvider, cpuBoundSemaphore));
    map.put(SkyFunctions.ASPECT, new AspectFunction(new BuildViewProvider(), ruleClassProvider));
    map.put(SkyFunctions.LOAD_SKYLARK_ASPECT, new ToplevelSkylarkAspectFunction());
    map.put(SkyFunctions.POST_CONFIGURED_TARGET,
            new PostConfiguredTargetFunction(new BuildViewProvider(), ruleClassProvider));
    map.put(SkyFunctions.BUILD_CONFIGURATION, new BuildConfigurationFunction(directories, ruleClassProvider));
    map.put(SkyFunctions.CONFIGURATION_COLLECTION,
            new ConfigurationCollectionFunction(configurationFactory, ruleClassProvider));
    map.put(SkyFunctions.CONFIGURATION_FRAGMENT,
            new ConfigurationFragmentFunction(configurationFragments, ruleClassProvider));
    map.put(SkyFunctions.WORKSPACE_AST, new WorkspaceASTFunction(ruleClassProvider));
    map.put(SkyFunctions.WORKSPACE_FILE, new WorkspaceFileFunction(ruleClassProvider, pkgFactory, directories));
    map.put(SkyFunctions.EXTERNAL_PACKAGE, new ExternalPackageFunction());
    map.put(SkyFunctions.TARGET_COMPLETION, CompletionFunction.targetCompletionFunction(eventBus));
    map.put(SkyFunctions.ASPECT_COMPLETION, CompletionFunction.aspectCompletionFunction(eventBus));
    map.put(SkyFunctions.TEST_COMPLETION, new TestCompletionFunction());
    map.put(SkyFunctions.ARTIFACT, new ArtifactFunction(allowedMissingInputs));
    map.put(SkyFunctions.BUILD_INFO_COLLECTION, new BuildInfoCollectionFunction(artifactFactory));
    map.put(SkyFunctions.BUILD_INFO, new WorkspaceStatusFunction());
    map.put(SkyFunctions.COVERAGE_REPORT, new CoverageReportFunction());
    ActionExecutionFunction actionExecutionFunction = new ActionExecutionFunction(skyframeActionExecutor, tsgm);
    map.put(SkyFunctions.ACTION_EXECUTION, actionExecutionFunction);
    this.actionExecutionFunction = actionExecutionFunction;
    map.put(SkyFunctions.RECURSIVE_FILESYSTEM_TRAVERSAL, new RecursiveFilesystemTraversalFunction());
    map.put(SkyFunctions.FILESET_ENTRY, new FilesetEntryFunction());
    map.put(SkyFunctions.ACTION_TEMPLATE_EXPANSION, new ActionTemplateExpansionFunction());
    map.put(SkyFunctions.LOCAL_REPOSITORY_LOOKUP, new LocalRepositoryLookupFunction());
    map.putAll(extraSkyFunctions);
    return map.build();
}