List of usage examples for com.google.common.collect Maps transformValues
@GwtIncompatible("NavigableMap") public static <K, V1, V2> NavigableMap<K, V2> transformValues(NavigableMap<K, V1> fromMap, Function<? super V1, V2> function)
From source file:org.apache.apex.malhar.lib.state.managed.ManagedStateTestUtils.java
/**
 * Validates the bucket data on the file system.
 *
 * @param fileAccess        file access
 * @param bucketId          bucket id
 * @param unsavedBucket     bucket data to compare with
 * @param keysPerTimeBucket num keys per time bucket
 * @throws IOException
 */
public static void validateBucketOnFileSystem(FileAccess fileAccess, long bucketId,
    Map<Slice, Bucket.BucketedValue> unsavedBucket, int keysPerTimeBucket) throws IOException
{
  RemoteIterator<LocatedFileStatus> iterator = fileAccess.listFiles(bucketId);
  TreeMap<Slice, Slice> fromDisk = Maps.newTreeMap(new SliceComparator());
  int size = 0;
  while (iterator.hasNext()) {
    LocatedFileStatus fileStatus = iterator.next();

    String timeBucketStr = fileStatus.getPath().getName();
    if (timeBucketStr.equals(BucketsFileSystem.META_FILE_NAME) || timeBucketStr.endsWith(".tmp")) {
      //ignoring meta file
      continue;
    }

    LOG.debug("bucket {} time-bucket {}", bucketId, timeBucketStr);

    FileAccess.FileReader reader = fileAccess.getReader(bucketId, timeBucketStr);
    reader.readFully(fromDisk);
    size += keysPerTimeBucket;
    Assert.assertEquals("size of bucket " + bucketId, size, fromDisk.size());
  }

  Assert.assertEquals("size of bucket " + bucketId, unsavedBucket.size(), fromDisk.size());

  Map<Slice, Slice> testBucket = Maps.transformValues(unsavedBucket,
      new Function<Bucket.BucketedValue, Slice>()
      {
        @Override
        public Slice apply(@Nullable Bucket.BucketedValue input)
        {
          assert input != null;
          return input.getValue();
        }
      });

  Assert.assertEquals("data of bucket " + bucketId, testBucket, fromDisk);
}
From source file:com.continuuity.weave.internal.yarn.Hadoop20YarnLaunchContext.java
@Override
public void setLocalResources(Map<String, YarnLocalResource> localResources) {
    launchContext.setLocalResources(Maps.transformValues(localResources, RESOURCE_TRANSFORM));
}
From source file:org.apache.aurora.scheduler.http.Quotas.java
/**
 * Dumps allocated resource quotas.
 *
 * @return HTTP response.
 */
@GET
@Produces(MediaType.APPLICATION_JSON)
public Response getQuotas(@QueryParam("role") final String role) {
    return storage.read(storeProvider -> {
        Map<String, IResourceAggregate> quotas;
        if (role == null) {
            quotas = storeProvider.getQuotaStore().fetchQuotas();
        } else {
            Optional<IResourceAggregate> quota = storeProvider.getQuotaStore().fetchQuota(role);
            if (quota.isPresent()) {
                quotas = ImmutableMap.of(role, quota.get());
            } else {
                quotas = ImmutableMap.of();
            }
        }

        return Response.ok(Maps.transformValues(quotas, TO_BEAN)).build();
    });
}
From source file:org.carrot2.output.metrics.NormalizedMutualInformationMetric.java
public void calculate() {
    final int partitionCount = getPartitionsCount(documents);
    if (partitionCount == 0) {
        return;
    }

    if (clusters.size() == 0) {
        return;
    }

    final Set<Object> partitions = getPartitions(documents);
    final SetMultimap<Object, Document> documentsByPartition = getDocumentsByPartition(documents);
    final Map<Object, Integer> documentCountByPartition = getDocumentCountByPartition(documents);
    final int documentCount = documents.size();

    if (partitions.size() <= 1) {
        normalizedMutualInformation = 0.0;
        return;
    }

    final Collection<Integer> partitionSizes = Maps.transformValues(documentsByPartition.asMap(),
            new Function<Collection<Document>, Integer>() {
                public Integer apply(Collection<Document> documents) {
                    return documents.size();
                }
            }).values();
    double partitionEntropy = entropy(documentCount,
            partitionSizes.toArray(new Integer[partitionSizes.size()]));

    final List<Integer> clusterSizes = Lists.transform(clusters, new Function<Cluster, Integer>() {
        public Integer apply(Cluster cluster) {
            return cluster.size();
        }
    });
    double clusterEntropy = entropy(documentCount,
            clusterSizes.toArray(new Integer[clusterSizes.size()]));

    double mutualInformation = 0;
    for (Cluster cluster : this.clusters) {
        final int clusterSize = cluster.size();
        for (Object partition : partitions) {
            final List<Document> clusterDocuments = cluster.getAllDocuments();
            if (cluster.isOtherTopics() || clusterDocuments.size() == 0) {
                continue;
            }

            final Set<Document> commonDocuments = Sets.newHashSet(documentsByPartition.get(partition));
            commonDocuments.retainAll(clusterDocuments);

            int commonDocumentsCount = commonDocuments.size();
            if (commonDocumentsCount != 0) {
                mutualInformation += (commonDocumentsCount / (double) documentCount)
                        * Math.log(documentCount * commonDocumentsCount
                                / (double) (clusterSize * documentCountByPartition.get(partition)));
            }
        }
    }

    normalizedMutualInformation = mutualInformation / ((clusterEntropy + partitionEntropy) / 2);
}
From source file:io.atomix.core.set.impl.DistributedSetProxy.java
@Override
public CompletableFuture<Boolean> prepare(TransactionLog<SetUpdate<String>> transactionLog) {
    Map<PartitionId, List<SetUpdate<String>>> updatesGroupedBySet = Maps.newIdentityHashMap();
    transactionLog.records().forEach(update -> {
        updatesGroupedBySet
                .computeIfAbsent(getProxyClient().getPartitionId(update.element()), k -> Lists.newLinkedList())
                .add(update);
    });
    Map<PartitionId, TransactionLog<SetUpdate<String>>> transactionsBySet = Maps.transformValues(
            updatesGroupedBySet,
            list -> new TransactionLog<>(transactionLog.transactionId(), transactionLog.version(), list));

    return Futures
            .allOf(transactionsBySet.entrySet().stream()
                    .map(e -> getProxyClient().applyOn(e.getKey(), service -> service.prepare(e.getValue()))
                            .thenApply(v -> v == PrepareResult.OK || v == PrepareResult.PARTIAL_FAILURE))
                    .collect(Collectors.toList()))
            .thenApply(list -> list.stream().reduce(Boolean::logicalAnd).orElse(true));
}
From source file:beans.MessageImpl.java
@Override
public Void send(UserMessage message) {
    if (producer == null) {
        producer = new KafkaProducer<>(producerProperties);
    }

    Request request = Controller.request();
    Map<String, List<String>> query = Maps.transformValues(request.queryString(),
            value -> Arrays.asList(value));
    RequestHeader header = RequestHeader.newBuilder().setRemoteAddress(request.remoteAddress())
            .setHost(request.host()).setMethod(request.method()).setPath(request.path()).setQuery(query)
            .setSecure(request.secure()).setTimestamp(new Date().getTime()).build();
    message.setRequestHeader(header);

    LOG.info("Producing message: " + message);
    try {
        producer.send(new ProducerRecord<>(Constants.TOPIC, message.getSubject(), message));
        return null;
    } catch (Exception e) {
        throw new RuntimeException("Unable to send Kafka event for message: " + message, e);
    }
}
From source file:net.staticsnow.nexus.repository.apt.internal.hosted.CompressingTempFileStore.java
public Map<String, FileMetadata> getFiles() {
    return Maps.transformValues(holdersByKey, holder -> new FileMetadata(holder));
}
From source file:ai.grakn.matcher.GraknMatchers.java
/**
 * Create a matcher to test against the results of a Graql query.
 */
public static Matcher<MatchQuery> results(
        Matcher<? extends Iterable<? extends Map<? extends Var, ? extends MatchableConcept>>> matcher) {
    return new PropertyMatcher<MatchQuery, Iterable<? extends Map<? extends Var, ? extends MatchableConcept>>>(
            matcher) {

        @Override
        public String getName() {
            return "results";
        }

        @Override
        Iterable<? extends Map<Var, ? extends MatchableConcept>> transform(MatchQuery item) {
            return item.stream().map(m -> Maps.transformValues(m.map(), MatchableConcept::of))
                    .collect(toList());
        }
    };
}
From source file:com.qcadoo.view.internal.components.grid.PredefinedFilter.java
public Map<String, String> getParsedFilterRestrictions() {
    return Maps.transformValues(filterRestrictions, PARSE_FILTER_VALUE_FUNC);
}
From source file:ai.grakn.test.matcher.GraknMatchers.java
/**
 * Create a matcher to test against the results of a Graql query.
 */
public static Matcher<MatchQuery> results(
        Matcher<? extends Iterable<? extends Map<? extends Var, ? extends MatchableConcept>>> matcher) {
    return new PropertyMatcher<MatchQuery, Iterable<? extends Map<? extends Var, ? extends MatchableConcept>>>(
            matcher) {

        @Override
        public String getName() {
            return "results";
        }

        @Override
        Iterable<? extends Map<Var, ? extends MatchableConcept>> transform(MatchQuery item) {
            return item.stream().map(m -> Maps.transformValues(m.map(), MatchableConcept::new))
                    .collect(toList());
        }
    };
}