List of usage examples for com.google.common.collect Maps uniqueIndex
public static <K, V> ImmutableMap<K, V> uniqueIndex(Iterator<V> values, Function<? super V, K> keyFunction)
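Before the real-world examples, here is a minimal, self-contained sketch of what uniqueIndex does. The User class and its fields are illustrative only, not taken from any of the projects below. uniqueIndex builds an ImmutableMap from the key extracted by the key function to each value; it rejects null keys and values with NullPointerException and throws IllegalArgumentException if two values produce the same key. Most of the examples on this page call the equivalent Iterable overload.

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import java.util.List;

public class UniqueIndexDemo {
    // Illustrative value type, not taken from any project on this page.
    static final class User {
        final String id;
        final String name;
        User(String id, String name) {
            this.id = id;
            this.name = name;
        }
    }

    public static void main(String[] args) {
        List<User> users = ImmutableList.of(new User("u1", "Ada"), new User("u2", "Grace"));

        // Index each User by its id. uniqueIndex preserves the iteration order of
        // the input, rejects null keys/values with NullPointerException, and throws
        // IllegalArgumentException if two values map to the same key.
        ImmutableMap<String, User> byId = Maps.uniqueIndex(users, u -> u.id);

        System.out.println(byId.get("u2").name); // prints "Grace"
    }
}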
From source file:com.twitter.aurora.scheduler.state.StateManagerImpl.java
private Map<String, TaskStateMachine> getStateMachines(final Set<String> taskIds) {
    return storage.consistentRead(new Work.Quiet<Map<String, TaskStateMachine>>() {
        @Override
        public Map<String, TaskStateMachine> apply(StoreProvider storeProvider) {
            Map<String, IScheduledTask> existingTasks = Maps.uniqueIndex(
                storeProvider.getTaskStore().fetchTasks(Query.taskScoped(taskIds)),
                new Function<IScheduledTask, String>() {
                    @Override
                    public String apply(IScheduledTask input) {
                        return input.getAssignedTask().getTaskId();
                    }
                });

            ImmutableMap.Builder<String, TaskStateMachine> builder = ImmutableMap.builder();
            for (String taskId : taskIds) {
                // Pass null get() values through.
                builder.put(taskId, getStateMachine(taskId, existingTasks.get(taskId)));
            }
            return builder.build();
        }
    });
}
From source file:com.twitter.common.args.ArgScanner.java
/**
 * Applies argument values to fields based on their annotations.
 *
 * @param parserOracle ParserOracle available to parse raw args with.
 * @param verifiers Verifiers available to verify argument constraints with.
 * @param argsInfo Fields to apply argument values to.
 * @param args Unparsed argument values.
 * @param positionalArgs The unparsed positional arguments.
 * @return {@code true} if the given {@code args} were successfully applied to their
 *     corresponding {@link com.twitter.common.args.Arg} fields.
 */
private boolean process(final ParserOracle parserOracle, Verifiers verifiers, ArgsInfo argsInfo,
    Map<String, String> args, List<String> positionalArgs) {

    if (!Sets.intersection(args.keySet(), ArgumentInfo.HELP_ARGS).isEmpty()) {
        printHelp(verifiers, argsInfo);
        return false;
    }

    Optional<? extends PositionalInfo<?>> positionalInfoOptional = argsInfo.getPositionalInfo();
    checkArgument(positionalInfoOptional.isPresent() || positionalArgs.isEmpty(),
        "Positional arguments have been supplied but there is no Arg annotated to receive them.");

    Iterable<? extends OptionInfo<?>> optionInfos = argsInfo.getOptionInfos();
    final Set<String> argsFailedToParse = Sets.newHashSet();
    final Set<String> argsConstraintsFailed = Sets.newHashSet();

    Set<String> argAllShortNamesNoCollisions = getNoCollisions(optionInfos);

    final Map<String, OptionInfo<?>> argsByName = ImmutableMap.<String, OptionInfo<?>>builder()
        // Map by short arg name -> arg def.
        .putAll(Maps.uniqueIndex(
            Iterables.filter(optionInfos,
                Predicates.compose(Predicates.in(argAllShortNamesNoCollisions), GET_OPTION_INFO_NAME)),
            GET_OPTION_INFO_NAME))
        // Map by canonical arg name -> arg def.
        .putAll(Maps.uniqueIndex(optionInfos, GET_CANONICAL_ARG_NAME))
        // Map by negated short arg name (for booleans).
        .putAll(Maps.uniqueIndex(
            Iterables.filter(Iterables.filter(optionInfos, IS_BOOLEAN),
                Predicates.compose(Predicates.in(argAllShortNamesNoCollisions),
                    GET_OPTION_INFO_NEGATED_NAME)),
            GET_OPTION_INFO_NEGATED_NAME))
        // Map by negated canonical arg name (for booleans).
        .putAll(Maps.uniqueIndex(Iterables.filter(optionInfos, IS_BOOLEAN),
            GET_CANONICAL_NEGATED_ARG_NAME))
        .build();

    // TODO(William Farner): Make sure to disallow duplicate arg specification by short and
    // canonical names.

    // TODO(William Farner): Support non-atomic argument constraints. @OnlyIfSet, @OnlyIfNotSet,
    // @ExclusiveOf to define inter-argument constraints.

    Set<String> recognizedArgs = Sets.intersection(argsByName.keySet(), args.keySet());

    for (String argName : recognizedArgs) {
        String argValue = args.get(argName);
        OptionInfo<?> optionInfo = argsByName.get(argName);

        try {
            optionInfo.load(parserOracle, argName, argValue);
        } catch (IllegalArgumentException e) {
            argsFailedToParse.add(argName + " - " + e.getMessage());
        }
    }

    if (positionalInfoOptional.isPresent()) {
        PositionalInfo<?> positionalInfo = positionalInfoOptional.get();
        positionalInfo.load(parserOracle, positionalArgs);
    }

    Set<String> commandLineArgumentInfos = Sets.newTreeSet();

    Iterable<? extends ArgumentInfo<?>> allArguments = argsInfo.getOptionInfos();
    if (positionalInfoOptional.isPresent()) {
        PositionalInfo<?> positionalInfo = positionalInfoOptional.get();
        allArguments = Iterables.concat(optionInfos, ImmutableList.of(positionalInfo));
    }

    for (ArgumentInfo<?> anArgumentInfo : allArguments) {
        Arg<?> arg = anArgumentInfo.getArg();

        commandLineArgumentInfos.add(String.format("%s (%s): %s", anArgumentInfo.getName(),
            anArgumentInfo.getCanonicalName(), arg.uncheckedGet()));

        try {
            anArgumentInfo.verify(verifiers);
        } catch (IllegalArgumentException e) {
            argsConstraintsFailed.add(anArgumentInfo.getName() + " - " + e.getMessage());
        }
    }

    ImmutableMultimap<String, String> warningMessages = ImmutableMultimap.<String, String>builder()
        .putAll("Unrecognized arguments", Sets.difference(args.keySet(), argsByName.keySet()))
        .putAll("Failed to parse", argsFailedToParse)
        .putAll("Value did not meet constraints", argsConstraintsFailed)
        .build();

    if (!warningMessages.isEmpty()) {
        printHelp(verifiers, argsInfo);
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<String, Collection<String>> warnings : warningMessages.asMap().entrySet()) {
            sb.append(warnings.getKey()).append(":\n\t")
                .append(Joiner.on("\n\t").join(warnings.getValue())).append("\n");
        }
        throw new IllegalArgumentException(sb.toString());
    }

    LOG.info("-------------------------------------------------------------------------");
    LOG.info("Command line argument values");
    for (String commandLineArgumentInfo : commandLineArgumentInfos) {
        LOG.info(commandLineArgumentInfo);
    }
    LOG.info("-------------------------------------------------------------------------");
    return true;
}
From source file:org.opentestsystem.authoring.testauth.publish.BasePublisherHelper.java
protected Map<String, TestItem> buildTestItemMap(final List<TestItem> testItemList) {
    return Maps.uniqueIndex(testItemList, TESTITEM_ID_TRANSFORMER);
}
From source file:com.b2international.snowowl.snomed.datastore.request.SnomedConceptUpdateRequest.java
private <T extends EObject, U extends SnomedComponent> boolean updateComponents(
        final TransactionContext context, final Concept concept,
        final Set<String> previousComponentIds, final Iterable<U> currentComponents,
        final Function<String, Request<TransactionContext, ?>> toDeleteRequest) {

    // pre process all incoming components
    currentComponents.forEach(component -> {
        // all incoming components should define their ID in order to be processed
        if (Strings.isNullOrEmpty(component.getId())) {
            throw new BadRequestException("New components require their id to be set.");
        }
        // all components should have their module ID set
        if (Strings.isNullOrEmpty(component.getModuleId())) {
            throw new BadRequestException("It is required to specify the moduleId for the components.");
        }
    });

    // collect new/changed/deleted components and process them
    final Map<String, U> currentComponentsById = Maps.uniqueIndex(currentComponents,
            component -> component.getId());

    return Sets.union(previousComponentIds, currentComponentsById.keySet()).stream().map(componentId -> {
        if (!previousComponentIds.contains(componentId)
                && currentComponentsById.containsKey(componentId)) {
            // new component
            return currentComponentsById.get(componentId).toCreateRequest(concept.getId());
        } else if (previousComponentIds.contains(componentId)
                && currentComponentsById.containsKey(componentId)) {
            // changed component
            return currentComponentsById.get(componentId).toUpdateRequest();
        } else if (previousComponentIds.contains(componentId)
                && !currentComponentsById.containsKey(componentId)) {
            // deleted component
            return toDeleteRequest.apply(componentId);
        } else {
            throw new IllegalStateException("Invalid case, should not happen");
        }
    }).map(req -> req.execute(context))
            .filter(Boolean.class::isInstance)
            .map(Boolean.class::cast)
            .reduce(Boolean.FALSE, (r1, r2) -> r1 || r2);
}
From source file:org.locationtech.geogig.remotes.pack.PushOp.java
/**
 * Prepares a request upon which {@link SendPackOp} will resolve the set of {@link RevObject}s
 * to transfer from the local to the remote repo.
 *
 * @param pushRequests the resolved push requests
 * @param remoteRefs the current state of the remote refs in its local refs namespace (i.e. as
 *        {@code refs/heads/*}, not {@code refs/remotes/...})
 */
private PackRequest prepareRequest(List<PushReq> pushRequests, Set<Ref> remoteRefs) {
    PackRequest req = new PackRequest();

    final Map<String, Ref> remoteRefsByName = Maps.uniqueIndex(remoteRefs, (r) -> r.getName());

    for (PushReq preq : pushRequests) {
        if (preq.delete) {
            continue; // deletes are handled after data transfer
        }

        final Ref localRef = preq.localRef;
        final String remoteRefName = preq.remoteRef;
        checkNotNull(localRef);
        checkNotNull(remoteRefName);

        final ObjectId want = localRef.getObjectId();
        Ref resolvedRemoteRef = remoteRefsByName.get(remoteRefName);
        final @Nullable ObjectId have;
        if (preq.forceUpdate) {
            have = findShallowestCommonAncestor(want,
                    Sets.newHashSet(Iterables.transform(remoteRefs, (r) -> r.getObjectId())));
        } else {
            try {
                checkPush(localRef, resolvedRemoteRef);
            } catch (SynchronizationException e) {
                if (e.statusCode == StatusCode.NOTHING_TO_PUSH) {
                    continue;
                }
                throw e;
            }
            if (resolvedRemoteRef == null) {
                resolvedRemoteRef = remoteRefsByName.get(localRef.getName());
            }
            if (resolvedRemoteRef == null) {
                // creating a new branch on the remote from a branch in the local repo, let's
                // check if we can figure out a common ancestor
                have = findShallowestCommonAncestor(want,
                        Sets.newHashSet(Iterables.transform(remoteRefs, (r) -> r.getObjectId())));
            } else {
                // have is guaranteed to be in the local repo because of checkPush above
                have = resolvedRemoteRef.getObjectId();
            }
        }
        RefRequest refReq = RefRequest.want(localRef, have);
        req.addRef(refReq);
    }
    return req;
}
From source file:com.twitter.aurora.scheduler.thrift.SchedulerThriftInterface.java
@Override
public Response getJobs(@Nullable String maybeNullRole) {
    Optional<String> ownerRole = Optional.fromNullable(maybeNullRole);

    // Ensure we only return one JobConfiguration for each JobKey.
    Map<IJobKey, IJobConfiguration> jobs = Maps.newHashMap();

    // Query the task store, find immediate jobs, and synthesize a JobConfiguration for them.
    // This is necessary because the ImmediateJobManager doesn't store jobs directly and
    // ImmediateJobManager#getJobs always returns an empty Collection.
    Query.Builder scope = ownerRole.isPresent() ? Query.roleScoped(ownerRole.get()) : Query.unscoped();
    Multimap<IJobKey, IScheduledTask> tasks =
        Tasks.byJobKey(Storage.Util.weaklyConsistentFetchTasks(storage, scope.active()));

    jobs.putAll(Maps.transformEntries(tasks.asMap(),
        new Maps.EntryTransformer<IJobKey, Collection<IScheduledTask>, IJobConfiguration>() {
            @Override
            public IJobConfiguration transformEntry(IJobKey jobKey, Collection<IScheduledTask> tasks) {
                // Pick an arbitrary task for each immediate job. The chosen task might not be the
                // most recent if the job is in the middle of an update or some shards have been
                // selectively created.
                TaskConfig firstTask = tasks.iterator().next().getAssignedTask().getTask().newBuilder();
                return IJobConfiguration.build(new JobConfiguration()
                    .setKey(jobKey.newBuilder())
                    .setOwner(firstTask.getOwner())
                    .setTaskConfig(firstTask)
                    .setInstanceCount(tasks.size()));
            }
        }));

    // Get cron jobs directly from the manager. Do this after querying the task store so the real
    // template JobConfiguration for a cron job will overwrite the synthesized one that could have
    // been created above.
    Predicate<IJobConfiguration> configFilter = ownerRole.isPresent()
        ? Predicates.compose(Predicates.equalTo(ownerRole.get()), JobKeys.CONFIG_TO_ROLE)
        : Predicates.<IJobConfiguration>alwaysTrue();
    jobs.putAll(Maps.uniqueIndex(
        FluentIterable.from(cronJobManager.getJobs()).filter(configFilter),
        JobKeys.FROM_CONFIG));

    return new Response().setResponseCode(OK).setResult(Result.getJobsResult(
        new GetJobsResult().setConfigs(IJobConfiguration.toBuildersSet(jobs.values()))));
}
From source file:com.google.api.codegen.csharp.CSharpGapicContext.java
public List<MethodInfo> getMethodInfos(Interface service) {
    final InterfaceConfig interfaceConfig = getApiConfig().getInterfaceConfig(service);
    RetryInfo retryInfo = getRetryInfo(service);
    final Map<String, RetryDefInfo> retryDefByName = Maps.uniqueIndex(retryInfo.defs(),
        new Function<RetryDefInfo, String>() {
            @Override
            public String apply(RetryDefInfo value) {
                return value.rawName();
            }
        });
    final Map<String, RetrySettingInfo> retrySettingByName = Maps.uniqueIndex(retryInfo.settings(),
        new Function<RetrySettingInfo, String>() {
            @Override
            public String apply(RetrySettingInfo value) {
                return value.rawName();
            }
        });
    // TODO: Change back to .from(service.getMethods()) once streaming is implemented.
    // We ignore streaming for now to not cause test failures.
    return FluentIterable.from(getNonStreamingMethods(service))
        .transform(new Function<Method, MethodInfo>() {
            @Override
            public MethodInfo apply(Method method) {
                MethodConfig methodConfig = interfaceConfig.getMethodConfig(method);
                return createMethodInfo(interfaceConfig, method, methodConfig,
                    retryDefByName.get(methodConfig.getRetryCodesConfigName()),
                    retrySettingByName.get(methodConfig.getRetrySettingsConfigName()));
            }
        })
        .filter(new Predicate<MethodInfo>() {
            @Override
            public boolean apply(MethodInfo method) {
                return method.anyFlats();
            }
        })
        .toList();
}
From source file:org.obm.sync.calendar.Event.java
private Map<Date, Event> indexEventExceptionsByRecurrenceId(Iterable<Event> afterOccurrences) {
    Map<Date, Event> afterOccurrencesByReccurrenceId = Maps.uniqueIndex(afterOccurrences,
        new Function<Event, Date>() {
            @Override
            public Date apply(Event input) {
                return input.getRecurrenceId();
            }
        });
    return afterOccurrencesByReccurrenceId;
}
From source file:io.druid.client.cache.MemcachedCache.java
@Override
public Map<NamedKey, byte[]> getBulk(Iterable<NamedKey> keys) {
    try (ResourceHolder<MemcachedClientIF> clientHolder = client.get()) {
        Map<String, NamedKey> keyLookup = Maps.uniqueIndex(keys, new Function<NamedKey, String>() {
            @Override
            public String apply(@Nullable NamedKey input) {
                return computeKeyHash(memcachedPrefix, input);
            }
        });

        Map<NamedKey, byte[]> results = Maps.newHashMap();

        BulkFuture<Map<String, Object>> future;
        try {
            future = clientHolder.get().asyncGetBulk(keyLookup.keySet());
        } catch (IllegalStateException e) {
            // operation did not get queued in time (queue is full)
            errorCount.incrementAndGet();
            log.warn(e, "Unable to queue cache operation");
            return results;
        }

        try {
            Map<String, Object> some = future.getSome(timeout, TimeUnit.MILLISECONDS);

            if (future.isTimeout()) {
                future.cancel(false);
                timeoutCount.incrementAndGet();
            }
            missCount.addAndGet(keyLookup.size() - some.size());
            hitCount.addAndGet(some.size());

            for (Map.Entry<String, Object> entry : some.entrySet()) {
                final NamedKey key = keyLookup.get(entry.getKey());
                final byte[] value = (byte[]) entry.getValue();
                if (value != null) {
                    results.put(key, deserializeValue(key, value));
                }
            }
            return results;
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw Throwables.propagate(e);
        } catch (ExecutionException e) {
            errorCount.incrementAndGet();
            log.warn(e, "Exception pulling item from cache");
            return results;
        }
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}
From source file:org.locationtech.geogig.model.impl.LegacyTreeBuilder.java
private Map<Integer, RevTree> getBucketTrees(ImmutableSet<Integer> changedBucketIndexes) {
    Map<Integer, RevTree> bucketTrees = new HashMap<>();
    List<Integer> missing = new ArrayList<>(changedBucketIndexes.size());
    for (Integer bucketIndex : changedBucketIndexes) {
        Bucket bucket = bucketTreesByBucket.get(bucketIndex);
        RevTree cached = bucket == null ? RevTree.EMPTY : pendingWritesCache.get(bucket.getObjectId());
        if (cached == null) {
            missing.add(bucketIndex);
        } else {
            bucketTrees.put(bucketIndex, cached);
        }
    }
    if (!missing.isEmpty()) {
        Map<ObjectId, Integer> ids = Maps.uniqueIndex(missing, new Function<Integer, ObjectId>() {
            @Override
            public ObjectId apply(Integer index) {
                return bucketTreesByBucket.get(index).getObjectId();
            }
        });
        Iterator<RevObject> all = obStore.getAll(ids.keySet());
        while (all.hasNext()) {
            RevObject next = all.next();
            bucketTrees.put(ids.get(next.getId()), (RevTree) next);
        }
    }
    return bucketTrees;
}