List of usage examples for com.google.common.collect Maps uniqueIndex
public static <K, V> ImmutableMap<K, V> uniqueIndex(Iterable<V> values, Function<? super V, K> keyFunction)
public static <K, V> ImmutableMap<K, V> uniqueIndex(Iterator<V> values, Function<? super V, K> keyFunction)
(All of the examples below pass an Iterable, i.e. they use the first overload.)
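Every excerpt below relies on the same contract: uniqueIndex applies the key function to each value and returns an immutable value-by-key map, throwing IllegalArgumentException if two values produce the same key. Before the real-world examples, here is a minimal self-contained sketch of that contract; the class name and sample data are invented, and the lambdas assume Guava 21+ where com.google.common.base.Function is a functional interface:

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

public class UniqueIndexDemo {
    public static void main(String[] args) {
        ImmutableList<String> words = ImmutableList.of("apple", "banana", "cherry");

        // Index each word by its first character; every key must be unique.
        ImmutableMap<Character, String> byInitial =
                Maps.uniqueIndex(words, word -> word.charAt(0));
        System.out.println(byInitial); // {a=apple, b=banana, c=cherry}

        // Duplicate keys are rejected: "apple" and "avocado" both map to 'a'.
        try {
            Maps.uniqueIndex(ImmutableList.of("apple", "avocado"), word -> word.charAt(0));
        } catch (IllegalArgumentException expected) {
            System.out.println("rejected: " + expected.getMessage());
        }
    }
}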
From source file:com.microsoft.azure.management.appservice.implementation.WebAppImpl.java
@Override
public Map<String, HostNameBinding> getHostNameBindings() {
    List<HostNameBindingInner> collectionInner = this.manager().inner().webApps()
            .listHostNameBindings(resourceGroupName(), name());
    List<HostNameBinding> hostNameBindings = new ArrayList<>();
    for (HostNameBindingInner inner : collectionInner) {
        hostNameBindings.add(new HostNameBindingImpl<>(inner, this));
    }
    // Index each binding by its host name, stripped of the "<site name>/" prefix.
    return Collections.unmodifiableMap(
            Maps.uniqueIndex(hostNameBindings, new Function<HostNameBinding, String>() {
                @Override
                public String apply(HostNameBinding input) {
                    return input.name().replace(name() + "/", "");
                }
            }));
}
From source file:org.opendaylight.controller.sal.connect.netconf.schema.mapping.NetconfMessageTransformer.java
public NetconfMessageTransformer(final SchemaContext schemaContext) {
    this.counter = new MessageCounter();
    this.schemaContext = schemaContext;
    parserFactory = DomToNormalizedNodeParserFactory.getInstance(XmlUtils.DEFAULT_XML_CODEC_PROVIDER,
            schemaContext);
    mappedRpcs = Maps.uniqueIndex(schemaContext.getOperations(), QNAME_FUNCTION);
    mappedNotifications = Multimaps.index(schemaContext.getNotifications(), QNAME_NOREV_FUNCTION);
}
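Note the pairing in this constructor: uniqueIndex is used for operations, where each QName must identify exactly one RPC, while Multimaps.index is used for notifications, where several entries may legitimately share a key. A small sketch of that distinction, with invented string data:

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimaps;

public class IndexChoiceDemo {
    public static void main(String[] args) {
        // One value per key: uniqueIndex is appropriate.
        ImmutableMap<Character, String> unique =
                Maps.uniqueIndex(ImmutableList.of("apple", "banana"), w -> w.charAt(0));

        // Several values may share a key: Multimaps.index tolerates duplicates.
        ImmutableListMultimap<Character, String> grouped =
                Multimaps.index(ImmutableList.of("apple", "avocado", "banana"), w -> w.charAt(0));

        System.out.println(unique);  // {a=apple, b=banana}
        System.out.println(grouped); // {a=[apple, avocado], b=[banana]}
    }
}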
From source file:com.siemens.sw360.portal.common.PortletUtils.java
public static void updateAttachmentsFromRequest(PortletRequest request, Set<Attachment> attachments) {
    if (attachments == null || attachments.size() == 0)
        return;

    String[] ids = request.getParameterValues(
            Release._Fields.ATTACHMENTS.toString() + Attachment._Fields.ATTACHMENT_CONTENT_ID.toString());
    String[] comments = request
            .getParameterValues(Release._Fields.ATTACHMENTS.toString() + Attachment._Fields.COMMENT.toString());
    String[] atypes = request.getParameterValues(
            Release._Fields.ATTACHMENTS.toString() + Attachment._Fields.ATTACHMENT_TYPE.toString());

    if (CommonUtils.oneIsNull(atypes, comments, ids)) {
        log.error("We have a problem null arrays");
    } else if (atypes.length != comments.length || atypes.length != ids.length) {
        log.error("We have a problem length != other.length ");
    } else {
        // Index the attachments by content id so each one can be looked up and
        // updated from the parallel request-parameter arrays.
        Map<String, Attachment> attachmentMap = Maps.uniqueIndex(attachments,
                new Function<Attachment, String>() {
                    @Override
                    public String apply(Attachment attachment) {
                        return attachment.getAttachmentContentId();
                    }
                });
        int length = atypes.length;
        for (int i = 0; i < length; ++i) {
            String id = ids[i];
            if (attachmentMap.containsKey(id)) {
                Attachment attachment = attachmentMap.get(id);
                attachment.setComment(comments[i]);
                attachment.setAttachmentType(getAttachmentTypefromString(atypes[i]));
            } else {
                log.error("Unable to find attachment!" + id);
            }
        }
    }
}
From source file:edu.harvard.med.screensaver.service.cherrypicks.CherryPickRequestAllocator.java
/**
 * @return the set of <i>unfulfillable</i> cherry picks
 * @throws DataModelViolationException if the source wells for the labCherryPicks contain duplicates
 */
@Transactional
public Set<LabCherryPick> allocate(Collection<LabCherryPick> labCherryPicks) {
    Set<LabCherryPick> unfulfillableLabCherryPicks = new HashSet<LabCherryPick>();
    if (labCherryPicks.size() == 0) {
        return unfulfillableLabCherryPicks;
    }
    try {
        final ImmutableMap<Well, LabCherryPick> well2lcp = Maps.uniqueIndex(labCherryPicks,
                new Function<LabCherryPick, Well>() {
                    public Well apply(LabCherryPick lcp) {
                        return lcp.getSourceWell();
                    }
                });
        CherryPickRequest cherryPickRequest = labCherryPicks.iterator().next().getCherryPickRequest();
        Map<Well, Set<Copy>> copyCandidatesForWells = findCopyCandidatesForWells(well2lcp.keySet(),
                cherryPickRequest.getTransferVolumePerWellApproved());
        // remove unfulfillable wells now, as they would force the minimum copy set to always be empty
        Set<Well> unfulfillableWells = removeUnfulfillableWells(copyCandidatesForWells);
        assert Sets.intersection(unfulfillableWells, copyCandidatesForWells.keySet()).isEmpty();
        Set<Copy> minimumCopySetForWells = findMinimumCopySetForWells(copyCandidatesForWells);
        if (log.isDebugEnabled()) {
            log.debug("using minimum copy set: " + minimumCopySetForWells);
        }
        for (LabCherryPick labCherryPick : labCherryPicks) {
            if (!unfulfillableWells.contains(labCherryPick.getSourceWell())) {
                Set<Copy> copyCandidatesForWell = copyCandidatesForWells.get(labCherryPick.getSourceWell());
                Set<Copy> copyCandidatesForWellAndPlate = Sets.intersection(minimumCopySetForWells,
                        copyCandidatesForWell);
                if (log.isDebugEnabled()) {
                    log.debug("copy candidates for well " + copyCandidatesForWell);
                    log.debug("copy candidates for well and plate " + copyCandidatesForWellAndPlate);
                }
                assert !copyCandidatesForWellAndPlate
                        .isEmpty() : "algorithm for determining minimum set of copies is incorrect";
                Copy selectedCopy = Collections.min(copyCandidatesForWellAndPlate);
                labCherryPick.setAllocated(selectedCopy);
                if (log.isDebugEnabled()) {
                    log.debug("volume for " + labCherryPick + " allocated from " + selectedCopy);
                }
            }
        }
        Iterable<LabCherryPick> unfulfillableLCPsIter = Iterables.transform(unfulfillableWells,
                new Function<Well, LabCherryPick>() {
                    public LabCherryPick apply(Well well) {
                        return well2lcp.get(well);
                    }
                });
        HashSet<LabCherryPick> unfulfillableLCPs = Sets.newHashSet(unfulfillableLCPsIter);
        if (log.isDebugEnabled()) {
            log.debug("unfulfillable lab cherry picks: " + unfulfillableLCPs);
        }
        return unfulfillableLCPs;
    } catch (IllegalArgumentException e) {
        // We do not allow requests for allocation of multiple lab cherry picks that have the
        // same source well. This is critical, since multiple allocations of the same source
        // well could result in overdrawing reagent from the source well. This is due to the
        // fact that remaining well volume checking is based upon the remaining well volumes
        // as recorded in the database, and the implementation, above, does not currently
        // handle the case where two or more reservations are being made from the same source
        // well (though, it could be made to do so).
        throw new BusinessRuleViolationException(
                "cannot allocate lab cherry picks if source wells are not unique");
    }
}
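This example deliberately leans on the duplicate-key behavior of uniqueIndex: duplicate source wells are a business-rule violation here, so the IllegalArgumentException thrown for a repeated key is caught and translated into a domain-specific BusinessRuleViolationException rather than treated as a programming error.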
From source file:org.gradle.api.internal.tasks.properties.DefaultTypeMetadataStore.java
public DefaultTypeMetadataStore(Iterable<? extends PropertyAnnotationHandler> customAnnotationHandlers,
        CrossBuildInMemoryCacheFactory cacheFactory) {
    Iterable<PropertyAnnotationHandler> allAnnotationHandlers = Iterables.concat(HANDLERS,
            customAnnotationHandlers);
    // Index every handler by the annotation type it handles.
    this.annotationHandlers = Maps.uniqueIndex(allAnnotationHandlers,
            new Function<PropertyAnnotationHandler, Class<? extends Annotation>>() {
                @Override
                public Class<? extends Annotation> apply(PropertyAnnotationHandler handler) {
                    return handler.getAnnotationType();
                }
            });
    Multimap<Class<? extends Annotation>, Class<? extends Annotation>> annotationOverrides =
            collectAnnotationOverrides(allAnnotationHandlers);
    Set<Class<? extends Annotation>> relevantAnnotationTypes = collectRelevantAnnotationTypes(
            ((Map<Class<? extends Annotation>, PropertyAnnotationHandler>) Maps.uniqueIndex(
                    allAnnotationHandlers,
                    new Function<PropertyAnnotationHandler, Class<? extends Annotation>>() {
                        @Override
                        public Class<? extends Annotation> apply(PropertyAnnotationHandler handler) {
                            return handler.getAnnotationType();
                        }
                    })).keySet());
    this.propertyExtractor = new PropertyExtractor(annotationHandlers.keySet(), relevantAnnotationTypes,
            annotationOverrides, IGNORED_SUPER_CLASSES, IGNORED_METHODS);
    this.cache = cacheFactory.newClassCache();
}
From source file:com.b2international.snowowl.snomed.datastore.request.EvaluateQueryRefSetMemberRequest.java
@Override
public QueryRefSetMemberEvaluation execute(BranchContext context) {
    // TODO support pre-population???
    final String query;
    final String targetReferenceSet;
    if (context instanceof TransactionContext) {
        SnomedQueryRefSetMember member = ((TransactionContext) context).lookup(memberId,
                SnomedQueryRefSetMember.class);
        query = member.getQuery();
        targetReferenceSet = member.getReferencedComponentId();
    } else {
        final SnomedReferenceSetMember member = SnomedRequests.prepareGetMember(memberId).build()
                .execute(context);
        query = (String) member.getProperties().get(SnomedRf2Headers.FIELD_QUERY);
        targetReferenceSet = member.getReferencedComponent().getId();
    }

    // GET matching members of a query
    final SnomedConcepts matchingConcepts = SnomedRequests.prepareSearchConcept().filterByEcl(query).all()
            .build().execute(context);

    final Map<String, SnomedConcept> conceptsToAdd = newHashMap();
    final Collection<SnomedReferenceSetMember> membersToRemove = newHashSet();
    final Map<String, String> conceptsToActivate = Maps.newHashMap();

    // add all matching first
    for (SnomedConcept matchedConcept : matchingConcepts.getItems()) {
        if (matchedConcept.isActive()) {
            conceptsToAdd.put(matchedConcept.getId(), matchedConcept);
        }
    }

    // then re-evaluate all current members of the target simple type reference set
    final Collection<SnomedReferenceSetMember> currentMembersOfTarget = SnomedRequests.prepareSearchMember()
            .all().filterByRefSet(targetReferenceSet).build().execute(context).getItems();

    for (SnomedReferenceSetMember currentMember : currentMembersOfTarget) {
        final String referencedComponentId = currentMember.getReferencedComponent().getId();
        if (conceptsToAdd.containsKey(referencedComponentId)) {
            if (!currentMember.isActive()) {
                // TODO fix reactivation label???
                conceptsToActivate.put(referencedComponentId, referencedComponentId);
            } else {
                conceptsToAdd.remove(referencedComponentId);
            }
        } else {
            membersToRemove.add(currentMember);
        }
    }

    final Collection<MemberChange> changes = newArrayList();

    // fetch all referenced components
    final Set<String> referencedConceptIds = newHashSet();
    referencedConceptIds.addAll(conceptsToAdd.keySet());
    referencedConceptIds.addAll(FluentIterable.from(membersToRemove)
            .transform(new Function<SnomedReferenceSetMember, SnomedCoreComponent>() {
                @Override
                public SnomedCoreComponent apply(SnomedReferenceSetMember input) {
                    return input.getReferencedComponent();
                }
            }).transform(IComponent.ID_FUNCTION).toSet());

    final Map<String, SnomedConcept> concepts;
    if (expand().containsKey("referencedComponent")) {
        final Options expandOptions = expand().getOptions("referencedComponent");
        // Index the fetched concepts by ID for the change-building loops below.
        concepts = Maps.uniqueIndex(SnomedRequests.prepareSearchConcept().filterByIds(referencedConceptIds)
                .setLimit(referencedConceptIds.size()).setExpand(expandOptions.getOptions("expand"))
                .setLocales(locales()).build().execute(context), IComponent.ID_FUNCTION);
    } else {
        // initialize with empty SnomedConcept resources
        concepts = newHashMap();
        for (String referencedConceptId : referencedConceptIds) {
            concepts.put(referencedConceptId, new SnomedConcept(referencedConceptId));
        }
    }

    for (String id : conceptsToAdd.keySet()) {
        changes.add(MemberChangeImpl.added(concepts.get(id)));
    }

    for (SnomedReferenceSetMember memberToRemove : membersToRemove) {
        changes.add(MemberChangeImpl.removed(concepts.get(memberToRemove.getReferencedComponent().getId()),
                memberToRemove.getId()));
    }

    // TODO reactivation???
    // for (String id : conceptsToActivate.keySet()) {
    //     changes.add(new Diff(MemberChangeKind.ACTIVATE, id, conceptsToActivate.get(id)));
    // }
    return new QueryRefSetMemberEvaluationImpl(memberId, targetReferenceSet, changes);
}
From source file:org.jclouds.compute.suppliers.ImageCacheSupplier.java
/**
 * Resets the cache to the given set of images.
 * <p>
 * This method is called when the memoized image supplier is reloaded, or
 * when the cache needs to be refreshed (for example when the TemplateBuilder
 * is invoked, forcing a fresh image lookup).
 */
public void reset(Set<? extends Image> images) {
    imageCache.invalidateAll();
    imageCache.putAll(Maps.uniqueIndex(images, new Function<Image, String>() {
        @Override
        public String apply(Image input) {
            return input.getId();
        }
    }));
}
From source file:org.apache.aurora.scheduler.preemptor.PendingTaskProcessor.java
@Timed("pending_task_processor_run")
@Override
public void run() {
    metrics.recordTaskProcessorRun();
    storage.read(store -> {
        Multimap<String, PreemptionVictim> slavesToActiveTasks = clusterState.getSlavesToActiveTasks();
        if (slavesToActiveTasks.isEmpty()) {
            // No preemption victims to consider.
            return null;
        }

        // Group the offers by slave id so they can be paired with active tasks from the same slave.
        Map<String, HostOffer> slavesToOffers = Maps.uniqueIndex(offerManager.getOffers(), OFFER_TO_SLAVE_ID);

        Set<String> allSlaves = Sets
                .newHashSet(Iterables.concat(slavesToOffers.keySet(), slavesToActiveTasks.keySet()));

        // The algorithm below attempts to find a reservation for every task group by matching
        // it against all available slaves until a preemption slot is found. Groups are evaluated
        // in a round-robin fashion to ensure fairness (e.g.: G1, G2, G3, G1, G2).
        // A slave is removed from further matching once a reservation is made. Similarly, all
        // identical task group instances are removed from further iteration if none of the
        // available slaves could yield a preemption proposal. A consuming iterator is used for
        // task groups to ensure iteration order is preserved after a task group is removed.
        LoadingCache<IJobKey, AttributeAggregate> jobStates = attributeCache(store);
        List<TaskGroupKey> pendingGroups = fetchIdlePendingGroups(store);
        Iterator<TaskGroupKey> groups = Iterators.consumingIterator(pendingGroups.iterator());
        TaskGroupKey lastGroup = null;
        Iterator<String> slaveIterator = allSlaves.iterator();

        while (!pendingGroups.isEmpty()) {
            boolean matched = false;
            TaskGroupKey group = groups.next();
            ITaskConfig task = group.getTask();
            metrics.recordPreemptionAttemptFor(task);
            // start over only if a different task group is being processed
            if (!group.equals(lastGroup)) {
                slaveIterator = allSlaves.iterator();
            }
            while (slaveIterator.hasNext()) {
                String slaveId = slaveIterator.next();
                Optional<ImmutableSet<PreemptionVictim>> candidates = preemptionVictimFilter
                        .filterPreemptionVictims(task, slavesToActiveTasks.get(slaveId),
                                jobStates.getUnchecked(task.getJob()),
                                Optional.fromNullable(slavesToOffers.get(slaveId)), store);
                metrics.recordSlotSearchResult(candidates, task);
                if (candidates.isPresent()) {
                    // Slot found -> remove slave to avoid multiple task reservations.
                    slaveIterator.remove();
                    slotCache.put(new PreemptionProposal(candidates.get(), slaveId), group);
                    matched = true;
                    break;
                }
            }
            if (!matched) {
                // No slot found for the group -> remove group and reset group iterator.
                pendingGroups.removeAll(ImmutableSet.of(group));
                groups = Iterators.consumingIterator(pendingGroups.iterator());
                metrics.recordUnmatchedTask();
            }
            lastGroup = group;
        }
        return null;
    });
}
From source file:com.facebook.presto.raptor.storage.organization.ShardOrganizationManager.java
@VisibleForTesting
Set<Long> discoverAndInitializeTablesToOrganize() {
    Set<Long> enabledTableIds = metadataDao.getOrganizationEligibleTables();

    Set<TableOrganizationInfo> tableOrganizationInfo = organizerDao
            .getNodeTableOrganizationInfo(currentNodeIdentifier);
    Map<Long, TableOrganizationInfo> organizationInfos = Maps.uniqueIndex(tableOrganizationInfo,
            TableOrganizationInfo::getTableId);

    // If this is the first time organizing a table, initialize the organization info for it
    difference(enabledTableIds, organizationInfos.keySet())
            .forEach(tableId -> organizerDao.insertNode(currentNodeIdentifier, tableId));

    ImmutableSet.Builder<Long> tableIds = ImmutableSet.builder();
    for (Long tableId : enabledTableIds) {
        TableOrganizationInfo info = organizationInfos.get(tableId);
        if (info == null || shouldRunOrganization(info)) {
            tableIds.add(tableId);
        }
    }
    return tableIds.build();
}
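Unlike the anonymous Function classes in the older examples above, this one passes the method reference TableOrganizationInfo::getTableId as the key function, the idiomatic choice on Java 8+.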
From source file:com.nesscomputing.cache.PrefixedCache.java
@Nonnull
public Map<K, V> get(P prefix, Collection<? extends K> keys) {
    final Function<K, String> prefixFunction = new PrefixFunction<P, K>(prefix, keySerializer);
    // Index the requested keys by their serialized, prefixed string form so that
    // raw cache results can be mapped back to the original key objects.
    final Map<String, ? extends K> keyStrings = Maps.uniqueIndex(keys, prefixFunction);
    final Map<String, byte[]> res = nessCache.get(namespace, keyStrings.keySet());
    final ImmutableMap.Builder<K, V> builder = ImmutableMap.builder();

    for (final Map.Entry<String, byte[]> entry : res.entrySet()) {
        final K key = keyStrings.get(entry.getKey());
        try {
            final byte[] result = entry.getValue();
            if (result != null) {
                builder.put(key, valueDeserializer.apply(result));
            }
        } catch (Exception e) {
            clear(prefix, key);
            throw Throwables.propagate(e);
        }
    }
    return builder.build();
}
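A detail worth noting in this last example: the map returned by uniqueIndex does double duty. Its key set supplies the serialized keys for the batch cache lookup, and the map itself translates each returned string key back to the caller's original key object.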