List of usage examples for com.google.common.collect.Multimap.values()
Collection<V> values();
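Before the per-project examples, a minimal self-contained sketch of the method's contract (the class name here is illustrative, not taken from any of the sources below): values() returns a live view of every value in the multimap, duplicates included, so its size always matches the multimap's size; entries can be removed through the view but not added.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class MultimapValuesSketch {
    public static void main(String[] args) {
        Multimap<String, Integer> multimap = HashMultimap.create();
        multimap.put("a", 1);
        multimap.put("a", 2);
        multimap.put("b", 1); // the same value under a different key is kept

        // values() does not collapse duplicates: its size matches the multimap's size
        System.out.println(multimap.values().size()); // 3

        // the view is live: removing through it removes one ("a",1) or ("b",1) entry
        multimap.values().remove(1);
        System.out.println(multimap.size()); // 2
    }
}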
From source file:io.redlink.sdk.impl.analysis.model.RDFStructureParser.java
private Collection<Enhancement> resolveRelations(Multimap<Enhancement, String> relations,
        RepositoryConnection conn) throws EnhancementParserException {
    Queue<String> toParse = new LinkedList<String>();
    toParse.addAll(Sets.newHashSet(relations.values()));
    Map<String, Enhancement> allRelations = new HashMap<String, Enhancement>();
    Collection<Enhancement> initialEnhancements = relations.keys();
    while (!toParse.isEmpty()) {
        String nextRelation = toParse.poll();
        Enhancement nextEnhancement = parseEnhancement(nextRelation, conn, toParse, relations);
        if (nextEnhancement != null)
            allRelations.put(nextRelation, nextEnhancement);
    }
    // note: keys() yields each enhancement once per associated value; keySet() would visit each one only once
    for (Enhancement e : relations.keys()) {
        Collection<String> relationsUris = relations.get(e);
        Collection<Enhancement> nextRelEnhancements = Sets.newHashSet();
        for (String uri : relationsUris)
            if (uri != null)
                nextRelEnhancements.add(allRelations.get(uri));
        e.setRelations(nextRelEnhancements);
    }
    return initialEnhancements;
}
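Wrapping relations.values() in Sets.newHashSet deduplicates the relation URIs before they are queued for parsing, since the same URI can appear under several enhancements.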
From source file:org.eclipse.xtext.ui.refactoring.impl.AbstractReferenceUpdater.java
protected void createReferenceUpdatesForCluster(ElementRenameArguments elementRenameArguments,
        Multimap<URI, IReferenceDescription> resource2references, ResourceSet resourceSet,
        IRefactoringUpdateAcceptor updateAcceptor, StatusWrapper status, IProgressMonitor monitor) {
    SubMonitor progress = SubMonitor.convert(monitor, 100);
    List<URI> unloadableResources = loadReferringResources(resourceSet, resource2references.keySet(), status,
            progress.newChild(10));
    if (progress.isCanceled()) {
        throw new OperationCanceledException();
    }
    for (URI unloadableResource : unloadableResources)
        resource2references.removeAll(unloadableResource);
    List<IReferenceDescription> unresolvableReferences = resolveReferenceProxies(resourceSet,
            resource2references.values(), status, progress.newChild(70));
    if (progress.isCanceled()) {
        throw new OperationCanceledException();
    }
    for (IReferenceDescription unresolvableReference : unresolvableReferences) {
        URI unresolvableReferringResource = unresolvableReference.getSourceEObjectUri().trimFragment();
        resource2references.remove(unresolvableReferringResource, unresolvableReference);
    }
    elementRenameArguments.getRenameStrategy().applyDeclarationChange(elementRenameArguments.getNewName(),
            resourceSet);
    if (progress.isCanceled()) {
        throw new OperationCanceledException();
    }
    createReferenceUpdates(elementRenameArguments, resource2references, resourceSet, updateAcceptor,
            progress.newChild(20));
    if (progress.isCanceled()) {
        throw new OperationCanceledException();
    }
    elementRenameArguments.getRenameStrategy().revertDeclarationChange(resourceSet);
}
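Here resource2references.values() hands resolveReferenceProxies a live view of all reference descriptions across every referring resource; the surrounding removeAll and remove(key, value) calls prune that same multimap as resources prove unloadable or references unresolvable.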
From source file:com.ning.billing.osgi.bundles.analytics.dao.factory.BusinessInvoiceFactory.java
/**
 * Create current business invoices and invoice items.
 *
 * @param accountId current accountId refreshed
 * @param context   call context
 * @return all business invoices and invoice items to create
 * @throws com.ning.billing.osgi.bundles.analytics.AnalyticsRefreshException
 */
public Map<BusinessInvoiceModelDao, Collection<BusinessInvoiceItemBaseModelDao>> createBusinessInvoicesAndInvoiceItems(
        final UUID accountId, final CallContext context) throws AnalyticsRefreshException {
    final Account account = getAccount(accountId, context);
    final Long accountRecordId = getAccountRecordId(account.getId(), context);
    final Long tenantRecordId = getTenantRecordId(context);
    final ReportGroup reportGroup = getReportGroup(account.getId(), context);

    // Lookup the invoices for that account
    final Collection<Invoice> invoices = getInvoicesByAccountId(account.getId(), context);

    // All invoice items across all invoices for that account
    // (we need to be able to reference items across multiple invoices)
    final Multimap<UUID, InvoiceItem> allInvoiceItems = ArrayListMultimap.<UUID, InvoiceItem>create();
    // Convenient mapping invoiceId -> invoice
    final Map<UUID, Invoice> invoiceIdToInvoiceMappings = new LinkedHashMap<UUID, Invoice>();
    for (final Invoice invoice : invoices) {
        invoiceIdToInvoiceMappings.put(invoice.getId(), invoice);
        allInvoiceItems.get(invoice.getId()).addAll(invoice.getInvoiceItems());
    }

    // Create the business invoice items
    final Multimap<UUID, BusinessInvoiceItemBaseModelDao> businessInvoiceItemsForInvoiceId = ArrayListMultimap
            .<UUID, BusinessInvoiceItemBaseModelDao>create();
    for (final InvoiceItem invoiceItem : allInvoiceItems.values()) {
        final Invoice invoice = invoiceIdToInvoiceMappings.get(invoiceItem.getInvoiceId());
        final Collection<InvoiceItem> otherInvoiceItems = Collections2.filter(allInvoiceItems.values(),
                new Predicate<InvoiceItem>() {
                    @Override
                    public boolean apply(final InvoiceItem input) {
                        return input.getId() != null && !input.getId().equals(invoiceItem.getId());
                    }
                });
        final BusinessInvoiceItemBaseModelDao businessInvoiceItem = createBusinessInvoiceItem(account, invoice,
                invoiceItem, otherInvoiceItems, accountRecordId, tenantRecordId, reportGroup, context);
        if (businessInvoiceItem != null) {
            businessInvoiceItemsForInvoiceId.get(invoice.getId()).add(businessInvoiceItem);
        }
    }

    // Now, create the business invoices
    final Map<BusinessInvoiceModelDao, Collection<BusinessInvoiceItemBaseModelDao>> businessRecords =
            new HashMap<BusinessInvoiceModelDao, Collection<BusinessInvoiceItemBaseModelDao>>();
    for (final Invoice invoice : invoices) {
        final Collection<BusinessInvoiceItemBaseModelDao> businessInvoiceItems = businessInvoiceItemsForInvoiceId
                .get(invoice.getId());
        // note: Multimap.get() returns an empty collection, never null, so this check never skips an invoice
        if (businessInvoiceItems == null) {
            continue;
        }
        final BusinessInvoiceModelDao businessInvoice = createBusinessInvoice(account, invoice, accountRecordId,
                tenantRecordId, reportGroup, context);
        businessRecords.put(businessInvoice, businessInvoiceItems);
    }

    return businessRecords;
}
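Iterating allInvoiceItems.values() walks every invoice item across all invoices in a single pass, and the same view is filtered again with Collections2.filter to compute each item's "other" items; the live collections returned by get() are used to accumulate both the raw and the business items.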
From source file:org.sosy_lab.cpachecker.cfa.blocks.BlockToDotWriter.java
/**
 * This function returns a structure which contains hierarchical dependencies between blocks.
 * The returned Multimap contains the outer block (father) as key
 * and the inner blocks (children) as values for the key.
 * We assume that for each pair of blocks the following condition holds:
 * a block is either completely contained in the other block, or the two blocks have nothing in common.
 */
private Multimap<Block, Block> getHierarchy() {

    // sort blocks, largest blocks first
    List<Block> sortedBlocks = Lists.newArrayList(blockPartitioning.getBlocks());
    Collections.sort(sortedBlocks, new Comparator<Block>() {
        @Override
        public int compare(Block b1, Block b2) {
            return b2.getNodes().size() - b1.getNodes().size();
        }
    });

    // build hierarchy, worst case runtime O(n^2), iff mainBlock contains all other blocks 'directly'.
    final Multimap<Block, Block> hierarchy = HashMultimap.create();
    while (!sortedBlocks.isEmpty()) {
        // get the smallest block and then the smallest outer block that contains it
        Block currentBlock = sortedBlocks.remove(sortedBlocks.size() - 1); // get smallest block
        for (Block possibleOuterBlock : Lists.reverse(sortedBlocks)) { // order is important, smallest first
            // trick: if one node is contained in the outer block, all nodes must be contained, so we check only one
            if (possibleOuterBlock.getNodes().contains(currentBlock.getNodes().iterator().next())) {
                hierarchy.put(possibleOuterBlock, currentBlock);
                break;
            }
        }
    }

    assert hierarchy.values().size() <= blockPartitioning.getBlocks().size() - 1 :
            "all blocks except mainBlock might appear at most once as child.";

    // there might also be blocks that are not part of the hierarchy, for example unused functions
    return hierarchy;
}
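The assertion relies on values() reporting the total number of parent-to-child entries in the multimap: since every block except the main block has at most one parent, that count can be at most the number of blocks minus one.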
From source file:co.cask.cdap.etl.planner.ControlDag.java
private void flattenFrom(String node) {
    Set<String> outputs = outgoingConnections.get(node);
    if (outputs.isEmpty()) {
        return;
    }
    if (outputs.size() == 1) {
        flattenFrom(outputs.iterator().next());
        return;
    }

    Multimap<String, String> branchEndpointOutputs = HashMultimap.create();
    // can't just use branchEndpointOutputs.keySet(),
    // because that won't track branch endpoints that had no output (sinks)
    Set<String> branchEndpoints = new HashSet<>();
    for (String output : outputs) {
        String branchEndpoint = findBranchEnd(output);
        branchEndpoints.add(branchEndpoint);
        branchEndpointOutputs.putAll(branchEndpoint, outgoingConnections.get(branchEndpoint));
    }

    // if all the branch endpoints connect to a single node, there is no need to add a join node
    Set<String> endpointOutputs = new HashSet<>(branchEndpointOutputs.values());
    if (endpointOutputs.size() == 1) {
        flattenFrom(endpointOutputs.iterator().next());
        return;
    }

    // add a connection from each branch endpoint to a newly added join node,
    // then move all outgoing connections from each branch endpoint so that they come out of the new join node
    String newJoinNode = generateJoinNodeName(branchEndpoints);
    addNode(newJoinNode, branchEndpoints, endpointOutputs);

    // remove the outgoing connections from endpoints that aren't going to our new join node
    for (Map.Entry<String, String> endpointEntry : branchEndpointOutputs.entries()) {
        removeConnection(endpointEntry.getKey(), endpointEntry.getValue());
    }

    /* have to trim again due to reshuffling of nodes. For example, if we have:

                 |--> n3
            |--> n2 --|
            |         |--> n4
        n1 --|        |
            |         v
            |--> n5 -----> n6

       after we insert the new join node we'll have:

            |--> n2 --|            |--> n3
            |         |            |
        n1 --|         |--> join --|--> n4
            |         |            |  |
            |--> n5 --|            |  v
                                   |--> n6

       and we need to remove the connection from join -> n6,
       otherwise the algorithm will get messed up */
    trim();

    // then keep flattening from the new join node
    flattenFrom(newJoinNode);
}
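Copying branchEndpointOutputs.values() into a HashSet collapses duplicate targets, so a resulting size of 1 means every branch endpoint feeds the same node and no join node is needed.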
From source file:org.caltoopia.cli.CompilationSession.java
/**
 * Adds .cal source files to the resource set.
 *
 * @param paths paths to scan for .cal files, separated by the platform-specific delimiter (: or ;)
 */
private void scanPathsCreateResources(List<String> paths, List<String> excludedFiles,
        XtextResourceSet resourceSet) {
    List<String> files = scanPathsForCalFiles(paths, excludedFiles);

    Multimap<String, URI> uris = HashMultimap.create();
    for (int i = 0; i < files.size(); i++) {
        uris.put(files.get(i), URI.createFileURI(files.get(i)));
    }

    ContainersStateFactory containersStateFactory = new ContainersStateFactory();
    IAllContainersState containersState = containersStateFactory.getContainersState(files, uris);
    resourceSet.eAdapters().add(new DelegatingIAllContainerAdapter(containersState));

    Collection<URI> values = Sets.newHashSet(uris.values());
    for (URI uri : values) {
        resourceSet.createResource(uri);
    }
}
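The URIs are copied into a HashSet via Sets.newHashSet(uris.values()) so that each resource is created exactly once, even if the same URI were registered under more than one key.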
From source file:org.sosy_lab.cpachecker.cpa.octagon.refiner.OctagonArgBasedDelegatingRefiner.java
private boolean performOctagonAnalysisRefinement(final ARGReachedSet reached,
        final OctagonAnalysisFeasabilityChecker checker) throws InterruptedException {
    UnmodifiableReachedSet reachedSet = reached.asReachedSet();
    Precision precision = reachedSet.getPrecision(reachedSet.getLastState());
    VariableTrackingPrecision octPrecision = (VariableTrackingPrecision) Precisions.asIterable(precision)
            .filter(VariableTrackingPrecision.isMatchingCPAClass(OctagonCPA.class)).get(0);

    Multimap<CFANode, MemoryLocation> increment = checker.getPrecisionIncrement();
    // no newly tracked variables, so the refinement was not successful, TODO why is this code commented out?
    if (increment.isEmpty()) {
        //  return false;
    }

    reached.removeSubtree(((ARGState) reachedSet.getFirstState()).getChildren().iterator().next(),
            octPrecision.withIncrement(increment),
            VariableTrackingPrecision.isMatchingCPAClass(OctagonCPA.class));
    logger.log(Level.INFO,
            "Refinement successful, precision incremented, following variables are now tracked additionally:\n"
                    + new TreeSet<>(increment.values()));

    return true;
}
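Here values() flattens the newly tracked MemoryLocations across all CFA nodes; wrapping them in a TreeSet sorts and deduplicates the names for the log message.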
From source file:org.activityinfo.legacy.shared.impl.GetSitesHandler.java
private void joinCalculatedIndicatorValues(final Promise<Void> complete, SqlTransaction tx,
        final Multimap<Integer, SiteDTO> siteMap) {
    Log.trace("Starting joinIndicatorValues()");

    final Set<Integer> activityIds = Sets.newHashSet();
    for (SiteDTO siteDTO : siteMap.values()) {
        activityIds.add(siteDTO.getActivityId());
    }

    SqlQuery query = SqlQuery.select().appendColumn("I.IndicatorId", "indicatorId")
            .appendColumn("I.Name", "indicatorName").appendColumn("I.ActivityId", "activityId")
            .appendColumn("I.Type", "type").appendColumn("I.Expression", "expression")
            .appendColumn("I.nameInExpression", "code")
            .appendColumn("I.calculatedAutomatically", "calculatedAutomatically").from(Tables.INDICATOR, "I")
            .where("I.ActivityId").in(activityIds).and("I.dateDeleted IS NULL").orderBy("I.SortOrder");

    Log.info(query.toString());

    query.execute(tx, new SqlResultCallback() {
        @Override
        public void onSuccess(SqlTransaction tx, final SqlResultSet results) {
            List<FormField> fields = Lists.newArrayList();
            for (SqlResultSetRow row : results.getRows()) {
                fields.add(createField(row));
            }

            FormSymbolTable symbolTable = new FormSymbolTable(fields);
            PartialEvaluator<SiteDTO> evaluator = new PartialEvaluator<>(symbolTable,
                    new SiteFieldReaderFactory());

            List<CalculatedIndicatorReader> readers = Lists.newArrayList();
            for (FormField field : fields) {
                if (field.getType() instanceof CalculatedFieldType) {
                    FieldReader<SiteDTO> reader = evaluator.partiallyEvaluate(field);
                    if (reader.getType() instanceof QuantityType) {
                        readers.add(new CalculatedIndicatorReader(field, reader));
                    }
                }
            }

            for (SiteDTO site : siteMap.values()) {
                for (CalculatedIndicatorReader reader : readers) {
                    reader.read(site);
                }
            }
            complete.onSuccess(null);
        }
    });
}
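siteMap.values() is traversed twice: first to collect the distinct activity ids for the SQL query, then to run each CalculatedIndicatorReader over every site once the indicator metadata has loaded.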
From source file:org.sosy_lab.cpachecker.cpa.apron.refiner.ApronARGBasedDelegatingRefiner.java
private boolean performApronAnalysisRefinement(final ARGReachedSet reached,
        final OctagonAnalysisFeasabilityChecker checker) throws InterruptedException {
    UnmodifiableReachedSet reachedSet = reached.asReachedSet();
    Precision precision = reachedSet.getPrecision(reachedSet.getLastState());
    VariableTrackingPrecision apronPrecision = (VariableTrackingPrecision) Precisions.asIterable(precision)
            .filter(VariableTrackingPrecision.isMatchingCPAClass(ApronCPA.class)).get(0);

    Multimap<CFANode, MemoryLocation> increment = checker.getPrecisionIncrement();
    // no newly tracked variables, so the refinement was not successful
    // TODO why is this commented out
    if (increment.isEmpty()) {
        //  return false;
    }

    reached.removeSubtree(((ARGState) reachedSet.getFirstState()).getChildren().iterator().next(),
            apronPrecision.withIncrement(increment),
            VariableTrackingPrecision.isMatchingCPAClass(ApronCPA.class));
    logger.log(Level.INFO,
            "Refinement successful, precision incremented, following variables are now tracked additionally:\n"
                    + new TreeSet<>(increment.values()));

    return true;
}
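This is the same pattern as the octagon refiner above: increment.values() flattens the per-node memory locations, and the TreeSet gives a sorted, duplicate-free listing for the log.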
From source file:net.shibboleth.idp.saml.attribute.mapping.AbstractSAMLAttributesMapper.java
/**
 * Constructor to create the mapping from an existing resolver. <br/>
 * This code inverts the {@link AttributeEncoder} (internal attribute -> SAML Attributes) into
 * {@link AttributeMapper} (SAML [RequestedAttributes] -> internal [Requested] Attributes), <br/>
 * generating the {@link AbstractSAMLAttributeMapper} (with no
 * {@link AbstractSAMLAttributeMapper#getAttributeIds()}). These are accumulated into a {@link Multimap}, where the
 * key is the {@link AbstractSAMLAttributeMapper} and the values are the (IdP) attribute names. The collection of
 * {@link AttributeMapper}s can then be extracted from the map, and the appropriate internal names added (these
 * being the values of the {@link Multimap}).
 *
 * @param resolver the resolver
 * @param id the id
 * @param mapperFactory a factory to generate new mappers of the correct type
 */
public AbstractSAMLAttributesMapper(@Nonnull final AttributeResolver resolver,
        @Nonnull @NotEmpty final String id,
        @Nonnull final Supplier<AbstractSAMLAttributeMapper<InType, OutType>> mapperFactory) {
    setId(id);

    final Multimap<AbstractSAMLAttributeMapper<InType, OutType>, String> theMappers;
    theMappers = HashMultimap.create();

    for (final AttributeDefinition attributeDef : resolver.getAttributeDefinitions().values()) {
        for (final AttributeEncoder encode : attributeDef.getAttributeEncoders()) {
            if (encode instanceof AttributeMapperProcessor) {
                // There is an appropriate reverse mapper
                final AttributeMapperProcessor factory = (AttributeMapperProcessor) encode;
                final AbstractSAMLAttributeMapper<InType, OutType> mapper = mapperFactory.get();
                factory.populateAttributeMapper(mapper);
                theMappers.put(mapper, attributeDef.getId());
            }
        }
    }

    mappers = new ArrayList<>(theMappers.values().size());
    for (final Entry<AbstractSAMLAttributeMapper<InType, OutType>, Collection<String>> entry : theMappers
            .asMap().entrySet()) {
        final AbstractSAMLAttributeMapper<InType, OutType> mapper = entry.getKey();
        mapper.setAttributeIds(new ArrayList<>(entry.getValue()));
        mappers.add(mapper);
    }
}
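theMappers.values().size(), the total number of mapper-to-name entries, pre-sizes the result list, while asMap().entrySet() regroups those same values per mapper so that each one receives its full list of attribute ids.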