List of usage examples for com.google.common.collect Multimap get
Collection<V> get(@Nullable K key);
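Before the examples, a minimal self-contained sketch of get()'s basic contract (names here are illustrative, not taken from the examples below): the returned collection contains every value mapped to the key, and for an absent key it is an empty, never null, live view backed by the multimap.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.util.Collection;

public class MultimapGetSketch {
    public static void main(String[] args) {
        Multimap<String, Integer> scores = HashMultimap.create();
        scores.put("alice", 1);
        scores.put("alice", 2);

        // get() returns every value mapped to the key...
        Collection<Integer> aliceScores = scores.get("alice"); // [1, 2]

        // ...and an empty (never null) live view for an absent key.
        Collection<Integer> bobScores = scores.get("bob");
        bobScores.add(7); // writes through to the multimap
        System.out.println(scores.containsEntry("bob", 7)); // true
    }
}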
From source file:org.obiba.onyx.quartz.core.engine.questionnaire.util.QuestionnaireSharedCategory.java
/**
 * Returns true if the category associated with the given questionCategory is shared, false otherwise.
 * Use this method when the question associated with the questionCategory is not yet linked to the
 * questionnaire (QuestionnaireFinder.findSharedCategories does not yet contain the category).
 *
 * @param questionCategory the question category to check
 * @param questionnaire the questionnaire to search
 * @return true if the category is shared
 */
public static boolean isSharedIfLink(final QuestionCategory questionCategory, Questionnaire questionnaire) {
    QuestionnaireFinder questionnaireFinder = QuestionnaireFinder.getInstance(questionnaire);
    questionnaire.setQuestionnaireCache(null);
    Multimap<Category, Question> categoriesFilterName = questionnaireFinder
            .findCategories(questionCategory.getCategory().getName());
    Collection<Category> categories = Collections2.filter(categoriesFilterName.keySet(),
            new Predicate<Category>() {
                @Override
                public boolean apply(Category input) {
                    return input == questionCategory.getCategory();
                }
            });
    if (categoriesFilterName.isEmpty() || categories.isEmpty()) {
        return false;
    }
    Collection<Question> questions = categoriesFilterName.get(categories.iterator().next());
    Collection<Question> otherQuestions = Collections2.filter(questions, new Predicate<Question>() {
        @Override
        public boolean apply(Question input) {
            return input != questionCategory.getQuestion();
        }
    });
    return !otherQuestions.isEmpty();
}
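The pattern above boils down to: a category is shared when its Multimap entry contains at least one question besides the one being inspected. A stripped-down sketch of that check, with hypothetical string keys standing in for the Onyx domain objects:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class SharedCategorySketch {
    public static void main(String[] args) {
        // A category is "shared" when its entry maps to at least one question
        // other than the one currently being inspected.
        Multimap<String, String> questionsByCategory = HashMultimap.create();
        questionsByCategory.put("color", "q1");
        questionsByCategory.put("color", "q2");

        String currentQuestion = "q1";
        boolean shared = questionsByCategory.get("color").stream()
                .anyMatch(q -> !q.equals(currentQuestion));
        System.out.println(shared); // true: "q2" also uses "color"
    }
}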
From source file:org.apache.cassandra.service.PendingRangeCalculatorService.java
/**
 * Calculate pending ranges according to bootstrapping and leaving nodes. Reasoning is:
 *
 * (1) When in doubt, it is better to write too much to a node than too little. That is, if
 * there are multiple nodes moving, calculate the biggest ranges a node could have. Cleaning
 * up unneeded data afterwards is better than missing writes during movement.
 * (2) When a node leaves, ranges for other nodes can only grow (a node might get additional
 * ranges, but it will not lose any of its current ranges as a result of a leave). Therefore
 * we will first remove _all_ leaving tokens for the sake of calculation and then check what
 * ranges would go where if all nodes are to leave. This way we get the biggest possible
 * ranges with regard to current leave operations, covering all subsets of possible final
 * range values.
 * (3) When a node bootstraps, ranges of other nodes can only get smaller. Without doing
 * complex calculations to see if multiple bootstraps overlap, we simply base calculations
 * on the same token ring used before (reflecting the situation after all leave operations
 * have completed). Bootstrapping nodes will be added to and removed from that metadata one
 * by one, and in between we check what their ranges would be. This gives us the biggest
 * possible ranges the node could have. It might be that other bootstraps make our actual
 * final ranges smaller, but it does not matter as we can clean up the data afterwards.
 *
 * NOTE: This is a heavy and inefficient operation. It will be done only once when a node
 * changes state in the cluster, so it should be manageable.
 */
// public & static for testing purposes
public static void calculatePendingRanges(AbstractReplicationStrategy strategy, String keyspaceName) {
    TokenMetadata tm = StorageService.instance.getTokenMetadata();
    Multimap<Range<Token>, InetAddress> pendingRanges = HashMultimap.create();
    BiMultiValMap<Token, InetAddress> bootstrapTokens = tm.getBootstrapTokens();
    Set<InetAddress> leavingEndpoints = tm.getLeavingEndpoints();

    if (bootstrapTokens.isEmpty() && leavingEndpoints.isEmpty() && tm.getMovingEndpoints().isEmpty()) {
        if (logger.isDebugEnabled())
            logger.debug(
                    "No bootstrapping, leaving or moving nodes, and no relocating tokens -> empty pending ranges for {}",
                    keyspaceName);
        tm.setPendingRanges(keyspaceName, pendingRanges);
        return;
    }

    Multimap<InetAddress, Range<Token>> addressRanges = strategy.getAddressRanges();

    // Copy of metadata reflecting the situation after all leave operations are finished.
    TokenMetadata allLeftMetadata = tm.cloneAfterAllLeft();

    // Get all ranges that will be affected by leaving nodes.
    Set<Range<Token>> affectedRanges = new HashSet<Range<Token>>();
    for (InetAddress endpoint : leavingEndpoints)
        affectedRanges.addAll(addressRanges.get(endpoint));

    // For each of those ranges, find what new nodes will be responsible for the range when
    // all leaving nodes are gone.
    TokenMetadata metadata = tm.cloneOnlyTokenMap(); // don't do this in the loop! #7758
    for (Range<Token> range : affectedRanges) {
        Set<InetAddress> currentEndpoints = ImmutableSet
                .copyOf(strategy.calculateNaturalEndpoints(range.right, metadata));
        Set<InetAddress> newEndpoints = ImmutableSet
                .copyOf(strategy.calculateNaturalEndpoints(range.right, allLeftMetadata));
        pendingRanges.putAll(range, Sets.difference(newEndpoints, currentEndpoints));
    }

    // At this stage pendingRanges has been updated according to leave operations. We can
    // now continue the calculation by checking bootstrapping nodes.
    // For each of the bootstrapping nodes, simply add and remove them one by one to
    // allLeftMetadata and check in between what their ranges would be.
    Multimap<InetAddress, Token> bootstrapAddresses = bootstrapTokens.inverse();
    for (InetAddress endpoint : bootstrapAddresses.keySet()) {
        Collection<Token> tokens = bootstrapAddresses.get(endpoint);
        allLeftMetadata.updateNormalTokens(tokens, endpoint);
        for (Range<Token> range : strategy.getAddressRanges(allLeftMetadata).get(endpoint))
            pendingRanges.put(range, endpoint);
        allLeftMetadata.removeEndpoint(endpoint);
    }

    // At this stage pendingRanges has been updated according to leaving and bootstrapping nodes.
    // We can now finish the calculation by checking moving and relocating nodes.

    // For each of the moving nodes, we do the same thing we did for bootstrapping:
    // simply add and remove them one by one to allLeftMetadata and check in between
    // what their ranges would be.
    for (Pair<Token, InetAddress> moving : tm.getMovingEndpoints()) {
        InetAddress endpoint = moving.right; // address of the moving node
        // moving.left is a new token of the endpoint
        allLeftMetadata.updateNormalToken(moving.left, endpoint);
        for (Range<Token> range : strategy.getAddressRanges(allLeftMetadata).get(endpoint)) {
            pendingRanges.put(range, endpoint);
        }
        allLeftMetadata.removeEndpoint(endpoint);
    }

    tm.setPendingRanges(keyspaceName, pendingRanges);

    if (logger.isDebugEnabled())
        logger.debug("Pending ranges:\n" + (pendingRanges.isEmpty() ? "<empty>" : tm.printPendingRanges()));
}
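The bootstrap loop above leans on a common Multimap idiom: invert a token-to-endpoint mapping so tokens group per endpoint, then iterate keySet() and pull each group with get(). A minimal sketch of that idiom using plain Guava types (BiMultiValMap is Cassandra-specific, so a plain Map stands in; all names are illustrative):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;
import java.util.HashMap;
import java.util.Map;

public class InvertAndGroupSketch {
    public static void main(String[] args) {
        // A token-to-endpoint map (stand-in for BiMultiValMap).
        Map<String, String> tokenToEndpoint = new HashMap<>();
        tokenToEndpoint.put("token1", "10.0.0.1");
        tokenToEndpoint.put("token2", "10.0.0.1");
        tokenToEndpoint.put("token3", "10.0.0.2");

        // Invert into endpoint -> tokens, grouping all tokens per endpoint.
        Multimap<String, String> endpointToTokens = Multimaps.invertFrom(
                Multimaps.forMap(tokenToEndpoint), HashMultimap.<String, String>create());

        // Iterate per endpoint; get() hands back that endpoint's whole group.
        for (String endpoint : endpointToTokens.keySet()) {
            System.out.println(endpoint + " -> " + endpointToTokens.get(endpoint));
        }
    }
}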
From source file:com.ardor3d.util.scenegraph.DisplayListDelegate.java
private static void handleDisplayListDelete(final Renderer deleter, final Multimap<Object, Integer> idMap) {
    Object currentGLRef = null;
    // Grab the current context, if any.
    if (deleter != null && ContextManager.getCurrentContext() != null) {
        currentGLRef = ContextManager.getCurrentContext().getGlContextRep();
    }
    // For each affected context...
    for (final Object glref : idMap.keySet()) {
        // If we have a deleter and the context is current, immediately delete
        if (deleter != null && glref.equals(currentGLRef)) {
            deleter.deleteDisplayLists(idMap.get(glref));
        }
        // Otherwise, add a delete request to that context's render task queue.
        else {
            GameTaskQueueManager.getManager(ContextManager.getContextForRef(glref))
                    .render(new RendererCallable<Void>() {
                        public Void call() throws Exception {
                            getRenderer().deleteDisplayLists(idMap.get(glref));
                            return null;
                        }
                    });
        }
    }
}
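One property worth noting in the deferred branch: the collection returned by idMap.get(glref) is a live view, so a task that runs later sees whatever the multimap holds at execution time, not a snapshot. A minimal sketch of that behavior (a Runnable stands in for the render task queue; names are illustrative):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.util.Collection;

public class LiveViewSketch {
    public static void main(String[] args) {
        Multimap<String, Integer> idMap = HashMultimap.create();
        idMap.put("ctx1", 10);

        // Capture the view now, run later (stand-in for the render task queue).
        Collection<Integer> view = idMap.get("ctx1");
        Runnable deferred = () -> System.out.println("deleting " + view);

        idMap.put("ctx1", 11); // happens before the deferred task runs
        deferred.run(); // deleting [10, 11] -- the view reflects the update
    }
}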
From source file:com.google.javascript.jscomp.lint.CheckRequiresSorted.java
/**
 * Canonicalizes a list of import statements by deduplicating and merging imports for the same
 * namespace, and sorting the result.
 */
private static List<ImportStatement> canonicalizeImports(Multimap<String, ImportStatement> importsByNamespace) {
    List<ImportStatement> canonicalImports = new ArrayList<>();

    for (String namespace : importsByNamespace.keySet()) {
        Collection<ImportStatement> allImports = importsByNamespace.get(namespace);

        // Find the strongest primitive across all existing imports. Every emitted import for this
        // namespace will use this primitive. This makes the logic simpler and cannot change runtime
        // behavior, but may produce spurious changes when multiple aliasing imports of differing
        // strength exist (which are already in violation of the style guide).
        ImportPrimitive strongestPrimitive = allImports.stream().map(ImportStatement::primitive)
                .reduce(ImportPrimitive.WEAKEST, ImportPrimitive::stronger);

        // Emit each aliasing import separately, as deduplicating them would require code references
        // to be rewritten.
        boolean hasAliasing = false;
        for (ImportStatement stmt : Iterables.filter(allImports, ImportStatement::isAliasing)) {
            canonicalImports.add(stmt.upgrade(strongestPrimitive));
            hasAliasing = true;
        }

        // Emit a single destructuring import with a non-empty pattern, merged from the existing
        // destructuring imports.
        boolean hasDestructuring = false;
        ImmutableList<Node> destructuringNodes = allImports.stream().filter(ImportStatement::isDestructuring)
                .flatMap(i -> i.nodes().stream()).collect(toImmutableList());
        ImmutableList<DestructuringBinding> destructures = allImports.stream()
                .filter(ImportStatement::isDestructuring).flatMap(i -> i.destructures().stream()).distinct()
                .sorted().collect(toImmutableList());
        if (!destructures.isEmpty()) {
            canonicalImports.add(ImportStatement.of(destructuringNodes, strongestPrimitive, namespace,
                    /* alias= */ null, destructures));
            hasDestructuring = true;
        }

        // Emit a standalone import unless an aliasing or destructuring one already exists.
        if (!hasAliasing && !hasDestructuring) {
            ImmutableList<Node> standaloneNodes = allImports.stream().filter(ImportStatement::isStandalone)
                    .flatMap(i -> i.nodes().stream()).collect(toImmutableList());
            canonicalImports.add(ImportStatement.of(standaloneNodes, strongestPrimitive, namespace,
                    /* alias= */ null, /* destructures= */ null));
        }
    }

    // Sorting by natural order yields the correct result due to the implementation of
    // ImportStatement#compareTo.
    Collections.sort(canonicalImports);

    return canonicalImports;
}
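A grouping multimap like importsByNamespace is typically built with Multimaps.index, after which get() yields each namespace's group. A minimal sketch under those assumptions (the Import record and its fields are hypothetical; requires Java 16+ for records):

import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.Multimaps;
import java.util.List;

public class GroupByNamespaceSketch {
    record Import(String namespace, String alias) {}

    public static void main(String[] args) {
        List<Import> imports = List.of(
                new Import("goog.array", null),
                new Import("goog.array", "arrays"),
                new Import("goog.dom", null));

        // Index the imports by namespace; get() then yields each namespace's group.
        ImmutableListMultimap<String, Import> byNamespace =
                Multimaps.index(imports, Import::namespace);
        System.out.println(byNamespace.get("goog.array").size()); // 2
    }
}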
From source file:com.android.tools.idea.templates.GradleFilePsiMerger.java
/**
 * Looks for statements adding dependencies to different configurations (which look like
 * 'configurationName "dependencyCoordinate"') and tries to parse them into Gradle coordinates.
 * If successful, adds the new coordinate to the map and removes the corresponding PsiElement
 * from the tree.
 *
 * @return true if new items were added to the map
 */
private static boolean pullDependenciesIntoMap(@NotNull PsiElement root,
        @NotNull Map<String, Multimap<String, GradleCoordinate>> allConfigurations,
        @Nullable List<String> unparsedDependencies) {
    boolean wasMapUpdated = false;
    for (PsiElement existingElem : root.getChildren()) {
        if (existingElem instanceof GrCall) {
            PsiElement reference = existingElem.getFirstChild();
            if (reference instanceof GrReferenceExpression) {
                final String configurationName = reference.getText();
                boolean parsed = false;
                GrCall call = (GrCall) existingElem;
                GrArgumentList arguments = call.getArgumentList();
                // Don't try merging dependencies if one of them has a closure block attached.
                if (arguments != null && call.getClosureArguments().length == 0) {
                    GrExpression[] expressionArguments = arguments.getExpressionArguments();
                    if (expressionArguments.length == 1 && expressionArguments[0] instanceof GrLiteral) {
                        Object value = ((GrLiteral) expressionArguments[0]).getValue();
                        if (value instanceof String) {
                            String coordinateText = (String) value;
                            GradleCoordinate coordinate = GradleCoordinate.parseCoordinateString(coordinateText);
                            if (coordinate != null) {
                                parsed = true;
                                Multimap<String, GradleCoordinate> map = allConfigurations.get(configurationName);
                                if (map == null) {
                                    map = LinkedListMultimap.create();
                                    allConfigurations.put(configurationName, map);
                                }
                                if (!map.get(coordinate.getId()).contains(coordinate)) {
                                    map.put(coordinate.getId(), coordinate);
                                    existingElem.delete();
                                    wasMapUpdated = true;
                                }
                            }
                        }
                    }
                    if (!parsed && unparsedDependencies != null) {
                        unparsedDependencies.add(existingElem.getText());
                    }
                }
            }
        }
    }
    return wasMapUpdated;
}
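The map.get(coordinate.getId()).contains(coordinate) guard is manual deduplication: LinkedListMultimap preserves insertion order but permits duplicate key-value pairs, whereas a set-based multimap deduplicates on put(). A small sketch contrasting the two (coordinate strings are illustrative):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.LinkedListMultimap;
import com.google.common.collect.Multimap;

public class DedupSketch {
    public static void main(String[] args) {
        // LinkedListMultimap keeps duplicates, so callers must guard with get().contains().
        Multimap<String, String> ordered = LinkedListMultimap.create();
        if (!ordered.get("guava").contains("31.0")) {
            ordered.put("guava", "31.0");
        }

        // A set-based multimap deduplicates on put(), no guard needed.
        Multimap<String, String> deduped = HashMultimap.create();
        deduped.put("guava", "31.0");
        deduped.put("guava", "31.0"); // no-op: put() returns false
        System.out.println(deduped.get("guava").size()); // 1
    }
}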
From source file:com.torodb.torod.db.backends.meta.routines.DeleteDocuments.java
public static int execute(Configuration configuration, CollectionSchema colSchema,
        Multimap<DocStructure, Integer> didsByStructure, @Nonnull DatabaseInterface databaseInterface)
        throws SQLException, RetryTransactionException {
    TableProvider tableProvider = new TableProvider(colSchema);

    DSLContext dsl = DSL.using(configuration);

    Set<SubDocTable> tables = Sets.newHashSet();
    for (DocStructure structure : didsByStructure.keySet()) {
        tables.clear();
        structure.accept(tableProvider, tables);

        executeDeleteSubDocuments(dsl, tables, didsByStructure.get(structure), databaseInterface);
    }

    Set<Integer> dids = Sets.newHashSet(didsByStructure.values());
    return executeDeleteRoots(dsl, colSchema, dids, databaseInterface);
}
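This method uses both halves of the Multimap read API: get() for the per-structure groups, then values() for the flattened union of every document id. A minimal sketch of that two-phase split (string keys stand in for DocStructure; data is illustrative):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.util.HashSet;
import java.util.Set;

public class GroupsThenUnionSketch {
    public static void main(String[] args) {
        Multimap<String, Integer> didsByStructure = HashMultimap.create();
        didsByStructure.putAll("structA", Set.of(1, 2));
        didsByStructure.putAll("structB", Set.of(3));

        // Phase 1: process each group separately via get().
        for (String structure : didsByStructure.keySet()) {
            System.out.println(structure + " -> " + didsByStructure.get(structure));
        }

        // Phase 2: process the union of all values, deduplicated into a Set.
        Set<Integer> allDids = new HashSet<>(didsByStructure.values());
        System.out.println(allDids); // [1, 2, 3]
    }
}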
From source file:jflowmap.views.flowmap.VisualNodeCluster.java
public static List<List<VisualNode>> combineClusters(List<List<VisualNode>> clusters1,
        List<List<VisualNode>> clusters2) {
    Map<VisualNode, Integer> map1 = createNodeToClusterIndexMap(clusters1);
    Map<VisualNode, Integer> map2 = createNodeToClusterIndexMap(clusters2);

    Multimap<Pair<Integer, Integer>, VisualNode> newClusters = LinkedHashMultimap.create();
    for (List<VisualNode> cluster : clusters1) {
        for (VisualNode node : cluster) {
            newClusters.put(Pair.of(map1.get(node), map2.get(node)), node);
        }
    }

    List<List<VisualNode>> newClustersList = Lists.newArrayList();
    for (Pair<Integer, Integer> key : newClusters.asMap().keySet()) {
        newClustersList.add(ImmutableList.copyOf(newClusters.get(key)));
    }
    return newClustersList;
}
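What this computes is the common refinement of two partitions: two nodes stay together only if they were together in both clusterings, which the composite Pair key encodes. A minimal sketch with string "nodes" and a List<Integer> standing in for Pair (all data illustrative):

import com.google.common.collect.LinkedHashMultimap;
import com.google.common.collect.Multimap;
import java.util.List;
import java.util.Map;

public class RefinementSketch {
    public static void main(String[] args) {
        // Cluster index of each node under two different clusterings.
        Map<String, Integer> clustering1 = Map.of("a", 0, "b", 0, "c", 1);
        Map<String, Integer> clustering2 = Map.of("a", 0, "b", 1, "c", 1);

        // Key each node by its (index1, index2) pair; equal pairs share a cluster.
        Multimap<List<Integer>, String> refined = LinkedHashMultimap.create();
        for (String node : clustering1.keySet()) {
            refined.put(List.of(clustering1.get(node), clustering2.get(node)), node);
        }

        // "a" and "b" are split apart because clustering2 separates them.
        System.out.println(refined.asMap()); // e.g. {[0, 0]=[a], [0, 1]=[b], [1, 1]=[c]}
    }
}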
From source file:com.zimbra.cs.db.DbBlobConsistency.java
public static int getNumRows(DbConnection conn, Mailbox mbox, String tableName, String idColName,
        Multimap<Integer, Integer> idRevs) throws ServiceException {
    Set<Integer> mail_itemIds = new HashSet<Integer>();
    Multimap<Integer, Integer> rev_itemIds = HashMultimap.create();
    for (Integer itemId : idRevs.keySet()) {
        Collection<Integer> revs = idRevs.get(itemId);
        for (int rev : revs) {
            if (rev == 0) {
                mail_itemIds.add(itemId);
            } else {
                rev_itemIds.put(itemId, rev);
            }
        }
    }

    PreparedStatement stmt = null;
    ResultSet rs = null;
    try {
        StringBuffer sql = new StringBuffer();
        boolean revisionTable = tableName.startsWith(DbMailItem.TABLE_REVISION);

        sql.append("SELECT COUNT(*) FROM ").append(DbMailbox.qualifyTableName(mbox, tableName))
                .append(" WHERE ").append(DbMailItem.IN_THIS_MAILBOX_AND);

        if (!revisionTable || mail_itemIds.size() > 0) {
            if (mail_itemIds.size() == 0) {
                sql.append(idColName).append(" in ('')");
            } else {
                sql.append(DbUtil.whereIn(idColName, mail_itemIds.size()));
            }
        }
        if (revisionTable) {
            if (mail_itemIds.size() > 0 && rev_itemIds.size() > 0) {
                sql.append(" OR ");
            }
            if (rev_itemIds.size() > 0) {
                sql.append(DbUtil.whereIn(Db.getInstance().concat(idColName, "'-'", "version"),
                        rev_itemIds.size()));
            }
        }
        stmt = conn.prepareStatement(sql.toString());
        int pos = 1;
        pos = DbMailItem.setMailboxId(stmt, mbox, pos);
        for (int itemId : mail_itemIds) {
            stmt.setInt(pos++, itemId);
        }
        if (revisionTable) {
            for (Integer itemId : rev_itemIds.keySet()) {
                Collection<Integer> revs = rev_itemIds.get(itemId);
                for (int rev : revs) {
                    stmt.setString(pos++, itemId + "-" + rev);
                }
            }
        }
        rs = stmt.executeQuery();
        rs.next();
        return rs.getInt(1);
    } catch (SQLException e) {
        throw ServiceException.FAILURE("getting number of rows for matching id's in " + tableName, e);
    } finally {
        DbPool.closeResults(rs);
        DbPool.quietCloseStatement(stmt);
    }
}
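The splitting loop at the top partitions idRevs by whether the revision is 0. Under the assumption that live filtered views are acceptable, the same partition could also be expressed with Multimaps.filterEntries; a minimal sketch with illustrative ids:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;

public class PartitionSketch {
    public static void main(String[] args) {
        Multimap<Integer, Integer> idRevs = HashMultimap.create();
        idRevs.put(100, 0); // rev 0: the current item
        idRevs.put(101, 2); // rev != 0: an older revision
        idRevs.put(101, 3);

        // Live views that partition the entries by revision number.
        Multimap<Integer, Integer> currentItems =
                Multimaps.filterEntries(idRevs, e -> e.getValue() == 0);
        Multimap<Integer, Integer> revisions =
                Multimaps.filterEntries(idRevs, e -> e.getValue() != 0);

        System.out.println(currentItems.keySet()); // [100]
        System.out.println(revisions.get(101));    // [2, 3] (order not guaranteed)
    }
}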
From source file:ai.grakn.graql.internal.query.QueryOperationExecutor.java
/**
 * <a href=https://en.wikipedia.org/wiki/Composition_of_relations>Compose</a> two {@link Multimap}s together,
 * treating them like many-to-many relations.
 */
private static <K, T, V> Multimap<K, V> composeMultimaps(Multimap<K, T> map1, Multimap<T, V> map2) {
    Multimap<K, V> composed = HashMultimap.create();
    for (Map.Entry<K, T> entry1 : map1.entries()) {
        K key = entry1.getKey();
        T intermediateValue = entry1.getValue();
        for (V value : map2.get(intermediateValue)) {
            composed.put(key, value);
        }
    }
    return composed;
}
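A quick usage sketch of the composition (the role/permission data is illustrative and the loop inlines the helper's logic): if map1 relates people to roles and map2 relates roles to permissions, the composition relates people directly to permissions.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.util.List;
import java.util.Map;

public class ComposeSketch {
    public static void main(String[] args) {
        Multimap<String, String> personToRole = HashMultimap.create();
        personToRole.put("ann", "admin");
        personToRole.put("bob", "viewer");

        Multimap<String, String> roleToPermission = HashMultimap.create();
        roleToPermission.putAll("admin", List.of("read", "write"));
        roleToPermission.put("viewer", "read");

        // Compose: for each (person, role) entry, pull the role's permissions with get().
        Multimap<String, String> personToPermission = HashMultimap.create();
        for (Map.Entry<String, String> e : personToRole.entries()) {
            personToPermission.putAll(e.getKey(), roleToPermission.get(e.getValue()));
        }
        System.out.println(personToPermission.get("ann")); // [read, write] (order may vary)
    }
}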
From source file:com.zimbra.cs.db.DbBlobConsistency.java
public static void delete(DbConnection conn, Mailbox mbox, Multimap<Integer, Integer> idRevs)
        throws ServiceException {
    Set<Integer> mail_itemIds = new HashSet<Integer>();
    Multimap<Integer, Integer> rev_itemIds = HashMultimap.create();
    for (Integer itemId : idRevs.keySet()) {
        Collection<Integer> revs = idRevs.get(itemId);
        for (int rev : revs) {
            if (rev == 0) {
                mail_itemIds.add(itemId);
            } else {
                rev_itemIds.put(itemId, rev);
            }
        }
    }

    if (mail_itemIds.size() > 0) {
        PreparedStatement miDumpstmt = null;
        try {
            StringBuffer sql = new StringBuffer();
            sql.append("DELETE FROM ").append(DbMailItem.getMailItemTableName(mbox, true)).append(" WHERE ")
                    .append(DbMailItem.IN_THIS_MAILBOX_AND).append(DbUtil.whereIn("id", mail_itemIds.size()));

            miDumpstmt = conn.prepareStatement(sql.toString());
            int pos = 1;
            pos = DbMailItem.setMailboxId(miDumpstmt, mbox, pos);
            for (int itemId : mail_itemIds) {
                miDumpstmt.setInt(pos++, itemId);
            }
            miDumpstmt.execute();
        } catch (SQLException e) {
            throw ServiceException.FAILURE("deleting " + idRevs.size() + " item(s): "
                    + DbMailItem.getIdListForLogging(idRevs.keys()) + " from "
                    + DbMailItem.TABLE_MAIL_ITEM_DUMPSTER + " table", e);
        } finally {
            DbPool.quietCloseStatement(miDumpstmt);
        }
    }

    if (rev_itemIds.size() > 0) {
        PreparedStatement revDumpstmt = null;
        try {
            StringBuffer sql = new StringBuffer();
            sql.append("DELETE FROM ").append(DbMailItem.getRevisionTableName(mbox, true)).append(" WHERE ")
                    .append(DbMailItem.IN_THIS_MAILBOX_AND)
                    .append(DbUtil.whereIn(Db.getInstance().concat("item_id", "'-'", "version"),
                            rev_itemIds.size()));

            revDumpstmt = conn.prepareStatement(sql.toString());
            int pos = 1;
            pos = DbMailItem.setMailboxId(revDumpstmt, mbox, pos);
            for (Integer itemId : rev_itemIds.keySet()) {
                Collection<Integer> revs = rev_itemIds.get(itemId);
                for (int rev : revs) {
                    revDumpstmt.setString(pos++, itemId + "-" + rev);
                }
            }
            revDumpstmt.execute();
        } catch (SQLException e) {
            throw ServiceException.FAILURE("deleting " + idRevs.size() + " item(s): "
                    + DbMailItem.getIdListForLogging(idRevs.keys()) + " from "
                    + DbMailItem.TABLE_REVISION_DUMPSTER + " table", e);
        } finally {
            DbPool.quietCloseStatement(revDumpstmt);
        }
    }
}