List of usage examples for com.google.common.collect Multimap keySet
Set<K> keySet();
From source file:com.gradleware.tooling.toolingmodel.repository.internal.DefaultOmniBuildInvocationsContainerBuilder.java
private static ImmutableSortedMap<Path, OmniBuildInvocations> buildBuildInvocationsMapping( GradleProject project, Multimap<Path, OmniProjectTask> projectTasks, Multimap<Path, OmniTaskSelector> taskSelectors) { Preconditions.checkState(taskSelectors.keySet().containsAll(projectTasks.keySet()), "Task selectors are always configured for all projects"); // create mappings for all projects which contain tasks selectors (which covers at least those projects that contain project tasks) ImmutableSortedMap.Builder<Path, OmniBuildInvocations> mapping = ImmutableSortedMap .orderedBy(Path.Comparator.INSTANCE); for (Path projectPath : taskSelectors.keySet()) { ImmutableList<OmniProjectTask> projectTasksOfProject = ImmutableSortedSet .orderedBy(TaskComparator.INSTANCE).addAll(projectTasks.get(projectPath)).build().asList(); ImmutableList<OmniTaskSelector> taskSelectorsOfProject = ImmutableSortedSet .orderedBy(TaskSelectorComparator.INSTANCE).addAll(taskSelectors.get(projectPath)).build() .asList();/* w w w . jav a 2s . com*/ mapping.put(projectPath, DefaultOmniBuildInvocations.from(projectTasksOfProject, taskSelectorsOfProject)); } // create additional mappings for all those projects which do not contain any task selectors // this is the case if a project does not contain any tasks nor does any of its child projects // these additional mappings ensure the caller never gets back null for any project in the hierarchy Set<Path> projectPaths = Sets.newLinkedHashSet(); collectProjectPathsRecursively(project, projectPaths); projectPaths.removeAll(taskSelectors.keySet()); for (Path projectPath : projectPaths) { mapping.put(projectPath, DefaultOmniBuildInvocations.from(ImmutableList.<OmniProjectTask>of(), ImmutableList.<OmniTaskSelector>of())); } return mapping.build(); }
From source file:com.eucalyptus.vm.NetworkGroupsMetadata.java
private static String rulesToString(Multimap<String, String> rules) { StringBuilder buf = new StringBuilder(); for (String networkName : rules.keySet()) { for (String rule : rules.get(networkName)) { buf.append("RULE ").append(networkName).append(" ").append(rule).append("\n"); }//from www . j a va 2 s . c om } return buf.toString(); }
From source file:com.eucalyptus.vm.NetworkGroupsMetadata.java
private static String groupsToString(Multimap<String, String> networks) { StringBuilder buf = new StringBuilder(); for (String networkName : networks.keySet()) { buf.append("GROUP ").append(networkName); for (String ip : networks.get(networkName)) { buf.append(" ").append(ip); }// w w w .j a v a 2 s . co m buf.append("\n"); } return buf.toString(); }
From source file:edu.isi.karma.modeling.research.ComputeGED.java
/**
 * Computes graph edit distances for the "app1" experiment: for each service found in
 * {@code Params.JGRAPHT_DIR}, compares the gold-standard ("main") graph against the
 * Karma initial/final graphs and the top-3 ranked app1 graphs, then exports all graphs
 * with their distances to a Graphviz .dot file in {@code Params.OUTPUT_DIR}.
 *
 * @throws Exception if deserialization or export fails
 */
private static void computeGEDApp1() throws Exception {
    File ff = new File(Params.JGRAPHT_DIR);
    File[] files = ff.listFiles();
    DirectedWeightedMultigraph<Node, Link> gMain, gKarmaInitial, gKarmaFinal, gApp1Rank1, gApp1Rank2,
            gApp1Rank3;
    HashSet<File> fileSet = new HashSet<File>(Arrays.asList(files));
    // Groups files by service: everything before the first '.' in the file name is the
    // service identifier shared by all variants of that service's graph.
    Function<File, String> sameService = new Function<File, String>() {
        @Override
        public String apply(final File s) {
            return s.getName().substring(0, s.getName().indexOf('.'));
        }
    };
    Multimap<String, File> index = Multimaps.index(fileSet, sameService);
    for (String s : index.keySet()) {
        System.out.println(s);
        Collection<File> serviceFiles = index.get(s);
        gMain = null;
        gKarmaInitial = null;
        gKarmaFinal = null;
        gApp1Rank1 = null;
        gApp1Rank2 = null;
        gApp1Rank3 = null;
        // Dispatch each file of this service to its slot based on the file-name suffix.
        for (File f : serviceFiles) {
            if (f.getName().endsWith(".main.jgraph")) {
                gMain = GraphUtil.deserialize(f.getPath());
            } else if (f.getName().endsWith(".karma.initial.jgraph")) {
                gKarmaInitial = GraphUtil.deserialize(f.getPath());
            } else if (f.getName().endsWith(".karma.final.jgraph")) {
                gKarmaFinal = GraphUtil.deserialize(f.getPath());
            } else if (f.getName().endsWith(".app1.rank1.jgraph")) {
                gApp1Rank1 = GraphUtil.deserialize(f.getPath());
            } else if (f.getName().endsWith(".app1.rank2.jgraph")) {
                gApp1Rank2 = GraphUtil.deserialize(f.getPath());
            } else if (f.getName().endsWith(".app1.rank3.jgraph")) {
                gApp1Rank3 = GraphUtil.deserialize(f.getPath());
            }
        }
        // Without a gold-standard graph there is nothing to compare against.
        if (gMain == null)
            continue;
        String label;
        double distance;
        // TreeMap so the export iterates graphs in label order; the numeric label
        // prefixes ("0-", "1-", "3-", ...) define that ordering.
        Map<String, DirectedWeightedMultigraph<Node, Link>> graphs = new TreeMap<String, DirectedWeightedMultigraph<Node, Link>>();
        label = "0- Main";
        graphs.put(label, gMain);
        if (gKarmaInitial != null) {
            distance = Util.getDistance(gMain, gKarmaInitial);
            label = "1-Karma Initial" + "-distance:" + distance;
            graphs.put(label, gKarmaInitial);
        }
        if (gKarmaFinal != null) {
            distance = Util.getDistance(gMain, gKarmaFinal);
            label = "3-Karma Final" + "-distance:" + distance;
            graphs.put(label, gKarmaFinal);
        }
        if (gApp1Rank1 != null) {
            distance = Util.getDistance(gMain, gApp1Rank1);
            label = "4-Rank1" + "-distance:" + distance;
            graphs.put(label, gApp1Rank1);
        }
        if (gApp1Rank2 != null) {
            distance = Util.getDistance(gMain, gApp1Rank2);
            label = "5-Rank2" + "-distance:" + distance;
            graphs.put(label, gApp1Rank2);
        }
        if (gApp1Rank3 != null) {
            distance = Util.getDistance(gMain, gApp1Rank3);
            label = "6-Rank3" + "-distance:" + distance;
            graphs.put(label, gApp1Rank3);
        }
        GraphVizUtil.exportJGraphToGraphvizFile(graphs, s, Params.OUTPUT_DIR + s + ".app1.out.dot");
    }
}
From source file:org.eclipse.ocl.examples.codegen.cse.HashedAnalyses.java
/**
 * Prints the multimap to {@code appendable} with one header line per key (in ascending
 * key order) followed by one tab-indented line per value under that key.
 * <p>
 * An {@link IOException} from the appendable is caught and its stack trace printed;
 * it is not propagated to the caller.
 *
 * @param appendable destination for the output
 * @param map values grouped by integer key
 * @param indentation prefix prepended to every emitted line
 * @param title text printed before each key on its header line
 */
public static <V> void printIndented(@NonNull Appendable appendable, @NonNull Multimap<Integer, V> map,
        @NonNull String indentation, @NonNull String title) {
    try {
        List<Integer> sortedKeys = new ArrayList<Integer>(map.keySet());
        Collections.sort(sortedKeys);
        for (Integer key : sortedKeys) {
            String header = indentation + title + " " + key + "\n";
            appendable.append(header);
            for (V analysis : map.get(key)) {
                String entry = indentation + "\t" + analysis.toString() + "\n";
                appendable.append(entry);
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}
From source file:scoutdoc.main.check.CheckstyleFileWriter.java
/**
 * Writes the given checks to {@code w} as a Checkstyle XML report, with one
 * {@code <file>} element per affected page file (keyed by absolute path) and one
 * {@code <error>} element per check.
 * <p>
 * Fix: attribute values (file name, severity, message, source) are now XML-escaped;
 * previously a message containing {@code "}, {@code &} or {@code <} produced
 * malformed XML that report consumers could not parse.
 *
 * @param list checks to report; grouped internally by file
 * @param w destination writer; not closed by this method
 */
public static void write(List<Check> list, PrintWriter w) {
    w.println(XML_VERSION);
    w.println(CHECKSTYLE_OPEN);

    // Group the checks by the absolute path of the page file they refer to.
    Multimap<String, Check> checksByFile = ArrayListMultimap.create();
    for (Check check : list) {
        String filePath = PageUtility.toFile(check.getPage()).getAbsolutePath();
        checksByFile.put(filePath, check);
    }

    for (String file : checksByFile.keySet()) {
        w.println("<file name=\"" + escapeXmlAttribute(file) + "\">");
        for (Check check : checksByFile.get(file)) {
            w.print("<error");
            w.print(" line=\"" + check.getLine() + "\"");
            w.print(" column=\"" + check.getColumn() + "\"");
            w.print(" severity=\""
                    + escapeXmlAttribute(String.valueOf(Objects.firstNonNull(check.getSeverity(), ""))) + "\"");
            w.print(" message=\"" + escapeXmlAttribute(Strings.nullToEmpty(check.getMessage())) + "\"");
            w.print(" source=\"" + escapeXmlAttribute(Strings.nullToEmpty(check.getSource())) + "\"");
            w.println("/>");
        }
        w.println(FILE_CLOSE);
    }
    w.println(CHECKSTYLE_CLOSE);
}

/** Escapes the five XML special characters so {@code s} is safe inside a quoted attribute. */
private static String escapeXmlAttribute(String s) {
    StringBuilder out = new StringBuilder(s.length());
    for (int i = 0; i < s.length(); i++) {
        char c = s.charAt(i);
        switch (c) {
        case '&':
            out.append("&amp;");
            break;
        case '<':
            out.append("&lt;");
            break;
        case '>':
            out.append("&gt;");
            break;
        case '"':
            out.append("&quot;");
            break;
        case '\'':
            out.append("&apos;");
            break;
        default:
            out.append(c);
        }
    }
    return out.toString();
}
From source file:org.deephacks.confit.internal.hbase.HBeanPredecessors.java
public static Multimap<byte[], byte[]> getPredecessors(Multimap<String, String> predecessors, final UniqueIds uids) { final Multimap<byte[], byte[]> bytes = ArrayListMultimap.create(); for (String schemaName : predecessors.keySet()) { final byte[] sid = uids.getUsid().getId(schemaName); Collection<String> ids = predecessors.get(schemaName); bytes.put(sid, HBeanReferences.getIids(new ArrayList<String>(ids), uids)); }//from w w w . j a v a 2 s. co m return bytes; }
From source file:org.semanticweb.owlapi.model.OWLDocumentFormat.java
/**
 * Computes the IRIs that are illegally punned: those declared as two incompatible
 * entity types at once (object/data/annotation property combinations, or both a
 * datatype and a class).
 *
 * @param punnings entity types grouped by IRI
 * @return the IRIs with an illegal type combination
 */
static Collection<IRI> computeIllegals(Multimap<IRI, EntityType<?>> punnings) {
    Collection<IRI> illegals = new HashSet<>();
    for (IRI iri : punnings.keySet()) {
        Collection<EntityType<?>> types = punnings.get(iri);
        boolean propertyClash = (types.contains(EntityType.OBJECT_PROPERTY)
                && types.contains(EntityType.ANNOTATION_PROPERTY))
                || (types.contains(EntityType.DATA_PROPERTY)
                        && types.contains(EntityType.ANNOTATION_PROPERTY))
                || (types.contains(EntityType.DATA_PROPERTY) && types.contains(EntityType.OBJECT_PROPERTY));
        boolean datatypeClash = types.contains(EntityType.DATATYPE) && types.contains(EntityType.CLASS);
        if (propertyClash || datatypeClash) {
            illegals.add(iri);
        }
    }
    return illegals;
}
From source file:org.fenixedu.academic.domain.person.HumanName.java
private static boolean isException(char[] buffer, int i, Multimap<Integer, String> exceptionBySize) { for (Integer size : exceptionBySize.keySet()) { if (i + size > buffer.length) { continue; }//from w ww .j a va 2 s. c o m if (exceptionBySize.get(size).contains(String.valueOf(buffer, i, size))) { return true; } } return false; }
From source file:org.apache.cassandra.service.PendingRangeCalculatorService.java
/** * Calculate pending ranges according to bootsrapping and leaving nodes. Reasoning is: * * (1) When in doubt, it is better to write too much to a node than too little. That is, if * there are multiple nodes moving, calculate the biggest ranges a node could have. Cleaning * up unneeded data afterwards is better than missing writes during movement. * (2) When a node leaves, ranges for other nodes can only grow (a node might get additional * ranges, but it will not lose any of its current ranges as a result of a leave). Therefore * we will first remove _all_ leaving tokens for the sake of calculation and then check what * ranges would go where if all nodes are to leave. This way we get the biggest possible * ranges with regard current leave operations, covering all subsets of possible final range * values.//from ww w. j a v a2 s .com * (3) When a node bootstraps, ranges of other nodes can only get smaller. Without doing * complex calculations to see if multiple bootstraps overlap, we simply base calculations * on the same token ring used before (reflecting situation after all leave operations have * completed). Bootstrapping nodes will be added and removed one by one to that metadata and * checked what their ranges would be. This will give us the biggest possible ranges the * node could have. It might be that other bootstraps make our actual final ranges smaller, * but it does not matter as we can clean up the data afterwards. * * NOTE: This is heavy and ineffective operation. This will be done only once when a node * changes state in the cluster, so it should be manageable. 
*/ // public & static for testing purposes public static void calculatePendingRanges(AbstractReplicationStrategy strategy, String keyspaceName) { TokenMetadata tm = StorageService.instance.getTokenMetadata(); Multimap<Range<Token>, InetAddress> pendingRanges = HashMultimap.create(); BiMultiValMap<Token, InetAddress> bootstrapTokens = tm.getBootstrapTokens(); Set<InetAddress> leavingEndpoints = tm.getLeavingEndpoints(); if (bootstrapTokens.isEmpty() && leavingEndpoints.isEmpty() && tm.getMovingEndpoints().isEmpty()) { if (logger.isDebugEnabled()) logger.debug( "No bootstrapping, leaving or moving nodes, and no relocating tokens -> empty pending ranges for {}", keyspaceName); tm.setPendingRanges(keyspaceName, pendingRanges); return; } Multimap<InetAddress, Range<Token>> addressRanges = strategy.getAddressRanges(); // Copy of metadata reflecting the situation after all leave operations are finished. TokenMetadata allLeftMetadata = tm.cloneAfterAllLeft(); // get all ranges that will be affected by leaving nodes Set<Range<Token>> affectedRanges = new HashSet<Range<Token>>(); for (InetAddress endpoint : leavingEndpoints) affectedRanges.addAll(addressRanges.get(endpoint)); // for each of those ranges, find what new nodes will be responsible for the range when // all leaving nodes are gone. TokenMetadata metadata = tm.cloneOnlyTokenMap(); // don't do this in the loop! #7758 for (Range<Token> range : affectedRanges) { Set<InetAddress> currentEndpoints = ImmutableSet .copyOf(strategy.calculateNaturalEndpoints(range.right, metadata)); Set<InetAddress> newEndpoints = ImmutableSet .copyOf(strategy.calculateNaturalEndpoints(range.right, allLeftMetadata)); pendingRanges.putAll(range, Sets.difference(newEndpoints, currentEndpoints)); } // At this stage pendingRanges has been updated according to leave operations. We can // now continue the calculation by checking bootstrapping nodes. 
// For each of the bootstrapping nodes, simply add and remove them one by one to // allLeftMetadata and check in between what their ranges would be. Multimap<InetAddress, Token> bootstrapAddresses = bootstrapTokens.inverse(); for (InetAddress endpoint : bootstrapAddresses.keySet()) { Collection<Token> tokens = bootstrapAddresses.get(endpoint); allLeftMetadata.updateNormalTokens(tokens, endpoint); for (Range<Token> range : strategy.getAddressRanges(allLeftMetadata).get(endpoint)) pendingRanges.put(range, endpoint); allLeftMetadata.removeEndpoint(endpoint); } // At this stage pendingRanges has been updated according to leaving and bootstrapping nodes. // We can now finish the calculation by checking moving and relocating nodes. // For each of the moving nodes, we do the same thing we did for bootstrapping: // simply add and remove them one by one to allLeftMetadata and check in between what their ranges would be. for (Pair<Token, InetAddress> moving : tm.getMovingEndpoints()) { InetAddress endpoint = moving.right; // address of the moving node // moving.left is a new token of the endpoint allLeftMetadata.updateNormalToken(moving.left, endpoint); for (Range<Token> range : strategy.getAddressRanges(allLeftMetadata).get(endpoint)) { pendingRanges.put(range, endpoint); } allLeftMetadata.removeEndpoint(endpoint); } tm.setPendingRanges(keyspaceName, pendingRanges); if (logger.isDebugEnabled()) logger.debug("Pending ranges:\n" + (pendingRanges.isEmpty() ? "<empty>" : tm.printPendingRanges())); }