List of usage examples for com.google.common.collect.Sets.difference
public static <E> SetView<E> difference(final Set<E> set1, final Set<?> set2)
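Sets.difference returns a lazy, unmodifiable SetView of set1 minus set2 rather than a snapshot, so it tracks later changes to the backing sets. A minimal standalone sketch of that behavior (illustrative class and variable names, not taken from any of the projects below):

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.HashSet;
import java.util.Set;

public class SetsDifferenceDemo {
    public static void main(String[] args) {
        Set<String> deployed = new HashSet<>(ImmutableSet.of("a", "b", "c"));
        Set<String> desired = ImmutableSet.of("b", "c", "d");

        // Elements in 'deployed' that are not in 'desired'. This is a live view:
        // it is recomputed on access and reflects changes to the backing sets.
        Sets.SetView<String> stale = Sets.difference(deployed, desired);
        System.out.println(stale);    // [a]            (iteration order may vary)

        deployed.add("x");
        System.out.println(stale);    // [a, x]         the view tracks the backing set

        // Take an immutable snapshot when the result must not change afterwards.
        ImmutableSet<String> snapshot = stale.immutableCopy();
        System.out.println(snapshot); // [a, x]
    }
}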
From source file:ws.doerr.monitor.Monitor.java
private void updateWatches() {
    // Add any missing watches
    synchronized (folderToKey) {
        folderToHandler.keySet().forEach(folder -> {
            try {
                if (!folderToKey.containsKey(folder)) {
                    WatchKey key = folder.register(service, ENTRY_CREATE, ENTRY_DELETE, ENTRY_MODIFY);
                    folderToKey.put(folder, key);
                    keyToFolder.put(key, folder);
                }
            } catch (IOException ex) {
                LOG.log(Level.WARNING, "Exception adding watch for " + folder.toString(), ex);
            }
        });

        Set<Path> remove = Sets.difference(folderToKey.keySet(), folderToHandler.keySet());
        remove.forEach(folder -> {
            WatchKey key = folderToKey.remove(folder);
            key.cancel();
            keyToFolder.remove(key);
        });
    }
}
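One caveat with the snippet above: remove is a live view backed by folderToKey.keySet(), so removing entries from folderToKey while iterating the view risks a ConcurrentModificationException. A hedged sketch of the snapshot-first variant, using illustrative stand-in types rather than Monitor.java's real ones:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class WatchCleanupSketch {
    // Illustrative stand-ins for the folder-to-key bookkeeping in the snippet above.
    private final Map<String, Integer> folderToKey = new HashMap<>();
    private final Map<String, Integer> folderToHandler = new HashMap<>();

    void removeStaleWatches() {
        // Copy the lazy view first; iterating the view directly while mutating
        // folderToKey could throw a ConcurrentModificationException.
        Set<String> stale = ImmutableSet.copyOf(
                Sets.difference(folderToKey.keySet(), folderToHandler.keySet()));
        for (String folder : stale) {
            folderToKey.remove(folder);
        }
    }

    public static void main(String[] args) {
        WatchCleanupSketch sketch = new WatchCleanupSketch();
        sketch.folderToKey.put("/tmp/a", 1);
        sketch.folderToKey.put("/tmp/b", 2);
        sketch.folderToHandler.put("/tmp/a", 1);
        sketch.removeStaleWatches();
        System.out.println(sketch.folderToKey.keySet()); // [/tmp/a]
    }
}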
From source file:com.google.javascript.jscomp.ProcessDefines.java
private void overrideDefines(Map<String, DefineInfo> allDefines) {
    boolean changed = false;
    for (Map.Entry<String, DefineInfo> def : allDefines.entrySet()) {
        String defineName = def.getKey();
        DefineInfo info = def.getValue();
        Node inputValue = dominantReplacements.get(defineName);
        Node finalValue = inputValue != null ? inputValue : info.getLastValue();
        if (finalValue != info.initialValue) {
            info.initialValueParent.replaceChild(info.initialValue, finalValue.cloneTree());
            compiler.addToDebugLog("Overriding @define variable " + defineName);
            changed = changed || finalValue.getType() != info.initialValue.getType()
                    || !finalValue.isEquivalentTo(info.initialValue);
        }
    }

    if (changed) {
        compiler.reportCodeChange();
    }

    Set<String> unusedReplacements = Sets.difference(dominantReplacements.keySet(),
            Sets.union(KNOWN_DEFINES, allDefines.keySet()));
    for (String unknownDefine : unusedReplacements) {
        compiler.report(JSError.make(UNKNOWN_DEFINE_WARNING, unknownDefine));
    }
}
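The warning pass above chains two lazy views: anything in dominantReplacements that is neither a known define nor a declared one gets reported. A standalone sketch of the same whitelist idiom, with hypothetical flag names in place of the compiler's internals:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.Set;

public class UnknownKeyCheck {
    private static final Set<String> KNOWN_FLAGS = ImmutableSet.of("DEBUG", "LOCALE");

    /** Returns the supplied overrides that match neither a built-in nor a declared flag. */
    static Set<String> unknownOverrides(Set<String> overrides, Set<String> declaredFlags) {
        // Sets.union and Sets.difference both return lazy views, so no
        // intermediate collections are materialized here.
        return Sets.difference(overrides, Sets.union(KNOWN_FLAGS, declaredFlags));
    }

    public static void main(String[] args) {
        Set<String> unknown = unknownOverrides(
                ImmutableSet.of("DEBUG", "THEME", "TYPO"),
                ImmutableSet.of("THEME"));
        System.out.println(unknown); // [TYPO]
    }
}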
From source file:com.opengamma.strata.measure.bond.DefaultLegalEntityDiscountingMarketDataLookup.java
@ImmutableValidator
private void validate() {
    Set<RepoGroup> uniqueRepoGroups = new HashSet<>(repoCurveGroups.values());
    Set<RepoGroup> uniqueRepoCurves = repoCurves.keySet().stream()
            .map(p -> p.getFirst())
            .collect(toImmutableSet());
    if (!uniqueRepoCurves.containsAll(uniqueRepoGroups)) {
        throw new IllegalArgumentException("Repo curve groups defined without matching curve mappings: "
                + Sets.difference(uniqueRepoGroups, uniqueRepoCurves));
    }
    Set<LegalEntityGroup> uniqueIssuerGroups = new HashSet<>(issuerCurveGroups.values());
    Set<LegalEntityGroup> uniqueIssuerCurves = issuerCurves.keySet().stream()
            .map(p -> p.getFirst())
            .collect(toImmutableSet());
    if (!uniqueIssuerCurves.containsAll(uniqueIssuerGroups)) {
        throw new IllegalArgumentException("Issuer curve groups defined without matching curve mappings: "
                + Sets.difference(uniqueIssuerGroups, uniqueIssuerCurves));
    }
}
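A detail worth noting: the difference is built only inside the exception message, so on the happy path the lazy view is never iterated and costs next to nothing. A reduced sketch of the idiom with hypothetical group names:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.Set;

public class MappingValidation {
    /** Fails if any required group has no curve mapping, naming the offenders. */
    static void requireAllMapped(Set<String> requiredGroups, Set<String> mappedGroups) {
        if (!mappedGroups.containsAll(requiredGroups)) {
            // The lazy view is only iterated here, while building the message.
            throw new IllegalArgumentException(
                    "Groups defined without matching curve mappings: "
                            + Sets.difference(requiredGroups, mappedGroups));
        }
    }

    public static void main(String[] args) {
        requireAllMapped(ImmutableSet.of("g1", "g2"), ImmutableSet.of("g1")); // throws, names g2
    }
}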
From source file:org.waveprotocol.box.server.waveserver.WaveletNotificationDispatcher.java
@Override
public void waveletUpdate(ReadableWaveletData wavelet, ImmutableList<WaveletDeltaRecord> deltas,
        ImmutableSet<String> domainsToNotify) {
    DeltaSequence sequence = DeltaSequence.of(transformedDeltasOf(deltas));
    for (WaveBus.Subscriber s : subscribers) {
        try {
            s.waveletUpdate(wavelet, sequence);
        } catch (RuntimeException e) {
            LOG.severe("Runtime exception in update to wave bus subscriber " + s, e);
        }
    }
    Set<String> remoteDomainsToNotify = Sets.difference(domainsToNotify, localDomains);
    if (!remoteDomainsToNotify.isEmpty()) {
        ImmutableList<ByteString> serializedAppliedDeltas = serializedAppliedDeltasOf(deltas);
        for (String domain : remoteDomainsToNotify) {
            try {
                federationHosts.get(domain).waveletDeltaUpdate(WaveletDataUtil.waveletNameOf(wavelet),
                        serializedAppliedDeltas, federationCallback("delta update"));
            } catch (ExecutionException ex) {
                throw new RuntimeException(ex);
            }
        }
    }
}
From source file:org.apache.rya.indexing.accumulo.entity.AccumuloDocIdIndexer.java
@Override
public CloseableIteration<BindingSet, QueryEvaluationException> queryDocIndex(final StarQuery query,
        final Collection<BindingSet> constraints) throws TableNotFoundException, QueryEvaluationException {

    final StarQuery starQ = query;
    final Iterator<BindingSet> bs = constraints.iterator();
    final Iterator<BindingSet> bs2 = constraints.iterator();
    final Set<String> unCommonVarNames;
    final Set<String> commonVarNames;

    if (bs2.hasNext()) {
        final BindingSet currBs = bs2.next();
        commonVarNames = StarQuery.getCommonVars(query, currBs);
        unCommonVarNames = Sets.difference(currBs.getBindingNames(), commonVarNames);
    } else {
        commonVarNames = Sets.newHashSet();
        unCommonVarNames = Sets.newHashSet();
    }

    if (commonVarNames.size() == 1 && !query.commonVarConstant()
            && commonVarNames.contains(query.getCommonVarName())) {

        final HashMultimap<String, BindingSet> map = HashMultimap.create();
        final String commonVar = starQ.getCommonVarName();
        final Iterator<Entry<Key, Value>> intersections;
        final BatchScanner scan;
        final Set<Range> ranges = Sets.newHashSet();

        while (bs.hasNext()) {
            final BindingSet currentBs = bs.next();
            if (currentBs.getBinding(commonVar) == null) {
                continue;
            }
            final String row = currentBs.getBinding(commonVar).getValue().stringValue();
            ranges.add(new Range(row));
            map.put(row, currentBs);
        }
        scan = runQuery(starQ, ranges);
        intersections = scan.iterator();

        return new CloseableIteration<BindingSet, QueryEvaluationException>() {

            private QueryBindingSet currentSolutionBs = null;
            private boolean hasNextCalled = false;
            private boolean isEmpty = false;
            private Iterator<BindingSet> inputSet = new ArrayList<BindingSet>().iterator();
            private BindingSet currentBs;
            private Key key;

            @Override
            public boolean hasNext() throws QueryEvaluationException {
                if (!hasNextCalled && !isEmpty) {
                    while (inputSet.hasNext() || intersections.hasNext()) {
                        if (!inputSet.hasNext()) {
                            key = intersections.next().getKey();
                            inputSet = map.get(key.getRow().toString()).iterator();
                        }
                        currentBs = inputSet.next();
                        currentSolutionBs = deserializeKey(key, starQ, currentBs, unCommonVarNames);
                        if (currentSolutionBs.size() == unCommonVarNames.size()
                                + starQ.getUnCommonVars().size() + 1) {
                            hasNextCalled = true;
                            return true;
                        }
                    }
                    isEmpty = true;
                    return false;
                } else if (isEmpty) {
                    return false;
                } else {
                    return true;
                }
            }

            @Override
            public BindingSet next() throws QueryEvaluationException {
                if (hasNextCalled) {
                    hasNextCalled = false;
                } else if (isEmpty) {
                    throw new NoSuchElementException();
                } else {
                    if (this.hasNext()) {
                        hasNextCalled = false;
                    } else {
                        throw new NoSuchElementException();
                    }
                }
                return currentSolutionBs;
            }

            @Override
            public void remove() throws QueryEvaluationException {
                throw new UnsupportedOperationException();
            }

            @Override
            public void close() throws QueryEvaluationException {
                scan.close();
            }
        };
    } else {
        return new CloseableIteration<BindingSet, QueryEvaluationException>() {

            @Override
            public void remove() throws QueryEvaluationException {
                throw new UnsupportedOperationException();
            }

            private Iterator<Entry<Key, Value>> intersections = null;
            private QueryBindingSet currentSolutionBs = null;
            private boolean hasNextCalled = false;
            private boolean isEmpty = false;
            private boolean init = false;
            private BindingSet currentBs;
            private StarQuery sq = new StarQuery(starQ);
            private final Set<Range> emptyRangeSet = Sets.newHashSet();
            private BatchScanner scan;

            @Override
            public BindingSet next() throws QueryEvaluationException {
                if (hasNextCalled) {
                    hasNextCalled = false;
                } else if (isEmpty) {
                    throw new NoSuchElementException();
                } else {
                    if (this.hasNext()) {
                        hasNextCalled = false;
                    } else {
                        throw new NoSuchElementException();
                    }
                }
                return currentSolutionBs;
            }

            @Override
            public boolean hasNext() throws QueryEvaluationException {
                if (!init) {
                    if (intersections == null && bs.hasNext()) {
                        currentBs = bs.next();
                        sq = StarQuery.getConstrainedStarQuery(sq, currentBs);
                        scan = runQuery(sq, emptyRangeSet);
                        intersections = scan.iterator();
                        // binding set empty
                    } else if (intersections == null && !bs.hasNext()) {
                        currentBs = new QueryBindingSet();
                        scan = runQuery(starQ, emptyRangeSet);
                        intersections = scan.iterator();
                    }
                    init = true;
                }

                if (!hasNextCalled && !isEmpty) {
                    while (intersections.hasNext() || bs.hasNext()) {
                        if (!intersections.hasNext()) {
                            scan.close();
                            currentBs = bs.next();
                            sq = StarQuery.getConstrainedStarQuery(sq, currentBs);
                            scan = runQuery(sq, emptyRangeSet);
                            intersections = scan.iterator();
                        }
                        if (intersections.hasNext()) {
                            currentSolutionBs = deserializeKey(intersections.next().getKey(), sq,
                                    currentBs, unCommonVarNames);
                        } else {
                            continue;
                        }
                        if (sq.commonVarConstant() && currentSolutionBs.size() == unCommonVarNames.size()
                                + sq.getUnCommonVars().size()) {
                            hasNextCalled = true;
                            return true;
                        } else if (currentSolutionBs.size() == unCommonVarNames.size()
                                + sq.getUnCommonVars().size() + 1) {
                            hasNextCalled = true;
                            return true;
                        }
                    }
                    isEmpty = true;
                    return false;
                } else if (isEmpty) {
                    return false;
                } else {
                    return true;
                }
            }

            @Override
            public void close() throws QueryEvaluationException {
                scan.close();
            }
        };
    }
}
From source file:org.apache.heron.scheduler.dryrun.UpdateTableDryRunRenderer.java
private String renderContainerDiffView(int containerId, ContainersDiffView diffView) {
    StringBuilder builder = new StringBuilder();
    Optional<PackingPlan.ContainerPlan> oldPackingPlan = diffView.getOldPlan();
    Optional<PackingPlan.ContainerPlan> newPackingPlan = diffView.getNewPlan();
    String header = String.format("%s ", formatter.renderContainerName(containerId));
    builder.append(header);
    // Container exists in both old and new packing plan
    if (oldPackingPlan.isPresent() && newPackingPlan.isPresent()) {
        PackingPlan.ContainerPlan newContainerPlan = newPackingPlan.get();
        PackingPlan.ContainerPlan oldContainerPlan = oldPackingPlan.get();
        // Container plan did not change
        if (newContainerPlan.equals(oldContainerPlan)) {
            builder.append(formatter.renderContainerChange(ContainerChange.UNAFFECTED) + "\n");
            String resourceUsage = formatter.renderResourceUsage(newContainerPlan.getRequiredResource());
            List<Row> rows = new ArrayList<>();
            for (PackingPlan.InstancePlan plan : newContainerPlan.getInstances()) {
                rows.add(formatter.rowOfInstancePlan(plan, TextColor.DEFAULT, TextStyle.DEFAULT));
            }
            String containerTable = formatter.renderOneContainer(rows);
            builder.append(resourceUsage + "\n");
            builder.append(containerTable);
        } else {
            // Container plan has changed
            String resourceUsage = formatter.renderResourceUsageChange(
                    oldContainerPlan.getRequiredResource(), newContainerPlan.getRequiredResource());
            Set<PackingPlan.InstancePlan> oldInstancePlans = oldContainerPlan.getInstances();
            Set<PackingPlan.InstancePlan> newInstancePlans = newContainerPlan.getInstances();
            Set<PackingPlan.InstancePlan> unchangedPlans =
                    Sets.intersection(oldInstancePlans, newInstancePlans).immutableCopy();
            Set<PackingPlan.InstancePlan> newPlans = Sets.difference(newInstancePlans, oldInstancePlans);
            Set<PackingPlan.InstancePlan> removedPlans = Sets.difference(oldInstancePlans, newInstancePlans);
            List<Row> rows = new ArrayList<>();
            for (PackingPlan.InstancePlan plan : unchangedPlans) {
                rows.add(formatter.rowOfInstancePlan(plan, TextColor.DEFAULT, TextStyle.DEFAULT));
            }
            for (PackingPlan.InstancePlan plan : newPlans) {
                rows.add(formatter.rowOfInstancePlan(plan, TextColor.GREEN, TextStyle.DEFAULT));
            }
            for (PackingPlan.InstancePlan plan : removedPlans) {
                rows.add(formatter.rowOfInstancePlan(plan, TextColor.RED, TextStyle.STRIKETHROUGH));
            }
            builder.append(formatter.renderContainerChange(ContainerChange.MODIFIED) + "\n");
            builder.append(resourceUsage + "\n");
            String containerTable = formatter.renderOneContainer(rows);
            builder.append(containerTable);
        }
    } else if (oldPackingPlan.isPresent()) {
        // Container has been removed
        PackingPlan.ContainerPlan oldContainerPlan = oldPackingPlan.get();
        List<Row> rows = new ArrayList<>();
        for (PackingPlan.InstancePlan plan : oldContainerPlan.getInstances()) {
            rows.add(formatter.rowOfInstancePlan(plan, TextColor.RED, TextStyle.STRIKETHROUGH));
        }
        builder.append(formatter.renderContainerChange(ContainerChange.REMOVED) + "\n");
        builder.append(formatter.renderResourceUsage(oldContainerPlan.getRequiredResource()) + "\n");
        builder.append(formatter.renderOneContainer(rows));
    } else if (newPackingPlan.isPresent()) {
        // New container has been added
        PackingPlan.ContainerPlan newContainerPlan = newPackingPlan.get();
        List<Row> rows = new ArrayList<>();
        for (PackingPlan.InstancePlan plan : newContainerPlan.getInstances()) {
            rows.add(formatter.rowOfInstancePlan(plan, TextColor.GREEN, TextStyle.DEFAULT));
        }
        builder.append(formatter.renderContainerChange(ContainerChange.NEW) + "\n");
        builder.append(formatter.renderResourceUsage(newContainerPlan.getRequiredResource()) + "\n");
        builder.append(formatter.renderOneContainer(rows));
    } else {
        throw new RuntimeException(
                "Unexpected error: either new container plan or old container plan has to exist");
    }
    return builder.toString();
}
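The intersection-plus-two-differences combination above is a generic three-way diff: unchanged, added, and removed elements each fall out of one view operation. A self-contained sketch with instance plans reduced to strings:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.Set;

public class ThreeWayDiff {
    public static void main(String[] args) {
        Set<String> oldPlans = ImmutableSet.of("worker-1", "worker-2", "worker-3");
        Set<String> newPlans = ImmutableSet.of("worker-2", "worker-3", "worker-4");

        // immutableCopy() detaches the unchanged set from its backing sets;
        // the two difference views stay lazy, which is fine for one-shot rendering.
        Set<String> unchanged = Sets.intersection(oldPlans, newPlans).immutableCopy();
        Set<String> added = Sets.difference(newPlans, oldPlans);
        Set<String> removed = Sets.difference(oldPlans, newPlans);

        System.out.println("unchanged: " + unchanged); // [worker-2, worker-3]
        System.out.println("added:     " + added);     // [worker-4]
        System.out.println("removed:   " + removed);   // [worker-1]
    }
}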
From source file:co.cask.cdap.etl.planner.ConnectorDag.java
/**
 * Insert connector nodes into the dag.
 *
 * A connector node is a boundary at which the pipeline can be split into sub dags.
 * It is treated as a sink within one subdag and as a source in another subdag.
 * A connector is inserted in front of a reduce node (aggregator plugin type, etc)
 * when there is a path from some source to one or more reduce nodes or sinks.
 * This is required because in a single mapper, we can't write to both a sink and do a reduce.
 * We also can't have 2 reducers in a single mapreduce job.
 * A connector is also inserted in front of any node if the inputs into the node come from multiple sources.
 * A connector is also inserted in front of a reduce node that has another reduce node as its input.
 *
 * After splitting, the result will be a collection of subdags, with each subdag representing a single
 * mapreduce job (or possibly map-only job). Or in spark, each subdag would be a series of operations from
 * one rdd to another rdd.
 *
 * @return the nodes that had connectors inserted in front of them
 */
public Set<String> insertConnectors() {
    // none of this is particularly efficient, but this should never be a bottleneck
    // unless we're dealing with very very large dags
    Set<String> addedAlready = new HashSet<>();

    /*
        Isolate the specified node by inserting a connector in front of and behind the node.
        If all inputs into the node are sources, a connector will not be inserted in front.
        If all outputs from the node are sinks, a connector will not be inserted after.
        Other connectors count as both a source and a sink.
     */
    for (String isolationNode : isolationNodes) {
        isolate(isolationNode, addedAlready);
    }

    /*
        Find sections of the dag where a source is writing to both a sink and a reduce node,
        or to multiple reduce nodes. A connector counts as both a source and a sink.

        For example, if a source is writing to both a sink and a reduce:

                     |---> sink1
            source --|
                     |---> reduce ---> sink2

        we need to split this up into:

                     |---> sink1
            source --|                     =>    connector ---> reduce ---> sink2
                     |---> connector

        The same logic applies if a source is writing to multiple reduce nodes. So if we run into this
        scenario, we will add a connector in front of all reduce nodes accessible from the source.
        When trying to find a path from a source to multiple reduce nodes, we also need to stop searching
        once we see a reduce node or a connector. Otherwise, every single reduce node would end up with
        a connector in front of it.
     */
    for (String node : getTopologicalOrder()) {
        if (!sources.contains(node) && !connectors.contains(node)) {
            continue;
        }

        Set<String> accessibleByNode = accessibleFrom(node, Sets.union(connectors, reduceNodes));
        Set<String> sinksAndReduceNodes = Sets.intersection(accessibleByNode,
                Sets.union(connectors, Sets.union(sinks, reduceNodes)));
        // don't count this node
        sinksAndReduceNodes = Sets.difference(sinksAndReduceNodes, ImmutableSet.of(node));

        if (sinksAndReduceNodes.size() > 1) {
            for (String reduceNodeConnector : Sets.intersection(sinksAndReduceNodes, reduceNodes)) {
                addConnectorInFrontOf(reduceNodeConnector, addedAlready);
            }
        }
    }

    /*
        Find nodes that have input from multiple sources and add them to the connectors set.
        We can probably remove this part once we support multiple sources. Even though we don't
        support multiple sources today, the fact that we support forks means we have to deal with
        the multi-input case and break it down into separate phases. For example:

                 |---> reduce1 ---|
            n1 --|                |---> n2
                 |---> reduce2 ---|

        From the previous section, both reduces will get a connector inserted in front:

                 |---> reduce1.connector          reduce1.connector ---> reduce1 ---|
            n1 --|                          =>                                      |---> n2
                 |---> reduce2.connector          reduce2.connector ---> reduce2 ---|

        Since we don't support multi-input yet, we need to convert that further into 3 phases:

            reduce1.connector ---> reduce1 ---> n2.connector
                                                                 =>    n2.connector ---> n2
            reduce2.connector ---> reduce2 ---> n2.connector

        To find these nodes, we traverse the graph in order and keep track of sources that have a path
        to each node with a map of node -> [ sources that have a path to the node ]. If we find that a
        node is accessible by more than one source, we insert a connector in front of it and reset all
        sources for that node to its connector.
     */
    SetMultimap<String, String> nodeSources = HashMultimap.create();
    for (String source : sources) {
        nodeSources.put(source, source);
    }
    for (String node : getTopologicalOrder()) {
        Set<String> connectedSources = nodeSources.get(node);
        /*
            If this node is a connector, replace all sources for this node with itself,
            since a connector is a source. Taking the example above, we end up with:

                reduce1.connector ---> reduce1 ---|
                                                  |---> n2
                reduce2.connector ---> reduce2 ---|

            When we get to n2, we need it to see that it has 2 sources: reduce1.connector and
            reduce2.connector. So when we get to reduce1.connector, we need to replace its source (n1)
            with itself. Similarly, when we get to reduce2.connector, we need to replace its source (n1)
            with itself. If we didn't, when we got to n2, it would think its only source is n1, and we
            would miss the connector that should be inserted in front of it.
         */
        if (connectors.contains(node)) {
            connectedSources = new HashSet<>();
            connectedSources.add(node);
            nodeSources.replaceValues(node, connectedSources);
        }
        // if more than one source is connected to this node, then we need to insert a connector in
        // front of this node. Its source should then be changed to the connector that was inserted
        // in front of it.
        if (connectedSources.size() > 1) {
            String connectorNode = addConnectorInFrontOf(node, addedAlready);
            connectedSources = new HashSet<>();
            connectedSources.add(connectorNode);
            nodeSources.replaceValues(node, connectedSources);
        }
        for (String nodeOutput : getNodeOutputs(node)) {
            // propagate the source connected to me to all my outputs
            nodeSources.putAll(nodeOutput, connectedSources);
        }
    }

    /*
        Find reduce nodes that are accessible from other reduce nodes. For example:

            source ---> reduce1 ---> reduce2 ---> sink

        Needs to be broken down into:

            source ---> reduce1 ---> reduce2.connector    =>    reduce2.connector ---> reduce2 ---> sink
     */
    for (String reduceNode : reduceNodes) {
        Set<String> accessibleByNode = accessibleFrom(reduceNode, Sets.union(connectors, reduceNodes));
        Set<String> accessibleReduceNodes = Sets.intersection(accessibleByNode, reduceNodes);

        // Sets.difference because we don't want to add ourselves
        accessibleReduceNodes = Sets.difference(accessibleReduceNodes, ImmutableSet.of(reduceNode));
        for (String accessibleReduceNode : accessibleReduceNodes) {
            addConnectorInFrontOf(accessibleReduceNode, addedAlready);
        }
    }
    return addedAlready;
}
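Among the uses above, Sets.difference(someSet, ImmutableSet.of(node)) deserves a note: it excludes a single element as a view, without copying the set or mutating it (the backing set may well be immutable). A minimal sketch:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.Set;

public class ExcludeSelf {
    public static void main(String[] args) {
        Set<String> reachable = ImmutableSet.of("reduce1", "reduce2", "sink1");

        // View of 'reachable' without "reduce1"; no copy is made and the
        // original immutable set is left untouched.
        Set<String> others = Sets.difference(reachable, ImmutableSet.of("reduce1"));
        System.out.println(others); // [reduce2, sink1]
    }
}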
From source file:org.spf4j.ds.Traversals.java
public static <V, E> void customTraverse(final Graph<V, E> graph, final V startNode,
        final TraversalCallback<V, E> handler) {
    Set<V> traversedNodes = new HashSet<V>();
    Queue<VertexHolder<V>> traversalQueue = new PriorityQueue<VertexHolder<V>>(16);
    int counter = 0;
    traversalQueue.add(new VertexHolder<V>(startNode, counter++, 0));
    boolean done = false;
    do {
        boolean first = true;
        while (!traversalQueue.isEmpty()) {
            V node = traversalQueue.remove().getVertex();
            VertexEdges<V, E> edges = graph.getEdges(node);
            if (traversedNodes.contains(node)) {
                continue;
            }
            Map<E, V> incomming = edges.getIncomming();
            boolean hasIncomingBeenTraversed = true;
            for (V val : incomming.values()) {
                if (!traversedNodes.contains(val)) {
                    hasIncomingBeenTraversed = false;
                    break;
                }
            }
            if (!first && !hasIncomingBeenTraversed) {
                continue;
            }
            handler.handle(node, incomming);
            traversedNodes.add(node);
            first = false;
            Map<E, V> outgoing = edges.getOutgoing();
            for (V next : outgoing.values()) {
                traversalQueue.add(new VertexHolder<V>(next, counter++,
                        graph.getEdges(next).getIncomming().size()));
            }
        }
        Set<V> leftNodes = Sets.difference(graph.getVertices(), traversedNodes);
        if (leftNodes.isEmpty()) {
            done = true;
        } else {
            boolean added = false;
            for (V node : leftNodes) {
                Collection<V> incomingNodes = graph.getEdges(node).getIncomming().values();
                for (V incoming : incomingNodes) {
                    if (traversedNodes.contains(incoming)) {
                        traversalQueue.add(new VertexHolder<V>(node, counter++, 0));
                        added = true;
                        break;
                    }
                }
                if (added) {
                    break;
                }
            }
        }
    } while (!done);
}
From source file:com.alibaba.jstorm.schedule.FollowerRunnable.java
private void setupBlobstore() throws Exception {
    BlobStore blobStore = data.getBlobStore();
    StormClusterState clusterState = data.getStormClusterState();
    Set<String> localSetOfKeys = Sets.newHashSet(blobStore.listKeys());
    Set<String> allKeys = Sets.newHashSet(clusterState.active_keys());
    Set<String> localAvailableActiveKeys = Sets.intersection(localSetOfKeys, allKeys);
    // keys that exist locally but not on zookeeper will be deleted
    Set<String> keysToDelete = Sets.difference(localSetOfKeys, allKeys);
    LOG.debug("deleting keys not on zookeeper {}", keysToDelete);
    for (String key : keysToDelete) {
        blobStore.deleteBlob(key);
    }
    LOG.debug("Creating list of key entries for blobstore inside zookeeper {} local {}", allKeys,
            localAvailableActiveKeys);
    for (String key : localAvailableActiveKeys) {
        int versionForKey = BlobStoreUtils.getVersionForKey(key, data.getNimbusHostPortInfo(),
                data.getConf());
        clusterState.setup_blobstore(key, data.getNimbusHostPortInfo(), versionForKey);
    }
}
From source file:no.ssb.vtl.script.functions.AbstractVTLFunction.java
/**
 * Checks if the named arguments are of the correct types.
 */
private void validateArguments(Map<String, VTLObject> arguments) {
    if (arguments.isEmpty()) {
        return; // No need to check if empty.
    }

    // Named arguments that are not in the signature.
    Sets.SetView<String> unknown = Sets.difference(arguments.keySet(), signature.keySet());
    checkArgument(unknown.isEmpty(), UNKNOWN_ARGUMENTS, unknown);

    // Filter out the optional arguments.
    Set<String> requiredArgumentNames = signature.values().stream()
            .filter(obj -> !OptionalArgument.class.isInstance(obj))
            .map(Argument::getName)
            .collect(Collectors.toSet());

    // Non-optional named arguments that are missing.
    Sets.SetView<String> missing = Sets.difference(requiredArgumentNames, arguments.keySet());
    checkArgument(missing.isEmpty(), MISSING_ARGUMENTS, missing);
}
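Declaring the results as Sets.SetView rather than plain Set keeps view-specific methods such as immutableCopy() and copyInto() available to callers. A compact sketch of the same two-way check, simplified to a single required-name set and using Guava's Preconditions.checkArgument with inline message templates (the constants UNKNOWN_ARGUMENTS and MISSING_ARGUMENTS above are project-specific):

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.Map;
import java.util.Set;

import static com.google.common.base.Preconditions.checkArgument;

public class SignatureCheck {
    static void validate(Map<String, Object> arguments, Set<String> required) {
        // Arguments supplied but not declared.
        Sets.SetView<String> unknown = Sets.difference(arguments.keySet(), required);
        checkArgument(unknown.isEmpty(), "unknown arguments: %s", unknown);

        // Declared arguments not supplied.
        Sets.SetView<String> missing = Sets.difference(required, arguments.keySet());
        checkArgument(missing.isEmpty(), "missing arguments: %s", missing);
    }

    public static void main(String[] args) {
        validate(ImmutableMap.of("x", 1), ImmutableSet.of("x")); // passes
        validate(ImmutableMap.of("y", 1), ImmutableSet.of("x")); // throws on unknown argument "y"
    }
}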