List of usage examples for com.google.common.collect.Sets.symmetricDifference
public static <E> SetView<E> symmetricDifference(final Set<? extends E> set1, final Set<? extends E> set2)
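Before the project examples below, here is a minimal self-contained sketch of the method itself (the class name, variable names, and sample host strings are illustrative only, not taken from any of the projects): symmetricDifference returns an unmodifiable, live SetView containing the elements that appear in exactly one of the two input sets, which is why it is a convenient "did anything change?" check.

    import com.google.common.collect.ImmutableSet;
    import com.google.common.collect.Sets;
    import java.util.Set;

    public class SymmetricDifferenceDemo {
        public static void main(String[] args) {
            Set<String> oldNodes = ImmutableSet.of("a:9160", "b:9160");
            Set<String> newNodes = ImmutableSet.of("b:9160", "c:9160");

            // Elements present in exactly one of the two sets: [a:9160, c:9160]
            Sets.SetView<String> changed = Sets.symmetricDifference(oldNodes, newNodes);

            if (!changed.isEmpty()) {
                // The view is live and unmodifiable; copy it if a snapshot is needed
                Set<String> snapshot = changed.immutableCopy();
                System.out.println("cluster membership changed: " + snapshot);
            }
        }
    }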
From source file:org.apache.brooklyn.entity.nosql.cassandra.CassandraDatacenterImpl.java
@Override
public void update() {
    synchronized (mutex) {
        // Update our seeds, as necessary
        seedTracker.refreshSeeds();

        // Choose the first available cluster member to set host and port (and compute one-up)
        Optional<Entity> upNode = Iterables.tryFind(getMembers(),
                EntityPredicates.attributeEqualTo(SERVICE_UP, Boolean.TRUE));

        if (upNode.isPresent()) {
            sensors().set(HOSTNAME, upNode.get().getAttribute(Attributes.HOSTNAME));
            sensors().set(THRIFT_PORT, upNode.get().getAttribute(CassandraNode.THRIFT_PORT));

            List<String> currentNodes = getAttribute(CASSANDRA_CLUSTER_NODES);
            Set<String> oldNodes = (currentNodes != null) ? ImmutableSet.copyOf(currentNodes)
                    : ImmutableSet.<String>of();
            Set<String> newNodes = MutableSet.<String>of();
            for (Entity member : getMembers()) {
                if (member instanceof CassandraNode && Boolean.TRUE.equals(member.getAttribute(SERVICE_UP))) {
                    String hostname = member.getAttribute(Attributes.HOSTNAME);
                    Integer thriftPort = member.getAttribute(CassandraNode.THRIFT_PORT);
                    if (hostname != null && thriftPort != null) {
                        newNodes.add(HostAndPort.fromParts(hostname, thriftPort).toString());
                    }
                }
            }
            if (Sets.symmetricDifference(oldNodes, newNodes).size() > 0) {
                sensors().set(CASSANDRA_CLUSTER_NODES, MutableList.copyOf(newNodes));
            }
        } else {
            sensors().set(HOSTNAME, null);
            sensors().set(THRIFT_PORT, null);
            sensors().set(CASSANDRA_CLUSTER_NODES, Collections.<String>emptyList());
        }
        ServiceNotUpLogic.updateNotUpIndicatorRequiringNonEmptyList(this, CASSANDRA_CLUSTER_NODES);
    }
}
From source file:com.streamreduce.storm.bolts.AbstractMetricsBolt.java
/**
 * Returns a map containing hashtag changes and their respective values.
 *
 *   * Map key: The hashtag added/removed
 *   * Map value: 1.0 for added hashtags and -1.0 for deleted hashtags
 *
 * @param eventId the event's id
 * @param targetId the event's target id
 * @param metadata the event's metadata
 * @param hashtagsKey the metadata key containing the event's hashtags
 *
 * @return the map
 */
protected Map<String, Float> getHashtagChanges(EventId eventId, String targetId, Map<String, Object> metadata,
        String hashtagsKey) {
    Float eventValue = getEventValue(eventId);
    Map<String, Float> hashtagChanges = new TreeMap<>();
    Set<String> hashtags = metadata.get(hashtagsKey) != null ? (Set<String>) metadata.get(hashtagsKey)
            : Collections.EMPTY_SET;

    if (!Float.isNaN(eventValue)) {
        if (Math.abs(eventValue) == 1.0f) {
            // If the event value is 1.0 or -1.0 (CREATE/DELETE), just process as all added/deleted
            for (String hashtag : hashtags) {
                hashtagChanges.put(hashtag, eventValue);
            }
        } else if (eventValue == 0.0f) {
            // If the event value is 0.0 (UPDATE), figure out the added/deleted hashtags and process as such
            Integer targetVersion = metadata.get("targetVersion") != null
                    ? (Integer) metadata.get("targetVersion")
                    : 0;
            Map<String, Object> previousEvent = MESSAGE_DB_MONGO_CLIENT.getEventForTargetAndVersion(targetId,
                    --targetVersion);
            Map<String, Object> previousMetadata = previousEvent != null && previousEvent.get("metadata") != null
                    ? (Map<String, Object>) previousEvent.get("metadata")
                    : Collections.<String, Object>emptyMap();
            Set<String> previousHashtags = previousMetadata.get(hashtagsKey) != null
                    ? (Set<String>) previousMetadata.get(hashtagsKey)
                    : Collections.EMPTY_SET;
            Set<String> differences = Sets.symmetricDifference(hashtags, previousHashtags);

            for (String hashtag : differences) {
                if (hashtags.contains(hashtag)) {
                    // Added
                    hashtagChanges.put(hashtag, 1.0f);
                } else {
                    // Deleted
                    hashtagChanges.put(hashtag, -1.0f);
                }
            }
        }
    }

    return hashtagChanges;
}
From source file:com.cinchapi.concourse.server.ConcourseServer.java
/**
 * Revert {@code key} in {@code record} to its state {@code timestamp} using
 * the provided atomic {@code operation}.
 *
 * @param key
 * @param record
 * @param timestamp
 * @param operation
 * @throws AtomicStateException
 */
private static void revertAtomic(String key, long record, long timestamp, AtomicOperation operation)
        throws AtomicStateException {
    Set<TObject> past = operation.select(key, record, timestamp);
    Set<TObject> present = operation.select(key, record);
    Set<TObject> xor = Sets.symmetricDifference(past, present);
    for (TObject value : xor) {
        if (present.contains(value)) {
            operation.remove(key, value, record);
        } else {
            operation.add(key, value, record);
        }
    }
}
From source file:ome.services.graphs.GraphTraversal.java
/**
 * Traverse model object graph to determine steps for the proposed operation.
 * Assumes that the internal {@code planning} field is set up and mutates it accordingly.
 * @param session the Hibernate session to use for HQL queries
 * @throws GraphException if the model objects were not as expected
 */
private void planOperation(Session session) throws GraphException {
    /* track state to guarantee progress in reprocessing objects whose orphan status is relevant */
    Set<CI> optimisticReprocess = null;
    /* set of not-last objects after latest review */
    Set<CI> isNotLast = null;
    while (true) {
        /* process any pending objects */
        while (!(planning.toProcess.isEmpty() && planning.findIfLast.isEmpty())) {
            /* first process any cached objects that do not await orphan status determination */
            final Set<CI> toProcess = new HashSet<CI>(planning.toProcess);
            toProcess.retainAll(planning.cached);
            toProcess.removeAll(planning.findIfLast);
            if (!toProcess.isEmpty()) {
                if (optimisticReprocess != null
                        && !Sets.difference(planning.toProcess, optimisticReprocess).isEmpty()) {
                    /* processing something beyond optimistic suggestion, so circumstances have changed */
                    optimisticReprocess = null;
                }
                for (final CI nextObject : toProcess) {
                    reviewObject(nextObject, false);
                }
                continue;
            }
            /* if none of the above exist, then fill the cache */
            final Set<CI> toCache = new HashSet<CI>(planning.toProcess);
            toCache.removeAll(planning.cached);
            if (!toCache.isEmpty()) {
                optimisticReprocess = null;
                cache(session, toCache);
                continue;
            }
            /* try processing the findIfLast in case of any changes */
            if (!planning.toProcess.isEmpty()) {
                final Set<CI> previousToProcess = new HashSet<CI>(planning.toProcess);
                final Set<CI> previousFindIfLast = new HashSet<CI>(planning.findIfLast);
                for (final CI nextObject : previousToProcess) {
                    reviewObject(nextObject, false);
                }
                /* This condition is tricky. We do want to reprocess objects that are suggested for such, while
                 * avoiding an infinite loop that comes of such processing not resolving any orphan status. */
                if (!Sets.symmetricDifference(previousFindIfLast, planning.findIfLast).isEmpty()
                        || (optimisticReprocess == null
                                || !Sets.symmetricDifference(planning.toProcess, optimisticReprocess).isEmpty())
                                && !Sets.symmetricDifference(previousToProcess, planning.toProcess).isEmpty()) {
                    optimisticReprocess = new HashSet<CI>(planning.toProcess);
                    continue;
                }
            }
            /* if no other processing or caching is needed, then deem outstanding objects orphans */
            optimisticReprocess = null;
            for (final CI orphan : planning.findIfLast) {
                planning.foundIfLast.put(orphan, true);
                if (log.isDebugEnabled()) {
                    log.debug("marked " + orphan + " as " + Orphan.IS_LAST);
                }
            }
            planning.toProcess.addAll(planning.findIfLast);
            planning.findIfLast.clear();
        }
        /* determine which objects are now not last */
        final Set<CI> latestIsNotLast = new HashSet<CI>();
        for (final Entry<CI, Boolean> objectAndIsLast : planning.foundIfLast.entrySet()) {
            if (!objectAndIsLast.getValue()) {
                latestIsNotLast.add(objectAndIsLast.getKey());
            }
        }
        if (latestIsNotLast.isEmpty()
                || (isNotLast != null && Sets.difference(isNotLast, latestIsNotLast).isEmpty())) {
            /* no fewer not-last objects than before */
            break;
        }
        /* before completing processing, verify not-last status of objects */
        isNotLast = latestIsNotLast;
        planning.toProcess.addAll(isNotLast);
        planning.findIfLast.addAll(isNotLast);
        for (final CI object : isNotLast) {
            planning.foundIfLast.remove(object);
            if (log.isDebugEnabled()) {
                log.debug("marked " + object + " as " + Orphan.RELEVANT + " to verify " + Orphan.IS_NOT_LAST
                        + " status");
            }
        }
    }
}
From source file:com.opengamma.masterdb.batch.DbBatchWriter.java
public synchronized RiskRun startBatchInTransaction(ViewCycleMetadata cycleMetadata,
        Map<String, String> batchParameters, RunCreationMode runCreationMode, SnapshotMode snapshotMode) {
    s_logger.info("Starting batch ... {}", cycleMetadata);

    RiskRun run;
    switch (runCreationMode) {
    case AUTO:
        run = findRiskRunInDb(cycleMetadata.getValuationTime(), cycleMetadata.getVersionCorrection(),
                cycleMetadata.getViewDefinitionId(), cycleMetadata.getMarketDataSnapshotId());

        if (run != null) {
            // also check parameter equality
            Map<String, String> existingProperties = run.getPropertiesMap();

            if (!existingProperties.equals(batchParameters)) {
                Set<Map.Entry<String, String>> symmetricDiff = Sets
                        .symmetricDifference(existingProperties.entrySet(), batchParameters.entrySet());
                throw new IllegalStateException(
                        "Run parameters stored in DB differ from new parameters with respect to: "
                                + symmetricDiff);
            }
        }

        if (run == null) {
            run = createRiskRunInTransaction(cycleMetadata.getViewDefinitionId(),
                    cycleMetadata.getMarketDataSnapshotId(), cycleMetadata.getVersionCorrection(),
                    cycleMetadata.getValuationTime(), batchParameters, snapshotMode);
        } else {
            restartRunInTransaction(run);
        }
        break;

    case CREATE_NEW_OVERWRITE:
        run = findRiskRunInDb(cycleMetadata.getValuationTime(), cycleMetadata.getVersionCorrection(),
                cycleMetadata.getViewDefinitionId(), cycleMetadata.getMarketDataSnapshotId());
        if (run != null) {
            deleteRunInTransaction(run);
        }

        run = createRiskRunInTransaction(cycleMetadata.getViewDefinitionId(),
                cycleMetadata.getMarketDataSnapshotId(), cycleMetadata.getVersionCorrection(),
                cycleMetadata.getValuationTime(), batchParameters, snapshotMode);
        break;

    case CREATE_NEW:
        run = createRiskRunInTransaction(cycleMetadata.getViewDefinitionId(),
                cycleMetadata.getMarketDataSnapshotId(), cycleMetadata.getVersionCorrection(),
                cycleMetadata.getValuationTime(), batchParameters, snapshotMode);
        break;

    case REUSE_EXISTING:
        run = findRiskRunInDb(cycleMetadata.getValuationTime(), cycleMetadata.getVersionCorrection(),
                cycleMetadata.getViewDefinitionId(), cycleMetadata.getMarketDataSnapshotId());
        if (run == null) {
            throw new IllegalStateException("Cannot find run in database for " + cycleMetadata);
        }
        restartRunInTransaction(run);
        break;

    default:
        throw new RuntimeException("Unexpected run creation mode " + runCreationMode);
    }

    populateCalculationConfigurations(run.getId(), cycleMetadata);
    populateRiskValueRequirements(cycleMetadata);

    Collection<ComputationTargetSpecification> computationTargets = newArrayList();
    for (final String configName : cycleMetadata.getAllCalculationConfigurationNames()) {
        for (com.opengamma.engine.ComputationTargetSpecification computationTarget : cycleMetadata
                .getComputationTargets(configName)) {
            computationTargets.add(computationTarget);
        }
    }
    populateComputationTargets(computationTargets);

    _statusCacheByRunId.put(run.getId(), new ConcurrentHashMap<Pair<Long, Long>, StatusEntry>());
    _computeFailureCacheByRunId.put(run.getId(), new ConcurrentHashMap<ComputeFailureKey, ComputeFailure>());
    _riskRunsByIds.put(run.getId(), run);

    return run;
}
From source file:org.cinchapi.concourse.server.ConcourseServer.java
/**
 * Start an {@link AtomicOperation} with {@code store} as the destination
 * and do the work to revert {@code key} in {@code record} to
 * {@code timestamp}.
 *
 * @param key
 * @param record
 * @param timestamp
 * @param store
 * @return the AtomicOperation that must be committed
 */
private AtomicOperation doRevert(String key, long record, long timestamp, Compoundable store) {
    AtomicOperation operation = store.startAtomicOperation();
    try {
        Set<TObject> past = operation.fetch(key, record, timestamp);
        Set<TObject> present = operation.fetch(key, record);
        Set<TObject> xor = Sets.symmetricDifference(past, present);
        for (TObject value : xor) {
            if (present.contains(value)) {
                operation.remove(key, value, record);
            } else {
                operation.add(key, value, record);
            }
        }
        return operation;
    } catch (AtomicStateException e) {
        return null;
    }
}
From source file:com.cinchapi.concourse.server.ConcourseServer.java
@Override
@ThrowsThriftExceptions
public Map<Diff, Set<TObject>> diffKeyRecordStartEnd(String key, long record, long start, long end,
        AccessToken creds, TransactionToken transaction, String environment) throws TException {
    checkAccess(creds, transaction);
    AtomicSupport store = getStore(transaction, environment);
    AtomicOperation atomic = null;
    Set<TObject> startValues = null;
    Set<TObject> endValues = null;
    while (atomic == null || !atomic.commit()) {
        atomic = store.startAtomicOperation();
        try {
            startValues = store.select(key, record, start);
            endValues = store.select(key, record, end);
        } catch (AtomicStateException e) {
            atomic = null;
        }
    }
    Map<Diff, Set<TObject>> result = Maps.newHashMapWithExpectedSize(2);
    Set<TObject> xor = Sets.symmetricDifference(startValues, endValues);
    int expectedSize = xor.size() / 2;
    Set<TObject> added = Sets.newHashSetWithExpectedSize(expectedSize);
    Set<TObject> removed = Sets.newHashSetWithExpectedSize(expectedSize);
    for (TObject current : xor) {
        if (!startValues.contains(current)) {
            added.add(current);
        } else {
            removed.add(current);
        }
    }
    if (!added.isEmpty()) {
        result.put(Diff.ADDED, added);
    }
    if (!removed.isEmpty()) {
        result.put(Diff.REMOVED, removed);
    }
    return result;
}
From source file:com.opengamma.engine.value.ValueProperties.java
/**
 * Compares two sets.
 *
 * @param s1 the first set, may be null
 * @param s2 the second set, may be null
 * @return negative if the first is less, zero if equal, positive if greater
 */
protected static int compareSet(final Set<String> s1, final Set<String> s2) {
    if (s1 == null) {
        if (s2 == null) {
            return 0;
        } else {
            return -1;
        }
    } else if (s2 == null) {
        return 1;
    }
    if (s1.isEmpty()) {
        if (s2.isEmpty()) {
            return 0;
        } else {
            return 1;
        }
    } else if (s2.isEmpty()) {
        return -1;
    }
    if (s1.size() < s2.size()) {
        return -1;
    } else if (s1.size() > s2.size()) {
        return 1;
    }
    List<String> sorted = new ArrayList<String>(Sets.symmetricDifference(s1, s2));
    Collections.sort(sorted);
    for (String s : sorted) {
        if (s1.contains(s)) {
            return -1;
        } else {
            return 1;
        }
    }
    return 0;
}
From source file:com.cinchapi.concourse.server.ConcourseServer.java
@Override
@ThrowsThriftExceptions
public Map<TObject, Map<Diff, Set<Long>>> diffKeyStartEnd(String key, long start, long end, AccessToken creds,
        TransactionToken transaction, String environment) throws TException {
    checkAccess(creds, transaction);
    AtomicSupport store = getStore(transaction, environment);
    AtomicOperation atomic = null;
    Map<TObject, Set<Long>> startData = null;
    Map<TObject, Set<Long>> endData = null;
    while (atomic == null || !atomic.commit()) {
        atomic = store.startAtomicOperation();
        try {
            startData = store.browse(key, start);
            endData = store.browse(key, end);
        } catch (AtomicStateException e) {
            atomic = null;
        }
    }
    Set<TObject> startValues = startData.keySet();
    Set<TObject> endValues = endData.keySet();
    Set<TObject> xor = Sets.symmetricDifference(startValues, endValues);
    Set<TObject> intersection = startValues.size() < endValues.size()
            ? Sets.intersection(startValues, endValues)
            : Sets.intersection(endValues, startValues);
    Map<TObject, Map<Diff, Set<Long>>> result = TMaps
            .newLinkedHashMapWithCapacity(xor.size() + intersection.size());
    for (TObject value : xor) {
        Map<Diff, Set<Long>> entry = Maps.newHashMapWithExpectedSize(1);
        if (!startValues.contains(value)) {
            entry.put(Diff.ADDED, endData.get(value));
        } else {
            entry.put(Diff.REMOVED, endData.get(value));
        }
        result.put(value, entry);
    }
    for (TObject value : intersection) {
        Set<Long> startRecords = startData.get(value);
        Set<Long> endRecords = endData.get(value);
        Set<Long> xorRecords = Sets.symmetricDifference(startRecords, endRecords);
        if (!xorRecords.isEmpty()) {
            Set<Long> added = Sets.newHashSetWithExpectedSize(xorRecords.size());
            Set<Long> removed = Sets.newHashSetWithExpectedSize(xorRecords.size());
            for (Long record : xorRecords) {
                if (!startRecords.contains(record)) {
                    added.add(record);
                } else {
                    removed.add(record);
                }
            }
            Map<Diff, Set<Long>> entry = Maps.newHashMapWithExpectedSize(2);
            if (!added.isEmpty()) {
                entry.put(Diff.ADDED, added);
            }
            if (!removed.isEmpty()) {
                entry.put(Diff.REMOVED, removed);
            }
            result.put(value, entry);
        }
    }
    return result;
}
From source file:com.cinchapi.concourse.server.ConcourseServer.java
@Override
@ThrowsThriftExceptions
public Map<String, Map<Diff, Set<TObject>>> diffRecordStartEnd(long record, long start, long end,
        AccessToken creds, TransactionToken transaction, String environment) throws TException {
    checkAccess(creds, transaction);
    AtomicSupport store = getStore(transaction, environment);
    AtomicOperation atomic = null;
    Map<String, Set<TObject>> startData = null;
    Map<String, Set<TObject>> endData = null;
    while (atomic == null || !atomic.commit()) {
        atomic = store.startAtomicOperation();
        try {
            startData = store.select(record, start);
            endData = store.select(record, end);
        } catch (AtomicStateException e) {
            atomic = null;
        }
    }
    Set<String> startKeys = startData.keySet();
    Set<String> endKeys = endData.keySet();
    Set<String> xor = Sets.symmetricDifference(startKeys, endKeys);
    Set<String> intersection = Sets.intersection(startKeys, endKeys);
    Map<String, Map<Diff, Set<TObject>>> result = TMaps
            .newLinkedHashMapWithCapacity(xor.size() + intersection.size());
    for (String key : xor) {
        Map<Diff, Set<TObject>> entry = Maps.newHashMapWithExpectedSize(1);
        if (!startKeys.contains(key)) {
            entry.put(Diff.ADDED, endData.get(key));
        } else {
            entry.put(Diff.REMOVED, endData.get(key));
        }
        result.put(key, entry);
    }
    for (String key : intersection) {
        Set<TObject> startValues = startData.get(key);
        Set<TObject> endValues = endData.get(key);
        Set<TObject> xorValues = Sets.symmetricDifference(startValues, endValues);
        if (!xorValues.isEmpty()) {
            Set<TObject> added = Sets.newHashSetWithExpectedSize(xorValues.size());
            Set<TObject> removed = Sets.newHashSetWithExpectedSize(xorValues.size());
            for (TObject value : xorValues) {
                if (!startValues.contains(value)) {
                    added.add(value);
                } else {
                    removed.add(value);
                }
            }
            Map<Diff, Set<TObject>> entry = Maps.newHashMapWithExpectedSize(2);
            if (!added.isEmpty()) {
                entry.put(Diff.ADDED, added);
            }
            if (!removed.isEmpty()) {
                entry.put(Diff.REMOVED, removed);
            }
            result.put(key, entry);
        }
    }
    return result;
}