List of usage examples for com.google.common.collect Sets union
public static <E> SetView<E> union(final Set<? extends E> set1, final Set<? extends E> set2)
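Before the real-world usages below, a minimal standalone sketch of the method's basic semantics (class and variable names here are illustrative, not taken from any of the source files that follow). Sets.union returns an unmodifiable, lazily evaluated SetView backed by both input sets, so later changes to either set show through the view; call immutableCopy() when a stable snapshot is needed.

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.collect.Sets.SetView;
import java.util.HashSet;
import java.util.Set;

public class SetsUnionDemo {
    public static void main(String[] args) {
        Set<Integer> evens = new HashSet<>(Set.of(2, 4, 6));
        Set<Integer> primes = new HashSet<>(Set.of(2, 3, 5));

        // union() does not copy anything: it returns an unmodifiable,
        // lazily evaluated view backed by both input sets.
        SetView<Integer> union = Sets.union(evens, primes);
        System.out.println(union.size()); // 5 (the duplicate 2 is counted once)

        // The view reflects later changes to the backing sets...
        evens.add(8);
        System.out.println(union.contains(8)); // true

        // ...so take an immutable snapshot when stable contents are needed.
        ImmutableSet<Integer> snapshot = union.immutableCopy();
        System.out.println(snapshot.size()); // 6
    }
}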
From source file:com.google.javascript.jscomp.ProcessDefines.java
private void overrideDefines(Map<String, DefineInfo> allDefines) {
    boolean changed = false;
    for (Map.Entry<String, DefineInfo> def : allDefines.entrySet()) {
        String defineName = def.getKey();
        DefineInfo info = def.getValue();
        Node inputValue = dominantReplacements.get(defineName);
        Node finalValue = inputValue != null ? inputValue : info.getLastValue();
        if (finalValue != info.initialValue) {
            info.initialValueParent.replaceChild(info.initialValue, finalValue.cloneTree());
            compiler.addToDebugLog("Overriding @define variable " + defineName);
            changed = changed
                || finalValue.getType() != info.initialValue.getType()
                || !finalValue.isEquivalentTo(info.initialValue);
        }
    }

    if (changed) {
        compiler.reportCodeChange();
    }

    Set<String> unusedReplacements = Sets.difference(dominantReplacements.keySet(),
        Sets.union(KNOWN_DEFINES, allDefines.keySet()));
    for (String unknownDefine : unusedReplacements) {
        compiler.report(JSError.make(UNKNOWN_DEFINE_WARNING, unknownDefine));
    }
}
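The example above composes Sets.difference over Sets.union to flag override keys that match neither the known nor the declared defines. A simplified sketch of the same validation pattern (all names here are hypothetical, not from the ProcessDefines source):

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;

public class UnknownKeyCheck {
    public static void main(String[] args) {
        Set<String> knownDefines = ImmutableSet.of("DEBUG", "VERSION");
        Set<String> declaredDefines = ImmutableSet.of("FEATURE_X");
        Set<String> overrides = ImmutableSet.of("FEATURE_X", "TYPO_KEY");

        // Both views compose without copying: an override is "unknown" if it
        // names neither a known define nor a declared one.
        Set<String> unknown = Sets.difference(overrides,
            Sets.union(knownDefines, declaredDefines));
        unknown.forEach(k -> System.out.println("unknown define: " + k)); // TYPO_KEY
    }
}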
From source file:com.google.devtools.moe.client.codebase.CodebaseMerger.java
/**
 * For each file in the union of the modified and destination codebases, run
 * generateMergedFile(...) and then report() the results.
 *
 * @return the merged Codebase
 */
public Codebase merge() {
    Set<String> filesToMerge = Sets.union(destinationCodebase.getRelativeFilenames(),
        modifiedCodebase.getRelativeFilenames());
    for (String filename : filesToMerge) {
        this.generateMergedFile(filename);
    }
    this.report();
    return mergedCodebase;
}
From source file:org.apache.druid.firehose.rocketmq.RocketMQFirehoseFactory.java
@Override
public Firehose connect(InputRowParser<ByteBuffer> byteBufferInputRowParser, File temporaryDirectory)
        throws IOException, ParseException {
    Set<String> newDimExclus = Sets.union(
        byteBufferInputRowParser.getParseSpec().getDimensionsSpec().getDimensionExclusions(),
        Sets.newHashSet("feed"));
    final InputRowParser<ByteBuffer> theParser = byteBufferInputRowParser
        .withParseSpec(byteBufferInputRowParser.getParseSpec().withDimensionsSpec(byteBufferInputRowParser
            .getParseSpec().getDimensionsSpec().withDimensionExclusions(newDimExclus)));

    /**
     * Topic-Queue mapping.
     */
    final ConcurrentHashMap<String, Set<MessageQueue>> topicQueueMap;

    /**
     * Default Pull-style client for RocketMQ.
     */
    final DefaultMQPullConsumer defaultMQPullConsumer;
    final DruidPullMessageService pullMessageService;

    messageQueueTreeSetMap.clear();
    windows.clear();

    try {
        defaultMQPullConsumer = new DefaultMQPullConsumer(this.consumerGroup);
        defaultMQPullConsumer.setMessageModel(MessageModel.CLUSTERING);
        topicQueueMap = new ConcurrentHashMap<>();

        pullMessageService = new DruidPullMessageService(defaultMQPullConsumer);
        for (String topic : feed) {
            Validators.checkTopic(topic);
            topicQueueMap.put(topic, defaultMQPullConsumer.fetchSubscribeMessageQueues(topic));
        }
        DruidMessageQueueListener druidMessageQueueListener = new DruidMessageQueueListener(
            Sets.newHashSet(feed), topicQueueMap, defaultMQPullConsumer);
        defaultMQPullConsumer.setMessageQueueListener(druidMessageQueueListener);
        defaultMQPullConsumer.start();
        pullMessageService.start();
    } catch (MQClientException e) {
        LOGGER.error(e, "Failed to start DefaultMQPullConsumer");
        throw new IOException("Failed to start RocketMQ client", e);
    }

    return new Firehose() {
        private Iterator<InputRow> nextIterator = Collections.emptyIterator();

        @Override
        public boolean hasMore() {
            if (nextIterator.hasNext()) {
                return true;
            }
            boolean hasMore = false;
            DruidPullRequest earliestPullRequest = null;
            for (Map.Entry<String, Set<MessageQueue>> entry : topicQueueMap.entrySet()) {
                for (MessageQueue messageQueue : entry.getValue()) {
                    ConcurrentSkipListSet<MessageExt> messages = messageQueueTreeSetMap.get(messageQueue);
                    if (messages != null && !messages.isEmpty()) {
                        hasMore = true;
                    } else {
                        try {
                            long offset = defaultMQPullConsumer.fetchConsumeOffset(messageQueue, false);
                            int batchSize = (null == pullBatchSize || pullBatchSize.isEmpty())
                                ? DEFAULT_PULL_BATCH_SIZE : Integer.parseInt(pullBatchSize);
                            DruidPullRequest newPullRequest = new DruidPullRequest(messageQueue, null,
                                offset, batchSize, !hasMessagesPending());

                            // notify pull message service to pull messages from brokers.
                            pullMessageService.putRequest(newPullRequest);

                            // set the earliest pull in case we need to block.
                            if (null == earliestPullRequest) {
                                earliestPullRequest = newPullRequest;
                            }
                        } catch (MQClientException e) {
                            LOGGER.error("Failed to fetch consume offset for queue: %s", entry.getKey());
                        }
                    }
                }
            }

            // Block only when there are no locally pending messages.
            if (!hasMore && null != earliestPullRequest) {
                try {
                    earliestPullRequest.getCountDownLatch().await();
                    hasMore = true;
                } catch (InterruptedException e) {
                    LOGGER.error(e, "CountDownLatch await got interrupted");
                }
            }
            return hasMore;
        }

        @Nullable
        @Override
        public InputRow nextRow() {
            if (nextIterator.hasNext()) {
                return nextIterator.next();
            }
            for (Map.Entry<MessageQueue, ConcurrentSkipListSet<MessageExt>> entry
                    : messageQueueTreeSetMap.entrySet()) {
                if (!entry.getValue().isEmpty()) {
                    MessageExt message = entry.getValue().pollFirst();
                    nextIterator = theParser.parseBatch(ByteBuffer.wrap(message.getBody())).iterator();
                    windows.computeIfAbsent(entry.getKey(), k -> new ConcurrentSkipListSet<>())
                        .add(message.getQueueOffset());
                    return nextIterator.next();
                }
            }
            // should never happen.
            throw new RuntimeException("Unexpected Fatal Error! There should have been one row available.");
        }

        @Override
        public Runnable commit() {
            return new Runnable() {
                @Override
                public void run() {
                    OffsetStore offsetStore = defaultMQPullConsumer.getOffsetStore();
                    Set<MessageQueue> updated = new HashSet<>();
                    // calculate offsets according to consuming windows.
                    for (Map.Entry<MessageQueue, ConcurrentSkipListSet<Long>> entry : windows.entrySet()) {
                        while (!entry.getValue().isEmpty()) {
                            long offset = offsetStore.readOffset(entry.getKey(),
                                ReadOffsetType.MEMORY_FIRST_THEN_STORE);
                            if (offset + 1 > entry.getValue().first()) {
                                entry.getValue().pollFirst();
                            } else if (offset + 1 == entry.getValue().first()) {
                                entry.getValue().pollFirst();
                                offsetStore.updateOffset(entry.getKey(), offset + 1, true);
                                updated.add(entry.getKey());
                            } else {
                                break;
                            }
                        }
                    }
                    offsetStore.persistAll(updated);
                }
            };
        }

        @Override
        public void close() {
            defaultMQPullConsumer.shutdown();
            pullMessageService.shutdown(false);
        }
    };
}
From source file:org.sosy_lab.cpachecker.cpa.livevar.DeclarationCollectingVisitor.java
@Override
public Set<ASimpleDeclaration> visit(ABinaryExpression exp) throws RuntimeException {
    return Sets.union(accept0(exp.getOperand1()), accept0(exp.getOperand2()));
}
From source file:com.googlecode.blaisemath.style.AttributeSet.java
/**
 * Get this set's attributes, and all parent attributes.
 * @return attribute keys
 */
public Set<String> getAllAttributes() {
    if (parent.isPresent()) {
        return Sets.union(attributeMap.keySet(), parent.get().getAllAttributes());
    } else {
        return getAttributes();
    }
}
From source file:org.sosy_lab.cpachecker.cfa.ast.c.CIdExpressionCollectingVisitor.java
@Override
public Set<CIdExpression> visit(CArrayRangeDesignator pArrayRangeDesignator) throws RuntimeException {
    return Sets.union(pArrayRangeDesignator.getFloorExpression().accept(this),
        pArrayRangeDesignator.getCeilExpression().accept(this));
}
From source file:org.jclouds.digitalocean2.compute.strategy.CreateKeyPairsThenCreateNodes.java
@Override
public Map<?, ListenableFuture<Void>> execute(String group, int count, Template template,
        Set<NodeMetadata> goodNodes, Map<NodeMetadata, Exception> badNodes,
        Multimap<NodeMetadata, CustomizationResponse> customizationResponses) {
    DigitalOcean2TemplateOptions options = template.getOptions().as(DigitalOcean2TemplateOptions.class);
    Set<Integer> generatedSshKeyIds = Sets.newHashSet();

    // If no key has been configured and the auto-create option is set, then generate a key pair
    if (options.getSshKeyIds().isEmpty() && options.getAutoCreateKeyPair()
            && Strings.isNullOrEmpty(options.getPublicKey())) {
        generateKeyPairAndAddKeyToSet(options, generatedSshKeyIds, group);
    }

    // If there is a script to run in the node, make sure a private key has
    // been configured so jclouds will be able to access the node
    if (options.getRunScript() != null && Strings.isNullOrEmpty(options.getLoginPrivateKey())) {
        logger.warn(">> A runScript has been configured but no SSH key has been provided."
            + " Authentication will delegate to the ssh-agent");
    }

    // If there is a key configured, then make sure there is a key pair for it
    if (!Strings.isNullOrEmpty(options.getPublicKey())) {
        createKeyPairForPublicKeyInOptionsAndAddToSet(options, generatedSshKeyIds);
    }

    // Set all keys (the provided and the auto-generated) in the options object so the
    // DigitalOceanComputeServiceAdapter adds them all
    options.sshKeyIds(Sets.union(generatedSshKeyIds, options.getSshKeyIds()));

    Map<?, ListenableFuture<Void>> responses = super.execute(group, count, template, goodNodes, badNodes,
        customizationResponses);

    // Key pairs in DigitalOcean are only required to create the Droplets. They aren't used anymore so it is
    // better to delete the auto-generated key pairs at this point where we know exactly which ones have been
    // auto-generated by jclouds.
    registerAutoGeneratedKeyPairCleanupCallbacks(responses, generatedSshKeyIds);

    return responses;
}
From source file:co.mitro.core.servlets.GetPendingGroupApprovals.java
@SuppressWarnings("deprecation") @Override/*from ww w.ja v a2 s. c o m*/ protected MitroRPC processCommand(MitroRequestContext context) throws IOException, SQLException, MitroServletException { GetPendingGroupApprovalsRequest in = gson.fromJson(context.jsonRequest, GetPendingGroupApprovalsRequest.class); RPC.GetPendingGroupApprovalsResponse out = new RPC.GetPendingGroupApprovalsResponse(); out.pendingAdditionsAndModifications = Lists.newArrayList(); out.pendingDeletions = Lists.newArrayList(); out.diffs = Collections.emptyMap(); out.newOrgMembers = Collections.emptyList(); out.deletedOrgMembers = Collections.emptyList(); AuthenticatedDB userDb = AuthenticatedDB.deprecatedNew(context.manager, context.requestor); Set<DBGroup> orgs = userDb.getOrganizations(); if (orgs.isEmpty()) { logger.warn("ignoring request from user who is not a member of any organizations"); return out; } else { assert orgs.size() == 1; } final DBGroup org = orgs.iterator().next(); assert (userDb.isOrganizationAdmin(org.getId())); List<DBPendingGroup> pendingDBGroups = context.manager.pendingGroupDao.queryForEq(DBPendingGroup.OWNING_ORG, org.getId()); Map<String, PendingGroupApproval> pendingGroups = Maps.newHashMap(); String nonce = null; String scope = null; for (DBPendingGroup databasePendingGroup : pendingDBGroups) { PendingGroupApproval pendingGroupOutput = new PendingGroupApproval(); if (nonce == null) { nonce = databasePendingGroup.getSyncNonce(); } else { assert (nonce.equals(databasePendingGroup.getSyncNonce())) : "only one nonce per sync data allowed " + nonce + " " + databasePendingGroup.getSyncNonce(); } // TODO: support multiple scopes per org by merging groups from multiple scopes. if (scope == null) { scope = databasePendingGroup.getScope(); } else { assert scope.equals(databasePendingGroup.getScope()) : "non-unique scope detected: " + scope + ", " + databasePendingGroup.getScope(); } databasePendingGroup.fillPendingGroup(pendingGroupOutput); pendingGroups.put(pendingGroupOutput.groupName, pendingGroupOutput); } out.syncNonce = nonce; Map<String, DBGroup> existingGroups = Maps.newHashMap(); Map<String, GroupDiff> diffs = Maps.newHashMap(); Map<String, MemberList> groupNameToMemberListMap = Maps.newHashMap(); AddPendingGroupServlet.calculatePendingGroupDiffs(context, pendingGroups.values(), org, existingGroups, diffs, groupNameToMemberListMap, scope); for (String groupName : Sets.union(pendingGroups.keySet(), diffs.keySet())) { if (pendingGroups.containsKey(groupName)) { if (!diffs.containsKey(groupName)) { continue; } final PendingGroupApproval groupApprovalOut = pendingGroups.get(groupName); final GroupDiff gd = diffs.get(groupName); assert (!gd.groupModification.equals(GroupModificationType.IS_DELETED)); if (gd.groupModification.equals(GroupModificationType.IS_NEW)) { groupApprovalOut.matchedGroup = null; } else { final DBGroup matchedGroup = existingGroups.get(groupName); groupApprovalOut.matchedGroup = new EditGroupRequest(); GetGroup.fillEditGroupRequest(context.manager, matchedGroup, groupApprovalOut.matchedGroup, in.deviceId); } out.pendingAdditionsAndModifications.add(groupApprovalOut); } else { assert (diffs.containsKey(groupName)); final GroupDiff gd = diffs.get(groupName); assert (gd.groupModification.equals(GroupModificationType.IS_DELETED)); // otherwise it should have been handled above. 
EditGroupRequest groupToDelete = new EditGroupRequest(); GetGroup.fillEditGroupRequest(context.manager, existingGroups.get(gd.groupName), groupToDelete, in.deviceId); out.pendingDeletions.add(groupToDelete); } } if (!diffs.isEmpty()) { // TODO: eventually pull this out of a special ALL group that ought to be synced. Set<String> allUserNames = Sets.newHashSet(); for (MemberList ml : groupNameToMemberListMap.values()) { allUserNames.addAll(ml.memberList); } // see which users we need to add to the org. Set<Integer> orgUserIds = MutateOrganization.getMemberIdsAndPrivateGroupIdsForOrg(context.manager, org) .keySet(); Set<String> existingOrgMembers = DBIdentity.getUserNamesFromIds(context.manager, orgUserIds); out.newOrgMembers = Lists.newArrayList(Sets.difference(allUserNames, existingOrgMembers)); // TODO: if none of the groups that are synced is an ALL group, // i.e. we're syncing a subset of the org, this will suggest // deleting most org members, which is not good. We need a better // solution for this. out.deletedOrgMembers = Lists.newArrayList(Sets.difference(existingOrgMembers, allUserNames)); } else { out.deletedOrgMembers = Collections.emptyList(); out.newOrgMembers = Collections.emptyList(); } out.diffs = diffs; out.orgId = org.getId(); assert (out.diffs.size() == (out.pendingAdditionsAndModifications.size() + out.pendingDeletions.size())); return out; }
From source file:com.google.caliper.core.BenchmarkClassModel.java
/**
 * Returns a multimap containing the full set of parameter values to use for the benchmark. For
 * parameters on the benchmark that have values in the given user-supplied parameters, the user's
 * specified values are used. For all other parameters, the default values specified in the
 * annotation or implied by the type are used.
 *
 * @throws IllegalArgumentException if a parameter for the benchmark has neither user-specified
 *     values nor default values
 */
public final ImmutableSetMultimap<String, String> fillInDefaultParameterValues(
        ImmutableSetMultimap<String, String> userParameters) {
    ImmutableSetMultimap.Builder<String, String> combined = ImmutableSetMultimap.builder();

    // For user parameters, this'll actually be the same as parameters().keySet(), since any extras
    // given at the command line are treated as errors; for VM parameters this is not the case.
    for (String name : Sets.union(parameters().keySet(), userParameters.keySet())) {
        ImmutableSet<String> values = userParameters.containsKey(name)
            ? userParameters.get(name)
            : parameters().get(name);
        combined.putAll(name, values);
        checkArgument(!values.isEmpty(), "ERROR: No default value provided for %s", name);
    }
    return combined.orderKeysBy(Ordering.natural()).build();
}
From source file:cpw.mods.fml.common.modloader.ModLoaderModContainer.java
private void configureMod(Class<? extends BaseModProxy> modClazz, ASMDataTable asmData) {
    File configDir = Loader.instance().getConfigDir();
    File modConfig = new File(configDir, String.format("%s.cfg", getModId()));
    Properties props = new Properties();

    boolean existingConfigFound = false;
    boolean mlPropFound = false;

    if (modConfig.exists()) {
        try {
            FMLLog.fine("Reading existing configuration file for %s : %s", getModId(),
                modConfig.getName());
            FileReader configReader = new FileReader(modConfig);
            props.load(configReader);
            configReader.close();
        } catch (Exception e) {
            FMLLog.log(Level.SEVERE, e, "Error occured reading mod configuration file %s",
                modConfig.getName());
            throw new LoaderException(e);
        }
        existingConfigFound = true;
    }

    StringBuffer comments = new StringBuffer();
    comments.append("MLProperties: name (type:default) min:max -- information\n");

    List<ModProperty> mlPropFields = Lists.newArrayList();
    try {
        for (ASMData dat : Sets.union(asmData.getAnnotationsFor(this).get("net.minecraft.src.MLProp"),
                asmData.getAnnotationsFor(this).get("MLProp"))) {
            if (dat.getClassName().equals(modClazzName)) {
                try {
                    mlPropFields.add(new ModProperty(modClazz.getDeclaredField(dat.getObjectName()),
                        dat.getAnnotationInfo()));
                    FMLLog.finest("Found an MLProp field %s in %s", dat.getObjectName(), getModId());
                } catch (Exception e) {
                    FMLLog.log(Level.WARNING, e, "An error occured trying to access field %s in mod %s",
                        dat.getObjectName(), getModId());
                }
            }
        }
        for (ModProperty property : mlPropFields) {
            if (!Modifier.isStatic(property.field().getModifiers())) {
                FMLLog.info("The MLProp field %s in mod %s appears not to be static",
                    property.field().getName(), getModId());
                continue;
            }
            FMLLog.finest("Considering MLProp field %s", property.field().getName());
            Field f = property.field();
            String propertyName = !Strings.nullToEmpty(property.name()).isEmpty()
                ? property.name() : f.getName();
            String propertyValue = null;
            Object defaultValue = null;

            try {
                defaultValue = f.get(null);
                propertyValue = props.getProperty(propertyName, extractValue(defaultValue));
                Object currentValue = parseValue(propertyValue, property, f.getType(), propertyName);
                FMLLog.finest(
                    "Configuration for %s.%s found values default: %s, configured: %s, interpreted: %s",
                    modClazzName, propertyName, defaultValue, propertyValue, currentValue);

                if (currentValue != null && !currentValue.equals(defaultValue)) {
                    FMLLog.finest("Configuration for %s.%s value set to: %s", modClazzName,
                        propertyName, currentValue);
                    f.set(null, currentValue);
                }
            } catch (Exception e) {
                FMLLog.log(Level.SEVERE, e, "Invalid configuration found for %s in %s", propertyName,
                    modConfig.getName());
                throw new LoaderException(e);
            } finally {
                comments.append(String.format("MLProp : %s (%s:%s", propertyName,
                    f.getType().getName(), defaultValue));
                if (property.min() != Double.MIN_VALUE) {
                    comments.append(",>=").append(String.format("%.1f", property.min()));
                }
                if (property.max() != Double.MAX_VALUE) {
                    comments.append(",<=").append(String.format("%.1f", property.max()));
                }
                comments.append(")");
                if (!Strings.nullToEmpty(property.info()).isEmpty()) {
                    comments.append(" -- ").append(property.info());
                }
                if (propertyValue != null) {
                    props.setProperty(propertyName, extractValue(propertyValue));
                }
                comments.append("\n");
            }
            mlPropFound = true;
        }
    } finally {
        if (!mlPropFound && !existingConfigFound) {
            FMLLog.fine("No MLProp configuration for %s found or required. No file written", getModId());
            return;
        }
        if (!mlPropFound && existingConfigFound) {
            File mlPropBackup = new File(modConfig.getParent(), modConfig.getName() + ".bak");
            FMLLog.fine(
                "MLProp configuration file for %s found but not required. Attempting to rename file to %s",
                getModId(), mlPropBackup.getName());
            boolean renamed = modConfig.renameTo(mlPropBackup);
            if (renamed) {
                FMLLog.fine("Unused MLProp configuration file for %s renamed successfully to %s",
                    getModId(), mlPropBackup.getName());
            } else {
                FMLLog.fine("Unused MLProp configuration file for %s renamed UNSUCCESSFULLY to %s",
                    getModId(), mlPropBackup.getName());
            }
            return;
        }
        try {
            FileWriter configWriter = new FileWriter(modConfig);
            props.store(configWriter, comments.toString());
            configWriter.close();
            FMLLog.fine("Configuration for %s written to %s", getModId(), modConfig.getName());
        } catch (IOException e) {
            FMLLog.log(Level.SEVERE, e, "Error trying to write the config file %s", modConfig.getName());
            throw new LoaderException(e);
        }
    }
}