List of usage examples for com.google.common.collect.Sets.union
public static <E> SetView<E> union(final Set<? extends E> set1, final Set<? extends E> set2)
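Before the project examples below, a minimal self-contained sketch of the call itself, assuming Guava is on the classpath (class and variable names here are illustrative). The returned SetView is an unmodifiable, live view over both input sets, not a copy; iteration yields set1's elements first, then set2's elements that are not in set1.

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;

public class SetsUnionBasics {
    public static void main(String[] args) {
        Set<Integer> odds = ImmutableSet.of(1, 3, 5);
        Set<Integer> primes = ImmutableSet.of(2, 3, 5);

        // A lazy, unmodifiable view; no elements are copied here.
        Sets.SetView<Integer> union = Sets.union(odds, primes);
        System.out.println(union);        // [1, 3, 5, 2]
        System.out.println(union.size()); // 4

        // Materialize a snapshot when an independent, stable set is needed.
        ImmutableSet<Integer> snapshot = union.immutableCopy();
        System.out.println(snapshot);     // [1, 3, 5, 2]
    }
}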
From source file:com.google.caja.ancillary.linter.Linter.java
/**
 * @param ac the node to check.
 * @param mq receives messages about violations of canRead and canSet.
 */
private static void lint(AncestorChain<?> ac, final Environment env, Set<String> provides,
        final Set<String> requires, final Set<String> overrides, MessageQueue mq) {
    ScopeAnalyzer sa = new ScopeAnalyzer() {
        @Override
        protected boolean introducesScope(AncestorChain<?> ac) {
            if (super.introducesScope(ac)) {
                return true;
            }
            return isLoopy(ac);
        }

        @Override
        protected void initScope(LexicalScope scope) {
            super.initScope(scope);
            if (scope.isFunctionScope()) {
                FunctionConstructor fc = scope.root.cast(FunctionConstructor.class).node;
                // Simulate JScript quirks around named functions
                if (fc.getIdentifierName() != null && scope.root.parent != null
                        && !(scope.root.parent.node instanceof FunctionDeclaration)) {
                    LexicalScope containing = scope.parent;
                    while (containing.parent != null
                            && (hoist(scope.root, containing) || isLoopy(containing.root))) {
                        containing = containing.parent;
                    }
                    containing.symbols.declare(fc.getIdentifierName(), scope.root);
                }
            } else if (scope.isGlobal()) {
                for (String symbolName : Sets.union(env.outers, Sets.union(requires, overrides))) {
                    if (scope.symbols.getSymbol(symbolName) == null) {
                        scope.symbols.declare(symbolName, scope.root);
                    }
                }
            }
        }

        boolean isLoopy(AncestorChain<?> ac) {
            ParseTreeNode node = ac.node;
            return node instanceof ForEachLoop || node instanceof Loop;
        }
    };
    List<LexicalScope> scopes = sa.computeLexicalScopes(ac);
    LexicalScope globalScope = scopes.get(0);
    VariableLiveness.LiveCalc lc = VariableLiveness.calculateLiveness(ac.node);
    NodeBuckets buckets = NodeBuckets.maker().with(ExpressionStmt.class).with(LabeledStatement.class)
            .with(StringLiteral.class).under(globalScope.root);
    checkDeclarations(scopes, overrides, mq);
    checkLabels(lc, buckets, mq);
    checkUses(scopes, lc.vars, provides, requires, overrides, mq);
    checkSideEffects(buckets, mq);
    checkDeadCode(buckets, mq);
    checkStringsEmbeddable((Block) ac.node, buckets, mq);
    checkBareWords(globalScope.root.node, mq);
}
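The call worth noting above is the nested Sets.union(env.outers, Sets.union(requires, overrides)): because each union is a lazy view, nesting composes a three-way union without materializing any intermediate set. A minimal sketch of that pattern, with illustrative names:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;

public class NestedUnion {
    public static void main(String[] args) {
        Set<String> outers = ImmutableSet.of("window", "document");
        Set<String> requires = ImmutableSet.of("jQuery");
        Set<String> overrides = ImmutableSet.of("JSON");

        // The inner union is itself a Set, so it can feed the outer union directly;
        // nothing is copied, and duplicates are suppressed across all three sets.
        for (String symbolName : Sets.union(outers, Sets.union(requires, overrides))) {
            System.out.println(symbolName);
        }
    }
}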
From source file:org.graylog2.security.realm.LdapUserAuthenticator.java
private void updateFromLdap(User user, LdapEntry userEntry, LdapSettings ldapSettings, String username) {
    final String displayNameAttribute = ldapSettings.getDisplayNameAttribute();
    final String fullName = firstNonNull(userEntry.get(displayNameAttribute), username);

    user.setName(username);
    user.setFullName(fullName);
    user.setExternal(true);

    if (user.getTimeZone() == null) {
        user.setTimeZone(rootTimeZone);
    }

    final String email = userEntry.getEmail();
    if (isNullOrEmpty(email)) {
        LOG.debug("No email address found for user {} in LDAP. Using {}@localhost", username, username);
        user.setEmail(username + "@localhost");
    } else {
        user.setEmail(email);
    }

    // TODO This is a crude hack until we have a proper way to distinguish LDAP users from normal users
    if (isNullOrEmpty(user.getHashedPassword())) {
        ((UserImpl) user).setHashedPassword("User synced from LDAP.");
    }

    // map ldap groups to user roles, if the mapping is present
    final Set<String> translatedRoleIds = Sets.newHashSet(Sets.union(
            Sets.newHashSet(ldapSettings.getDefaultGroupId()),
            ldapSettings.getAdditionalDefaultGroupIds()));
    if (!userEntry.getGroups().isEmpty()) {
        // ldap search returned groups, these always override the ones set on the user
        try {
            final Map<String, Role> roleNameToRole = roleService.loadAllLowercaseNameMap();
            for (String ldapGroupName : userEntry.getGroups()) {
                final String roleName = ldapSettings.getGroupMapping().get(ldapGroupName);
                if (roleName == null) {
                    LOG.debug("User {}: No group mapping for ldap group <{}>", username, ldapGroupName);
                    continue;
                }
                final Role role = roleNameToRole.get(roleName.toLowerCase(Locale.ENGLISH));
                if (role != null) {
                    LOG.debug("User {}: Mapping ldap group <{}> to role <{}>", username, ldapGroupName,
                            role.getName());
                    translatedRoleIds.add(role.getId());
                } else {
                    LOG.warn("User {}: No role found for ldap group <{}>", username, ldapGroupName);
                }
            }
        } catch (NotFoundException e) {
            LOG.error("Unable to load user roles", e);
        }
    } else if (ldapSettings.getGroupMapping().isEmpty() || ldapSettings.getGroupSearchBase().isEmpty()
            || ldapSettings.getGroupSearchPattern().isEmpty()
            || ldapSettings.getGroupIdAttribute().isEmpty()) {
        // no group mapping or configuration set, we'll leave the previously set groups alone on sync
        // when first creating the user these will be empty
        translatedRoleIds.addAll(user.getRoleIds());
    }
    user.setRoleIds(translatedRoleIds);
    // preserve the raw permissions (the ones without the synthetic self-edit permissions or the "*" admin one)
    user.setPermissions(user.getPermissions());
}
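Note the Sets.newHashSet(Sets.union(...)) idiom above: union views are unmodifiable, so the code copies the view into a mutable HashSet before later calling translatedRoleIds.add(...). A sketch of why the copy is needed (names and values illustrative):

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;

public class MutableCopyOfUnion {
    public static void main(String[] args) {
        Set<String> defaults = ImmutableSet.of("reader-role-id");
        Set<String> additional = ImmutableSet.of("dashboard-role-id");

        // Calling add(...) on the union view itself would throw UnsupportedOperationException;
        // Sets.newHashSet copies the view's elements into an ordinary mutable HashSet.
        Set<String> roleIds = Sets.newHashSet(Sets.union(defaults, additional));
        roleIds.add("mapped-role-id"); // fine: roleIds is independent and mutable
        System.out.println(roleIds);
    }
}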
From source file:com.opengamma.strata.measure.fra.FraMeasureCalculations.java
private CurrencyParameterSensitivities pv01SemiParallelGammaBucketed(ResolvedFraTrade trade,
        RatesMarketData marketData) {
    // find the curve identifiers and resolve to a single curve
    Currency currency = trade.getProduct().getCurrency();
    Set<IborIndex> indices = trade.getProduct().allIndices();
    ImmutableSet<MarketDataId<?>> discountIds = marketData.getLookup().getDiscountMarketDataIds(currency);
    ImmutableSet<MarketDataId<?>> forwardIds = indices.stream()
            .flatMap(idx -> marketData.getLookup().getForwardMarketDataIds(idx).stream())
            .collect(toImmutableSet());
    Set<MarketDataId<?>> allIds = Sets.union(discountIds, forwardIds);
    if (allIds.size() != 1) {
        throw new IllegalArgumentException(Messages.format(
                "Implementation only supports a single curve, but lookup refers to more than one: {}", allIds));
    }
    MarketDataId<?> singleId = allIds.iterator().next();
    if (!(singleId instanceof CurveId)) {
        throw new IllegalArgumentException(Messages.format(
                "Implementation only supports a single curve, but lookup does not refer to a curve: {} {}",
                singleId.getClass().getName(), singleId));
    }
    CurveId curveId = (CurveId) singleId;
    Curve curve = marketData.getMarketData().getValue(curveId);

    // calculate gamma
    CurrencyParameterSensitivity gamma = CurveGammaCalculator.DEFAULT.calculateSemiParallelGamma(curve,
            currency, c -> calculateCurveSensitivity(trade, marketData, curveId, c));
    return CurrencyParameterSensitivities.of(gamma).multipliedBy(ONE_BASIS_POINT * ONE_BASIS_POINT);
}
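Here the union is used only for a cardinality check: the method requires that the discounting and forward lookups resolve to exactly one combined identifier. A distilled sketch of validating "exactly one distinct element across two sets", with the domain types replaced by String for illustration:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;

public class SingleElementAcrossSets {
    static String requireSingle(Set<String> discountIds, Set<String> forwardIds) {
        // Duplicates across the two sets collapse in the union, so size() == 1
        // means both lookups refer to the same single identifier.
        Set<String> allIds = Sets.union(discountIds, forwardIds);
        if (allIds.size() != 1) {
            throw new IllegalArgumentException("Expected a single id, but found: " + allIds);
        }
        return allIds.iterator().next();
    }

    public static void main(String[] args) {
        System.out.println(requireSingle(ImmutableSet.of("curve-1"), ImmutableSet.of("curve-1")));
    }
}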
From source file:com.spotify.apollo.core.ServiceImpl.java
Set<ApolloModule> discoverAllModules() {
    final Set<ApolloModule> allModules;

    if (moduleDiscovery) {
        allModules = Sets.union(modules, ImmutableSet.copyOf(ServiceLoader.load(ApolloModule.class)));
    } else {
        allModules = modules;
    }

    return allModules;
}
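A sketch of the same discovery pattern in isolation: union a fixed set with whatever a ServiceLoader finds on the classpath, snapshotting the discoveries into an ImmutableSet first so the union has a stable right-hand side. The Plugin interface below is a hypothetical stand-in for ApolloModule.

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.ServiceLoader;
import java.util.Set;

public class ModuleDiscovery {
    // Hypothetical plugin interface standing in for ApolloModule.
    interface Plugin {}

    static Set<Plugin> discoverAll(Set<Plugin> configured, boolean discoveryEnabled) {
        if (discoveryEnabled) {
            // ServiceLoader is Iterable, so copyOf snapshots the discovered instances;
            // the union view then spans configured plus discovered plugins.
            return Sets.union(configured, ImmutableSet.copyOf(ServiceLoader.load(Plugin.class)));
        }
        return configured;
    }

    public static void main(String[] args) {
        System.out.println(discoverAll(ImmutableSet.of(), true)); // [] unless providers are registered
    }
}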
From source file:org.jboss.weld.introspector.jlr.WeldClassImpl.java
protected WeldClassImpl(Class<T> rawType, Type type, AnnotatedType<T> annotatedType,
        LazyValueHolder<Set<Type>> typeClosure, Map<Class<? extends Annotation>, Annotation> annotationMap,
        Map<Class<? extends Annotation>, Annotation> declaredAnnotationMap, ClassTransformer classTransformer) {
    super(annotationMap, declaredAnnotationMap, classTransformer, rawType, type, typeClosure);

    boolean modified;
    if (annotatedType instanceof DiscoveredExternalAnnotatedType) {
        discovered = true;
        modified = DiscoveredExternalAnnotatedType.class.cast(annotatedType).isModifed();
    } else if (annotatedType instanceof ExternalAnnotatedType) {
        discovered = false;
        modified = false;
    } else {
        discovered = true;
        modified = false;
    }

    if (modified) {
        this.superclass = classTransformer.loadClass(Object.class);
    } else if (rawType.getSuperclass() != null) {
        this.superclass = classTransformer.loadClass(rawType.getSuperclass());
    } else {
        this.superclass = null;
    }

    // Assign class field information
    this.declaredAnnotatedFields = ArrayListMultimap
            .<Class<? extends Annotation>, WeldField<?, ? super T>>create();
    this.declaredMetaAnnotatedFields = ArrayListMultimap.<Class<? extends Annotation>, WeldField<?, ?>>create();

    Set<WeldField<?, ?>> fieldsTemp = null;
    ArrayList<WeldField<?, ?>> declaredFieldsTemp = new ArrayList<WeldField<?, ?>>();
    if (annotatedType == null) {
        this.annotatedFields = null;
        if (rawType != Object.class) {
            for (Field field : SecureReflections.getDeclaredFields(rawType)) {
                WeldField<?, T> annotatedField = WeldFieldImpl.of(field,
                        this.<T>getDeclaringWeldClass(field, classTransformer), classTransformer);
                declaredFieldsTemp.add(annotatedField);
                for (Annotation annotation : annotatedField.getAnnotations()) {
                    this.declaredAnnotatedFields.put(annotation.annotationType(), annotatedField);
                    for (Annotation metaAnnotation : annotation.annotationType().getAnnotations()) {
                        this.declaredMetaAnnotatedFields.put(metaAnnotation.annotationType(), annotatedField);
                    }
                }
            }
            fieldsTemp = new ArraySet<WeldField<?, ?>>(declaredFieldsTemp).trimToSize();
            if ((superclass != null) && (superclass.getJavaClass() != Object.class)) {
                fieldsTemp = Sets.union(fieldsTemp,
                        Reflections.<Set<WeldField<?, ?>>>cast(superclass.getFields()));
            }
        }
        this.declaredFields = new ArraySet<WeldField<?, ?>>(declaredFieldsTemp);
    } else {
        this.annotatedFields = ArrayListMultimap.<Class<? extends Annotation>, WeldField<?, ?>>create();
        fieldsTemp = new HashSet<WeldField<?, ?>>();
        for (AnnotatedField<? super T> annotatedField : annotatedType.getFields()) {
            WeldField<?, ? super T> weldField = WeldFieldImpl.of(annotatedField, this, classTransformer);
            fieldsTemp.add(weldField);
            if (annotatedField.getDeclaringType().getJavaClass() == rawType) {
                declaredFieldsTemp.add(weldField);
            }
            for (Annotation annotation : weldField.getAnnotations()) {
                this.annotatedFields.put(annotation.annotationType(), weldField);
                if (annotatedField.getDeclaringType().getJavaClass() == rawType) {
                    this.declaredAnnotatedFields.put(annotation.annotationType(), weldField);
                    for (Annotation metaAnnotation : annotation.annotationType().getAnnotations()) {
                        this.declaredMetaAnnotatedFields.put(metaAnnotation.annotationType(), weldField);
                    }
                }
            }
        }
        this.declaredFields = new ArraySet<WeldField<?, ?>>(declaredFieldsTemp);
        fieldsTemp = new ArraySet<WeldField<?, ?>>(fieldsTemp).trimToSize();
        this.annotatedFields.trimToSize();
    }
    this.fields = fieldsTemp;
    this.declaredFields.trimToSize();
    this.declaredAnnotatedFields.trimToSize();
    this.declaredMetaAnnotatedFields.trimToSize();

    // Assign constructor information
    this.constructors = new ArraySet<WeldConstructor<T>>();
    this.declaredConstructorsBySignature = new HashMap<ConstructorSignature, WeldConstructor<?>>();
    if (annotatedType == null) {
        for (Constructor<?> constructor : SecureReflections.getDeclaredConstructors(rawType)) {
            Constructor<T> c = Reflections.cast(constructor);
            WeldConstructor<T> annotatedConstructor = WeldConstructorImpl.of(c,
                    this.<T>getDeclaringWeldClass(c, classTransformer), classTransformer);
            this.constructors.add(annotatedConstructor);
            this.declaredConstructorsBySignature.put(annotatedConstructor.getSignature(), annotatedConstructor);
        }
    } else {
        for (AnnotatedConstructor<T> constructor : annotatedType.getConstructors()) {
            WeldConstructor<T> weldConstructor = WeldConstructorImpl.of(constructor, this, classTransformer);
            this.constructors.add(weldConstructor);
            List<Class<?>> parameterTypes = new ArrayList<Class<?>>();
            for (AnnotatedParameter<T> parameter : constructor.getParameters()) {
                parameterTypes.add(Reflections.getRawType(parameter.getBaseType()));
            }
            this.declaredConstructorsBySignature.put(weldConstructor.getSignature(), weldConstructor);
        }
    }
    this.constructors.trimToSize();

    // Assign method information
    this.declaredAnnotatedMethods = ArrayListMultimap
            .<Class<? extends Annotation>, WeldMethod<?, ? super T>>create();
    this.declaredMethodsByAnnotatedParameters = ArrayListMultimap
            .<Class<? extends Annotation>, WeldMethod<?, ? super T>>create();

    Set<WeldMethod<?, ? super T>> methodsTemp = null;
    ArrayList<WeldMethod<?, ? super T>> declaredMethodsTemp = new ArrayList<WeldMethod<?, ? super T>>();
    if (annotatedType == null) {
        this.annotatedMethods = null;
        if (rawType != Object.class) {
            for (Method method : SecureReflections.getDeclaredMethods(rawType)) {
                WeldMethod<?, T> weldMethod = WeldMethodImpl.of(method,
                        this.<T>getDeclaringWeldClass(method, classTransformer), classTransformer);
                declaredMethodsTemp.add(weldMethod);
                for (Annotation annotation : weldMethod.getAnnotations()) {
                    this.declaredAnnotatedMethods.put(annotation.annotationType(), weldMethod);
                }
                for (Class<? extends Annotation> annotationType : WeldMethod.MAPPED_PARAMETER_ANNOTATIONS) {
                    if (weldMethod.getWeldParameters(annotationType).size() > 0) {
                        this.declaredMethodsByAnnotatedParameters.put(annotationType, weldMethod);
                    }
                }
            }
            methodsTemp = new ArraySet<WeldMethod<?, ? super T>>(declaredMethodsTemp).trimToSize();
            if (superclass != null) {
                WeldClass<?> current = superclass;
                while (current.getJavaClass() != Object.class) {
                    Set<WeldMethod<?, ? super T>> superClassMethods = Reflections
                            .cast(current.getDeclaredWeldMethods());
                    methodsTemp = Sets.union(methodsTemp, superClassMethods);
                    current = current.getWeldSuperclass();
                }
            }
        }
        this.declaredMethods = new ArraySet<WeldMethod<?, ? super T>>(declaredMethodsTemp);
    } else {
        this.annotatedMethods = ArrayListMultimap.<Class<? extends Annotation>, WeldMethod<?, ?>>create();
        methodsTemp = new HashSet<WeldMethod<?, ? super T>>();
        for (AnnotatedMethod<? super T> method : annotatedType.getMethods()) {
            WeldMethod<?, ? super T> weldMethod = WeldMethodImpl.of(method, this, classTransformer);
            methodsTemp.add(weldMethod);
            if (method.getDeclaringType().getJavaClass() == rawType) {
                declaredMethodsTemp.add(weldMethod);
            }
            for (Annotation annotation : weldMethod.getAnnotations()) {
                annotatedMethods.put(annotation.annotationType(), weldMethod);
                if (method.getDeclaringType().getJavaClass() == rawType) {
                    this.declaredAnnotatedMethods.put(annotation.annotationType(), weldMethod);
                }
            }
            for (Class<? extends Annotation> annotationType : WeldMethod.MAPPED_PARAMETER_ANNOTATIONS) {
                if (weldMethod.getWeldParameters(annotationType).size() > 0) {
                    if (method.getDeclaringType().getJavaClass() == rawType) {
                        this.declaredMethodsByAnnotatedParameters.put(annotationType, weldMethod);
                    }
                }
            }
        }
        this.declaredMethods = new ArraySet<WeldMethod<?, ? super T>>(declaredMethodsTemp);
        methodsTemp = new ArraySet<WeldMethod<?, ? super T>>(methodsTemp).trimToSize();
        this.annotatedMethods.trimToSize();
    }
    this.methods = methodsTemp;
    this.declaredMethods.trimToSize();
    this.declaredAnnotatedMethods.trimToSize();
    this.declaredMethodsByAnnotatedParameters.trimToSize();

    ArraySetMultimap<Class<? extends Annotation>, Annotation> declaredMetaAnnotationMap =
            new ArraySetMultimap<Class<? extends Annotation>, Annotation>();
    for (Annotation declaredAnnotation : declaredAnnotationMap.values()) {
        addMetaAnnotations(declaredMetaAnnotationMap, declaredAnnotation,
                declaredAnnotation.annotationType().getAnnotations(), true);
        addMetaAnnotations(declaredMetaAnnotationMap, declaredAnnotation,
                classTransformer.getTypeStore().get(declaredAnnotation.annotationType()), true);
        declaredMetaAnnotationMap.putSingleElement(declaredAnnotation.annotationType(), declaredAnnotation);
    }
    declaredMetaAnnotationMap.trimToSize();
    this.declaredMetaAnnotationMap = SharedObjectFacade.wrap(declaredMetaAnnotationMap);
}
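The interesting usage above is the loop methodsTemp = Sets.union(methodsTemp, superClassMethods): walking up the class hierarchy accumulates inherited members as a chain of nested views rather than one flattened set. A sketch of that accumulation (names and values illustrative), with the caveat that every contains() or iteration on the result traverses the whole chain, so a deep hierarchy may warrant a final immutableCopy():

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.List;
import java.util.Set;

public class AccumulateInherited {
    public static void main(String[] args) {
        List<Set<String>> perClassMethods = ImmutableList.of(
                ImmutableSet.of("toString", "close"), // declared on the class itself
                ImmutableSet.of("toString", "flush"), // superclass
                ImmutableSet.of("toString"));         // super-superclass

        Set<String> methods = ImmutableSet.of();
        for (Set<String> declared : perClassMethods) {
            methods = Sets.union(methods, declared); // each step wraps the previous view
        }
        // Optional: flatten once at the end if the set will be queried repeatedly.
        System.out.println(ImmutableSet.copyOf(methods)); // [toString, close, flush]
    }
}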
From source file:org.diqube.im.IdentityHandler.java
@Override
public void logout(Ticket ticket) throws TException, AuthorizationException {
    if (!ticketSignatureService
            .isValidTicketSignature(TicketUtil.deserialize(ByteBuffer.wrap(TicketUtil.serialize(ticket))))) {
        // filter out tickets with invalid signature, since we do not want to let users flood the consensus
        // cluster with requests.
        logger.info("Someone tried to logout with an invalid ticket. Username provided in ticket is '{}'",
                ticket.getClaim().getUsername());
        throw new AuthorizationException("Ticket signature invalid.");
    }

    logger.info("Logging out user '{}', ticket valid until {}", ticket.getClaim().getUsername(),
            ticket.getClaim().getValidUntil());

    ticketValidityService.markTicketAsInvalid(TicketInfoUtil.fromTicket(ticket));

    // quickly (but unreliably) distribute the logout to all known cluster nodes and all interested callbacks.
    for (RNodeAddress addr : Sets.union(
            clusterLayout.getNodesInsecure().stream().map(addr -> addr.createRemote())
                    .collect(Collectors.toSet()),
            callbackRegistry.getRegisteredNodesInsecure())) {
        if (addr.equals(ourNodeAddressProvider.getOurNodeAddress()))
            continue;

        try (Connection<IdentityCallbackService.Iface> con = connectionPool
                .reserveConnection(IdentityCallbackService.Iface.class, addr, null)) {
            con.getService().ticketBecameInvalid(TicketInfoUtil.fromTicket(ticket));
        } catch (ConnectionException | IOException e) {
            // swallow, as we distribute the information reliably using the state machine below.
        } catch (InterruptedException e) {
            logger.warn("Interrupted while distributing logout information.", e);
            return;
        }
    }

    // then: distribute logout reliably (but probably slower) across the consensus cluster. This will again
    // ensure that all registered callbacks are called accordingly.
    try (ClosableProvider<LogoutStateMachine> p = consensusClient
            .getStateMachineClient(LogoutStateMachine.class)) {
        p.getClient().logout(Logout.local(ticket));
    } catch (ConsensusClusterUnavailableException e) {
        throw new RuntimeException("Consensus cluster unavailable", e);
    }

    logger.info("Logout of user '{}', ticket {}, valid until {} successful.", ticket.getClaim().getUsername(),
            RUuidUtil.toUuid(ticket.getClaim().getTicketId()), ticket.getClaim().getValidUntil());
}
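Above, the union lets one for-each loop cover two independently collected address sets (cluster nodes and callback registrants) without allocating a merged set, and deduplicates nodes that appear in both. A stripped-down sketch, with String addresses standing in for RNodeAddress:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;

public class NotifyAllNodes {
    public static void main(String[] args) {
        Set<String> clusterNodes = ImmutableSet.of("10.0.0.1:5101", "10.0.0.2:5101");
        Set<String> callbackNodes = ImmutableSet.of("10.0.0.2:5101", "10.0.0.3:5101");
        String self = "10.0.0.1:5101";

        // Nodes present in both sets are visited only once, since the union is still a set.
        for (String addr : Sets.union(clusterNodes, callbackNodes)) {
            if (addr.equals(self)) {
                continue; // skip ourselves, as the original code does
            }
            System.out.println("notify " + addr);
        }
    }
}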
From source file:org.apache.beam.runners.dataflow.worker.graph.CreateRegisterFnOperationFunction.java
@Override
public MutableNetwork<Node, Edge> apply(MutableNetwork<Node, Edge> network) {
    // Record all SDK nodes, and all root nodes.
    Set<Node> runnerRootNodes = new HashSet<>();
    Set<Node> sdkNodes = new HashSet<>();
    Set<Node> sdkRootNodes = new HashSet<>();
    for (ParallelInstructionNode node : Iterables.filter(network.nodes(), ParallelInstructionNode.class)) {
        if (executesInSdkHarness(node)) {
            sdkNodes.add(node);
            if (network.inDegree(node) == 0) {
                sdkRootNodes.add(node);
            }
        } else if (network.inDegree(node) == 0) {
            runnerRootNodes.add(node);
        }
    }

    // If nothing executes within the SDK harness, return the original network.
    if (sdkNodes.isEmpty()) {
        return network;
    }

    // Represents the set of nodes which represent gRPC boundaries from the Runner to the SDK.
    Set<Node> runnerToSdkBoundaries = new HashSet<>();
    // Represents the set of nodes which represent gRPC boundaries from the SDK to the Runner.
    Set<Node> sdkToRunnerBoundaries = new HashSet<>();

    ImmutableNetwork<Node, Edge> originalNetwork = ImmutableNetwork.copyOf(network);

    // Update the network with outputs which are meant to bridge the instructions
    // that execute in different harnesses. One output per direction of information
    // flow from runner to SDK and SDK to runner per original output node.
    for (InstructionOutputNode outputNode : Iterables.filter(originalNetwork.nodes(),
            InstructionOutputNode.class)) {
        // Categorize all predecessor instructions
        Set<Node> predecessorRunnerInstructions = new HashSet<>();
        Set<Node> predecessorSdkInstructions = new HashSet<>();
        for (Node predecessorInstruction : originalNetwork.predecessors(outputNode)) {
            if (sdkNodes.contains(predecessorInstruction)) {
                predecessorSdkInstructions.add(predecessorInstruction);
            } else {
                predecessorRunnerInstructions.add(predecessorInstruction);
            }
        }

        // Categorize all successor instructions
        Set<Node> successorRunnerInstructions = new HashSet<>();
        Set<Node> successorSdkInstructions = new HashSet<>();
        for (Node successorInstruction : originalNetwork.successors(outputNode)) {
            if (sdkNodes.contains(successorInstruction)) {
                successorSdkInstructions.add(successorInstruction);
            } else {
                successorRunnerInstructions.add(successorInstruction);
            }
        }

        // If there is data that will be flowing from the Runner to the SDK, rewire network to have
        // nodes connected across a gRPC node. Also add the gRPC node as an SDK root.
        if (!predecessorRunnerInstructions.isEmpty() && !successorSdkInstructions.isEmpty()) {
            runnerToSdkBoundaries.add(rewireAcrossSdkRunnerPortNode(network, outputNode,
                    predecessorRunnerInstructions, successorSdkInstructions));
        }

        // If there is data that will be flowing from the SDK to the Runner, rewire network to have
        // nodes connected across a gRPC node.
        if (!predecessorSdkInstructions.isEmpty() && !successorRunnerInstructions.isEmpty()) {
            sdkToRunnerBoundaries.add(rewireAcrossSdkRunnerPortNode(network, outputNode,
                    predecessorSdkInstructions, successorRunnerInstructions));
        }

        // Remove original output node if it was rewired because it will have become disconnected
        // through the new output node.
        if (network.inDegree(outputNode) == 0) {
            network.removeNode(outputNode);
        }
    }

    // Create the subnetworks that represent potentially multiple fused SDK portions and a single
    // fused Runner portion replacing the SDK portion that is embedded within the Runner portion
    // with a RegisterFnOperation, adding edges to maintain proper happens before relationships.
    Set<Node> allRunnerNodes = Networks.reachableNodes(network,
            Sets.union(runnerRootNodes, sdkToRunnerBoundaries), runnerToSdkBoundaries);
    if (this.useExecutableStageBundleExecution) {
        // When using shared library, there is no gRPC node in the runner graph.
        allRunnerNodes = Sets.difference(allRunnerNodes,
                Sets.union(runnerToSdkBoundaries, sdkToRunnerBoundaries));
    }
    MutableNetwork<Node, Edge> runnerNetwork = Graphs.inducedSubgraph(network, allRunnerNodes);

    // TODO: Reduce the amount of 'copying' of SDK nodes by breaking potential cycles
    // between the SDK networks and the Runner network. Cycles can occur because entire
    // SDK subnetworks are replaced by a singular node within the Runner network.
    // khines@ suggested to look at go/priority-based-fusion for an algorithm based upon
    // using poison paths.
    for (Node sdkRoot : Sets.union(sdkRootNodes, runnerToSdkBoundaries)) {
        Set<Node> sdkSubnetworkNodes = Networks.reachableNodes(network, ImmutableSet.of(sdkRoot),
                sdkToRunnerBoundaries);
        MutableNetwork<Node, Edge> sdkNetwork = Graphs.inducedSubgraph(network, sdkSubnetworkNodes);
        Node registerFnNode = registerFnOperationFunction.apply(sdkNetwork);

        runnerNetwork.addNode(registerFnNode);
        // Create happens before relationships between all Runner and SDK nodes which are in the
        // SDK subnetwork; direction dependent on whether it's a predecessor of the SDK subnetwork
        // or a successor.
        if (this.useExecutableStageBundleExecution) {
            // When using shared library, there is no gRPC node in the runner graph. Then the
            // registerFnNode should be linked directly to 2 OutputInstruction nodes.
            for (Node predecessor : Sets.intersection(sdkSubnetworkNodes, runnerToSdkBoundaries)) {
                predecessor = network.predecessors(predecessor).iterator().next();
                runnerNetwork.addEdge(predecessor, registerFnNode, HappensBeforeEdge.create());
            }
            for (Node successor : Sets.intersection(sdkSubnetworkNodes, sdkToRunnerBoundaries)) {
                successor = network.successors(successor).iterator().next();
                runnerNetwork.addEdge(registerFnNode, successor, HappensBeforeEdge.create());
            }
        } else {
            for (Node predecessor : Sets.intersection(sdkSubnetworkNodes, runnerToSdkBoundaries)) {
                runnerNetwork.addEdge(predecessor, registerFnNode, HappensBeforeEdge.create());
            }
            for (Node successor : Sets.intersection(sdkSubnetworkNodes, sdkToRunnerBoundaries)) {
                runnerNetwork.addEdge(registerFnNode, successor, HappensBeforeEdge.create());
            }
        }
    }
    return runnerNetwork;
}
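The three Sets calls above compose: union supplies the seed nodes for a reachability traversal, and difference/intersection later strip or select the boundary nodes. A reduced sketch of that composition over plain string "nodes" (the actual graph machinery is elided; names and values are illustrative):

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;

public class BoundaryBookkeeping {
    public static void main(String[] args) {
        Set<String> runnerRoots = ImmutableSet.of("read");
        Set<String> sdkToRunner = ImmutableSet.of("grpc-in");
        Set<String> runnerToSdk = ImmutableSet.of("grpc-out");

        // Traversal seeds: runner roots plus every SDK-to-runner boundary node.
        Set<String> seeds = Sets.union(runnerRoots, sdkToRunner);
        // Stand-in for the reachability traversal's result.
        Set<String> reached = Sets.union(seeds, ImmutableSet.of("write", "grpc-out"));

        // Drop all gRPC boundary nodes from the runner portion in a single expression.
        Set<String> runnerNodes = Sets.difference(reached, Sets.union(runnerToSdk, sdkToRunner));
        System.out.println(runnerNodes); // [read, write]
    }
}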
From source file:org.apache.beam.runners.core.construction.graph.GreedyPipelineFuser.java
/**
 * Fuses a {@link Pipeline} into a collection of {@link ExecutableStage}.
 *
 * <p>The input is the initial collection of siblings sets which will be fused into {@link
 * ExecutableStage stages}. A sibling in this context represents a pair of (PCollection,
 * PTransform), where the PTransform consumes input elements on a per-element basis from the
 * PCollection, represented by a {@link CollectionConsumer}. A sibling set is a collection of
 * siblings which can execute within a single {@link ExecutableStage}, determined by {@link
 * GreedyPCollectionFusers#isCompatible(PTransformNode, PTransformNode, QueryablePipeline)}.
 *
 * <p>While a pending sibling set exists:
 *
 * <ul>
 *   <li>Retrieve a pending sibling set from the front of the queue.
 *   <li>If the pending sibling set has already been created, continue. Each materialized {@link
 *       PTransformNode} can be consumed by any number of {@link ExecutableStage stages}, but each
 *       {@link PTransformNode} may only be present in a single stage rooted at a single {@link
 *       PCollectionNode}, otherwise it will process elements of that {@link PCollectionNode}
 *       multiple times.
 *   <li>Create a {@link GreedyStageFuser} with those siblings as the initial consuming transforms
 *       of the stage
 *   <li>For each materialized {@link PCollectionNode}, find all of the descendant in-environment
 *       consumers. See {@link #getDescendantConsumers(PCollectionNode)} for details.
 *   <li>Construct all of the sibling sets from the descendant in-environment consumers, and add
 *       them to the queue of sibling sets.
 * </ul>
 */
private FusedPipeline fusePipeline(Collection<PTransformNode> initialUnfusedTransforms,
        NavigableSet<NavigableSet<CollectionConsumer>> initialConsumers) {
    Map<CollectionConsumer, ExecutableStage> consumedCollectionsAndTransforms = new HashMap<>();
    Set<ExecutableStage> stages = new LinkedHashSet<>();
    Set<PTransformNode> unfusedTransforms = new LinkedHashSet<>(initialUnfusedTransforms);
    Queue<Set<CollectionConsumer>> pendingSiblingSets = new ArrayDeque<>(initialConsumers);
    while (!pendingSiblingSets.isEmpty()) {
        // Only introduce new PCollection consumers. Not performing this introduces potential
        // duplicate paths through the pipeline.
        Set<CollectionConsumer> candidateSiblings = pendingSiblingSets.poll();
        Set<CollectionConsumer> siblingSet = Sets.difference(candidateSiblings,
                consumedCollectionsAndTransforms.keySet());
        checkState(siblingSet.equals(candidateSiblings) || siblingSet.isEmpty(),
                "Inconsistent collection of siblings reported for a %s. Initial attempt missed %s",
                PCollectionNode.class.getSimpleName(), siblingSet);
        if (siblingSet.isEmpty()) {
            LOG.debug("Filtered out duplicate stage root {}", candidateSiblings);
            continue;
        }
        // Create the stage with these siblings as the initial consuming transforms
        ExecutableStage stage = fuseSiblings(siblingSet);
        // Mark each of the root transforms of the stage as consuming the input PCollection, so we
        // don't place them in multiple stages.
        for (CollectionConsumer sibling : siblingSet) {
            consumedCollectionsAndTransforms.put(sibling, stage);
        }
        stages.add(stage);
        for (PCollectionNode materializedOutput : stage.getOutputPCollections()) {
            // Get all of the descendant consumers of each materialized PCollection, and add them
            // to the queue of pending siblings.
            DescendantConsumers descendantConsumers = getDescendantConsumers(materializedOutput);
            unfusedTransforms.addAll(descendantConsumers.getUnfusedNodes());
            NavigableSet<NavigableSet<CollectionConsumer>> siblings = groupSiblings(
                    descendantConsumers.getFusibleConsumers());
            pendingSiblingSets.addAll(siblings);
        }
    }
    // TODO: Figure out where to store this.
    DeduplicationResult deduplicated = OutputDeduplicator.ensureSingleProducer(pipeline, stages,
            unfusedTransforms);
    // TODO: Stages can be fused with each other, if doing so does not introduce duplicate paths
    // for an element to take through the Pipeline. Compatible siblings can generally be fused,
    // as can compatible producers/consumers if a PCollection is only materialized once.
    return FusedPipeline.of(deduplicated.getDeduplicatedComponents(),
            stages.stream()
                    .map(stage -> deduplicated.getDeduplicatedStages().getOrDefault(stage, stage))
                    .map(GreedyPipelineFuser::sanitizeDanglingPTransformInputs)
                    .collect(Collectors.toSet()),
            Sets.union(deduplicated.getIntroducedTransforms(),
                    unfusedTransforms.stream()
                            .map(transform -> deduplicated.getDeduplicatedTransforms()
                                    .getOrDefault(transform.getId(), transform))
                            .collect(Collectors.toSet())));
}
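The final Sets.union above stitches two stream-collected sets together as the transform collection of the fused result. Reduced to its essentials, the pattern is "collect two sets, return their union view", which is reasonable when the result is consumed immediately; call immutableCopy() if it will outlive the inputs. A sketch with illustrative names:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class CombineResults {
    public static void main(String[] args) {
        Set<String> introduced = ImmutableSet.of("dedupe-flatten");
        Set<String> unfused = Stream.of("impulse", "read")
                .collect(Collectors.toSet());

        // A view is fine here because both inputs are local and never mutated afterwards.
        Set<String> allTransforms = Sets.union(introduced, unfused);
        System.out.println(allTransforms);
    }
}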
From source file:com.facebook.buck.haskell.HaskellLibraryDescription.java
private HaskellPackageRule requirePackage(BuildTarget baseTarget, BuildRuleParams baseParams,
        BuildRuleResolver resolver, SourcePathResolver pathResolver, SourcePathRuleFinder ruleFinder,
        CxxPlatform cxxPlatform, Arg args, Linker.LinkableDepType depType) throws NoSuchBuildTargetException {
    Preconditions.checkArgument(Sets
            .intersection(baseTarget.getFlavors(), Sets.union(Type.FLAVOR_VALUES, cxxPlatforms.getFlavors()))
            .isEmpty());
    BuildTarget target = baseTarget.withAppendedFlavors(cxxPlatform.getFlavor());
    switch (depType) {
    case SHARED:
        target = target.withAppendedFlavors(Type.PACKAGE_SHARED.getFlavor());
        break;
    case STATIC:
        target = target.withAppendedFlavors(Type.PACKAGE_STATIC.getFlavor());
        break;
    case STATIC_PIC:
        target = target.withAppendedFlavors(Type.PACKAGE_STATIC_PIC.getFlavor());
        break;
    default:
        throw new IllegalStateException();
    }
    Optional<HaskellPackageRule> packageRule = resolver.getRuleOptionalWithType(target,
            HaskellPackageRule.class);
    if (packageRule.isPresent()) {
        return packageRule.get();
    }
    return resolver.addToIndex(
            createPackage(target, baseParams, resolver, pathResolver, ruleFinder, cxxPlatform, args, depType));
}
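The precondition above is a disjointness check: the base target's flavors must not overlap the union of type flavors and platform flavors. Sets.intersection(a, Sets.union(b, c)).isEmpty() expresses that without building any intermediate sets. A minimal sketch, with illustrative flavor strings:

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;

public class DisjointnessCheck {
    public static void main(String[] args) {
        Set<String> targetFlavors = ImmutableSet.of("release");
        Set<String> typeFlavors = ImmutableSet.of("package-shared", "package-static");
        Set<String> platformFlavors = ImmutableSet.of("linux-x86_64");

        // Fails fast if the caller already appended a type or platform flavor.
        Preconditions.checkArgument(
                Sets.intersection(targetFlavors, Sets.union(typeFlavors, platformFlavors)).isEmpty());
        System.out.println("flavors are disjoint");
    }
}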
From source file:org.opendaylight.groupbasedpolicy.neutron.mapper.mapping.rule.NeutronSecurityRuleAware.java
@VisibleForTesting
Set<NeutronSecurityRule> getProvidedSecRulesBetween(EndpointGroupId provEpgId, EndpointGroupId consEpgId) {
    return Sets.union(secRuleDao.getSecRulesBySecGrpIdAndRemoteSecGrpId(provEpgId, consEpgId),
            secRuleDao.getSecRulesWithoutRemoteSecGrpBySecGrpId(provEpgId));
}
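This last example returns the union view directly. That is cheap, but callers receive an unmodifiable set that tracks later changes to the two underlying query results; a sketch of both behaviors, with hypothetical stand-in collections:

import com.google.common.collect.Sets;
import java.util.HashSet;
import java.util.Set;

public class ReturnedViewSemantics {
    public static void main(String[] args) {
        Set<String> withRemote = new HashSet<>(Set.of("rule-a"));
        Set<String> withoutRemote = new HashSet<>(Set.of("rule-b"));

        Set<String> provided = Sets.union(withRemote, withoutRemote);
        System.out.println(provided); // [rule-a, rule-b]

        withRemote.add("rule-c");
        System.out.println(provided); // the live view now includes rule-c

        // provided.add("rule-d");   // would throw UnsupportedOperationException
    }
}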