A list of usage examples for the Guava method com.google.common.collect.Sets.intersection.
public static <E> SetView<E> intersection(final Set<E> set1, final Set<?> set2)
From source file:com.olacabs.fabric.compute.builder.impl.JarScanner.java
/**
 * Scans the supplied class loader and downloaded jar URLs for pipeline source
 * components. A class qualifies as a source only when it is both annotated with
 * {@code @Source} and a subtype of {@code PipelineSource}; each qualifying class
 * is converted into a {@code ScanResult} carrying its component metadata.
 */
private List<ScanResult> scanForSources(ClassLoader classLoader, URL[] downloadedUrls) throws Exception {
    final Reflections scanner = new Reflections(new ConfigurationBuilder()
            .addClassLoader(classLoader)
            .addScanners(new SubTypesScanner(), new TypeAnnotationsScanner())
            .addUrls(downloadedUrls));
    // Require BOTH the annotation and the interface: intersection of the two scans.
    final Set<Class<?>> candidates = Sets.intersection(
            scanner.getTypesAnnotatedWith(Source.class),
            scanner.getSubTypesOf(PipelineSource.class));
    final List<ScanResult> results = new ArrayList<>();
    for (Class<?> sourceClass : candidates) {
        final Source info = sourceClass.getAnnotation(Source.class);
        final ComponentMetadata metadata = ComponentMetadata.builder()
                .type(ComponentType.SOURCE)
                .namespace(info.namespace())
                .name(info.name())
                .version(info.version())
                .description(info.description())
                .cpu(info.cpu())
                .memory(info.memory())
                .requiredProperties(ImmutableList.copyOf(info.requiredProperties()))
                .optionalProperties(ImmutableList.copyOf(info.optionalProperties()))
                .build();
        results.add(ScanResult.builder().metadata(metadata).componentClass(sourceClass).build());
    }
    return results;
}
From source file:io.druid.indexing.common.task.IndexTask.java
/**
 * Runs the indexing task: determines the intervals that actually need indexing,
 * computes the shard specs for each interval bucket, generates one segment per
 * shard, and finally publishes all generated segments through the toolbox.
 */
@Override
public TaskStatus run(TaskToolbox toolbox) throws Exception {
    final GranularitySpec granularitySpec = ingestionSchema.getDataSchema().getGranularitySpec();
    final int targetPartitionSize = ingestionSchema.getTuningConfig().getTargetPartitionSize();
    // Exactly one task lock is expected; its version stamps every generated segment.
    final TaskLock myLock = Iterables.getOnlyElement(getTaskLocks(toolbox));
    final Set<DataSegment> segments = Sets.newHashSet();
    // Index only buckets present in BOTH the configured segment granularity and the
    // intervals actually covered by the data.
    final Set<Interval> validIntervals = Sets.intersection(granularitySpec.bucketIntervals().get(),
            getDataIntervals());
    if (validIntervals.isEmpty()) {
        throw new ISE("No valid data intervals found. Check your configs!");
    }
    for (final Interval bucket : validIntervals) {
        final List<ShardSpec> shardSpecs;
        if (targetPartitionSize > 0) {
            // Size-driven partitioning: probe the data to decide the shard layout.
            shardSpecs = determinePartitions(bucket, targetPartitionSize, granularitySpec.getQueryGranularity());
        } else {
            int numShards = ingestionSchema.getTuningConfig().getNumShards();
            if (numShards > 0) {
                // Fixed shard count: hash-partition rows across numShards shards.
                shardSpecs = Lists.newArrayList();
                for (int i = 0; i < numShards; i++) {
                    shardSpecs.add(new HashBasedNumberedShardSpec(i, numShards, jsonMapper));
                }
            } else {
                // No partitioning configured: one unsharded segment for this bucket.
                shardSpecs = ImmutableList.<ShardSpec>of(new NoneShardSpec());
            }
        }
        for (final ShardSpec shardSpec : shardSpecs) {
            final DataSegment segment = generateSegment(toolbox, ingestionSchema.getDataSchema(), shardSpec,
                    bucket, myLock.getVersion());
            segments.add(segment);
        }
    }
    toolbox.pushSegments(segments);
    return TaskStatus.success(getId());
}
From source file:com.cloudera.gertrude.space.ExperimentSpaceBuilder.java
/**
 * Returns whether the two layers overlap, i.e. whether their lineages share at
 * least one common element.
 *
 * <p>Fix: the previous implementation returned {@code intersection(...).isEmpty()},
 * which is true exactly when the lineages are <em>disjoint</em> — the inverse of
 * what the method name promises. Callers asking "are these overlapping?" were
 * getting "are these disjoint?". The result is now negated so the return value
 * matches the name.
 */
boolean areLayersOverlapping(int firstLayerId, int secondLayerId) {
    // Overlap <=> the two lineage sets intersect.
    return !Sets.intersection(getLineage(firstLayerId), getLineage(secondLayerId)).isEmpty();
}
From source file:ai.grakn.graql.internal.hal.HALUtils.java
private static boolean bothRolePlayersAreSelectedNoReasoner(VarPatternAdmin var, MatchQuery matchQuery) { Set<Var> rolePlayersInVar = var.getProperty(RelationProperty.class).get().getRelationPlayers() .map(x -> x.getRolePlayer().getVarName()).collect(Collectors.toSet()); Set<Var> selectedVars = matchQuery.admin().getSelectedNames(); //If all the role players contained in the current relation are also selected in the user query return Sets.intersection(rolePlayersInVar, selectedVars).equals(rolePlayersInVar); }
From source file:com.facebook.buck.haskell.HaskellLibraryDescription.java
/**
 * Looks up — or creates, caches, and returns — the static archive build rule
 * for the given base target, C/C++ platform, and link depth (static vs.
 * static-PIC).
 */
private Archive requireStaticLibrary(BuildTarget baseTarget, BuildRuleParams baseParams,
        BuildRuleResolver resolver, SourcePathResolver pathResolver, SourcePathRuleFinder ruleFinder,
        CxxPlatform cxxPlatform, Arg args, Linker.LinkableDepType depType) throws NoSuchBuildTargetException {
    // The base target must be unflavored with respect to library-type and
    // platform flavors; those are appended below to form the concrete target.
    Preconditions.checkArgument(Sets
            .intersection(baseTarget.getFlavors(), Sets.union(Type.FLAVOR_VALUES, cxxPlatforms.getFlavors()))
            .isEmpty());
    // Append the flavors identifying this concrete build: static vs. static-pic, plus the platform.
    BuildTarget target = baseTarget.withAppendedFlavors(
            depType == Linker.LinkableDepType.STATIC ? Type.STATIC.getFlavor() : Type.STATIC_PIC.getFlavor(),
            cxxPlatform.getFlavor());
    // Reuse a previously constructed rule for this exact target when available.
    Optional<Archive> archive = resolver.getRuleOptionalWithType(target, Archive.class);
    if (archive.isPresent()) {
        return archive.get();
    }
    return resolver.addToIndex(createStaticLibrary(target, baseParams, resolver, pathResolver, ruleFinder,
            cxxPlatform, args, depType));
}
From source file:org.fenixedu.academic.ui.struts.action.academicAdministration.executionCourseManagement.ExecutionCourseBean.java
public String getDestinationPresentationName() { StringBuilder result = new StringBuilder(); if (getDestinationExecutionCourse() != null) { result.append(getDestinationExecutionCourse().getNameI18N().getContent()); final Set<DegreeCurricularPlan> plans; if (getDegree() != null) { plans = Sets.intersection(getDegree().getDegreeCurricularPlansSet(), Sets.newHashSet(getDestinationExecutionCourse().getAssociatedDegreeCurricularPlans())); } else {//from w w w. j av a 2 s. c om plans = Sets.newHashSet(getDestinationExecutionCourse().getAssociatedDegreeCurricularPlans()); } result.append(getDegreeCurricularPlansPresentationString(plans)); } return result.toString(); }
From source file:mvm.rya.indexing.accumulo.entity.StarQuery.java
/**
 * Computes the variable names shared between the binding set and the star
 * query's variables (its un-common vars, plus the common var when it is not a
 * constant). Returns an empty set when the binding set is null or empty.
 */
public static Set<String> getCommonVars(StarQuery query, BindingSet bs) {
    if (bs == null || bs.size() == 0) {
        return Sets.newHashSet();
    }
    final Set<String> queryVars = Sets.newHashSet(query.getUnCommonVars());
    // The common var only participates when it is a genuine variable, not a constant.
    if (!query.commonVarConstant()) {
        queryVars.add(query.getCommonVarName());
    }
    return Sets.intersection(bs.getBindingNames(), queryVars);
}
From source file:edu.harvard.med.screensaver.service.cherrypicks.CherryPickRequestAllocator.java
/**
 * Allocates library copy volume to each of the given lab cherry picks, choosing
 * for every fulfillable pick a copy from the minimum set of copies that can
 * satisfy all of them.
 *
 * <p>NOTE(review): the catch block below throws BusinessRuleViolationException —
 * confirm the declared DataModelViolationException is its supertype, or update
 * this tag.
 *
 * @return the set of <i>unfulfillable</i> cherry picks
 * @throws DataModelViolationException if the source wells for the labCherryPicks contain duplicates
 */
@Transactional
public Set<LabCherryPick> allocate(Collection<LabCherryPick> labCherryPicks) {
    Set<LabCherryPick> unfulfillableLabCherryPicks = new HashSet<LabCherryPick>();
    if (labCherryPicks.size() == 0) {
        return unfulfillableLabCherryPicks;
    }
    try {
        // Index the picks by source well; uniqueIndex throws IllegalArgumentException
        // on duplicate wells, handled in the catch below.
        final ImmutableMap<Well, LabCherryPick> well2lcp = Maps.uniqueIndex(labCherryPicks,
                new Function<LabCherryPick, Well>() {
                    public Well apply(LabCherryPick lcp) {
                        return lcp.getSourceWell();
                    }
                });
        CherryPickRequest cherryPickRequest = labCherryPicks.iterator().next().getCherryPickRequest();
        Map<Well, Set<Copy>> copyCandidatesForWells = findCopyCandidatesForWells(well2lcp.keySet(),
                cherryPickRequest.getTransferVolumePerWellApproved());
        // remove unfulfillable wells now, as they would force the minimum copy set to always be empty
        Set<Well> unfulfillableWells = removeUnfulfillableWells(copyCandidatesForWells);
        assert Sets.intersection(unfulfillableWells, copyCandidatesForWells.keySet()).isEmpty();
        Set<Copy> minimumCopySetForWells = findMinimumCopySetForWells(copyCandidatesForWells);
        if (log.isDebugEnabled()) {
            log.debug("using minimum copy set: " + minimumCopySetForWells);
        }
        for (LabCherryPick labCherryPick : labCherryPicks) {
            if (!unfulfillableWells.contains(labCherryPick.getSourceWell())) {
                Set<Copy> copyCandidatesForWell = copyCandidatesForWells.get(labCherryPick.getSourceWell());
                // Restrict this well's candidates to copies that are in the minimum copy set.
                Set<Copy> copyCandidatesForWellAndPlate = Sets.intersection(minimumCopySetForWells,
                        copyCandidatesForWell);
                if (log.isDebugEnabled()) {
                    log.debug("copy candidates for well " + copyCandidatesForWell);
                    log.debug("copy candidates for well and plate " + copyCandidatesForWellAndPlate);
                }
                assert !copyCandidatesForWellAndPlate
                        .isEmpty() : "algorithm for determining minimum set of copies is incorrect";
                // Deterministic choice: the minimum copy by its natural ordering.
                Copy selectedCopy = Collections.min(copyCandidatesForWellAndPlate);
                labCherryPick.setAllocated(selectedCopy);
                if (log.isDebugEnabled()) {
                    log.debug("volume for " + labCherryPick + " allocated from " + selectedCopy);
                }
            }
        }
        // Map the unfulfillable wells back to their lab cherry picks for the caller.
        Iterable<LabCherryPick> unfulfillableLCPsIter = Iterables.transform(unfulfillableWells,
                new Function<Well, LabCherryPick>() {
                    public LabCherryPick apply(Well well) {
                        return well2lcp.get(well);
                    }
                });
        HashSet<LabCherryPick> unfulfillableLCPs = Sets.newHashSet(unfulfillableLCPsIter);
        if (log.isDebugEnabled()) {
            log.debug("unfulfillable lab cherry picks: " + unfulfillableLCPs);
        }
        return unfulfillableLCPs;
    } catch (IllegalArgumentException e) {
        // We do not allow requests for allocation of
        // multiple lab cherry picks that have the same source well. This is critical,
        // since multiple allocations of the same source well could result in
        // overdrawing reagent from the source well. This is due to the fact that
        // remaining well volume checking is based upon the remaining well volumes as
        // recorded in the database, and the implementation, above, does not currently handle
        // the case where two or more reservations are being made from the same source
        // well (though, it could be made to do so).
        throw new BusinessRuleViolationException(
                "cannot allocate lab cherry picks if source wells are not unique");
    }
}
From source file:com.eucalyptus.cluster.proxy.node.Nodes.java
/**
 * Reconciles the cluster's node map against a freshly reported node list:
 * removes nodes unseen for longer than the refresh timeout, registers newly
 * reported nodes, refreshes still-known ones, and pushes the resulting set
 * into the service configuration.
 */
public static void updateNodeInfo(ServiceConfiguration ccConfig, List<NodeType> nodes) {
    ConcurrentNavigableMap<String, NodeInfo> clusterNodeMap = lookupAny(ccConfig).getNodeMap();
    // Prepare key sets for comparison: tags we already track vs. tags just reported.
    Set<String> knownTags = Sets.newHashSet(clusterNodeMap.keySet());
    Set<String> reportedTags = Sets.newHashSet();
    for (final NodeType node : nodes) {
        reportedTags.add(node.getServiceTag());
    }
    // Compute intersections and differences.
    Set<String> unreportedTags = Sets.difference(knownTags, reportedTags); // known but missing from report
    Set<String> newTags = Sets.difference(reportedTags, knownTags); // reported for the first time
    Set<String> stillKnownTags = Sets.intersection(knownTags, reportedTags); // reported and already known
    StringBuilder nodeLog = new StringBuilder();
    // Maybe remove unreported nodes — only after they have been silent past the refresh timeout.
    for (String unreportedTag : unreportedTags) {
        NodeInfo unreportedNode = clusterNodeMap.get(unreportedTag);
        if (unreportedNode != null && (System.currentTimeMillis()
                - unreportedNode.getLastSeen().getTime()) > Nodes.REFRESH_TIMEOUT) {
            Topology.destroy(Components.lookup(ProxyNodeController.class).lookup(unreportedNode.getName()));
            NodeInfo removed = clusterNodeMap.remove(unreportedTag);
            nodeLog.append("GONE:").append(removed.getName()).append(":").append(removed.getLastState())
                    .append(" ");
        }
    }
    // Add new nodes or update existing node infos.
    Set<NodeInfo> nodesToUpdate = Sets.newHashSet();
    for (final NodeType node : nodes) {
        try {
            String serviceTag = node.getServiceTag();
            if (newTags.contains(serviceTag)) {
                // putIfAbsent + get: safe against a concurrent insert of the same tag.
                clusterNodeMap.putIfAbsent(serviceTag, new NodeInfo(ccConfig.getPartition(), node));
                NodeInfo nodeInfo = clusterNodeMap.get(serviceTag);
                nodeLog.append("NEW:").append(nodeInfo.getName()).append(":").append(nodeInfo.getLastState())
                        .append(" ");
                nodesToUpdate.add(nodeInfo);
            } else if (stillKnownTags.contains(serviceTag)) {
                NodeInfo nodeInfo = clusterNodeMap.get(serviceTag);
                nodeInfo.setIqn(node.getIqn());
                nodeLog.append("OLD:").append(nodeInfo.getName()).append(":").append(nodeInfo.getLastState())
                        .append(" ");
                nodesToUpdate.add(nodeInfo);
            }
        } catch (NoSuchElementException e) {
            LOG.error(e);
            LOG.debug(e, e);
        }
    }
    LOG.debug("Updated node info map: " + nodeLog.toString());
    try {
        Nodes.updateServiceConfiguration(ccConfig, nodesToUpdate);
    } catch (Exception e) {
        // NOTE(review): failures are logged only when the controller is NOT ENABLED —
        // presumably errors in the ENABLED state surface elsewhere; confirm intent.
        if (!Component.State.ENABLED.apply(ccConfig))
            LOG.debug("Error while updating nodes: " + e.getMessage(), e);
    }
}
From source file:edu.mit.streamjit.impl.compiler2.Storage.java
/**
 * Reports whether this Storage is fully external; that is, every connected
 * ActorGroup either reads it or writes it, but never both.
 * @return true iff this Storage is fully external
 */
public boolean isFullyExternal() {
    // Fully external <=> no group appears on both the writing and the reading side.
    final Set<?> groupsOnBothSides = Sets.intersection(upstreamGroups(), downstreamGroups());
    return groupsOnBothSides.isEmpty();
}