Example usage for com.google.common.collect Sets intersection

Introduction

On this page you can find usage examples for com.google.common.collect.Sets.intersection.

Prototype

public static <E> SetView<E> intersection(final Set<E> set1, final Set<?> set2) 

Document

Returns an unmodifiable view of the intersection of two sets.
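
For orientation, here is a minimal, self-contained sketch of a typical call; the class name and set contents are invented for the example:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.collect.Sets.SetView;

import java.util.Set;

public class IntersectionExample {
    public static void main(String[] args) {
        Set<String> first = ImmutableSet.of("a", "b", "c");
        Set<String> second = ImmutableSet.of("b", "c", "d");

        // The result is a live, unmodifiable view, not a copy. Its iteration
        // order follows the first set, and the view performs slightly better
        // when the smaller set is passed first.
        SetView<String> common = Sets.intersection(first, second);
        System.out.println(common); // [b, c]

        // Copy the view when an independent snapshot is needed.
        Set<String> snapshot = common.immutableCopy();
        System.out.println(snapshot); // [b, c]
    }
}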

Usage

From source file: grakn.core.server.kb.concept.ThingImpl.java

@Override
public Stream<Attribute<?>> keys(AttributeType... attributeTypes) {
    Set<ConceptId> attributeTypesIds = Arrays.stream(attributeTypes).map(Concept::id).collect(toSet());
    Set<ConceptId> keyTypeIds = type().keys().map(Concept::id).collect(toSet());

    if (!attributeTypesIds.isEmpty()) {
        keyTypeIds = Sets.intersection(attributeTypesIds, keyTypeIds);
    }

    if (keyTypeIds.isEmpty())
        return Stream.empty();

    return attributes(getShortcutNeighbours(true), keyTypeIds);
}
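
Note that the assignment above stores a view rather than a copy: the SetView returned by Sets.intersection stays backed by attributeTypesIds and keyTypeIds. That is harmless here, since both inputs are local and not modified afterwards, but when either input can still change, a defensive copy is the safer pattern, e.g. (assuming Guava's ImmutableSet is imported):

    keyTypeIds = ImmutableSet.copyOf(Sets.intersection(attributeTypesIds, keyTypeIds));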

From source file: edu.udo.scaffoldhunter.gui.SelectionPane.java

private void updateSelectionInView() {
    int selectedInView;

    if (window.getActiveView() != null) {
        Set<Molecule> inView = window.getActiveView().getSubset();
        Set<Molecule> intersection = Sets.intersection(selection, inView);
        selectedInView = intersection.size();
    } else {
        selectedInView = 0;
    }

    selectionInView.setText(Integer.toString(selectedInView));

    makeViewSubsetButton.setEnabled(selectedInView > 0);
}

From source file: org.hbasene.index.search.HBaseTopFieldCollector.java

private void doAppendToPQ(final Map<byte[], SortFieldDoc> docMap, final PriorityQueue<SortFieldDoc> outputPq,
        final String sortField, final int sortIndex) throws IOException {
    HTableInterface table = this.tablePool.getTable(this.indexName);
    final String sortFieldPrefix = sortField + "/"; // separator
    try {
        byte[] row = Bytes.toBytes(sortFieldPrefix);
        Result priorToFirstTerm = table.getRowOrBefore(row, FAMILY_TERMVECTOR);
        ResultScanner scanner = table
                .getScanner(this.createScan((priorToFirstTerm != null) ? priorToFirstTerm.getRow() : null));
        try {
            int index = 0;
            Result result = scanner.next();
            while (result != null) {
                String currentRow = Bytes.toString(result.getRow());
                if (currentRow.startsWith(sortFieldPrefix)) {
                    ++index;
                    NavigableMap<byte[], byte[]> columnQualifiers = result.getFamilyMap(FAMILY_TERMVECTOR);
                    SetView<byte[]> intersectionSet = Sets.intersection(columnQualifiers.keySet(),
                            docMap.keySet());
                    for (final byte[] commonDocId : intersectionSet) {
                        SortFieldDoc next = docMap.get(commonDocId);
                        next.indices[sortIndex] = index;
                        outputPq.add(next);
                    }
                    // This strategy works best when the number of unique values in the
                    // sorted field is small relative to the total number of documents
                    // in the list.
                    docMap.keySet().removeAll(intersectionSet);
                    LOG.info("Docs size after " + currentRow + " is " + docMap.size());
                    if (docMap.isEmpty()) {
                        break;
                    }
                }
                result = scanner.next();
            }
        } finally {
            scanner.close();
        }
    } finally {
        this.tablePool.putTable(table);
    }
}

From source file: blockplus.transport.VirtualClient.java

private IPosition testAI(final Colors color, final Board board, final Options options) {
    System.out.println();
    System.out.println(color);
    final Iterable<IPosition> ownLights = board.getLights(color);
    System.out.println(ownLights);
    final Set<IPosition> setOfOwnLights = Sets.newHashSet(ownLights);
    final Set<Colors> colors = board.getColors();
    colors.remove(color);
    Set<IPosition> intersections = Sets.newHashSet();
    for (final Colors otherColor : colors) {
        final Iterable<IPosition> otherLights = board.getLights(otherColor);
        final SetView<IPosition> intersection = Sets.intersection(setOfOwnLights, Sets.newHashSet(otherLights));
        intersections = Sets.union(intersections, intersection);
    }
    IPosition position = null;
    if (!intersections.isEmpty()) {
        System.out.println(intersections);
        int max = 0;
        for (final IPosition iPosition : intersections) {
            int n = 0;
            for (final Colors otherColor : colors) {
                if (Sets.newHashSet(board.getLights(otherColor)).contains(iPosition))
                    ++n;
            }
            if (n > max) {
                max = n;
                position = iPosition;
            }
        }
        System.out.println(max);
        System.out.println(position);
    }
    return position;
}
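
Because union and intersection both return lazy views, composing them in a loop as above builds up a chain of nested views that is re-walked on every membership test. If the combined set is queried many times, as in the scoring loop that follows, materializing each step once is a reasonable alternative:

    intersections = Sets.newHashSet(Sets.union(intersections, intersection));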

From source file: io.prestosql.verifier.VerifyCommand.java

public void run() {
    if (configFilename != null) {
        // Read configuration from the given file.
        System.setProperty("config", configFilename);
    }

    ImmutableList.Builder<Module> builder = ImmutableList.<Module>builder().add(new PrestoVerifierModule())
            .addAll(getAdditionalModules());

    Bootstrap app = new Bootstrap(builder.build());
    Injector injector;
    try {
        injector = app.strictConfig().initialize();
    } catch (Exception e) {
        throwIfUnchecked(e);
        throw new RuntimeException(e);
    }

    try {
        VerifierConfig config = injector.getInstance(VerifierConfig.class);
        injector.injectMembers(this);
        Set<String> supportedEventClients = injector.getInstance(Key.get(new TypeLiteral<Set<String>>() {
        }, SupportedEventClients.class));
        for (String clientType : config.getEventClients()) {
            checkArgument(supportedEventClients.contains(clientType), "Unsupported event client: %s",
                    clientType);
        }
        Set<EventClient> eventClients = injector.getInstance(Key.get(new TypeLiteral<Set<EventClient>>() {
        }));

        VerifierDao dao = Jdbi.create(getQueryDatabase(injector)).installPlugin(new SqlObjectPlugin())
                .onDemand(VerifierDao.class);

        ImmutableList.Builder<QueryPair> queriesBuilder = ImmutableList.builder();
        for (String suite : config.getSuites()) {
            queriesBuilder.addAll(dao.getQueriesBySuite(suite, config.getMaxQueries()));
        }

        List<QueryPair> queries = queriesBuilder.build();
        queries = applyOverrides(config, queries);
        queries = filterQueryTypes(new SqlParser(getParserOptions()), config, queries);
        queries = filterQueries(queries);
        if (config.getShadowWrites()) {
            Sets.SetView<QueryType> allowedTypes = Sets.union(config.getTestQueryTypes(),
                    config.getControlQueryTypes());
            checkArgument(!Sets.intersection(allowedTypes, ImmutableSet.of(CREATE, MODIFY)).isEmpty(),
                    "CREATE or MODIFY queries must be allowed in test or control to use write shadowing");
            queries = rewriteQueries(new SqlParser(getParserOptions()), config, queries);
        }

        // Load jdbc drivers if needed
        if (config.getAdditionalJdbcDriverPath() != null) {
            List<URL> urlList = getUrls(config.getAdditionalJdbcDriverPath());
            URL[] urls = new URL[urlList.size()];
            urlList.toArray(urls);
            if (config.getTestJdbcDriverName() != null) {
                loadJdbcDriver(urls, config.getTestJdbcDriverName());
            }
            if (config.getControlJdbcDriverName() != null) {
                loadJdbcDriver(urls, config.getControlJdbcDriverName());
            }
        }

        // TODO: construct this with Guice
        int numFailedQueries = new Verifier(System.out, config, eventClients).run(queries);
        System.exit((numFailedQueries > 0) ? 1 : 0);
    } catch (InterruptedException | MalformedURLException e) {
        throwIfUnchecked(e);
        throw new RuntimeException(e);
    } finally {
        try {
            injector.getInstance(LifeCycleManager.class).stop();
        } catch (Exception e) {
            throwIfUnchecked(e);
            throw new RuntimeException(e);
        }
    }
}

From source file: org.diqube.executionenv.FlattenedTableInstanceManager.java

@PostConstruct
public void initialize() {
    // Use a CountCleanupStrategy that cleans up everything that was already evicted from the cache: if something
    // was evicted, we definitely won't offer it again, since we will not use the same versionId again. Therefore we
    // can free up the count memory of those entries.
    // Additionally, we remove the counts of every version in #countCleanupCacheEntries. These are old versions; if
    // anybody still needs them, they must have flagged those elements in the cache, otherwise their entries will
    // have count 0, which will most probably lead to them being evicted from the cache on the next run.
    CountCleanupStrategy<Pair<String, String>, UUID> cacheCountCleanupStrategy = (countsForCleanup,
            allCounts) -> {
        Set<Pair<Pair<String, String>, UUID>> curCountCleanupCacheEntries = new HashSet<>();
        while (!countCleanupCacheEntries.isEmpty()) {
            try {
                curCountCleanupCacheEntries.add(countCleanupCacheEntries.pop());
            } catch (NoSuchElementException e) {
                // Swallow -> two threads concurrently traversed countCleanupCacheEntries and our thread did not
                // get another element. That's fine. (Although this will not happen currently, since CountingCache
                // synchronizes.)
            }
        }

        Set<Pair<Pair<String, String>, UUID>> res = Sets.union(countsForCleanup,
                Sets.intersection(allCounts, curCountCleanupCacheEntries));
        logger.trace("Evicting old usage counts (limit): {}", Iterables.limit(res, 100));
        return res;
    };

    MemoryConsumptionProvider<FlattenedTableInfo> cacheMemoryConsumptionProvider = info -> info
            .getFlattenedTable().calculateApproximateSizeInBytes();

    cache = new CountingCache<>(flattenedTableCacheSizeMb * 1024L * 1024L, cacheMemoryConsumptionProvider,
            cacheCountCleanupStrategy);
}

From source file: org.apache.aurora.scheduler.updater.JobDiff.java

/**
 * Calculates the diff necessary to change the current state of a job to the proposed state.
 *
 * @param taskStore Store to fetch the job's current state from.
 * @param job Job being diffed.
 * @param proposedState Proposed state to move the job towards.
 * @param scope Instances to limit the diff to.
 * @return A diff of the current state compared with {@code proposedState}, within {@code scope}.
 */
public static JobDiff compute(TaskStore taskStore, IJobKey job, Map<Integer, ITaskConfig> proposedState,
        Set<IRange> scope) {

    Map<Integer, ITaskConfig> currentState = ImmutableMap.copyOf(Maps.transformValues(
            Maps.uniqueIndex(taskStore.fetchTasks(Query.jobScoped(job).active()), Tasks::getInstanceId),
            Tasks::getConfig));

    JobDiff diff = computeUnscoped(currentState, job, proposedState);
    if (scope.isEmpty()) {
        return diff;
    } else {
        Set<Integer> limit = Numbers.rangesToInstanceIds(scope);
        Map<Integer, ITaskConfig> replaced = ImmutableMap
                .copyOf(Maps.filterKeys(diff.getReplacedInstances(), Predicates.in(limit)));
        Set<Integer> replacements = ImmutableSet
                .copyOf(Sets.intersection(diff.getReplacementInstances(), limit));

        Set<Integer> unchangedIds = ImmutableSet.copyOf(Sets.difference(
                ImmutableSet.copyOf(Sets.difference(currentState.keySet(), replaced.keySet())), replacements));
        Map<Integer, ITaskConfig> unchanged = ImmutableMap
                .copyOf(Maps.filterKeys(currentState, Predicates.in(unchangedIds)));

        return new JobDiff(replaced, replacements, unchanged);
    }
}

From source file: org.apache.sentry.provider.db.generic.service.thrift.SentryGenericPolicyProcessor.java

private boolean inAdminGroups(Set<String> requestorGroups) {
    requestorGroups = toTrimedLower(requestorGroups);
    return !Sets.intersection(adminGroups, requestorGroups).isEmpty();
}

From source file: net.sourceforge.fenixedu.domain.accessControl.StudentGroup.java

@Override
public Set<User> getMembers() {
    if (executionCourse != null) {
        if (degree == null && degreeType == null && campus == null) {
            return registrationsToUsers(getCourseBasedRegistrations(executionCourse));
        } else {
            return registrationsToUsers(Sets.intersection(getCourseBasedRegistrations(executionCourse),
                    getDegreeBasedRegistrations()));
        }
    } else if (campus != null) {
        return registrationsToUsers(getCampusBasedRegistrations());
    } else {
        return registrationsToUsers(getDegreeBasedRegistrations());
    }
}

From source file: org.apache.druid.segment.indexing.DataSchema.java

@JsonIgnore
public InputRowParser getParser() {
    if (parser == null) {
        log.warn("No parser has been specified");
        return null;
    }

    if (cachedParser != null) {
        return cachedParser;
    }

    final InputRowParser inputRowParser = transformSpec
            .decorate(jsonMapper.convertValue(this.parser, InputRowParser.class));

    final Set<String> dimensionExclusions = Sets.newHashSet();
    for (AggregatorFactory aggregator : aggregators) {
        dimensionExclusions.addAll(aggregator.requiredFields());
        dimensionExclusions.add(aggregator.getName());
    }

    if (inputRowParser.getParseSpec() != null) {
        final DimensionsSpec dimensionsSpec = inputRowParser.getParseSpec().getDimensionsSpec();
        final TimestampSpec timestampSpec = inputRowParser.getParseSpec().getTimestampSpec();

        // exclude timestamp from dimensions by default, unless explicitly included in the list of dimensions
        if (timestampSpec != null) {
            final String timestampColumn = timestampSpec.getTimestampColumn();
            // Guard against a null dimensionsSpec, consistent with the null check below.
            if (dimensionsSpec == null || !(dimensionsSpec.hasCustomDimensions()
                    && dimensionsSpec.getDimensionNames().contains(timestampColumn))) {
                dimensionExclusions.add(timestampColumn);
            }
        }
        if (dimensionsSpec != null) {
            final Set<String> metSet = Sets.newHashSet();
            for (AggregatorFactory aggregator : aggregators) {
                metSet.add(aggregator.getName());
            }
            final Set<String> dimSet = Sets.newHashSet(dimensionsSpec.getDimensionNames());
            final Set<String> overlap = Sets.intersection(metSet, dimSet);
            if (!overlap.isEmpty()) {
                throw new IAE(
                        "Cannot have overlapping dimensions and metrics of the same name. Please change the name of the metric. Overlap: %s",
                        overlap);
            }

            cachedParser = inputRowParser.withParseSpec(inputRowParser.getParseSpec().withDimensionsSpec(
                    dimensionsSpec.withDimensionExclusions(Sets.difference(dimensionExclusions, dimSet))));
        } else {
            cachedParser = inputRowParser;
        }
    } else {
        log.warn("No parseSpec in parser has been specified.");
        cachedParser = inputRowParser;
    }

    return cachedParser;
}
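
Using intersection for the validation above has the advantage that the overlapping names themselves are available for the error message; a plain disjointness test such as Collections.disjoint(metSet, dimSet) would only report that some overlap exists.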