Example usage for com.google.common.collect Multimap isEmpty

Introduction

On this page you can find example usage for com.google.common.collect Multimap isEmpty.

Prototype

boolean isEmpty();

Document

Returns true if this multimap contains no key-value pairs.
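
In other words, a Multimap is empty exactly when it holds no key-value pairs; because a key exists only while it maps to at least one value, isEmpty() returning true also implies that keySet(), values(), and entries() are all empty. Before the real-world examples below, here is a minimal, self-contained sketch of that contract (the class and variable names are illustrative only, not taken from the projects below):

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class MultimapIsEmptyDemo {
    public static void main(String[] args) {
        Multimap<String, Integer> scores = ArrayListMultimap.create();
        System.out.println(scores.isEmpty()); // true: no key-value pairs yet

        scores.put("alice", 1);
        System.out.println(scores.isEmpty()); // false

        // Removing the last value associated with a key also removes the key,
        // so the multimap reports empty again.
        scores.remove("alice", 1);
        System.out.println(scores.isEmpty()); // true
    }
}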

Usage

From source file: com.google.devtools.build.lib.query2.RdepsBoundedVisitor.java

@Override
protected Visit getVisitResult(Iterable<DepAndRdepAtDepth> depAndRdepAtDepths) throws InterruptedException {
    Map<SkyKey, Integer> shallowestRdepDepthMap = new HashMap<>();
    depAndRdepAtDepths.forEach(depAndRdepAtDepth -> shallowestRdepDepthMap
            .merge(depAndRdepAtDepth.depAndRdep.rdep, depAndRdepAtDepth.rdepDepth, Integer::min));

    Collection<SkyKey> validRdeps = new ArrayList<>();

    // Multimap of dep to all the reverse deps in this visitation. Used to filter out the
    // disallowed deps.
    Multimap<SkyKey, SkyKey> reverseDepMultimap = ArrayListMultimap.create();
    for (DepAndRdepAtDepth depAndRdepAtDepth : depAndRdepAtDepths) {
        // The "roots" of our visitation (see #preprocessInitialVisit) have a null 'dep' field.
        if (depAndRdepAtDepth.depAndRdep.dep == null) {
            validRdeps.add(depAndRdepAtDepth.depAndRdep.rdep);
        } else {
            reverseDepMultimap.put(depAndRdepAtDepth.depAndRdep.dep, depAndRdepAtDepth.depAndRdep.rdep);
        }
    }

    Multimap<SkyKey, SkyKey> packageKeyToTargetKeyMap = env
            .makePackageKeyToTargetKeyMap(Iterables.concat(reverseDepMultimap.values()));
    Set<PackageIdentifier> pkgIdsNeededForTargetification = packageKeyToTargetKeyMap.keySet().stream()
            .map(SkyQueryEnvironment.PACKAGE_SKYKEY_TO_PACKAGE_IDENTIFIER).collect(toImmutableSet());
    packageSemaphore.acquireAll(pkgIdsNeededForTargetification);

    try {
        // Filter out disallowed deps. We cannot defer the targetification any further as we do not
        // want to retrieve the rdeps of unwanted nodes (targets).
        if (!reverseDepMultimap.isEmpty()) {
            Collection<Target> filteredTargets = env.filterRawReverseDepsOfTransitiveTraversalKeys(
                    reverseDepMultimap.asMap(), packageKeyToTargetKeyMap);
            filteredTargets.stream().map(SkyQueryEnvironment.TARGET_TO_SKY_KEY).forEachOrdered(validRdeps::add);
        }
    } finally {
        packageSemaphore.releaseAll(pkgIdsNeededForTargetification);
    }

    ImmutableList<SkyKey> uniqueValidRdeps = validRdeps.stream().filter(validRdep -> validRdepMinDepthUniquifier
            .uniqueAtDepthLessThanOrEqualTo(validRdep, shallowestRdepDepthMap.get(validRdep)))
            .collect(ImmutableList.toImmutableList());

    // Don't bother getting the rdeps of the rdeps that are already at the depth bound.
    Iterable<SkyKey> uniqueValidRdepsBelowDepthBound = Iterables.filter(uniqueValidRdeps,
            uniqueValidRdep -> shallowestRdepDepthMap.get(uniqueValidRdep) < depth);

    // Retrieve the reverse deps as SkyKeys and defer the targetification and filtering to next
    // recursive visitation.
    Map<SkyKey, Iterable<SkyKey>> unfilteredRdepsOfRdeps = env.graph
            .getReverseDeps(uniqueValidRdepsBelowDepthBound);

    ImmutableList.Builder<DepAndRdepAtDepth> depAndRdepAtDepthsToVisitBuilder = ImmutableList.builder();
    unfilteredRdepsOfRdeps.entrySet().forEach(entry -> {
        SkyKey rdep = entry.getKey();
        int depthOfRdepOfRdep = shallowestRdepDepthMap.get(rdep) + 1;
        Streams.stream(entry.getValue()).filter(Predicates.and(SkyQueryEnvironment.IS_TTV, universe))
                .forEachOrdered(rdepOfRdep -> {
                    depAndRdepAtDepthsToVisitBuilder
                            .add(new DepAndRdepAtDepth(new DepAndRdep(rdep, rdepOfRdep), depthOfRdepOfRdep));
                });
    });

    return new Visit(/*keysToUseForResult=*/ uniqueValidRdeps,
            /*keysToVisit=*/ depAndRdepAtDepthsToVisitBuilder.build());
}

From source file: org.apache.accumulo.examples.wikisearch.parser.FieldIndexQueryReWriter.java

private RewriterTreeNode removeNonIndexedTerms(RewriterTreeNode root, Multimap<String, String> indexedTerms)
        throws Exception {
    // public void removeNonIndexedTerms(BooleanLogicTreeNodeJexl myroot, String indexedTerms) throws Exception {
    if (indexedTerms.isEmpty()) {
        throw new Exception("removeNonIndexedTerms, indexed Terms empty");
    }

    // NOTE: doing a depth first enumeration didn't work when I started
    // removing nodes halfway through. The following method does work,
    // it's essentially a reverse breadth first traversal.
    List<RewriterTreeNode> nodes = new ArrayList<RewriterTreeNode>();
    Enumeration<?> bfe = root.breadthFirstEnumeration();

    while (bfe.hasMoreElements()) {
        RewriterTreeNode node = (RewriterTreeNode) bfe.nextElement();
        nodes.add(node);
    }

    // walk backwards
    for (int i = nodes.size() - 1; i >= 0; i--) {
        RewriterTreeNode node = nodes.get(i);
        if (log.isDebugEnabled()) {
            log.debug("removeNonIndexedTerms, analyzing node: " + node.toString() + "  " + node.printNode());
        }
        if (node.getType() == ParserTreeConstants.JJTANDNODE
                || node.getType() == ParserTreeConstants.JJTORNODE) {
            // If all of your children are gone, AND/OR has no purpose, remove
            if (node.getChildCount() == 0) {
                node.removeFromParent();

                // If AND/OR has only 1 child, attach it to the parent directly.
            } else if (node.getChildCount() == 1) {
                RewriterTreeNode p = (RewriterTreeNode) node.getParent();
                RewriterTreeNode c = (RewriterTreeNode) node.getFirstChild();
                node.removeFromParent();
                p.add(c);
            }
        } else if (node.getType() == ParserTreeConstants.JJTJEXLSCRIPT) { // Head node
            // If head node has no children, we have nothing to search on.
            if (node.getChildCount() == 0) {
                throw new Exception();
            }
        } else if (rangeNodeSet.contains(node.getType())) { // leave it alone
            // leave ranges untouched, they'll be handled elsewhere.
            continue;
        } else {
            if (log.isDebugEnabled()) {
                log.debug(
                        "removeNonIndexedTerms, Testing: " + node.getFieldName() + ":" + node.getFieldValue());
            }

            if (!indexedTerms
                    .containsKey(node.getFieldName().toString() + ":" + node.getFieldValue().toString())) {
                if (log.isDebugEnabled()) {
                    log.debug(node.getFieldName() + ":" + node.getFieldValue() + " is NOT indexed");
                }
                node.removeFromParent();
            } else {
                if (log.isDebugEnabled()) {
                    log.debug(node.getFieldName() + ":" + node.getFieldValue() + " is indexed");
                }
            }
        }
    }

    return root;
}

From source file: org.eclipse.xtext.xtext.XtextValidator.java

@Check
public void checkRuleName(AbstractRule rule) {
    final Grammar grammar = GrammarUtil.getGrammar(rule);
    Multimap<String, AbstractRule> rules = getAllRules(grammar, rule.getName());
    rules.remove(rule.getName(), rule);
    if (!rules.isEmpty()) {
        TreeSet<String> names = Sets.newTreeSet(rules.keySet());
        if (names.size() == 1) {
            String name = names.first();
            if (name.equals(rule.getName())) {
                final String message = "A rule's name has to be unique.";
                error(message, XtextPackage.Literals.ABSTRACT_RULE__NAME);
                return;
            } else {
                String message = "A rule's name has to be unique even case insensitive.";
                boolean superGrammar = false;
                for (AbstractRule otherRule : rules.get(name)) {
                    if (GrammarUtil.getGrammar(otherRule) != grammar) {
                        message = message + " A used grammar contains another rule '" + name + "'.";
                        superGrammar = true;
                        break;
                    }
                }
                if (!superGrammar)
                    message = message + " This grammar contains another rule '" + name + "'.";
                error(message, XtextPackage.Literals.ABSTRACT_RULE__NAME);
                return;
            }
        } else {
            String message = "A rule's name has to be unique even case insensitive.";
            final StringBuilder builder = new StringBuilder((rule.getName().length() + 4) * names.size() - 2);
            int i = 0;
            for (String name : names) {
                if (builder.length() != 0) {
                    if (i < names.size() - 1)
                        builder.append(", ");
                    else
                        builder.append(" and ");
                }
                i++;
                builder.append("'").append(name).append("'");
            }
            error(message + " The conflicting rules are " + builder + ".",
                    XtextPackage.Literals.ABSTRACT_RULE__NAME);
            return;
        }
    }
    if (SuperCallScope.SUPER.equals(rule.getName())) {
        addIssue("Discouraged rule name 'super'", rule, XtextPackage.Literals.ABSTRACT_RULE__NAME,
                DISCOURAGED_RULE_NAME);
    }
}

From source file: com.bigdata.dastor.locator.AbstractReplicationStrategy.java

/**
 * returns multimap of {live destination: ultimate targets}, where if target is not the same
 * as the destination, it is a "hinted" write, and will need to be sent to
 * the ultimate target when it becomes alive again.
 */
public Multimap<InetAddress, InetAddress> getHintedEndpoints(String table, Collection<InetAddress> targets) {
    Multimap<InetAddress, InetAddress> map = HashMultimap.create(targets.size(), 1);

    IEndPointSnitch endPointSnitch = DatabaseDescriptor.getEndPointSnitch(table);

    // first, add the live endpoints
    for (InetAddress ep : targets) {
        if (FailureDetector.instance.isAlive(ep))
            map.put(ep, ep);
    }

    // if everything was alive or we're not doing HH on this keyspace, stop with just the live nodes
    if (map.size() == targets.size() || !StorageProxy.isHintedHandoffEnabled())
        return map;

    // assign dead endpoints to be hinted to the closest live one, or to the local node
    // (since it is trivially the closest) if none are alive.  This way, the cost of doing
    // a hint is only adding the hint header, rather than doing a full extra write, if any
    // destination nodes are alive.
    //
    // we do a 2nd pass on targets instead of using temporary storage,
    // to optimize for the common case (everything was alive).
    InetAddress localAddress = FBUtilities.getLocalAddress();
    for (InetAddress ep : targets) {
        if (map.containsKey(ep))
            continue;

        InetAddress destination = map.isEmpty() ? localAddress
                : endPointSnitch.getSortedListByProximity(localAddress, map.keySet()).get(0);
        map.put(destination, ep);
    }

    return map;
}

From source file: net.minecraftforge.registries.GameData.java

@SuppressWarnings({ "unchecked", "rawtypes" })
public static Multimap<ResourceLocation, ResourceLocation> injectSnapshot(
        Map<ResourceLocation, ForgeRegistry.Snapshot> snapshot, boolean injectFrozenData,
        boolean isLocalWorld) {
    FMLLog.log.info("Injecting existing registry data into this {} instance",
            FMLCommonHandler.instance().getEffectiveSide().isServer() ? "server" : "client");
    RegistryManager.ACTIVE.registries.forEach((name, reg) -> reg.validateContent(name));
    RegistryManager.ACTIVE.registries.forEach((name, reg) -> reg.dump(name));
    RegistryManager.ACTIVE.registries.forEach((name, reg) -> reg.resetDelegates());

    List<ResourceLocation> missingRegs = snapshot.keySet().stream()
            .filter(name -> !RegistryManager.ACTIVE.registries.containsKey(name)).collect(Collectors.toList());
    if (missingRegs.size() > 0) {
        String text = "Forge Mod Loader detected missing/unknown registries.\n\n" + "There are "
                + missingRegs.size() + " missing registries in this save.\n"
                + "If you continue the missing registries will get removed.\n"
                + "This may cause issues, it is advised that you create a world backup before continuing.\n\n"
                + "Missing Registries:\n";

        for (ResourceLocation s : missingRegs)
            text += s.toString() + "\n";

        if (!StartupQuery.confirm(text))
            StartupQuery.abort();
    }

    RegistryManager STAGING = new RegistryManager("STAGING");

    final Map<ResourceLocation, Map<ResourceLocation, Integer[]>> remaps = Maps.newHashMap();
    final LinkedHashMap<ResourceLocation, Map<ResourceLocation, Integer>> missing = Maps.newLinkedHashMap();
    // Load the snapshot into the "STAGING" registry
    snapshot.forEach((key, value) -> {
        final Class<? extends IForgeRegistryEntry> clazz = RegistryManager.ACTIVE.getSuperType(key);
        remaps.put(key, Maps.newLinkedHashMap());
        missing.put(key, Maps.newHashMap());
        loadPersistentDataToStagingRegistry(RegistryManager.ACTIVE, STAGING, remaps.get(key), missing.get(key),
                key, value, clazz);
    });

    snapshot.forEach((key, value) -> {
        value.dummied.forEach(dummy -> {
            Map<ResourceLocation, Integer> m = missing.get(key);
            ForgeRegistry<?> reg = STAGING.getRegistry(key);

            // Currently missing locally, we just inject and carry on
            if (m.containsKey(dummy)) {
                if (reg.markDummy(dummy, m.get(dummy)))
                    m.remove(dummy);
            } else if (isLocalWorld) {
                if (ForgeRegistry.DEBUG)
                    FMLLog.log.debug("Registry {}: Resuscitating dummy entry {}", key, dummy);
            } else {
                // The server believes this is a dummy block identity, but we seem to have one locally. This is likely a conflict
                // in mod setup - Mark this entry as a dummy
                int id = reg.getID(dummy);
                FMLLog.log.warn(
                        "Registry {}: The ID {} is currently locally mapped - it will be replaced with a dummy for this session",
                        key, id);
                reg.markDummy(dummy, id);
            }
        });
    });

    int count = missing.values().stream().mapToInt(Map::size).sum();
    if (count > 0) {
        FMLLog.log.debug("There are {} mappings missing - attempting a mod remap", count);
        Multimap<ResourceLocation, ResourceLocation> defaulted = ArrayListMultimap.create();
        Multimap<ResourceLocation, ResourceLocation> failed = ArrayListMultimap.create();

        missing.entrySet().stream().filter(e -> e.getValue().size() > 0).forEach(m -> {
            ResourceLocation name = m.getKey();
            ForgeRegistry<?> reg = STAGING.getRegistry(name);
            RegistryEvent.MissingMappings<?> event = reg.getMissingEvent(name, m.getValue());
            MinecraftForge.EVENT_BUS.post(event);

            List<MissingMappings.Mapping<?>> lst = event.getAllMappings().stream()
                    .filter(e -> e.getAction() == MissingMappings.Action.DEFAULT).collect(Collectors.toList());
            if (!lst.isEmpty()) {
                FMLLog.log.error("Unidentified mapping from registry {}", name);
                lst.forEach(map -> {
                    FMLLog.log.error("    {}: {}", map.key, map.id);
                    if (!isLocalWorld)
                        defaulted.put(name, map.key);
                });
            }
            event.getAllMappings().stream().filter(e -> e.getAction() == MissingMappings.Action.FAIL)
                    .forEach(fail -> failed.put(name, fail.key));

            final Class<? extends IForgeRegistryEntry> clazz = RegistryManager.ACTIVE.getSuperType(name);
            processMissing(clazz, name, STAGING, event, m.getValue(), remaps.get(name), defaulted.get(name),
                    failed.get(name));
        });

        if (!defaulted.isEmpty() && !isLocalWorld)
            return defaulted;

        if (!defaulted.isEmpty()) {
            StringBuilder buf = new StringBuilder();
            buf.append("Forge Mod Loader detected missing registry entries.\n\n").append("There are ")
                    .append(defaulted.size()).append(" missing entries in this save.\n")
                    .append("If you continue the missing entries will get removed.\n")
                    .append("A world backup will be automatically created in your saves directory.\n\n");

            defaulted.asMap().forEach((name, entries) -> {
                buf.append("Missing ").append(name).append(":\n");
                entries.forEach(rl -> buf.append("    ").append(rl).append("\n"));
            });

            boolean confirmed = StartupQuery.confirm(buf.toString());
            if (!confirmed)
                StartupQuery.abort();

            try {
                String skip = System.getProperty("fml.doNotBackup");
                if (skip == null || !"true".equals(skip)) {
                    ZipperUtil.backupWorld();
                } else {
                    for (int x = 0; x < 10; x++)
                        FMLLog.log.error("!!!!!!!!!! UPDATING WORLD WITHOUT DOING BACKUP !!!!!!!!!!!!!!!!");
                }
            } catch (IOException e) {
                StartupQuery.notify("The world backup couldn't be created.\n\n" + e);
                StartupQuery.abort();
            }
        }

        if (!defaulted.isEmpty()) {
            if (isLocalWorld)
                FMLLog.log.error(
                        "There are unidentified mappings in this world - we are going to attempt to process anyway");
        }

    }

    if (injectFrozenData) {
        // If we're loading from disk, we can actually substitute air in the block map for anything that is otherwise "missing". This keeps the reference in the map, in case
        // the block comes back later
        missing.forEach((name, m) -> {
            ForgeRegistry<?> reg = STAGING.getRegistry(name);
            m.forEach((rl, id) -> reg.markDummy(rl, id));
        });

        // If we're loading up the world from disk, we want to add in the new data that might have been provisioned by mods
        // So we load it from the frozen persistent registry
        RegistryManager.ACTIVE.registries.forEach((name, reg) -> {
            final Class<? extends IForgeRegistryEntry> clazz = RegistryManager.ACTIVE.getSuperType(name);
            loadFrozenDataToStagingRegistry(STAGING, name, remaps.get(name), clazz);
        });
    }

    // Validate that all the STAGING data is good
    STAGING.registries.forEach((name, reg) -> reg.validateContent(name));

    // Load the STAGING registry into the ACTIVE registry
    for (Map.Entry<ResourceLocation, ForgeRegistry<? extends IForgeRegistryEntry<?>>> r : RegistryManager.ACTIVE.registries
            .entrySet()) {
        final Class<? extends IForgeRegistryEntry> registrySuperType = RegistryManager.ACTIVE
                .getSuperType(r.getKey());
        loadRegistry(r.getKey(), STAGING, RegistryManager.ACTIVE, registrySuperType, true);
    }

    // Dump the active registry
    RegistryManager.ACTIVE.registries.forEach((name, reg) -> reg.dump(name));

    // Tell mods that the ids have changed
    Loader.instance().fireRemapEvent(remaps, false);

    // The id map changed, ensure we apply object holders
    ObjectHolderRegistry.INSTANCE.applyObjectHolders();

    // Return an empty multimap, because we're good
    return ArrayListMultimap.create();
}

From source file: io.datakernel.logfs.LogToCubeMetadataStorageSql.java

private void saveCommit(final String log, final Map<AggregationMetadata, String> idMap,
        final Map<String, LogPosition> oldPositions, final Map<String, LogPosition> newPositions,
        final Multimap<AggregationMetadata, AggregationChunk.NewChunk> newChunks) {
    cubeMetadataStorage.executeExclusiveTransaction(new TransactionalRunnable() {
        @Override
        public void run(Configuration configuration) throws Exception {
            DSLContext jooq = DSL.using(configuration);

            for (String partition : newPositions.keySet()) {
                LogPosition logPosition = newPositions.get(partition);
                logger.info("Finished reading log '{}' for partition '{}' at position {}", log, partition,
                        logPosition);

                if (logPosition.getLogFile() == null)
                    continue;

                jooq.insertInto(AGGREGATION_DB_LOG)
                        .set(new AggregationDbLogRecord(log, partition, logPosition.getLogFile().getName(),
                                logPosition.getLogFile().getN(), logPosition.getPosition()))
                        .onDuplicateKeyUpdate().set(AGGREGATION_DB_LOG.FILE, logPosition.getLogFile().getName())
                        .set(AGGREGATION_DB_LOG.FILE_INDEX, logPosition.getLogFile().getN())
                        .set(AGGREGATION_DB_LOG.POSITION, logPosition.getPosition()).execute();
            }

            if (!newChunks.isEmpty())
                cubeMetadataStorage.doSaveNewChunks(jooq, idMap, newChunks);
        }
    });
}

From source file: com.palantir.atlasdb.cli.command.CleanTransactionRange.java

@Override
public int execute(AtlasDbServices services) {
    long immutable = services.getTransactionManager().getImmutableTimestamp();
    TimestampService ts = services.getTimestampService();
    if (!isValid(immutable, ts)) {
        return 1;
    }

    PersistentTimestampService pts = (PersistentTimestampService) ts;
    KeyValueService kvs = services.getKeyValueService();

    byte[] startRowInclusive = RangeRequests
            .nextLexicographicName(TransactionConstants.getValueForTimestamp(startTimestampExclusive));
    ClosableIterator<RowResult<Value>> range = kvs.getRange(TransactionConstants.TRANSACTION_TABLE,
            RangeRequest.builder().startRowInclusive(startRowInclusive).build(), Long.MAX_VALUE);

    Multimap<Cell, Long> toDelete = HashMultimap.create();
    long maxTimestamp = startTimestampExclusive;
    while (range.hasNext()) {
        RowResult<Value> row = range.next();
        byte[] rowName = row.getRowName();
        long startResult = TransactionConstants.getTimestampForValue(rowName);
        maxTimestamp = Math.max(maxTimestamp, startResult);

        Value value;
        try {
            value = row.getOnlyColumnValue();
        } catch (IllegalStateException e) {
            //this should never happen
            System.out.printf(
                    "Error: Found a row in the transactions table that didn't have 1 and only 1 column value: start=%d%n",
                    startResult);
            continue;
        }

        long endResult = TransactionConstants.getTimestampForValue(value.getContents());
        maxTimestamp = Math.max(maxTimestamp, endResult);
        System.out.printf("Found and cleaning possibly inconsistent transaction: [start=%d, commit=%d]%n",
                startResult, endResult);

        Cell key = Cell.create(rowName, TransactionConstants.COMMIT_TS_COLUMN);
        toDelete.put(key, value.getTimestamp()); //value.getTimestamp() should always be 0L but this is safer
    }

    if (!toDelete.isEmpty()) {
        kvs.delete(TransactionConstants.TRANSACTION_TABLE, toDelete);
        System.out.println("Delete completed.");

        pts.fastForwardTimestamp(maxTimestamp + 1);
        System.out.printf("Timestamp succesfully forwarded past all cleaned/deleted transactions to %d%n",
                maxTimestamp);
    } else {
        System.out.println("Found no transactions inside the given range to clean up or delete.");
    }

    return 0;
}

From source file: com.google.devtools.build.lib.query2.SkyQueryEnvironment.java

/**
 * Returns FileValue keys for which there may be relevant (from the perspective of {@link
 * #getRBuildFiles}) FileValues in the graph corresponding to the given {@code pathFragments},
 which are assumed to be file paths.
 *
 * <p>To do this, we emulate the {@link ContainingPackageLookupFunction} logic: for each given
 * file path, we look for the nearest ancestor directory (starting with its parent directory), if
 * any, that has a package. The {@link PackageLookupValue} for this package tells us the package
 * root that we should use for the {@link RootedPath} for the {@link FileValue} key.
 *
 * <p>Note that there may not be nodes in the graph corresponding to the returned SkyKeys.
 */
Collection<SkyKey> getSkyKeysForFileFragments(Iterable<PathFragment> pathFragments)
        throws InterruptedException {
    Set<SkyKey> result = new HashSet<>();
    Multimap<PathFragment, PathFragment> currentToOriginal = ArrayListMultimap.create();
    for (PathFragment pathFragment : pathFragments) {
        currentToOriginal.put(pathFragment, pathFragment);
    }
    while (!currentToOriginal.isEmpty()) {
        Multimap<SkyKey, PathFragment> packageLookupKeysToOriginal = ArrayListMultimap.create();
        Multimap<SkyKey, PathFragment> packageLookupKeysToCurrent = ArrayListMultimap.create();
        for (Entry<PathFragment, PathFragment> entry : currentToOriginal.entries()) {
            PathFragment current = entry.getKey();
            PathFragment original = entry.getValue();
            for (SkyKey packageLookupKey : getPkgLookupKeysForFile(original, current)) {
                packageLookupKeysToOriginal.put(packageLookupKey, original);
                packageLookupKeysToCurrent.put(packageLookupKey, current);
            }
        }
        Map<SkyKey, SkyValue> lookupValues = graph.getSuccessfulValues(packageLookupKeysToOriginal.keySet());
        for (Map.Entry<SkyKey, SkyValue> entry : lookupValues.entrySet()) {
            SkyKey packageLookupKey = entry.getKey();
            PackageLookupValue packageLookupValue = (PackageLookupValue) entry.getValue();
            if (packageLookupValue.packageExists()) {
                Collection<PathFragment> originalFiles = packageLookupKeysToOriginal.get(packageLookupKey);
                Preconditions.checkState(!originalFiles.isEmpty(), entry);
                for (PathFragment fileName : originalFiles) {
                    result.add(FileValue.key(RootedPath.toRootedPath(packageLookupValue.getRoot(), fileName)));
                }
                for (PathFragment current : packageLookupKeysToCurrent.get(packageLookupKey)) {
                    currentToOriginal.removeAll(current);
                }
            }
        }
        Multimap<PathFragment, PathFragment> newCurrentToOriginal = ArrayListMultimap.create();
        for (PathFragment pathFragment : currentToOriginal.keySet()) {
            PathFragment parent = pathFragment.getParentDirectory();
            if (parent != null) {
                newCurrentToOriginal.putAll(parent, currentToOriginal.get(pathFragment));
            }
        }
        currentToOriginal = newCurrentToOriginal;
    }
    return result;
}

From source file: ai.grakn.graql.internal.reasoner.atom.binary.Relation.java

/**
 * infer relation types that this relation atom can potentially have
 * NB: entity types and role types are treated separately as they behave differently:
 * entity types only play the explicitly defined roles (not the relevant part of the hierarchy of the specified role)
 * @return list of relation types this atom can have ordered by the number of compatible role types
 */
public List<RelationType> inferPossibleRelationTypes(Answer sub) {
    //look at available role types
    Multimap<RelationType, RoleType> compatibleTypesFromRoles = getCompatibleRelationTypesWithRoles(
            getExplicitRoleTypes(), new RoleTypeConverter());

    //look at entity types
    Map<Var, Type> varTypeMap = getParentQuery().getVarTypeMap();

    //explicit types
    Set<Type> types = getRolePlayers().stream().filter(varTypeMap::containsKey).map(varTypeMap::get)
            .collect(toSet());

    //types deduced from substitution
    inferEntityTypes(sub).forEach(types::add);

    Multimap<RelationType, RoleType> compatibleTypesFromTypes = getCompatibleRelationTypesWithRoles(types,
            new TypeConverterImpl());

    Multimap<RelationType, RoleType> compatibleTypes;
    //intersect relation types from roles and types
    if (compatibleTypesFromRoles.isEmpty()) {
        compatibleTypes = compatibleTypesFromTypes;
    } else if (!compatibleTypesFromTypes.isEmpty()) {
        compatibleTypes = multimapIntersection(compatibleTypesFromTypes, compatibleTypesFromRoles);
    } else {
        compatibleTypes = compatibleTypesFromRoles;
    }

    return compatibleTypes.asMap().entrySet().stream().sorted(Comparator.comparing(e -> -e.getValue().size()))
            .map(Map.Entry::getKey)
            .filter(t -> Sets.intersection(getSuperTypes(t), compatibleTypes.keySet()).isEmpty())
            .collect(Collectors.toList());
}

From source file: grakn.core.graql.reasoner.atom.binary.RelationAtom.java

/**
 * @return a map of relations and corresponding roles that could be played by this atom
 */
private Multimap<RelationType, Role> inferPossibleRelationConfigurations(ConceptMap sub) {
    Set<Role> roles = getExplicitRoles().filter(r -> !Schema.MetaSchema.isMetaLabel(r.label()))
            .collect(toSet());
    SetMultimap<Variable, Type> varTypeMap = getParentQuery().getVarTypeMap(sub);
    Set<Type> types = getRolePlayers().stream().filter(varTypeMap::containsKey)
            .flatMap(v -> varTypeMap.get(v).stream()).collect(toSet());

    if (roles.isEmpty() && types.isEmpty()) {
        RelationType metaRelationType = tx().getMetaRelationType();
        Multimap<RelationType, Role> compatibleTypes = HashMultimap.create();
        metaRelationType.subs().filter(rt -> !rt.equals(metaRelationType))
                .forEach(rt -> compatibleTypes.putAll(rt, rt.roles().collect(toSet())));
        return compatibleTypes;
    }

    //intersect relation types from roles and types
    Multimap<RelationType, Role> compatibleTypes;

    Multimap<RelationType, Role> compatibleTypesFromRoles = ReasonerUtils
            .compatibleRelationTypesWithRoles(roles, new RoleConverter());
    Multimap<RelationType, Role> compatibleTypesFromTypes = ReasonerUtils
            .compatibleRelationTypesWithRoles(types, new TypeConverter());

    if (roles.isEmpty()) {
        compatibleTypes = compatibleTypesFromTypes;
    }
    //no types from roles -> roles correspond to mutually exclusive relations
    else if (compatibleTypesFromRoles.isEmpty() || types.isEmpty()) {
        compatibleTypes = compatibleTypesFromRoles;
    } else {
        compatibleTypes = ReasonerUtils.multimapIntersection(compatibleTypesFromTypes,
                compatibleTypesFromRoles);
    }
    return compatibleTypes;
}