Example usage for com.google.common.collect Multimap asMap

List of usage examples for com.google.common.collect Multimap asMap

Introduction

On this page you can find example usage for com.google.common.collect Multimap.asMap.

Prototype

Map<K, Collection<V>> asMap();

Document

Returns a view of this multimap as a Map from each distinct key to the nonempty collection of that key's associated values.
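
Since asMap() returns a Map<K, Collection<V>> view, the pattern shared by nearly every example below is to iterate asMap().entrySet() and process each key together with its full value collection. A minimal, self-contained sketch of that pattern follows; it assumes only Guava on the classpath, and the class name and sample data are hypothetical:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

import java.util.Collection;
import java.util.Map;

public class MultimapAsMapExample {
    public static void main(String[] args) {
        Multimap<String, Integer> scores = HashMultimap.create();
        scores.put("alice", 90);
        scores.put("alice", 85);
        scores.put("bob", 70);

        // asMap() exposes the multimap as a Map<K, Collection<V>>;
        // each distinct key maps to the nonempty collection of its values.
        for (Map.Entry<String, Collection<Integer>> entry : scores.asMap().entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue().size() + " value(s)");
        }

        // The map is a live view: removing all of a key's values
        // through the multimap also removes the key from the view.
        scores.removeAll("bob");
        System.out.println(scores.asMap().containsKey("bob")); // prints: false
    }
}

Because asMap() is a view rather than a copy, later changes to the multimap are reflected in it, which is why the examples below can safely build the multimap first and read it through asMap() afterwards.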

Usage

From source file:org.cinchapi.concourse.server.storage.db.SearchRecord.java

/**
 * Return the Set of primary keys for records that match {@code query}.
 * @param query
 * @return the Set of PrimaryKeys
 */
public Set<PrimaryKey> search(Text query) {
    read.lock();
    try {
        Multimap<PrimaryKey, Integer> reference = HashMultimap.create();
        String[] toks = query.toString().toLowerCase()
                .split(TStrings.REGEX_GROUP_OF_ONE_OR_MORE_WHITESPACE_CHARS);
        boolean initial = true;
        int offset = 0;
        for (String tok : toks) {
            Multimap<PrimaryKey, Integer> temp = HashMultimap.create();
            if (STOPWORDS.contains(tok)) {
                // When skipping a stop word, we must record an offset to
                // correctly determine if the next term match is in the
                // correct relative position to the previous term match
                offset++;
                continue;
            }
            Set<Position> positions = get(Text.wrap(tok));
            for (Position position : positions) {
                PrimaryKey key = position.getPrimaryKey();
                int pos = position.getIndex();
                if (initial) {
                    temp.put(key, pos);
                } else {
                    for (int current : reference.get(key)) {
                        if (pos == current + 1 + offset) {
                            temp.put(key, pos);
                        }
                    }
                }
            }
            initial = false;
            reference = temp;
            offset = 0;
        }

        // Result Scoring: Scoring is simply the number of times the query
        // appears in a document [i.e. the number of Positions mapped from
        // key: #reference.get(key).size()]. The total number of positions
        // in #reference is equal to the total number of times a document
        // appears in the corpus [i.e. reference.asMap().values().size()].
        Multimap<Integer, PrimaryKey> sorted = TreeMultimap.create(Collections.<Integer>reverseOrder(),
                PrimaryKey.Sorter.INSTANCE);
        for (Entry<PrimaryKey, Collection<Integer>> entry : reference.asMap().entrySet()) {
            sorted.put(entry.getValue().size(), entry.getKey());
        }
        return Sets.newLinkedHashSet(sorted.values());
    } finally {
        read.unlock();
    }
}

From source file:co.cask.cdap.data2.transaction.queue.hbase.HBaseConsumerStateStore.java

@Override
public void configureGroups(Iterable<? extends ConsumerGroupConfig> groupConfigs) {
    com.google.common.collect.Table<Long, Integer, byte[]> startRows = fetchAllStartRows();

    // Write new barrier info for all the groups
    byte[] startRow = QueueEntryRow.getQueueEntryRowKey(queueName, transaction.getWritePointer(), 0);
    Put put = new Put(Bytes.add(queueName.toBytes(), startRow));
    Set<Long> groupsIds = Sets.newHashSet();
    for (ConsumerGroupConfig groupConfig : groupConfigs) {
        long groupId = groupConfig.getGroupId();
        if (!groupsIds.add(groupId)) {
            throw new IllegalArgumentException("Same consumer group is provided multiple times");
        }
        put.add(Bytes.toBytes(groupId), GSON.toJson(groupConfig));

        // For new instances, set the start row to the barrier start row
        for (int instanceId = 0; instanceId < groupConfig.getGroupSize(); instanceId++) {
            if (!startRows.contains(groupId, instanceId)) {
                table.put(queueName.toBytes(), getConsumerStateColumn(groupId, instanceId), startRow);
            }
        }
    }

    // Remove all states for groups that are removed.
    deleteRemovedGroups(table.get(queueName.toBytes()), groupsIds);

    // Remove all barriers for groups that are removed.
    // Also remove barriers that all consumers have consumed past
    Scanner scanner = table.scan(barrierScanStartRow, barrierScanEndRow);
    // Multimap from groupId to barrier start rows. Ordering needs to match the scan order.
    Multimap<Long, byte[]> deletes = LinkedHashMultimap.create();
    try {
        Row row = scanner.next();
        while (row != null) {
            deleteRemovedGroups(row, groupsIds);

            // Check all instances in all groups
            for (Map.Entry<byte[], byte[]> entry : row.getColumns().entrySet()) {
                QueueBarrier barrier = decodeBarrierInfo(row.getRow(), entry.getValue());
                if (barrier == null) {
                    continue;
                }
                long groupId = barrier.getGroupConfig().getGroupId();
                boolean delete = true;
                // Check if all instances in the group have consumed past the current barrier
                for (int instanceId = 0; instanceId < barrier.getGroupConfig().getGroupSize(); instanceId++) {
                    byte[] consumerStartRow = startRows.get(groupId, instanceId);
                    if (consumerStartRow == null
                            || Bytes.compareTo(consumerStartRow, barrier.getStartRow()) < 0) {
                        delete = false;
                        break;
                    }
                }
                if (delete) {
                    deletes.put(groupId, row.getRow());
                }
            }
            row = scanner.next();
        }
    } finally {
        scanner.close();
    }

    // Remove barriers that all consumers have consumed past
    for (Map.Entry<Long, Collection<byte[]>> entry : deletes.asMap().entrySet()) {
        // Retains the last barrier info
        if (entry.getValue().size() <= 1) {
            continue;
        }
        Deque<byte[]> rows = Lists.newLinkedList(entry.getValue());
        rows.removeLast();
        byte[] groupColumn = Bytes.toBytes(entry.getKey());
        for (byte[] rowKey : rows) {
            table.delete(rowKey, groupColumn);
        }
    }

    table.put(put);
}

From source file:eu.itesla_project.modules.topo.TopologyHistory.java

private boolean removeLowProbabilityPossibleTopologies(int iteration, double probabilityThreshold,
        Set<String> excludedTopoIds) {
    Multimap<String, String> removedPerMetaSubstation = HashMultimap.create();
    Map<String, Integer> topologyCountPerMetaSubstationBefore = new HashMap<>();
    for (TopologyChoice topologyChoice : topologyChoices) {
        // remove the lowest-probability topologies, but keep at least one
        Collections.sort(topologyChoice.getPossibleTopologies(), PossibleTopology.COMPARATOR);
        int count = topologyChoice.getPossibleTopologies().size();
        topologyCountPerMetaSubstationBefore.put(
                topologyChoice.getPossibleTopologies().iterator().next().getMetaSubstation().getId(), count);
        int removedCount = 0;
        for (Iterator<PossibleTopology> it = topologyChoice.getPossibleTopologies().iterator(); it.hasNext()
                && topologyChoice.getPossibleTopologies().size() > 1;) {
            PossibleTopology possibleTopology = it.next();
            if (possibleTopology.getProbability() < probabilityThreshold && removedCount < count - 1) {
                removedPerMetaSubstation.put(possibleTopology.getMetaSubstation().getId(),
                        possibleTopology.getTopoHash());
                it.remove();
                excludedTopoIds.add(possibleTopology.getTopoHash());
                removedCount++;
            }
        }
        if (topologyChoice.getPossibleTopologies().isEmpty()) {
            throw new RuntimeException("Empty topo choice");
        }
    }
    int removedTopoCount = removedPerMetaSubstation.asMap().entrySet().stream()
            .mapToInt(e -> e.getValue().size()).sum();
    if (removedTopoCount > 0) {
        LOGGER.debug("Iteration {}: {} possible topologies removed because very low probability (< {})",
                iteration, removedTopoCount, probabilityThreshold);
        if (LOGGER.isTraceEnabled()) {
            for (Map.Entry<String, Collection<String>> entry : removedPerMetaSubstation.asMap().entrySet()) {
                String metaSubstationId = entry.getKey();
                LOGGER.trace(
                        "Iteration {}: remove {} possible topologies on {} of meta substation {} because of very low probability",
                        iteration, entry.getValue().size(),
                        topologyCountPerMetaSubstationBefore.get(metaSubstationId), metaSubstationId);
            }
        }
        return true;
    }
    return false;
}

From source file:com.facebook.presto.operator.scalar.SplitToMultimapFunction.java

@SqlType("map(varchar,array(varchar))")
public Block splitToMultimap(@TypeParameter("map(varchar,array(varchar))") Type mapType,
        @SqlType(StandardTypes.VARCHAR) Slice string, @SqlType(StandardTypes.VARCHAR) Slice entryDelimiter,
        @SqlType(StandardTypes.VARCHAR) Slice keyValueDelimiter) {
    checkCondition(entryDelimiter.length() > 0, INVALID_FUNCTION_ARGUMENT, "entryDelimiter is empty");
    checkCondition(keyValueDelimiter.length() > 0, INVALID_FUNCTION_ARGUMENT, "keyValueDelimiter is empty");
    checkCondition(!entryDelimiter.equals(keyValueDelimiter), INVALID_FUNCTION_ARGUMENT,
            "entryDelimiter and keyValueDelimiter must not be the same");

    Multimap<Slice, Slice> multimap = ArrayListMultimap.create();
    int entryStart = 0;
    while (entryStart < string.length()) {
        // Extract key-value pair based on current index
        // then add the pair if it can be split by keyValueDelimiter
        Slice keyValuePair;
        int entryEnd = string.indexOf(entryDelimiter, entryStart);
        if (entryEnd >= 0) {
            keyValuePair = string.slice(entryStart, entryEnd - entryStart);
        } else {
            // The rest of the string is the last possible pair.
            keyValuePair = string.slice(entryStart, string.length() - entryStart);
        }

        int keyEnd = keyValuePair.indexOf(keyValueDelimiter);
        if (keyEnd < 0) {
            throw new PrestoException(INVALID_FUNCTION_ARGUMENT,
                    "Key-value delimiter must appear exactly once in each entry. Bad input: "
                            + keyValuePair.toStringUtf8());
        }

        int valueStart = keyEnd + keyValueDelimiter.length();
        Slice key = keyValuePair.slice(0, keyEnd);
        Slice value = keyValuePair.slice(valueStart, keyValuePair.length() - valueStart);
        if (value.indexOf(keyValueDelimiter) >= 0) {
            throw new PrestoException(INVALID_FUNCTION_ARGUMENT,
                    "Key-value delimiter must appear exactly once in each entry. Bad input: "
                            + keyValuePair.toStringUtf8());
        }

        multimap.put(key, value);

        if (entryEnd < 0) {
            // No more pairs to add
            break;
        }
        // Next possible pair is placed next to the current entryDelimiter
        entryStart = entryEnd + entryDelimiter.length();
    }

    if (pageBuilder.isFull()) {
        pageBuilder.reset();
    }

    pageBuilder.declarePosition();
    BlockBuilder blockBuilder = pageBuilder.getBlockBuilder(0);
    BlockBuilder singleMapBlockBuilder = blockBuilder.beginBlockEntry();
    for (Map.Entry<Slice, Collection<Slice>> entry : multimap.asMap().entrySet()) {
        VARCHAR.writeSlice(singleMapBlockBuilder, entry.getKey());
        Collection<Slice> values = entry.getValue();
        BlockBuilder valueBlockBuilder = singleMapBlockBuilder.beginBlockEntry();
        for (Slice value : values) {
            VARCHAR.writeSlice(valueBlockBuilder, value);
        }
        singleMapBlockBuilder.closeEntry();
    }
    blockBuilder.closeEntry();

    return (Block) mapType.getObject(blockBuilder, blockBuilder.getPositionCount() - 1);
}

From source file:org.apache.cassandra.service.StorageService.java

/**
 * Called when an endpoint is removed from the ring. This function checks
 * whether this node becomes responsible for new ranges as a
 * consequence and streams data if needed.
 *
 * This is rather inefficient, but it does not matter much
 * since this is called very seldom.
 *
 * @param endpoint the node that left
 */
private void restoreReplicaCount(InetAddress endpoint, final InetAddress notifyEndpoint) {
    final Multimap<InetAddress, String> fetchSources = HashMultimap.create();
    Multimap<String, Map.Entry<InetAddress, Collection<Range>>> rangesToFetch = HashMultimap.create();

    final InetAddress myAddress = FBUtilities.getLocalAddress();

    for (String table : DatabaseDescriptor.getNonSystemTables()) {
        Multimap<Range, InetAddress> changedRanges = getChangedRangesForLeaving(table, endpoint);
        Set<Range> myNewRanges = new HashSet<Range>();
        for (Map.Entry<Range, InetAddress> entry : changedRanges.entries()) {
            if (entry.getValue().equals(myAddress))
                myNewRanges.add(entry.getKey());
        }
        Multimap<InetAddress, Range> sourceRanges = getNewSourceRanges(table, myNewRanges);
        for (Map.Entry<InetAddress, Collection<Range>> entry : sourceRanges.asMap().entrySet()) {
            fetchSources.put(entry.getKey(), table);
            rangesToFetch.put(table, entry);
        }
    }

    for (final String table : rangesToFetch.keySet()) {
        for (Map.Entry<InetAddress, Collection<Range>> entry : rangesToFetch.get(table)) {
            final InetAddress source = entry.getKey();
            Collection<Range> ranges = entry.getValue();
            final Runnable callback = new Runnable() {
                public void run() {
                    synchronized (fetchSources) {
                        fetchSources.remove(source, table);
                        if (fetchSources.isEmpty())
                            sendReplicationNotification(myAddress, notifyEndpoint);
                    }
                }
            };
            if (logger_.isDebugEnabled())
                logger_.debug("Requesting from " + source + " ranges " + StringUtils.join(ranges, ", "));
            StreamIn.requestRanges(source, table, ranges, callback, OperationType.RESTORE_REPLICA_COUNT);
        }
    }
}

From source file:org.dllearner.reasoning.ClosedWorldReasoner.java

@SuppressWarnings("unchecked")
public SortedSet<OWLIndividual> getIndividualsImplFast(OWLClassExpression description)
        throws ReasoningMethodUnsupportedException {
    // policy: returned sets are clones, i.e. can be modified
    // (of course we only have to clone the leafs of a class OWLClassExpression tree)
    if (description.isOWLThing()) {
        return (TreeSet<OWLIndividual>) individuals.clone();
    } else if (description.isOWLNothing()) {
        return new TreeSet<>();
    } else if (!description.isAnonymous()) {
        if (classInstancesPos.containsKey(description.asOWLClass())) {
            return (TreeSet<OWLIndividual>) classInstancesPos.get(description).clone();
        } else {
            return new TreeSet<>();
        }
    } else if (description instanceof OWLObjectComplementOf) {
        OWLClassExpression operand = ((OWLObjectComplementOf) description).getOperand();
        if (!operand.isAnonymous()) {
            if (isDefaultNegation()) {
                if (precomputeNegations) {
                    return (TreeSet<OWLIndividual>) classInstancesNeg.get(operand).clone();
                }
                SetView<OWLIndividual> diff = Sets.difference(individuals, classInstancesPos.get(operand));
                return new TreeSet<>(diff);
            } else {
                return (TreeSet<OWLIndividual>) classInstancesNeg.get(operand).clone();
            }
        }
        // implement retrieval as default negation
        return new TreeSet<>(Sets.difference(individuals, getIndividualsImpl(operand)));
    } else if (description instanceof OWLObjectUnionOf) {
        SortedSet<OWLIndividual> ret = new TreeSet<>();
        for (OWLClassExpression operand : ((OWLObjectUnionOf) description).getOperands()) {
            ret.addAll(getIndividualsImpl(operand));
        }
        return ret;
    } else if (description instanceof OWLObjectIntersectionOf) {
        Iterator<OWLClassExpression> iterator = ((OWLObjectIntersectionOf) description).getOperands()
                .iterator();
        // copy instances of first element and then subtract all others
        SortedSet<OWLIndividual> ret = getIndividualsImpl(iterator.next());
        while (iterator.hasNext()) {
            ret.retainAll(getIndividualsImpl(iterator.next()));
        }
        return ret;
    } else if (description instanceof OWLObjectSomeValuesFrom) {
        SortedSet<OWLIndividual> returnSet = new TreeSet<>();

        OWLObjectPropertyExpression property = ((OWLObjectSomeValuesFrom) description).getProperty();
        OWLClassExpression filler = ((OWLObjectSomeValuesFrom) description).getFiller();

        //get instances of filler concept
        SortedSet<OWLIndividual> targetSet = getIndividualsImpl(filler);

        // the mapping of instances related by r
        Map<OWLIndividual, ? extends Collection<OWLIndividual>> mapping = opPos
                .get(property.getNamedProperty());

        if (property.isAnonymous()) { // \exists r^{-1}.C
            // invert the mapping
            // get all objects that are related by r to (at least) one subject which is of type C
            Multimap<OWLIndividual, OWLIndividual> mappingInv = Multimaps.invertFrom(
                    MapUtils.createSortedMultiMap(opPos.get(property.getNamedProperty())),
                    TreeMultimap.<OWLIndividual, OWLIndividual>create());

            mapping = mappingInv.asMap();
        }

        // each individual is connected to a set of individuals via the property;
        // we loop through the complete mapping
        for (Entry<OWLIndividual, ? extends Collection<OWLIndividual>> entry : mapping.entrySet()) {
            Collection<OWLIndividual> inds = entry.getValue();
            for (OWLIndividual ind : inds) {
                if (targetSet.contains(ind)) {
                    returnSet.add(entry.getKey());
                    // once we found an individual, we do not need to check the others
                    break;
                }
            }
        }

        return returnSet;
    } else if (description instanceof OWLObjectAllValuesFrom) {
        // \forall restrictions are difficult to handle; assume we want to check
        // \forall hasChild.male with domain(hasChild)=Person; then for all non-persons
        // this is satisfied trivially (all of their non-existing children are male)
        //         if(!configurator.getForallRetrievalSemantics().equals("standard")) {
        //            throw new Error("Only forallExists semantics currently implemented.");
        //         }

        // problem: we need to make sure that \neg \exists r.\top \equiv \forall r.\bot
        // can still be reached in an algorithm (\forall r.\bot \equiv \bot under forallExists
        // semantics)
        OWLObjectPropertyExpression property = ((OWLObjectAllValuesFrom) description).getProperty();
        OWLClassExpression filler = ((OWLObjectAllValuesFrom) description).getFiller();

        // get instances of filler concept
        SortedSet<OWLIndividual> targetSet = getIndividualsImpl(filler);

        // the mapping of instances related by r
        Map<OWLIndividual, ? extends Collection<OWLIndividual>> mapping = opPos
                .get(property.getNamedProperty());

        if (property.isAnonymous()) { // \forall r^{-1}.C
            // invert the mapping
            // get all objects that are related by r to (at least) one subject which is of type C
            Multimap<OWLIndividual, OWLIndividual> mappingInv = Multimaps.invertFrom(
                    MapUtils.createSortedMultiMap(opPos.get(property.getNamedProperty())),
                    TreeMultimap.<OWLIndividual, OWLIndividual>create());

            mapping = mappingInv.asMap();
        }

        //         SortedSet<OWLIndividual> returnSet = new TreeSet<OWLIndividual>(mapping.keySet());
        SortedSet<OWLIndividual> returnSet = (SortedSet<OWLIndividual>) individuals.clone();

        // each individual is connected to a set of individuals via the property;
        // we loop through the complete mapping
        for (Entry<OWLIndividual, ? extends Collection<OWLIndividual>> entry : mapping.entrySet()) {
            Collection<OWLIndividual> inds = entry.getValue();
            for (OWLIndividual ind : inds) {
                if (!targetSet.contains(ind)) {
                    returnSet.remove(entry.getKey());
                    break;
                }
            }
        }
        return returnSet;
    } else if (description instanceof OWLObjectMinCardinality) {
        OWLObjectPropertyExpression property = ((OWLObjectMinCardinality) description).getProperty();
        OWLClassExpression filler = ((OWLObjectMinCardinality) description).getFiller();

        //get instances of filler concept
        SortedSet<OWLIndividual> targetSet = getIndividualsImpl(filler);

        // the mapping of instances related by r
        Map<OWLIndividual, ? extends Collection<OWLIndividual>> mapping = opPos
                .get(property.getNamedProperty());

        if (property.isAnonymous()) { // \forall r^{-1}.C
            // invert the mapping
            // get all objects that are related by r to (at least) one subject which is of type C
            Multimap<OWLIndividual, OWLIndividual> mappingInv = Multimaps.invertFrom(
                    MapUtils.createSortedMultiMap(opPos.get(property.getNamedProperty())),
                    TreeMultimap.<OWLIndividual, OWLIndividual>create());

            mapping = mappingInv.asMap();
        }

        SortedSet<OWLIndividual> returnSet = new TreeSet<>();

        int number = ((OWLObjectMinCardinality) description).getCardinality();

        for (Entry<OWLIndividual, ? extends Collection<OWLIndividual>> entry : mapping.entrySet()) {
            int nrOfFillers = 0;
            int index = 0;
            Collection<OWLIndividual> inds = entry.getValue();

            // we do not need to run tests if there are not sufficiently many fillers
            if (inds.size() < number) {
                continue;
            }

            for (OWLIndividual ind : inds) {
                // stop inner loop when nr of fillers is reached
                if (nrOfFillers >= number) {
                    returnSet.add(entry.getKey());
                    break;
                }
                // early abort when too many instance checks failed
                if (inds.size() - index < number) {
                    break;
                }
                if (targetSet.contains(ind)) {
                    nrOfFillers++;
                }
                index++;
            }
        }

        return returnSet;
    } else if (description instanceof OWLObjectMaxCardinality) {
        OWLObjectPropertyExpression property = ((OWLObjectMaxCardinality) description).getProperty();
        OWLClassExpression filler = ((OWLObjectMaxCardinality) description).getFiller();
        int number = ((OWLObjectMaxCardinality) description).getCardinality();

        //get instances of filler concept
        SortedSet<OWLIndividual> targetSet = getIndividualsImpl(filler);

        // the mapping of instances related by r
        Map<OWLIndividual, ? extends Collection<OWLIndividual>> mapping = opPos
                .get(property.getNamedProperty());

        if (property.isAnonymous()) { // \forall r^{-1}.C
            // invert the mapping
            // get all objects that are related by r to (at least) one subject which is of type C
            Multimap<OWLIndividual, OWLIndividual> mappingInv = Multimaps.invertFrom(
                    MapUtils.createSortedMultiMap(opPos.get(property.getNamedProperty())),
                    TreeMultimap.<OWLIndividual, OWLIndividual>create());

            mapping = mappingInv.asMap();
        }

        // initially all individuals are in the return set and we then remove those
        // with too many fillers
        SortedSet<OWLIndividual> returnSet = (SortedSet<OWLIndividual>) individuals.clone();

        for (Entry<OWLIndividual, ? extends Collection<OWLIndividual>> entry : mapping.entrySet()) {
            int nrOfFillers = 0;
            int index = 0;
            Collection<OWLIndividual> inds = entry.getValue();

            // we do not need to run tests if there are not sufficiently many fillers
            if (number < inds.size()) {
                returnSet.add(entry.getKey());
                continue;
            }

            for (OWLIndividual ind : inds) {
                // stop inner loop when nr of fillers is reached
                if (nrOfFillers >= number) {
                    break;
                }
                // early abort when too many instances are already true
                if (inds.size() - index < number) {
                    returnSet.add(entry.getKey());
                    break;
                }
                if (targetSet.contains(ind)) {
                    nrOfFillers++;
                }
                index++;
            }
        }

        return returnSet;
    } else if (description instanceof OWLObjectHasValue) {
        OWLObjectPropertyExpression property = ((OWLObjectHasValue) description).getProperty();
        OWLIndividual value = ((OWLObjectHasValue) description).getFiller();

        // the mapping of instances related by r
        Map<OWLIndividual, ? extends Collection<OWLIndividual>> mapping = opPos
                .get(property.getNamedProperty());

        if (property.isAnonymous()) { // \exists r^{-1}.{a}
            // invert the mapping
            // get all objects that are related by r to (at least) one subject which is of type C
            Multimap<OWLIndividual, OWLIndividual> mappingInv = Multimaps.invertFrom(
                    MapUtils.createSortedMultiMap(opPos.get(property.getNamedProperty())),
                    TreeMultimap.<OWLIndividual, OWLIndividual>create());

            mapping = mappingInv.asMap();
        }

        SortedSet<OWLIndividual> returnSet = new TreeSet<>();

        for (Entry<OWLIndividual, ? extends Collection<OWLIndividual>> entry : mapping.entrySet()) {
            if (entry.getValue().contains(value)) {
                returnSet.add(entry.getKey());
            }
        }
        return returnSet;
    } else if (description instanceof OWLDataSomeValuesFrom) {
        OWLDataPropertyExpression property = ((OWLDataSomeValuesFrom) description).getProperty();
        OWLDataRange filler = ((OWLDataSomeValuesFrom) description).getFiller();

        if (filler.isDatatype()) {
            //we assume that the values are of the given datatype
            return new TreeSet<>(dpPos.get(property).keySet());
            //            OWLDatatype dt = filler.asOWLDatatype();
            //            if(dt.isDouble()){
            //               return new TreeSet<OWLIndividual>(dd.get(property).keySet());
            //            } else if(dt.isInteger()){
            //               return new TreeSet<OWLIndividual>(id.get(property).keySet());
            //            } else if(dt.isBoolean()){
            //               return bdPos.get(property);
            //            }
        } else if (filler instanceof OWLDatatypeRestriction) {
            OWLDatatype datatype = ((OWLDatatypeRestriction) filler).getDatatype();
            Set<OWLFacetRestriction> facetRestrictions = ((OWLDatatypeRestriction) filler)
                    .getFacetRestrictions();

            if (OWLAPIUtils.floatDatatypes.contains(datatype)) {
                double min = -Double.MAX_VALUE;
                double max = Double.MAX_VALUE;
                for (OWLFacetRestriction facet : facetRestrictions) {
                    if (facet.getFacet() == OWLFacet.MIN_INCLUSIVE) {
                        min = Double.parseDouble(facet.getFacetValue().getLiteral());
                    } else if (facet.getFacet() == OWLFacet.MAX_INCLUSIVE) {
                        max = Double.parseDouble(facet.getFacetValue().getLiteral());
                    }
                }
                Map<OWLIndividual, SortedSet<Double>> mapping = dd.get(property);
                SortedSet<OWLIndividual> returnSet = new TreeSet<>();

                for (Entry<OWLIndividual, SortedSet<Double>> entry : mapping.entrySet()) {
                    // we can skip if the largest number is below the minimum or the lowest is above the maximum
                    if (entry.getValue().last() < min || entry.getValue().first() > max) {
                        continue;
                    }

                    //search a value which is in the interval
                    for (Double value : entry.getValue()) {
                        if (value >= min && value <= max) {
                            returnSet.add(entry.getKey());
                            break;
                        }
                    }
                }
                return returnSet;
            } else if (OWLAPIUtils.intDatatypes.contains(datatype)) {
                int min = Integer.MIN_VALUE;
                int max = Integer.MAX_VALUE;
                for (OWLFacetRestriction facet : facetRestrictions) {
                    if (facet.getFacet() == OWLFacet.MIN_INCLUSIVE) {
                        min = facet.getFacetValue().parseInteger();
                    } else if (facet.getFacet() == OWLFacet.MAX_INCLUSIVE) {
                        max = facet.getFacetValue().parseInteger();
                    }
                }
                Map<OWLIndividual, SortedSet<Integer>> mapping = id.get(property);
                SortedSet<OWLIndividual> returnSet = new TreeSet<>();
                for (Entry<OWLIndividual, SortedSet<Integer>> entry : mapping.entrySet()) {
                    // we can skip if the largest number is below the minimum or the lowest is above the maximum
                    if (entry.getValue().last() < min || entry.getValue().first() > max) {
                        continue;
                    }

                    //search a value which is in the interval
                    for (Integer value : entry.getValue()) {
                        if (value >= min && value <= max) {
                            returnSet.add(entry.getKey());
                            break;
                        }
                    }
                }
                return returnSet;
            } else if (OWLAPIUtils.dtDatatypes.contains(datatype)) {
                // TODO we cannot ensure the sorting, because the OWL API only does
                // String comparison on the lexical String value
                OWLLiteral min = null;
                OWLLiteral max = null;
                for (OWLFacetRestriction facet : facetRestrictions) {
                    if (facet.getFacet() == OWLFacet.MIN_INCLUSIVE) {
                        min = facet.getFacetValue();
                    } else if (facet.getFacet() == OWLFacet.MAX_INCLUSIVE) {
                        max = facet.getFacetValue();
                    }
                }
                Map<OWLIndividual, SortedSet<OWLLiteral>> mapping = dpPos.get(property);
                // we can return false if the largest number is below the minimum or the lowest is above the maximum
                DateTimeFormatter parser = OWLAPIUtils.dateTimeParsers.get(datatype);
                DateTime minDateTime = null;
                if (min != null) {
                    minDateTime = parser.parseDateTime(min.getLiteral());
                }
                DateTime maxDateTime = null;
                if (max != null) {
                    maxDateTime = parser.parseDateTime(max.getLiteral());
                }
                SortedSet<OWLIndividual> returnSet = new TreeSet<>();
                for (Entry<OWLIndividual, SortedSet<OWLLiteral>> entry : mapping.entrySet()) {
                    //search a value which is in the interval
                    for (OWLLiteral value : entry.getValue()) {
                        if (OWLAPIUtils.inRange(value, min, max)) {
                            returnSet.add(entry.getKey());
                        }
                    }
                }
                return returnSet;
            }
        } else if (filler.getDataRangeType() == DataRangeType.DATA_ONE_OF) {
            OWLDataOneOf dataOneOf = (OWLDataOneOf) filler;
            Set<OWLLiteral> values = dataOneOf.getValues();

            Map<OWLIndividual, SortedSet<OWLLiteral>> mapping = dpPos.get(property);
            SortedSet<OWLIndividual> returnSet = new TreeSet<>();

            for (Entry<OWLIndividual, SortedSet<OWLLiteral>> entry : mapping.entrySet()) {
                OWLIndividual ind = entry.getKey();
                SortedSet<OWLLiteral> indValues = entry.getValue();

                if (!Sets.intersection(values, indValues).isEmpty()) {
                    returnSet.add(ind);
                }
            }
            return returnSet;
        }
    } else if (description instanceof OWLDataHasValue) {
        OWLDataPropertyExpression property = ((OWLDataHasValue) description).getProperty();
        OWLLiteral value = ((OWLDataHasValue) description).getFiller();

        SortedSet<OWLIndividual> returnSet = new TreeSet<>();

        Map<OWLIndividual, SortedSet<OWLLiteral>> mapping = dpPos.get(property);

        for (Entry<OWLIndividual, SortedSet<OWLLiteral>> entry : mapping.entrySet()) {
            if (entry.getValue().contains(value)) {
                returnSet.add(entry.getKey());
            }
        }

        return returnSet;
    } else if (description instanceof OWLObjectOneOf) {
        return new TreeSet<>(((OWLObjectOneOf) description).getIndividuals());
    }

    throw new ReasoningMethodUnsupportedException(
            "Retrieval for class expression " + description + " unsupported.");

}

From source file:org.eclipse.xtext.util.formallang.PdaUtil.java

public <S, P, T, D extends Pda<S, P>> D expand(Pda<S, P> pda, Function<S, Pda<S, P>> expand,
        Function<S, T> tokens, PdaFactory<D, S, P, T> fact) {
    D result = fact.create(tokens.apply(pda.getStart()), tokens.apply(pda.getStop()));
    Identity<S> identity = new Identity<S>();
    Map<S, S> idstates = Maps.newIdentityHashMap();
    Multimap<S, S> followers = LinkedHashMultimap.create();
    for (S s_old : nfaUtil.collect(pda)) {
        S s_new = idstates.get(s_old);
        if (s_new == null) {
            Pda<S, P> sub = expand.apply(s_old);
            if (sub != null) {
                S s_start = identity.get(fact.createPush(result, tokens.apply(s_old)));
                S s_stop = identity.get(fact.createPop(result, tokens.apply(s_old)));
                idstates.put(s_old, s_start);
                idstates.put(sub.getStart(), s_start);
                idstates.put(sub.getStop(), s_stop);
                followers.putAll(s_start, sub.getFollowers(sub.getStart()));
                followers.putAll(s_stop, pda.getFollowers(s_old));
                for (S f_old : nfaUtil.collect(sub))
                    if (f_old != sub.getStart() && f_old != sub.getStop()) {
                        S f_new = idstates.get(f_old);
                        if (f_new == null) {
                            idstates.put(f_old, f_new = clone(f_old, sub, result, tokens, fact, identity));
                            followers.putAll(f_new, pda.getFollowers(f_old));
                        }
                    }
            } else {
                idstates.put(s_old, s_new = clone(s_old, pda, result, tokens, fact, identity));
                followers.putAll(s_new, pda.getFollowers(s_old));
            }
        }
    }
    for (Map.Entry<S, Collection<S>> entry : followers.asMap().entrySet()) {
        Set<S> f = Sets.newLinkedHashSet();
        for (S s : entry.getValue())
            f.add(idstates.get(s));
        fact.setFollowers(result, entry.getKey(), f);
    }
    return result;
}

From source file:org.apache.calcite.plan.RelOptUtil.java

/**
 * Returns a list of all tables used by this expression or its children
 */
public static List<RelOptTable> findAllTables(RelNode rel) {
    final Multimap<Class<? extends RelNode>, RelNode> nodes = RelMetadataQuery.instance().getNodeTypes(rel);
    final List<RelOptTable> usedTables = new ArrayList<>();
    for (Entry<Class<? extends RelNode>, Collection<RelNode>> e : nodes.asMap().entrySet()) {
        if (TableScan.class.isAssignableFrom(e.getKey())) {
            for (RelNode node : e.getValue()) {
                usedTables.add(node.getTable());
            }
        }
    }
    return usedTables;
}

From source file:org.apache.druid.java.util.http.client.NettyHttpClient.java

@Override
public <Intermediate, Final> ListenableFuture<Final> go(final Request request,
        final HttpResponseHandler<Intermediate, Final> handler, final Duration requestReadTimeout) {
    final HttpMethod method = request.getMethod();
    final URL url = request.getUrl();
    final Multimap<String, String> headers = request.getHeaders();

    final String requestDesc = StringUtils.format("%s %s", method, url);
    if (log.isDebugEnabled()) {
        log.debug("[%s] starting", requestDesc);
    }

    // Block while acquiring a channel from the pool, then complete the request asynchronously.
    final Channel channel;
    final String hostKey = getPoolKey(url);
    final ResourceContainer<ChannelFuture> channelResourceContainer = pool.take(hostKey);
    final ChannelFuture channelFuture = channelResourceContainer.get().awaitUninterruptibly();
    if (!channelFuture.isSuccess()) {
        channelResourceContainer.returnResource(); // Some other poor sap will have to deal with it...
        return Futures.immediateFailedFuture(
                new ChannelException("Faulty channel in resource pool", channelFuture.getCause()));
    } else {
        channel = channelFuture.getChannel();

        // In case we get a channel that never had its readability turned back on.
        channel.setReadable(true);
    }
    final String urlFile = StringUtils.nullToEmptyNonDruidDataString(url.getFile());
    final HttpRequest httpRequest = new DefaultHttpRequest(HttpVersion.HTTP_1_1, method,
            urlFile.isEmpty() ? "/" : urlFile);

    if (!headers.containsKey(HttpHeaders.Names.HOST)) {
        httpRequest.headers().add(HttpHeaders.Names.HOST, getHost(url));
    }

    // If Accept-Encoding is set in the Request, use that. Otherwise use the default from "compressionCodec".
    if (!headers.containsKey(HttpHeaders.Names.ACCEPT_ENCODING)) {
        httpRequest.headers().set(HttpHeaders.Names.ACCEPT_ENCODING, compressionCodec.getEncodingString());
    }

    for (Map.Entry<String, Collection<String>> entry : headers.asMap().entrySet()) {
        String key = entry.getKey();

        for (String obj : entry.getValue()) {
            httpRequest.headers().add(key, obj);
        }
    }

    if (request.hasContent()) {
        httpRequest.setContent(request.getContent());
    }

    final long readTimeout = getReadTimeout(requestReadTimeout);
    final SettableFuture<Final> retVal = SettableFuture.create();

    if (readTimeout > 0) {
        channel.getPipeline().addLast(READ_TIMEOUT_HANDLER_NAME,
                new ReadTimeoutHandler(timer, readTimeout, TimeUnit.MILLISECONDS));
    }

    channel.getPipeline().addLast(LAST_HANDLER_NAME, new SimpleChannelUpstreamHandler() {
        private volatile ClientResponse<Intermediate> response = null;

        // Chunk number most recently assigned.
        private long currentChunkNum = 0;

        // Suspend and resume watermarks (respectively: last chunk number that triggered a suspend, and that was
        // provided to the TrafficCop's resume method). Synchronized access since they are not always accessed
        // from an I/O thread. (TrafficCops can be called from any thread.)
        private final Object watermarkLock = new Object();
        private long suspendWatermark = -1;
        private long resumeWatermark = -1;

        @Override
        public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
            if (log.isDebugEnabled()) {
                log.debug("[%s] messageReceived: %s", requestDesc, e.getMessage());
            }
            try {
                Object msg = e.getMessage();

                if (msg instanceof HttpResponse) {
                    HttpResponse httpResponse = (HttpResponse) msg;
                    if (log.isDebugEnabled()) {
                        log.debug("[%s] Got response: %s", requestDesc, httpResponse.getStatus());
                    }

                    HttpResponseHandler.TrafficCop trafficCop = resumeChunkNum -> {
                        synchronized (watermarkLock) {
                            resumeWatermark = Math.max(resumeWatermark, resumeChunkNum);

                            if (suspendWatermark >= 0 && resumeWatermark >= suspendWatermark) {
                                suspendWatermark = -1;
                                channel.setReadable(true);
                                long backPressureDuration = System.nanoTime() - backPressureStartTimeNs;
                                log.debug("[%s] Resumed reads from channel (chunkNum = %,d).", requestDesc,
                                        resumeChunkNum);
                                return backPressureDuration;
                            }
                        }

                        return 0; // if we didn't resume, we don't know whether backpressure was happening
                    };
                    response = handler.handleResponse(httpResponse, trafficCop);
                    if (response.isFinished()) {
                        retVal.set((Final) response.getObj());
                    }

                    assert currentChunkNum == 0;
                    possiblySuspendReads(response);

                    if (!httpResponse.isChunked()) {
                        finishRequest();
                    }
                } else if (msg instanceof HttpChunk) {
                    HttpChunk httpChunk = (HttpChunk) msg;
                    if (log.isDebugEnabled()) {
                        log.debug("[%s] Got chunk: %sB, last=%s", requestDesc,
                                httpChunk.getContent().readableBytes(), httpChunk.isLast());
                    }

                    if (httpChunk.isLast()) {
                        finishRequest();
                    } else {
                        response = handler.handleChunk(response, httpChunk, ++currentChunkNum);
                        if (response.isFinished() && !retVal.isDone()) {
                            retVal.set((Final) response.getObj());
                        }
                        possiblySuspendReads(response);
                    }
                } else {
                    throw new IllegalStateException(
                            StringUtils.format("Unknown message type[%s]", msg.getClass()));
                }
            } catch (Exception ex) {
                log.warn(ex, "[%s] Exception thrown while processing message, closing channel.", requestDesc);

                if (!retVal.isDone()) {
                    retVal.set(null);
                }
                channel.close();
                channelResourceContainer.returnResource();

                throw ex;
            }
        }

        private void possiblySuspendReads(ClientResponse<?> response) {
            if (!response.isContinueReading()) {
                synchronized (watermarkLock) {
                    suspendWatermark = Math.max(suspendWatermark, currentChunkNum);
                    if (suspendWatermark > resumeWatermark) {
                        channel.setReadable(false);
                        backPressureStartTimeNs = System.nanoTime();
                        log.debug("[%s] Suspended reads from channel (chunkNum = %,d).", requestDesc,
                                currentChunkNum);
                    }
                }
            }
        }

        private void finishRequest() {
            ClientResponse<Final> finalResponse = handler.done(response);

            if (!finalResponse.isFinished() || !finalResponse.isContinueReading()) {
                throw new ISE(
                        "[%s] Didn't get a completed ClientResponse Object from [%s] (finished = %s, continueReading = %s)",
                        requestDesc, handler.getClass(), finalResponse.isFinished(),
                        finalResponse.isContinueReading());
            }
            if (!retVal.isDone()) {
                retVal.set(finalResponse.getObj());
            }
            removeHandlers();
            channel.setReadable(true);
            channelResourceContainer.returnResource();
        }

        @Override
        public void exceptionCaught(ChannelHandlerContext context, ExceptionEvent event) {
            if (log.isDebugEnabled()) {
                final Throwable cause = event.getCause();
                if (cause == null) {
                    log.debug("[%s] Caught exception", requestDesc);
                } else {
                    log.debug(cause, "[%s] Caught exception", requestDesc);
                }
            }

            retVal.setException(event.getCause());
            // response is non-null if we received the initial chunk and then an exception occurred
            if (response != null) {
                handler.exceptionCaught(response, event.getCause());
            }
            try {
                if (channel.isOpen()) {
                    channel.close();
                }
            } catch (Exception e) {
                log.warn(e, "Error while closing channel");
            } finally {
                channelResourceContainer.returnResource();
            }
        }

        @Override
        public void channelDisconnected(ChannelHandlerContext context, ChannelStateEvent event) {
            if (log.isDebugEnabled()) {
                log.debug("[%s] Channel disconnected", requestDesc);
            }
            // response is non-null if we received the initial chunk and then the channel disconnected
            if (response != null) {
                handler.exceptionCaught(response, new ChannelException("Channel disconnected"));
            }
            channel.close();
            channelResourceContainer.returnResource();
            if (!retVal.isDone()) {
                log.warn("[%s] Channel disconnected before response complete", requestDesc);
                retVal.setException(new ChannelException("Channel disconnected"));
            }
        }

        private void removeHandlers() {
            if (readTimeout > 0) {
                channel.getPipeline().remove(READ_TIMEOUT_HANDLER_NAME);
            }
            channel.getPipeline().remove(LAST_HANDLER_NAME);
        }
    });

    channel.write(httpRequest).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) {
            if (!future.isSuccess()) {
                channel.close();
                channelResourceContainer.returnResource();
                if (!retVal.isDone()) {
                    retVal.setException(new ChannelException(
                            StringUtils.format("[%s] Failed to write request to channel", requestDesc),
                            future.getCause()));
                }
            }
        }
    });

    return retVal;
}

From source file:com.palantir.atlasdb.keyvalue.cassandra.CassandraClientPool.java

private void sanityCheckRingConsistency() {
    Multimap<Set<TokenRange>, InetSocketAddress> tokenRangesToHost = HashMultimap.create();
    for (InetSocketAddress host : currentPools.keySet()) {
        Cassandra.Client client = null;
        try {
            client = CassandraClientFactory.getClientInternal(host, config.ssl(), config.socketTimeoutMillis(),
                    config.socketQueryTimeoutMillis());
            try {
                client.describe_keyspace(config.keyspace());
            } catch (NotFoundException e) {
                return; // don't care to check for ring consistency when we're not even fully initialized
            }
            tokenRangesToHost.put(ImmutableSet.copyOf(client.describe_ring(config.keyspace())), host);
        } catch (Exception e) {
            log.warn("failed to get ring info from host: {}", host, e);
        } finally {
            if (client != null) {
                client.getOutputProtocol().getTransport().close();
            }
        }

        if (tokenRangesToHost.isEmpty()) {
            log.warn(
                    "Failed to get ring info for entire Cassandra cluster ({}); ring could not be checked for consistency.",
                    config.keyspace());
            return;
        }

        if (tokenRangesToHost.keySet().size() == 1) { // all nodes agree on a consistent view of the cluster. Good.
            return;
        }

        RuntimeException e = new IllegalStateException(
                "Hosts have differing ring descriptions.  This can lead to inconsistent reads and lost data. ");
        log.error("QA-86204 " + e.getMessage() + tokenRangesToHost, e);

        // provide some easier to grok logging for the two most common cases
        if (tokenRangesToHost.size() > 2) {
            for (Map.Entry<Set<TokenRange>, Collection<InetSocketAddress>> entry : tokenRangesToHost.asMap()
                    .entrySet()) {
                if (entry.getValue().size() == 1) {
                    log.error("Host: " + entry.getValue().iterator().next()
                            + " disagrees with the other nodes about the ring state.");
                }
            }
        }
        if (tokenRangesToHost.keySet().size() == 2) {
            ImmutableList<Set<TokenRange>> sets = ImmutableList.copyOf(tokenRangesToHost.keySet());
            Set<TokenRange> set1 = sets.get(0);
            Set<TokenRange> set2 = sets.get(1);
            log.error("Hosts are split.  group1: " + tokenRangesToHost.get(set1) + " group2: "
                    + tokenRangesToHost.get(set2));
        }

        CassandraVerifier.logErrorOrThrow(e.getMessage(), config.safetyDisabled());
    }
}