Example usage for com.google.common.collect Multimap containsKey

Introduction

On this page you can find example usage for com.google.common.collect Multimap containsKey.

Prototype

boolean containsKey(@Nullable Object key);

Document

Returns true if this multimap contains at least one key-value pair with the key key.
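
A minimal, self-contained sketch of this behavior (hypothetical class and key names; assumes Guava on the classpath):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class MultimapContainsKeyDemo {
    public static void main(String[] args) {
        Multimap<String, String> multimap = HashMultimap.create();
        multimap.put("fruit", "apple");
        multimap.put("fruit", "banana");

        System.out.println(multimap.containsKey("fruit"));     // true: at least one entry exists
        System.out.println(multimap.containsKey("vegetable")); // false: no entry for this key

        // get() returns an empty collection (never null) for a missing key,
        // so containsKey is the idiomatic presence check
        multimap.removeAll("fruit");
        System.out.println(multimap.containsKey("fruit"));     // false again
    }
}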

Usage

From source file:eu.itesla_project.modules.topo.TopologyHistory.java

private boolean fixBranchesAlwaysDisconnectAtOneSide(int iteration, Network network) {
    // avoid branches that are always disconnected at one side => connect them to an isolated bus on the disconnected side
    Multimap<String, String> branch2substations = HashMultimap.create();
    for (TopologyChoice topologyChoice : topologyChoices) {
        for (PossibleTopology possibleTopology : topologyChoice.getPossibleTopologies()) {
            for (PossibleTopology.Substation substation : possibleTopology.getMetaSubstation()
                    .getSubstations()) {
                for (PossibleTopology.Bus bus : substation.getBuses()) {
                    for (PossibleTopology.Equipment eq : bus.getEquipments()) {
                        if (eq.isBranch(false)) {
                            branch2substations.put(eq.getId(), substation.getId());
                        }
                    }
                }
            }
        }
    }
    Set<String> branchesAlwaysDisconnectedAtOneSide = new HashSet<>();
    Multimap<String, String> substation2branches = HashMultimap.create();
    for (Map.Entry<String, Collection<String>> entry : branch2substations.asMap().entrySet()) {
        String branchId = entry.getKey();
        if (entry.getValue().size() == 1) {
            String substationId = entry.getValue().iterator().next();
            TwoTerminalsConnectable branch = network.getLine(branchId);
            if (branch == null) {
                branch = network.getTwoWindingsTransformer(branchId);
            }
            if (branch == null) {
                throw new RuntimeException();
            }
            if (branch.getTerminal1().getVoltageLevel() != branch.getTerminal2().getVoltageLevel()) {
                String otherSubstationId;
                if (branch.getTerminal1().getVoltageLevel().getId().equals(substationId)) {
                    otherSubstationId = branch.getTerminal2().getVoltageLevel().getId();
                } else if (branch.getTerminal2().getVoltageLevel().getId().equals(substationId)) {
                    otherSubstationId = branch.getTerminal1().getVoltageLevel().getId();
                } else {
                    throw new RuntimeException();
                }
                substation2branches.put(otherSubstationId, branchId);
                branchesAlwaysDisconnectedAtOneSide.add(branchId);
            }
        }
    }

    for (TopologyChoice topologyChoice : topologyChoices) {
        for (PossibleTopology possibleTopology : topologyChoice.getPossibleTopologies()) {
            for (PossibleTopology.Substation substation : possibleTopology.getMetaSubstation()
                    .getSubstations()) {
                if (substation2branches.containsKey(substation.getId())) {
                    VoltageLevel vl = network.getVoltageLevel(substation.getId());
                    for (String branchId : substation2branches.asMap().get(substation.getId())) {
                        PossibleTopology.Equipment eq = new PossibleTopology.Equipment(branchId);
                        Connectable obj = vl.getConnectable(eq.getId(), Connectable.class);
                        eq.setType(obj.getType());
                        substation.getBuses().add(new PossibleTopology.Bus(eq));
                    }
                }
            }
        }
    }
    if (branchesAlwaysDisconnectedAtOneSide.size() > 0) {
        LOGGER.debug(
                "Iteration {}: {} branches are always disconnected at one side, a fictitious (isolated) bus has been added to the disconnected side",
                iteration, branchesAlwaysDisconnectedAtOneSide.size());
        LOGGER.trace("Iteration {}: detailed list of branches always disconnected at one side: {}", iteration,
                branchesAlwaysDisconnectedAtOneSide);
        return true;
    }
    return false;
}

From source file:com.b2international.snowowl.datastore.server.snomed.merge.rules.SnomedDonatedComponentResolverRule.java

@Override
public Collection<MergeConflict> validate(final CDOTransaction transaction) {

    Stopwatch stopwatch = Stopwatch.createStarted();

    // XXX This is important to avoid ObjectNotFoundExceptions due to the removal of extension concepts
    transaction.options().setStaleReferencePolicy(CDOStaleReferencePolicy.PROXY);

    Map<CDOID, Component> newComponentsMap = StreamSupport
            .stream(ComponentUtils2.getNewObjects(transaction, Component.class).spliterator(), false)
            .collect(toMap(CDOObject::cdoID, Function.identity()));

    Iterable<Relationship> allNewAndDirtyRelationships = Iterables.concat(
            ComponentUtils2.getNewObjects(transaction, Relationship.class),
            ComponentUtils2.getDirtyObjects(transaction, Relationship.class));

    Multimap<CDOID, Relationship> destinationToRelationshipsMap = HashMultimap.create();

    StreamSupport.stream(allNewAndDirtyRelationships.spliterator(), false)
            .filter(r -> !newDonatedComponents.containsKey(r.getSource().cdoID())
                    && newDonatedComponents.containsKey(r.getDestination().cdoID()))
            .forEach(r -> destinationToRelationshipsMap.put(r.getDestination().cdoID(), r));

    for (final Entry<CDOID, CDOID> entry : newDonatedComponents.entrySet()) {

        final CDOID sourceCDOID = entry.getKey();
        final CDOID targetCDOID = entry.getValue();

        final Optional<CDOObject> sourceComponent = Optional.ofNullable(newComponentsMap.get(sourceCDOID));
        final Optional<CDOObject> targetComponent = Optional
                .ofNullable(CDOUtils.getObjectIfExists(transaction, targetCDOID));

        if (sourceComponent.isPresent() && targetComponent.isPresent()) {

            if (sourceComponent.get() instanceof Concept && targetComponent.get() instanceof Concept) {

                final Concept extensionConcept = (Concept) sourceComponent.get();
                final Concept donatedConcept = (Concept) targetComponent.get();

                LOGGER.info(">>> Processing donated concept with id '{}'", donatedConcept.getId());

                unfreezeRevision(extensionConcept);

                final List<Description> additionalExtensionDescriptions = extensionConcept.getDescriptions()
                        .stream()
                        .filter(extension -> !donatedConcept.getDescriptions().stream()
                                .anyMatch(donated -> donated.getId().equals(extension.getId())))
                        .collect(toList());

                final List<Relationship> additionalExtensionRelationships = extensionConcept
                        .getOutboundRelationships().stream()
                        .filter(extension -> !donatedConcept.getOutboundRelationships().stream()
                                .anyMatch(donated -> donated.getId().equals(extension.getId())))
                        .collect(toList());

                // association refset members?
                // inactivation indicator refset members?
                // concrete domain refset members?

                // handle inbound relationships
                if (destinationToRelationshipsMap.containsKey(extensionConcept.cdoID())) {

                    Collection<Relationship> inboundRelationships = destinationToRelationshipsMap
                            .get(extensionConcept.cdoID());

                    for (Relationship relationship : inboundRelationships) {

                        Concept relationshipSourceConcept = relationship.getSource();

                        LOGGER.info("Replacing inbound reference from '{}' to '{}' with id '{}'",
                                relationshipSourceConcept.getId(), donatedConcept.getId(),
                                relationship.getId());

                        unfreezeRevision(relationshipSourceConcept);
                        relationship.setDestination(donatedConcept);

                    }

                }

                EcoreUtil.remove(extensionConcept);

                for (final Description extensionDescription : additionalExtensionDescriptions) {
                    LOGGER.info("Adding extension description to the donated version '{}' - '{}'",
                            extensionDescription.getId(), extensionDescription.getTerm());
                    donatedConcept.getDescriptions().add(extensionDescription);
                }

                for (final Relationship extensionRelationship : additionalExtensionRelationships) {

                    if (newDonatedComponents.containsKey(extensionRelationship.getDestination().cdoID())) {

                        Optional<Concept> newDestinationConcept = Optional
                                .ofNullable(CDOUtils.getObjectIfExists(transaction, newDonatedComponents
                                        .get(extensionRelationship.getDestination().cdoID())));

                        if (newDestinationConcept.isPresent()) {

                            LOGGER.info("Replacing outbound reference from '{}' to '{}' with id '{}'",
                                    donatedConcept.getId(), newDestinationConcept.get().getId(),
                                    extensionRelationship.getId());

                            extensionRelationship.setDestination(newDestinationConcept.get());
                        }
                    }

                    LOGGER.info("Adding extension relationship to the donated version with id '{}'",
                            extensionRelationship.getId());

                    donatedConcept.getOutboundRelationships().add(extensionRelationship);
                }

                LOGGER.info("<<< Processed donated concept with id '{}'", extensionConcept.getId());

            } else if (sourceComponent.get() instanceof Description
                    && targetComponent.get() instanceof Description) {

                final Description extensionDescription = (Description) sourceComponent.get();
                final Description donatedDescription = (Description) targetComponent.get();

                LOGGER.info(">>> Processing donated description with id '{}'", extensionDescription.getId());

                EcoreUtil.remove(extensionDescription);

                // association refset members?
                // inactivation indicator refset members?

                donatedDescription.getLanguageRefSetMembers()
                        .addAll(extensionDescription.getLanguageRefSetMembers());

                LOGGER.info("<<< Processed donated description with id '{}'", extensionDescription.getId());

            } else if (sourceComponent.get() instanceof Relationship
                    && targetComponent.get() instanceof Relationship) {

                final Relationship sourceRelationship = (Relationship) sourceComponent.get();

                LOGGER.info(">>> Processing donated relationship with id '{}'", sourceRelationship.getId());

                EcoreUtil.remove(sourceRelationship);

                // concrete domain members?

                LOGGER.info("<<< Processed donated relationship with id '{}'", sourceRelationship.getId());
            }

        }

    }

    for (final CDOID id : changedDonatedComponents) {

        final Optional<CDOObject> object = Optional.ofNullable(CDOUtils.getObjectIfExists(transaction, id));

        if (object.isPresent()) {

            if (object.get() instanceof Component) {

                final Component component = (Component) object.get();

                transaction.getLastSavepoint().getDirtyObjects().remove(id);
                transaction.getLastSavepoint().getRevisionDeltas().remove(id);

                LOGGER.info("Keeping latest ({}) version of donated component '{}' with id '{}'",
                        EffectiveTimes.format(component.getEffectiveTime()), component.eClass().getName(),
                        component.getId());
            }
        }
    }

    LOGGER.info("Donated component resolution finished in {}", TimeUtil.toString(stopwatch));

    return emptySet();
}

From source file:org.eclipse.xtend.core.validation.XtendValidator.java

protected void checkDispatchNonDispatchConflict(XtendClass clazz,
        Multimap<DispatchHelper.DispatchSignature, JvmOperation> dispatchMethods) {
    if (isIgnored(DISPATCH_PLAIN_FUNCTION_NAME_CLASH)) {
        return;
    }
    Multimap<DispatchHelper.DispatchSignature, XtendFunction> nonDispatchMethods = HashMultimap.create();
    for (XtendFunction method : filter(clazz.getMembers(), XtendFunction.class)) {
        if (!method.isDispatch()) {
            nonDispatchMethods.put(
                    new DispatchHelper.DispatchSignature(method.getName(), method.getParameters().size()),
                    method);
        }
    }
    for (DispatchHelper.DispatchSignature dispatchSignature : dispatchMethods.keySet()) {
        if (nonDispatchMethods.containsKey(dispatchSignature)) {
            for (XtendFunction function : nonDispatchMethods.get(dispatchSignature))
                addIssue("Non-dispatch method has same name and number of parameters as dispatch method",
                        function, XTEND_FUNCTION__NAME, DISPATCH_PLAIN_FUNCTION_NAME_CLASH);
            for (JvmOperation operation : dispatchMethods.get(dispatchSignature))
                addIssue("Dispatch method has same name and number of parameters as non-dispatch method",
                        associations.getXtendFunction(operation), XTEND_FUNCTION__NAME,
                        DISPATCH_PLAIN_FUNCTION_NAME_CLASH);
        }
    }
}

From source file:no.ssb.vtl.test.junit.GrammarRule.java

/**
 * Parse an expression starting from the given <b>ANTLR rule</b>
 * <p>
 * In order to get the Rule, use the {@link #withRule(String)} method.
 *
 * @param expression the expression to parse.
 * @param rule       the rule to start from.
 * @param diagnostic {@link DiagnosticErrorListener} will be used if true.
 * @return the resulting parse tree.
 * @throws Exception if the expression failed to parse.
 */
public ParserRuleContext parse(String expression, Rule rule, boolean diagnostic) throws Exception {
    Multimap<Integer, String> messages = LinkedListMultimap.create();

    LexerInterpreter lexerInterpreter = grammar.createLexerInterpreter(new ANTLRInputStream(expression));
    GrammarParserInterpreter parserInterpreter = grammar
            .createGrammarParserInterpreter(new CommonTokenStream(lexerInterpreter));

    BaseErrorListener errorListener;
    if (diagnostic) {
        errorListener = new DiagnosticErrorListener();
    } else {
        errorListener = new ConsoleErrorListener();
    }

    BaseErrorListener ruleErrorReporter = new BaseErrorListener() {
        @Override
        public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol, int line,
                int charPositionInLine, String msg, org.antlr.v4.runtime.RecognitionException e) {
            int startLine = line, stopLine = line;
            int startColumn = charPositionInLine, stopColumn = charPositionInLine;
            if (offendingSymbol instanceof Token) {
                Token symbol = (Token) offendingSymbol;
                int start = symbol.getStartIndex();
                int stop = symbol.getStopIndex();
                if (start >= 0 && stop >= 0) {
                    stopColumn = startColumn + (stop - start) + 1;
                }
            }

            messages.put(stopLine,
                    String.format("at [%4s:%6s]:\t%s (%s)\n", String.format("%d,%d", startLine, stopLine),
                            String.format("%d,%d", startColumn, stopColumn), msg,
                            Optional.ofNullable(e).map(ex -> ex.getClass().getSimpleName()).orElse("null")));
        }
    };

    parserInterpreter.setErrorHandler(new GrammarParserInterpreter.BailButConsumeErrorStrategy());
    lexerInterpreter.removeErrorListeners();
    parserInterpreter.removeErrorListeners();

    lexerInterpreter.addErrorListener(errorListener);
    parserInterpreter.addErrorListener(errorListener);
    lexerInterpreter.addErrorListener(ruleErrorReporter);
    parserInterpreter.addErrorListener(ruleErrorReporter);

    ParserRuleContext parse = parserInterpreter.parse(rule.index);

    if (!messages.isEmpty()) {

        StringBuilder expressionWithErrors = new StringBuilder();
        LineNumberReader expressionReader = new LineNumberReader(new StringReader(expression));
        String line;
        while ((line = expressionReader.readLine()) != null) {
            int lineNumber = expressionReader.getLineNumber();
            expressionWithErrors.append(String.format("\t%d:%s%n", lineNumber, line));
            if (messages.containsKey(lineNumber)) {
                expressionWithErrors.append(String.format("%n"));
                for (String message : messages.get(lineNumber)) {
                    expressionWithErrors.append(message);
                }
            }
        }
        throw new Exception(
                String.format("errors parsing expression:%n%n%s%n", expressionWithErrors.toString()));
    }

    return parse;
}

From source file:org.apache.accumulo.examples.wikisearch.parser.FieldIndexQueryReWriter.java

private RewriterTreeNode orNormalizedTerms(RewriterTreeNode myroot, Multimap<String, String> indexedTerms)
        throws Exception {
    // we have a multimap of FieldName to multiple FieldValues
    if (indexedTerms.isEmpty()) {
        throw new Exception("indexed Terms empty");
    }
    try {
        // NOTE: doing a depth-first enumeration didn't work when I started
        // removing nodes halfway through. The following method does work;
        // it's essentially a reverse breadth-first traversal.
        List<RewriterTreeNode> nodes = new ArrayList<RewriterTreeNode>();
        Enumeration<?> bfe = myroot.breadthFirstEnumeration();

        while (bfe.hasMoreElements()) {
            RewriterTreeNode node = (RewriterTreeNode) bfe.nextElement();
            nodes.add(node);
        }

        // walk backwards
        for (int i = nodes.size() - 1; i >= 0; i--) {
            RewriterTreeNode node = nodes.get(i);
            if (log.isDebugEnabled()) {
                log.debug("orNormalizedTerms, analyzing node: " + node.toString() + "  " + node.printNode());
            }
            if (node.getType() == ParserTreeConstants.JJTANDNODE
                    || node.getType() == ParserTreeConstants.JJTORNODE) {
                continue;
            } else if (node.getType() == ParserTreeConstants.JJTJEXLSCRIPT) {
                if (node.getChildCount() == 0) {
                    if (log.isDebugEnabled()) {
                        log.debug("orNormalizedTerms: Head node has no children!");
                    }
                    throw new Exception(); // Head node has no children.
                }
            } else {
                if (log.isDebugEnabled()) {
                    log.debug("Testing data location: " + node.getFieldName());
                }
                String fName = node.getFieldName().toString();
                String fValue = node.getFieldValue().toString();
                if (indexedTerms.containsKey(fName + ":" + fValue)) {

                    if (indexedTerms.get(fName + ":" + fValue).size() > 1) {
                        // Replace node with an OR, and make children from the multimap collection
                        node.setType(ParserTreeConstants.JJTORNODE);
                        boolean neg = node.isNegated();
                        node.setNegated(false);
                        node.setFieldName(null);
                        node.setFieldValue(null);
                        Collection<String> values = indexedTerms.get(fName + ":" + fValue);
                        for (String value : values) {
                            RewriterTreeNode n = new RewriterTreeNode(ParserTreeConstants.JJTEQNODE, fName,
                                    value, neg);
                            node.add(n);
                        }
                    } else if (indexedTerms.get(fName + ":" + fValue).size() == 1) {
                        // Straight replace
                        Collection<String> values = indexedTerms.get(fName + ":" + fValue);
                        for (String val : values) {
                            // should only be 1
                            node.setFieldValue(val);
                        }
                    }

                } else {
                    // throw new Exception("orNormalizedTerms, encountered a non-indexed term: " + node.getFieldName().toString());
                }
            }
        }
    } catch (Exception e) {
        log.debug("Caught exception in orNormalizedTerms(): " + e);
        throw new Exception("exception in: orNormalizedTerms");
    }

    return myroot;
}

From source file:org.apache.accumulo.examples.wikisearch.parser.RangeCalculator.java

/**
 * @param c
 * @param auths
 * @param indexedTerms
 * @param terms
 * @param query
 * @param logic
 * @param typeFilter
 * @throws ParseException
 */
public void execute(Connector c, Authorizations auths, Multimap<String, Normalizer> indexedTerms,
        Multimap<String, QueryTerm> terms, String query, AbstractQueryLogic logic, Set<String> typeFilter)
        throws ParseException {
    super.execute(query);
    this.c = c;
    this.auths = auths;
    this.indexedTerms = indexedTerms;
    this.termsCopy.putAll(terms);
    this.indexTableName = logic.getIndexTableName();
    this.reverseIndexTableName = logic.getReverseIndexTableName();
    this.queryThreads = logic.getQueryThreads();

    Map<MapKey, Set<Range>> indexRanges = new HashMap<MapKey, Set<Range>>();
    Map<MapKey, Set<Range>> trailingWildcardRanges = new HashMap<MapKey, Set<Range>>();
    Map<MapKey, Set<Range>> leadingWildcardRanges = new HashMap<MapKey, Set<Range>>();
    Map<Text, RangeBounds> rangeMap = new HashMap<Text, RangeBounds>();

    // Here we iterate over all of the terms in the query to determine if they are an equivalence,
    // wildcard, or range type operator
    for (Entry<String, QueryTerm> entry : terms.entries()) {
        if (entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTEQNode.class))
                || entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTERNode.class))
                || entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTLTNode.class))
                || entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTLENode.class))
                || entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTGTNode.class))
                || entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTGENode.class))) {
            // If this term is not in the set of indexed terms, then bail
            if (!indexedTerms.containsKey(entry.getKey())) {
                termCardinalities.put(entry.getKey().toUpperCase(), 0L);
                continue;
            }
            // In the case of function calls, the query term could be null. Don't query the index for it.
            if (null == entry.getValue()) {
                termCardinalities.put(entry.getKey().toUpperCase(), 0L);
                continue;
            }
            // In the case where we are looking for 'null', then skip.
            if (null == entry.getValue().getValue() || ((String) entry.getValue().getValue()).equals("null")) {
                termCardinalities.put(entry.getKey().toUpperCase(), 0L);
                continue;
            }

            // Remove the begin and end ' marks
            String value = null;
            if (((String) entry.getValue().getValue()).startsWith("'")
                    && ((String) entry.getValue().getValue()).endsWith("'"))
                value = ((String) entry.getValue().getValue()).substring(1,
                        ((String) entry.getValue().getValue()).length() - 1);
            else
                value = (String) entry.getValue().getValue();
            // The entries in the index are normalized
            for (Normalizer normalizer : indexedTerms.get(entry.getKey())) {
                String normalizedFieldValue = normalizer.normalizeFieldValue(null, value);
                Text fieldValue = new Text(normalizedFieldValue);
                Text fieldName = new Text(entry.getKey().toUpperCase());

                // EQUALS
                if (entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTEQNode.class))) {
                    Key startRange = new Key(fieldValue, fieldName);
                    Range r = new Range(startRange, true, startRange.followingKey(PartialKey.ROW), true);

                    MapKey key = new MapKey(fieldName.toString(), fieldValue.toString());
                    key.setOriginalQueryValue(value);
                    this.originalQueryValues.put(value, key);
                    if (!indexRanges.containsKey(key))
                        indexRanges.put(key, new HashSet<Range>());
                    indexRanges.get(key).add(r);
                    // WILDCARD
                } else if (entry.getValue().getOperator()
                        .equals(JexlOperatorConstants.getOperator(ASTERNode.class))) {
                    // This is a wildcard query using regex. We can only support leading and trailing wildcards at this time. Leading
                    // wildcards will need to be reversed and sent to the global reverse index. Trailing wildcard queries will be sent to the
                    // global index. In all cases, the range for the wildcard will be the range of possible Unicode codepoints, hex 0 to 10FFFF.
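                    // Illustration (hypothetical, assuming WILDCARD is the regex token ".*"): a trailing
                    // pattern "abc.*" becomes the global-index range [abc\u0000, abc\u10FFFF], while a
                    // leading pattern ".*abc" is reversed to "cba" and queried against the reverse index.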
                    int loc = normalizedFieldValue.indexOf(WILDCARD);
                    if (-1 == loc)
                        loc = normalizedFieldValue.indexOf(SINGLE_WILDCARD);
                    if (-1 == loc) {
                        // Then no wildcard in the query? Treat like the equals case above.
                        Key startRange = new Key(fieldValue, fieldName);
                        Range r = new Range(startRange, true, startRange.followingKey(PartialKey.ROW), true);

                        MapKey key = new MapKey(fieldName.toString(), fieldValue.toString());
                        key.setOriginalQueryValue(value);
                        this.originalQueryValues.put(value, key);
                        if (!indexRanges.containsKey(key))
                            indexRanges.put(key, new HashSet<Range>());
                        indexRanges.get(key).add(r);
                    } else {
                        if (loc == 0) {
                            // Then we have a leading wildcard, reverse the term and use the global reverse index.
                            StringBuilder buf = new StringBuilder(normalizedFieldValue.substring(2));
                            normalizedFieldValue = buf.reverse().toString();
                            Key startRange = new Key(new Text(normalizedFieldValue + "\u0000"), fieldName);
                            Key endRange = new Key(new Text(normalizedFieldValue + "\u10FFFF"), fieldName);
                            Range r = new Range(startRange, true, endRange, true);

                            MapKey key = new MapKey(fieldName.toString(), normalizedFieldValue);
                            key.setOriginalQueryValue(value);
                            this.originalQueryValues.put(value, key);
                            if (!leadingWildcardRanges.containsKey(key))
                                leadingWildcardRanges.put(key, new HashSet<Range>());
                            leadingWildcardRanges.get(key).add(r);
                        } else if (loc == (normalizedFieldValue.length() - 2)) {
                            normalizedFieldValue = normalizedFieldValue.substring(0, loc);
                            // Then we have a trailing wildcard character.
                            Key startRange = new Key(new Text(normalizedFieldValue + "\u0000"), fieldName);
                            Key endRange = new Key(new Text(normalizedFieldValue + "\u10FFFF"), fieldName);
                            Range r = new Range(startRange, true, endRange, true);

                            MapKey key = new MapKey(fieldName.toString(), normalizedFieldValue);
                            key.setOriginalQueryValue(value);
                            this.originalQueryValues.put(value, key);
                            if (!trailingWildcardRanges.containsKey(key))
                                trailingWildcardRanges.put(key, new HashSet<Range>());
                            trailingWildcardRanges.get(key).add(r);
                        } else {
                            // throw new RuntimeException("Unsupported wildcard location. Only trailing or leading wildcards are supported: " + normalizedFieldValue);
                            // Don't throw an exception, there must be a wildcard in the query, we'll treat it as a filter on the results since it is not
                            // leading or trailing.
                        }
                    }
                    // RANGES
                } else if (entry.getValue().getOperator()
                        .equals(JexlOperatorConstants.getOperator(ASTGTNode.class))
                        || entry.getValue().getOperator()
                                .equals(JexlOperatorConstants.getOperator(ASTGENode.class))) {
                    // Then we have a lower bound to a range query
                    if (!rangeMap.containsKey(fieldName))
                        rangeMap.put(fieldName, new RangeBounds());
                    rangeMap.get(fieldName).setLower(fieldValue);
                    rangeMap.get(fieldName).setOriginalLower(value);
                } else if (entry.getValue().getOperator()
                        .equals(JexlOperatorConstants.getOperator(ASTLTNode.class))
                        || entry.getValue().getOperator()
                                .equals(JexlOperatorConstants.getOperator(ASTLENode.class))) {
                    // Then we have an upper bound to a range query
                    if (!rangeMap.containsKey(fieldName))
                        rangeMap.put(fieldName, new RangeBounds());
                    rangeMap.get(fieldName).setUpper(fieldValue);
                    rangeMap.get(fieldName).setOriginalUpper(value);
                }
            }
        }
    }

    // INDEX RANGE QUERY
    // Now that we have figured out the range bounds, create the index ranges.
    for (Entry<Text, RangeBounds> entry : rangeMap.entrySet()) {
        if (entry.getValue().getLower() != null && entry.getValue().getUpper() != null) {
            // Figure out the key order
            Key lk = new Key(entry.getValue().getLower());
            Key up = new Key(entry.getValue().getUpper());
            Text lower = lk.getRow();
            Text upper = up.getRow();
            // Switch the order if needed.
            if (lk.compareTo(up) > 0) {
                lower = up.getRow();
                upper = lk.getRow();
            }
            Key startRange = new Key(lower, entry.getKey());
            Key endRange = new Key(upper, entry.getKey());
            Range r = new Range(startRange, true, endRange, true);
            // For the range queries we need to query the global index and then handle the results a little differently.
            Map<MapKey, Set<Range>> ranges = new HashMap<MapKey, Set<Range>>();
            MapKey key = new MapKey(entry.getKey().toString(), entry.getValue().getLower().toString());
            key.setOriginalQueryValue(entry.getValue().getOriginalLower().toString());
            this.originalQueryValues.put(entry.getValue().getOriginalLower().toString(), key);
            ranges.put(key, new HashSet<Range>());
            ranges.get(key).add(r);

            // Now query the global index and override the field value used in the results map
            try {
                Map<MapKey, TermRange> lowerResults = queryGlobalIndex(ranges, entry.getKey().toString(),
                        this.indexTableName, false, key, typeFilter);
                // Add the results to the global index results for both the upper and lower field values.
                Map<MapKey, TermRange> upperResults = new HashMap<MapKey, TermRange>();
                for (Entry<MapKey, TermRange> e : lowerResults.entrySet()) {
                    MapKey key2 = new MapKey(e.getKey().getFieldName(), entry.getValue().getUpper().toString());
                    key2.setOriginalQueryValue(entry.getValue().getOriginalUpper().toString());
                    upperResults.put(key2, e.getValue());
                    this.originalQueryValues.put(entry.getValue().getOriginalUpper(), key2);

                }

                this.globalIndexResults.putAll(lowerResults);
                this.globalIndexResults.putAll(upperResults);

            } catch (TableNotFoundException e) {
                log.error("index table not found", e);
                throw new RuntimeException(" index table not found", e);
            }
        } else {
            log.warn("Unbounded range detected, not querying index for it. Field  " + entry.getKey().toString()
                    + " in query: " + query);
        }
    }
    // Now that we have calculated all of the ranges, query the global index.
    try {

        // Query for the trailing wildcards if we have any
        for (Entry<MapKey, Set<Range>> trailing : trailingWildcardRanges.entrySet()) {
            Map<MapKey, Set<Range>> m = new HashMap<MapKey, Set<Range>>();
            m.put(trailing.getKey(), trailing.getValue());
            if (log.isDebugEnabled())
                log.debug("Ranges for Wildcard Global Index query: " + m.toString());
            this.globalIndexResults.putAll(queryGlobalIndex(m, trailing.getKey().getFieldName(),
                    this.indexTableName, false, trailing.getKey(), typeFilter));
        }

        // Query for the leading wildcards if we have any
        for (Entry<MapKey, Set<Range>> leading : leadingWildcardRanges.entrySet()) {
            Map<MapKey, Set<Range>> m = new HashMap<MapKey, Set<Range>>();
            m.put(leading.getKey(), leading.getValue());
            if (log.isDebugEnabled())
                log.debug("Ranges for Wildcard Global Reverse Index query: " + m.toString());
            this.globalIndexResults.putAll(queryGlobalIndex(m, leading.getKey().getFieldName(),
                    this.reverseIndexTableName, true, leading.getKey(), typeFilter));
        }

        // Query for the equals case
        for (Entry<MapKey, Set<Range>> equals : indexRanges.entrySet()) {
            Map<MapKey, Set<Range>> m = new HashMap<MapKey, Set<Range>>();
            m.put(equals.getKey(), equals.getValue());
            if (log.isDebugEnabled())
                log.debug("Ranges for Global Index query: " + m.toString());
            this.globalIndexResults.putAll(queryGlobalIndex(m, equals.getKey().getFieldName(),
                    this.indexTableName, false, equals.getKey(), typeFilter));
        }
    } catch (TableNotFoundException e) {
        log.error("index table not found", e);
        throw new RuntimeException(" index table not found", e);
    }

    if (log.isDebugEnabled())
        log.debug("Ranges from Global Index query: " + globalIndexResults.toString());

    // Now traverse the AST
    EvaluationContext ctx = new EvaluationContext();
    this.getAST().childrenAccept(this, ctx);

    if (ctx.lastRange.getRanges().size() == 0) {
        log.debug("No resulting range set");
    } else {
        if (log.isDebugEnabled())
            log.debug("Setting range results to: " + ctx.lastRange.getRanges().toString());
        this.result = ctx.lastRange.getRanges();
    }
}

From source file:com.facebook.presto.server.HttpRemoteTask.java

public HttpRemoteTask(Session session, TaskId taskId, String nodeId, URI location, PlanFragment planFragment,
        Multimap<PlanNodeId, Split> initialSplits, OutputBuffers outputBuffers, HttpClient httpClient,
        Executor executor, ScheduledExecutorService errorScheduledExecutor, Duration minErrorDuration,
        Duration refreshMaxWait, JsonCodec<TaskInfo> taskInfoCodec,
        JsonCodec<TaskUpdateRequest> taskUpdateRequestCodec,
        SplitCountChangeListener splitCountChangeListener) {
    requireNonNull(session, "session is null");
    requireNonNull(taskId, "taskId is null");
    requireNonNull(nodeId, "nodeId is null");
    requireNonNull(location, "location is null");
    requireNonNull(planFragment, "planFragment is null");
    requireNonNull(outputBuffers, "outputBuffers is null");
    requireNonNull(httpClient, "httpClient is null");
    requireNonNull(executor, "executor is null");
    requireNonNull(taskInfoCodec, "taskInfoCodec is null");
    requireNonNull(taskUpdateRequestCodec, "taskUpdateRequestCodec is null");
    requireNonNull(splitCountChangeListener, "splitCountChangeListener is null");

    try (SetThreadName ignored = new SetThreadName("HttpRemoteTask-%s", taskId)) {
        this.taskId = taskId;
        this.session = session;
        this.nodeId = nodeId;
        this.planFragment = planFragment;
        this.outputBuffers.set(outputBuffers);
        this.httpClient = httpClient;
        this.executor = executor;
        this.errorScheduledExecutor = errorScheduledExecutor;
        this.taskInfoCodec = taskInfoCodec;
        this.taskUpdateRequestCodec = taskUpdateRequestCodec;
        this.updateErrorTracker = new RequestErrorTracker(taskId, location, minErrorDuration,
                errorScheduledExecutor, "updating task");
        this.getErrorTracker = new RequestErrorTracker(taskId, location, minErrorDuration,
                errorScheduledExecutor, "getting info for task");
        this.splitCountChangeListener = splitCountChangeListener;

        for (Entry<PlanNodeId, Split> entry : requireNonNull(initialSplits, "initialSplits is null")
                .entries()) {
            ScheduledSplit scheduledSplit = new ScheduledSplit(nextSplitId.getAndIncrement(), entry.getValue());
            pendingSplits.put(entry.getKey(), scheduledSplit);
        }
        if (initialSplits.containsKey(planFragment.getPartitionedSource())) {
            pendingSourceSplitCount = initialSplits.get(planFragment.getPartitionedSource()).size();
            fireSplitCountChanged(pendingSourceSplitCount);
        }

        List<BufferInfo> bufferStates = outputBuffers.getBuffers().keySet().stream()
                .map(outputId -> new BufferInfo(outputId, false, 0, 0, PageBufferInfo.empty()))
                .collect(toImmutableList());

        TaskStats taskStats = new TaskStats(DateTime.now(), null);

        taskInfo = new StateMachine<>("task " + taskId, executor,
                new TaskInfo(taskId, Optional.empty(), TaskInfo.MIN_VERSION, TaskState.PLANNED, location,
                        DateTime.now(),
                        new SharedBufferInfo(BufferState.OPEN, true, true, 0, 0, 0, 0, bufferStates),
                        ImmutableSet.<PlanNodeId>of(), taskStats, ImmutableList.<ExecutionFailureInfo>of()));

        long timeout = minErrorDuration.toMillis() / 3;
        requestTimeout = new Duration(timeout + refreshMaxWait.toMillis(), MILLISECONDS);
        continuousTaskInfoFetcher = new ContinuousTaskInfoFetcher(refreshMaxWait);
    }
}

From source file:edu.udo.scaffoldhunter.model.db.DbManagerHibernate.java

@Override
@SuppressWarnings("unchecked")
public Scaffold getScaffolds(Subset subset, boolean cutStem) throws DatabaseException {
    Preconditions.checkNotNull(subset.getSession());
    Preconditions.checkNotNull(subset.getSession().getTree());

    List<Scaffold> scaffoldList;
    Tree tree = subset.getSession().getTree();
    Session hibernateSession = null;

    try {
        hibernateSession = sessionFactory.getCurrentSession();
        hibernateSession.beginTransaction();
        // loading the whole tree and then throwing away the scaffolds we
        // don't need seems to be much faster than retrieving only the
        // scaffolds with generation molecules in the current subset
        Criteria criteriaScaf = hibernateSession.createCriteria(Scaffold.class)
                .add(Restrictions.eq("tree", tree));
        scaffoldList = criteriaScaf.list();
    } catch (HibernateException ex) {
        logger.error("Query from Scaffold failed.\n{}\n{}", ex, stacktrace(ex));
        closeAndRollBackErroneousSession(hibernateSession);
        throw new DatabaseException("Query from Scaffold failed", ex);
    }

    Map<Integer, Molecule> mols = new HashMap<Integer, Molecule>();
    Set<Molecule> subMols = subset.getMolecules();
    for (Molecule m : subMols)
        mols.put(m.getId(), m);

    Set<Scaffold> scaffolds = Sets.newHashSet();

    /*
     * determine which scaffolds have molecules in the subset and add
     * molecules to scaffolds
     */
    try {
        hibernateSession = sessionFactory.getCurrentSession();

        /*
         * load tuples (ScaffoldId, GenerationMoleculeId) for the current
         * tree
         */
        Criteria criteria = hibernateSession.createCriteria(Scaffold.class)
                .createAlias("generationMolecules", "mols").add(Restrictions.eq("tree", tree))
                .setProjection(Projections.projectionList().add(Projections.id())
                        .add(Projections.property("mols.id")));

        List<Object[]> tuples = criteria.list();
        Multimap<Integer, Molecule> scaffoldMolecules = HashMultimap.create(scaffoldList.size(), 10);
        for (Object[] t : tuples) {
            Molecule mol = mols.get(t[1]);
            if (mol != null)
                scaffoldMolecules.put((Integer) t[0], mol);
        }

        for (Scaffold s : scaffoldList) {
            if (!scaffoldMolecules.containsKey(s.id))
                continue;
            Collection<Molecule> subScafMols = scaffoldMolecules.get(s.id);
            s.setMolecules(Sets.newHashSet(subScafMols));
            scaffolds.add(s);
        }

        hibernateSession.getTransaction().commit();
    } catch (HibernateException ex) {
        logger.error("Query from Molecule failed.\n{}\n{}", ex, stacktrace(ex));
        closeAndRollBackErroneousSession(hibernateSession);
        throw new DatabaseException("Query from Molecule failed", ex);
    }

    /*
     * add to the set those parent scaffolds that do not have molecules and
     * thus were not returned from the database
     */
    Set<Scaffold> parents = new HashSet<Scaffold>();
    for (Scaffold s : scaffolds) {
        addParents(s, parents, scaffolds);
    }
    scaffolds.addAll(parents);

    if (scaffolds.isEmpty())
        return null;

    Scaffold root = Scaffolds.getRoot(scaffolds.iterator().next());
    Scaffolds.sort(root, Orderings.STRUCTURE_BY_ID);

    for (Scaffold s : Scaffolds.getSubtreePreorderIterable(root)) {
        s.setTree(tree);
    }
    // remove the imaginary root if it has only one child
    if (root.getChildren().size() == 1) {
        root = root.getChildren().get(0);
        root.setParent(null);
    }
    // remove virtual root scaffolds with only one child
    while (cutStem && root.getChildren().size() == 1 && root.getMolecules().isEmpty()) {
        root = root.getChildren().get(0);
    }
    root.setParent(null);
    return root;
}

From source file:org.apache.crunch.impl.mr.plan.MSCRPlanner.java

private Multimap<Vertex, JobPrototype> constructJobPrototypes(List<Vertex> component) {
    Multimap<Vertex, JobPrototype> assignment = HashMultimap.create();
    List<Vertex> gbks = Lists.newArrayList();
    for (Vertex v : component) {
        if (v.isGBK()) {
            gbks.add(v);
        }
    }

    if (gbks.isEmpty()) {
        HashMultimap<Target, NodePath> outputPaths = HashMultimap.create();
        for (Vertex v : component) {
            if (v.isInput()) {
                for (Edge e : v.getOutgoingEdges()) {
                    for (NodePath nodePath : e.getNodePaths()) {
                        PCollectionImpl target = nodePath.tail();
                        for (Target t : outputs.get(target)) {
                            outputPaths.put(t, nodePath);
                        }
                    }
                }
            }
        }
        if (outputPaths.isEmpty()) {
            throw new IllegalStateException("No outputs?");
        }
        JobPrototype prototype = JobPrototype.createMapOnlyJob(++lastJobID, outputPaths,
                pipeline.createTempPath());
        for (Vertex v : component) {
            assignment.put(v, prototype);
        }
    } else {
        Set<Edge> usedEdges = Sets.newHashSet();
        for (Vertex g : gbks) {
            Set<NodePath> inputs = Sets.newHashSet();
            HashMultimap<Target, NodePath> mapSideOutputPaths = HashMultimap.create();
            for (Edge e : g.getIncomingEdges()) {
                inputs.addAll(e.getNodePaths());
                usedEdges.add(e);
                if (e.getHead().isInput()) {
                    for (Edge ep : e.getHead().getOutgoingEdges()) {
                        if (ep.getTail().isOutput() && !usedEdges.contains(ep)) { // map-side output
                            for (Target t : outputs.get(ep.getTail().getPCollection())) {
                                mapSideOutputPaths.putAll(t, ep.getNodePaths());
                            }
                            usedEdges.add(ep);
                        }
                    }
                }
            }
            JobPrototype prototype = JobPrototype.createMapReduceJob(++lastJobID,
                    (PGroupedTableImpl) g.getPCollection(), inputs, pipeline.createTempPath());
            prototype.addMapSideOutputs(mapSideOutputPaths);
            assignment.put(g, prototype);
            for (Edge e : g.getIncomingEdges()) {
                assignment.put(e.getHead(), prototype);
                if (e.getHead().isInput()) {
                    for (Edge ep : e.getHead().getOutgoingEdges()) {
                        if (ep.getTail().isOutput() && !assignment.containsKey(ep.getTail())) { // map-side output
                            assignment.put(ep.getTail(), prototype);
                        }
                    }
                }
            }

            HashMultimap<Target, NodePath> outputPaths = HashMultimap.create();
            for (Edge e : g.getOutgoingEdges()) {
                Vertex output = e.getTail();
                for (Target t : outputs.get(output.getPCollection())) {
                    outputPaths.putAll(t, e.getNodePaths());
                }
                assignment.put(output, prototype);
                usedEdges.add(e);
            }
            prototype.addReducePaths(outputPaths);
        }

        // Check for any un-assigned vertices, which should be map-side outputs
        // that we will need to run in a map-only job.
        HashMultimap<Target, NodePath> outputPaths = HashMultimap.create();
        Set<Vertex> orphans = Sets.newHashSet();
        for (Vertex v : component) {
            // Check if this vertex has multiple inputs but only a subset of
            // them have already been assigned
            boolean vertexHasUnassignedIncomingEdges = false;
            if (v.isOutput()) {
                for (Edge e : v.getIncomingEdges()) {
                    if (!usedEdges.contains(e)) {
                        vertexHasUnassignedIncomingEdges = true;
                    }
                }
            }

            if (v.isOutput() && (vertexHasUnassignedIncomingEdges || !assignment.containsKey(v))) {
                orphans.add(v);
                for (Edge e : v.getIncomingEdges()) {
                    if (vertexHasUnassignedIncomingEdges && usedEdges.contains(e)) {
                        // We've already dealt with this incoming edge
                        continue;
                    }
                    orphans.add(e.getHead());
                    for (NodePath nodePath : e.getNodePaths()) {
                        PCollectionImpl target = nodePath.tail();
                        for (Target t : outputs.get(target)) {
                            outputPaths.put(t, nodePath);
                        }
                    }
                }
            }

        }
        if (!outputPaths.isEmpty()) {
            JobPrototype prototype = JobPrototype.createMapOnlyJob(++lastJobID, outputPaths,
                    pipeline.createTempPath());
            for (Vertex orphan : orphans) {
                assignment.put(orphan, prototype);
            }
        }
    }

    return assignment;
}

From source file:net.yacy.grid.io.index.YaCyQuery.java

private QueryBuilder parse(String q, int timezoneOffset) {
    // detect usage of the OR connective. Because of the preparse step we will have only OR or only AND here.
    q = q.replaceAll(" AND ", " "); // AND is default
    boolean ORconnective = q.indexOf(" OR ") >= 0;
    q = q.replaceAll(" OR ", " "); // if we know that all terms are OR, we remove that and apply it later. Because we split into OR groups it is correct to use OR only here

    // tokenize the query
    Set<String> qe = new LinkedHashSet<String>();
    Matcher m = tokenizerPattern.matcher(q);
    while (m.find())
        qe.add(m.group(1));

    // twitter search syntax:
    //   term1 term2 term3 - all three terms shall appear
    //   "term1 term2 term3" - exact match of all terms
    //   term1 OR term2 OR term3 - any of the three terms shall appear
    //   from:user - tweets posted from that user
    //   to:user - tweets posted to that user
    //   @user - tweets which mention that user
    //   near:"location" within:xmi - tweets that are near that location
    //   #hashtag - tweets containing the given hashtag
    //   since:2015-04-01 until:2015-04-03 - tweets within given time range
    // additional constraints:
    //   /image /audio /video /place - restrict to tweets which have attached images, audio, video or place
    ArrayList<String> text_positive_match = new ArrayList<>();
    ArrayList<String> text_negative_match = new ArrayList<>();
    ArrayList<String> text_positive_filter = new ArrayList<>();
    ArrayList<String> text_negative_filter = new ArrayList<>();
    Multimap<String, String> modifier = HashMultimap.create();
    Set<String> constraints_positive = new HashSet<>();
    Set<String> constraints_negative = new HashSet<>();
    for (String t : qe) {
        if (t.length() == 0)
            continue;
        if (t.startsWith("/")) {
            constraints_positive.add(t.substring(1));
            continue;
        } else if (t.startsWith("-/")) {
            constraints_negative.add(t.substring(2));
            continue;
        } else if (t.indexOf(':') > 0) {
            int p = t.indexOf(':');
            String name = t.substring(0, p).toLowerCase();
            String value = t.substring(p + 1);
            if (value.indexOf('|') > 0) {
                String[] values = value.split("\\|");
                for (String v : values) {
                    modifier.put(name, v);
                }
            } else {
                modifier.put(name, value);
            }
            continue;
        } else {
            // patch characters that will confuse elasticsearch or have a different meaning
            boolean negative = t.startsWith("-");
            if (negative)
                t = t.substring(1);
            if (t.length() == 0)
                continue;
            if ((t.charAt(0) == dq && t.charAt(t.length() - 1) == dq)
                    || (t.charAt(0) == sq && t.charAt(t.length() - 1) == sq)) {
                t = t.substring(1, t.length() - 1);
                if (negative) {
                    text_negative_filter.add(t);
                    this.negativeBag.add(t);
                } else {
                    text_positive_filter.add(t);
                    this.positiveBag.add(t);
                }
            } else if (t.indexOf('-') > 0) {
                // this must be handled like a quoted string without the minus
                t = t.replace('-', space);
                if (negative) {
                    text_negative_filter.add(t);
                    this.negativeBag.add(t);
                } else {
                    text_positive_filter.add(t);
                    this.positiveBag.add(t);
                }
            } else {
                if (negative) {
                    text_negative_match.add(t);
                    this.negativeBag.add(t);
                } else {
                    text_positive_match.add(t);
                    this.positiveBag.add(t);
                }
            }
            continue;
        }
    }
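    // Worked illustration (hypothetical query): for q = "apple -banana site:example.com /image"
    // the loop above leaves text_positive_match = [apple], text_negative_match = [banana],
    // modifier = {site=[example.com]} and constraints_positive = [image].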

    // construct a ranking
    if (modifier.containsKey("boost")) {
        this.boosts.patchWithModifier(modifier.get("boost").iterator().next());
    }

    // compose query for text
    List<QueryBuilder> queries = new ArrayList<>();
    // fuzzy matching
    if (!text_positive_match.isEmpty())
        queries.add(simpleQueryBuilder(String.join(" ", text_positive_match), ORconnective, boosts));
    if (!text_negative_match.isEmpty())
        queries.add(QueryBuilders.boolQuery()
                .mustNot(simpleQueryBuilder(String.join(" ", text_negative_match), ORconnective, boosts)));
    // exact matching
    for (String text : text_positive_filter) {
        queries.add(exactMatchQueryBuilder(text, this.boosts));
    }
    for (String text : text_negative_filter) {
        queries.add(QueryBuilders.boolQuery().mustNot(exactMatchQueryBuilder(text, this.boosts)));
    }

    // apply modifiers
    Collection<String> values;
    modifier_handling: for (String[] modifierType : modifierTypes) {
        String modifier_name = modifierType[0];
        String index_name = modifierType[1];

        if ((values = modifier.get(modifier_name)).size() > 0) {
            if (modifier_name.equals("yacy")) {
                values.forEach(y -> this.yacyModifiers.add(y));
                continue modifier_handling;
            }
            if (modifier_name.equals("site") && values.size() == 1) {
                String host = values.iterator().next();
                if (host.startsWith("www."))
                    values.add(host.substring(4));
                else
                    values.add("www." + host);
            }
            queries.add(QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(index_name, values)));
            continue modifier_handling;
        }

        if ((values = modifier.get("-" + modifier_name)).size() > 0) {
            if (modifier_name.equals("site") && values.size() == 1) {
                String host = values.iterator().next();
                if (host.startsWith("www."))
                    values.add(host.substring(4));
                else
                    values.add("www." + host);
            }
            queries.add(QueryBuilders.boolQuery()
                    .mustNot(QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(index_name, values))));
            continue modifier_handling;
        }
    }
    if (modifier.containsKey("collection") && (this.collections == null || this.collections.length == 0)) {
        Collection<String> c = modifier.get("collection");
        this.collections = c.toArray(new String[c.size()]);
    }
    if (modifier.containsKey("daterange")) {
        String dr = modifier.get("daterange").iterator().next();
        if (dr.length() > 0) {
            String from_to[] = dr.endsWith("..") ? new String[] { dr.substring(0, dr.length() - 2), "" }
                    : dr.startsWith("..") ? new String[] { "", dr.substring(2) } : dr.split("\\.\\.");
            if (from_to.length == 2) {
                if (from_to[0] != null && from_to[0].length() > 0)
                    try {
                        modifier.put("since", DateParser.dayDateFormat
                                .format(DateParser.parse(from_to[0], timezoneOffset).getTime()));
                    } catch (ParseException e) {
                    }
                if (from_to[1] != null && from_to[1].length() > 0)
                    try {
                        modifier.put("until", DateParser.dayDateFormat
                                .format(DateParser.parse(from_to[1], timezoneOffset).getTime()));
                    } catch (ParseException e) {
                    }
            }
        }
    }
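    // Daterange illustration (hypothetical values): "2015-04-01..2015-04-03" fills both
    // since and until; "2015-04-01.." fills only since; "..2015-04-03" fills only until.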
    if (modifier.containsKey("since"))
        try {
            Calendar since = DateParser.parse(modifier.get("since").iterator().next(), timezoneOffset);
            this.since = since.getTime();
            RangeQueryBuilder rangeQuery = QueryBuilders
                    .rangeQuery(WebMapping.last_modified.getMapping().name())
                    .from(DateParser.formatGSAFS(this.since));
            if (modifier.containsKey("until")) {
                Calendar until = DateParser.parse(modifier.get("until").iterator().next(), timezoneOffset);
                if (until.get(Calendar.HOUR) == 0 && until.get(Calendar.MINUTE) == 0) {
                    // until must be the day which is included in results.
                    // To get the result within the same day, we must add one day.
                    until.add(Calendar.DATE, 1);
                }
                this.until = until.getTime();
                rangeQuery.to(DateParser.formatGSAFS(this.until));
            } else {
                this.until = new Date(Long.MAX_VALUE);
            }
            queries.add(rangeQuery);
        } catch (ParseException e) {
        }
    else if (modifier.containsKey("until"))
        try {
            Calendar until = DateParser.parse(modifier.get("until").iterator().next(), timezoneOffset);
            if (until.get(Calendar.HOUR) == 0 && until.get(Calendar.MINUTE) == 0) {
                // until must be the day which is included in results.
                // To get the result within the same day, we must add one day.
                until.add(Calendar.DATE, 1);
            }
            this.until = until.getTime();
            RangeQueryBuilder rangeQuery = QueryBuilders
                    .rangeQuery(WebMapping.last_modified.getMapping().name())
                    .to(DateParser.formatGSAFS(this.until));
            queries.add(rangeQuery);
        } catch (ParseException e) {
        }

    // now combine queries with OR or AND operator

    // simple case where we have one query only
    if (queries.size() == 1) {
        return queries.iterator().next();
    }

    BoolQueryBuilder b = QueryBuilders.boolQuery();
    for (QueryBuilder filter : queries) {
        if (ORconnective)
            b.should(filter);
        else
            b.must(filter);
    }
    if (ORconnective)
        b.minimumShouldMatch(1);

    return b;
}