Example usage for org.apache.lucene.index.memory MemoryIndex search

List of usage examples for org.apache.lucene.index.memory MemoryIndex search

Introduction

On this page you can find an example usage for org.apache.lucene.index.memory MemoryIndex search.

Prototype

public float search(Query query) 

Source Link

Document

Convenience method that efficiently returns the relevance score by matching this index against the given Lucene query expression.

Usage

From source file:com.appspot.socialinquirer.server.service.impl.StackExchangeServiceImpl.java

License:Apache License

/**
 * Match with keywords./*from  ww w . jav  a  2  s. co m*/
 *
 * @param keywords the keywords
 * @param userText the user text
 * @return true, if successful
 */
public boolean matchWithKeywords(List<String> keywords, String userText) {
    MemoryIndex index = new MemoryIndex();
    index.addField("text", userText, createEnglishAnalyzer());
    QueryParser parser = new QueryParser("text", createEnglishAnalyzer());
    BooleanQuery query = new BooleanQuery();
    for (String keyword : keywords) {
        try {
            query.add(parser.parse(keyword), BooleanClause.Occur.SHOULD);
        } catch (ParseException e) {
        }
    }

    float score = index.search(query);

    return score > 0.0f;
}

From source file:com.jaeksoft.searchlib.classifier.ClassifierItem.java

License:Open Source License

/**
 * Scores the given in-memory index against the classifier query for the
 * supplied language, lazily building the query on first use and caching it
 * in {@code queryMap} for subsequent calls.
 *
 * @param client the client used to build the search request on a cache miss
 * @param lang   the language whose classifier query is applied
 * @param index  the in-memory document index to score
 * @return the relevance score of the cached (or freshly built) query
 */
protected final float score(Client client, LanguageEnum lang, MemoryIndex index)
        throws ParseException, SearchLibException, SyntaxError, IOException {
    Query cached = queryMap.get(lang);
    if (cached != null) {
        return index.search(cached);
    }
    // Cache miss: derive the query from a language-specific search request
    // and memoize it for later invocations.
    AbstractSearchRequest request = getSearchRequest(client, lang);
    Query built = request.getQuery();
    queryMap.put(lang, built);
    return index.search(built);
}

From source file:com.jaeksoft.searchlib.snippet.Fragment.java

License:Open Source License

/**
 * Computes, stores and returns the relevance of this fragment's original
 * text against the given query, using a throwaway in-memory index.
 *
 * @param fieldName the field name under which the text is indexed
 * @param analyzer  the analyzer used to tokenize the text; may be null
 * @param query     the query to score; may be null
 * @return the score, or 0 when either the query or the analyzer is missing
 */
public final double searchScore(final String fieldName, final CompiledAnalyzer analyzer, final Query query) {
    searchScore = 0;
    // Without both an analyzer and a query there is nothing to score.
    if (analyzer == null || query == null) {
        return 0;
    }
    final MemoryIndex oneDocIndex = new MemoryIndex();
    oneDocIndex.addField(fieldName, originalText, analyzer);
    searchScore = oneDocIndex.search(query);
    return searchScore;
}

From source file:com.orientechnologies.lucene.operator.OLuceneTextOperator.java

License:Apache License

/**
 * Evaluates the full-text condition for a single record by building a
 * transient in-memory Lucene index of that record and scoring the query
 * against it.
 *
 * @return Boolean true when the record matches the lucene query
 */
@Override
public Object evaluateRecord(OIdentifiable iRecord, ODocument iCurrentResult, OSQLFilterCondition iCondition,
        Object iLeft, Object iRight, OCommandContext iContext) {

    OLuceneFullTextIndex index = involvedIndex(iRecord, iCurrentResult, iCondition, iLeft, iRight);
    if (index == null) {
        throw new OCommandExecutionException("Cannot evaluate lucene condition without index configuration.");
    }

    // A single MemoryIndex instance is cached in the command context and
    // recycled across records via reset().
    MemoryIndex recordIndex = (MemoryIndex) iContext.getVariable("_memoryIndex");
    if (recordIndex == null) {
        recordIndex = new MemoryIndex();
        iContext.setVariable("_memoryIndex", recordIndex);
    }
    recordIndex.reset();

    // Index every field of the record's document with its own analyzer.
    Document doc = index.buildDocument(iLeft);
    for (IndexableField field : doc.getFields()) {
        recordIndex.addField(field.name(), field.stringValue(), index.analyzer(field.name()));
    }

    Query query;
    try {
        query = index.buildQuery(iRight);
    } catch (Exception e) {
        throw new OCommandExecutionException("Error executing lucene query.", e);
    }
    return recordIndex.search(query) > 0.0f;
}

From source file:com.orientechnologies.lucene.test.LuceneBooleanIndexTest.java

License:Apache License

@Test
public void testMemoryIndex() throws ParseException {
    // Build a one-document in-memory index from a Lucene Document.
    MemoryIndex index = new MemoryIndex();

    Document doc = new Document();
    doc.add(new StringField("text", "my text", Field.Store.YES));
    StandardAnalyzer analyzer = new StandardAnalyzer();

    for (IndexableField field : doc.getFields()) {
        index.addField(field.name(), field.stringValue(), analyzer);
    }

    QueryParser parser = new QueryParser("text", analyzer);
    float score = index.search(parser.parse("+text:my"));

    // Previously the score was computed but never checked, so this test
    // could never fail. AssertionError keeps it framework-neutral.
    if (!(score > 0.0f)) {
        throw new AssertionError("query '+text:my' should match the indexed document, score=" + score);
    }
}

From source file:com.orientechnologies.lucene.tx.OLuceneTxChangesMultiRid.java

License:Apache License

/**
 * Tells whether the given record is marked as deleted in this transaction.
 *
 * <p>For every delete query registered under the record's identity, the
 * document is re-indexed in memory and matched; the record is considered
 * deleted as soon as one delete query scores above zero.
 *
 * @param document the Lucene document of the record
 * @param key      the index key (not used by this check)
 * @param value    the record identity whose pending deletes are looked up
 * @return true if at least one pending delete query matches the document
 */
public boolean isDeleted(Document document, Object key, OIdentifiable value) {
    List<String> strings = deleted.get(value.getIdentity().toString());
    if (strings == null) {
        return false;
    }
    MemoryIndex memoryIndex = new MemoryIndex();
    // Hoisted out of the loops: one analyzer instance serves every field
    // (previously a new KeywordAnalyzer was created per field per query).
    KeywordAnalyzer analyzer = new KeywordAnalyzer();
    for (String string : strings) {
        Query q = engine.deleteQuery(string, value);
        memoryIndex.reset();
        for (IndexableField field : document.getFields()) {
            memoryIndex.addField(field.name(), field.stringValue(), analyzer);
        }
        if (memoryIndex.search(q) > 0.0f) {
            // One matching delete query is sufficient; stop early.
            return true;
        }
    }
    return false;
}

From source file:net.yacy.search.index.SingleDocumentMatcher.java

License:Open Source License

/**
 * Check a given Solr document against a Solr query, without requesting a Solr
 * index, but using instead in-memory Lucene utility. This lets checking if a
 * single document matches some criterias, before adding it to a Solr index.
 * /*w ww .  j a v  a2  s  .  c o m*/
 * @param solrDoc
 *            the Solr document to check
 * @param query
 *            a standard Solr query string
 * @param core
 *            the Solr index core holding the Solr schema of the document
 * @return true when the document matches the given Solr query
 * @throws SyntaxError
 *             when the query String syntax is not valid
 * @throws SolrException when a query required element is missing, or when a problem occurred when accessing the target core
 * @throws IllegalArgumentException
 *             when a parameter is null.
 * @see <a href=
 *      "http://lucene.apache.org/solr/guide/6_6/the-standard-query-parser.html">The
 *      Solr Standard Query Parser</a>
 */
public static boolean matches(final SolrInputDocument solrDoc, final String query, final SolrCore core)
        throws SyntaxError, IllegalArgumentException {
    if (solrDoc == null || query == null || core == null) {
        throw new IllegalArgumentException("All parameters must be non null");
    }
    final IndexSchema schema = core.getLatestSchema();
    if (schema == null) {
        throw new IllegalArgumentException("All parameters must be non null");
    }

    final org.apache.lucene.document.Document luceneDoc = DocumentBuilder.toDocument(solrDoc, schema);

    final Analyzer indexAnalyzer = schema.getIndexAnalyzer();

    /*
     * Using the Lucene RAMDirectory could be an alternative, but it is slower with
     * a larger memory footprint
     */
    final MemoryIndex index = MemoryIndex.fromDocument(luceneDoc, indexAnalyzer);

    final Query luceneQuery = toLuceneQuery(query, core);

    final float score = index.search(luceneQuery);

    return score > 0.0f;
}

From source file:org.apache.uima.lucas.ProspectiveSearchAE.java

License:Apache License

/**
 * Matches every registered search query against the current CAS document
 * using a single-document in-memory Lucene index, and records a search
 * result feature structure for each query whose score exceeds the
 * configured threshold. Optionally links the annotations of matching
 * tokens for highlighting.
 *
 * @param aCAS the CAS whose document text is indexed and searched
 * @throws AnalysisEngineProcessException when reading a token stream fails
 */
@Override
public void process(CAS aCAS) throws AnalysisEngineProcessException {

    // First create the index of the document text
    MemoryIndex index = new MemoryIndex();

    // Raw List: createDocument's element type is not visible here —
    // presumably Lucene Fields; verify against createDocument's signature.
    List fields = createDocument(aCAS).getFields();

    for (Iterator it = fields.iterator(); it.hasNext();) {
        Field field = (Field) it.next();

        // Only indexable fields with a pre-analyzed token stream are added.
        if (field.isIndexed() && field.tokenStreamValue() != null) {
            index.addField(field.name(), field.tokenStreamValue());
        }
    }

    // Search all queries against the one document index
    for (SearchQuery query : searchQueryProvider.getSearchQueries(aCAS)) {

        float score = index.search(query.query());

        if (score > matchingThreshold) {

            // Add a FS to the CAS with the search result
            FeatureStructure searchResult = aCAS.createFS(searchResultType);
            searchResult.setLongValue(searchResultIdFeature, query.id());
            aCAS.addFsToIndexes(searchResult);

            // Find matching tokens and link their annotations
            // in case the user wants search term highlighting
            if (searchResultMatchingTextFeature != null) {

                // Token streams are one-shot, so the document fields are
                // recreated for the highlighting pass.
                fields = createDocument(aCAS).getFields();

                for (Iterator it = fields.iterator(); it.hasNext();) {

                    Field field = (Field) it.next();

                    if (field.isIndexed() && field.tokenStreamValue() != null) {

                        TokenStream tokenStream = field.tokenStreamValue();

                        Collection<AnnotationFS> matchingTextAnnotations = new LinkedList<AnnotationFS>();

                        // QueryScorer scores each token of this field
                        // against the matched query.
                        QueryScorer scorer = new QueryScorer(query.query(), field.name());
                        scorer.startFragment(new TextFragment(new StringBuffer(aCAS.getDocumentText()), 0, 0));

                        try {
                            scorer.init(tokenStream);

                            OffsetAttribute offsetAttr = null;
                            while (tokenStream.incrementToken()) {
                                offsetAttr = (OffsetAttribute) tokenStream.getAttribute(OffsetAttribute.class);
                                float tokenScore = scorer.getTokenScore();
                                // Any positive token score marks the token
                                // span as part of the match.
                                if (tokenScore > 0) {
                                    AnnotationFS annotation = aCAS.createAnnotation(matchingTextType,
                                            offsetAttr.startOffset(), offsetAttr.endOffset());

                                    matchingTextAnnotations.add(annotation);
                                }
                            }
                        } catch (IOException e) {
                            throw new AnalysisEngineProcessException(e);
                        }

                        // Copy the matched annotations into a CAS array and
                        // attach it to the search result for highlighting.
                        ArrayFS matchtingTextArray = aCAS.createArrayFS(matchingTextAnnotations.size());

                        int matchtingTextArrayIndex = 0;
                        for (AnnotationFS matchingTextAnnotation : matchingTextAnnotations) {
                            matchtingTextArray.set(matchtingTextArrayIndex++, matchingTextAnnotation);
                        }

                        searchResult.setFeatureValue(searchResultMatchingTextFeature, matchtingTextArray);
                    }
                }
            }
        }
    }
}

From source file:org.gridkit.coherence.search.lucene.LuceneSearchPlugin.java

License:Apache License

/**
 * Matches a single document, given as an array of pre-analyzed Lucene
 * fields, against the query using a transient in-memory index.
 *
 * @param query    the Lucene query to evaluate
 * @param document the document, expected to be a {@code Field[]}
 * @return true when the query scores above zero against the document
 */
@Override
public boolean evaluate(Query query, Object document) {
    final MemoryIndex scratch = new MemoryIndex();
    for (Field docField : (Field[]) document) {
        scratch.addField(docField.name(), docField.tokenStreamValue(), docField.getBoost());
    }
    final float relevance = scratch.search(query);
    return relevance > 0.0f;
}

From source file:org.openspaces.textsearch.LuceneTextSearchQueryExtensionManager.java

License:Open Source License

/**
 * Decides whether a grid value satisfies a full-text query by scoring the
 * parsed query against a single-field in-memory index built from the value.
 *
 * @param typeName    the space type name, used to resolve the analyzer
 * @param path        the property path, used to resolve the analyzer
 * @param operation   the operation name; must be a supported one
 * @param gridValue   the candidate value coming from the grid
 * @param luceneQuery the full-text query expression
 * @return true when the query matches the grid value
 */
@Override
public boolean accept(String typeName, String path, String operation, Object gridValue, Object luceneQuery) {
    Assert.notNull(gridValue, "Provided value from grid is null");
    Assert.notNull(luceneQuery, "Provided lucene query is null");
    validateOperationName(operation);

    if (_logger.isLoggable(Level.FINE))
        _logger.log(Level.FINE, "filter [operation=" + operation + ", leftOperand(value from grid)=" + gridValue
                + ", rightOperand(lucene query)=" + luceneQuery + "]");

    final String content = String.valueOf(gridValue);
    final String queryText = String.valueOf(luceneQuery);
    try {
        final Analyzer analyzer = getAnalyzer(typeName, path);
        final MemoryIndex scratch = new MemoryIndex();
        scratch.addField("content", content, analyzer);
        final Query parsed = new QueryParser("content", analyzer).parse(queryText);
        return scratch.search(parsed) > 0.0f;
    } catch (ParseException e) {
        throw new SpaceRuntimeException("Could not parse full text query [ " + luceneQuery + " ]", e);
    }
}