Example usage for org.apache.lucene.queryparser.classic MultiFieldQueryParser MultiFieldQueryParser

Introduction

This page collects example usages of the org.apache.lucene.queryparser.classic MultiFieldQueryParser constructor.

Prototype

public MultiFieldQueryParser(String[] fields, Analyzer analyzer, Map<String, Float> boosts) 

Document

Creates a MultiFieldQueryParser that parses queries against the given fields with the supplied analyzer, applying the per-field boosts from the map.
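
Most of the examples below use older overloads that also take a Version argument (removed in Lucene 5.0). As a minimal sketch of the boosted constructor shown in the prototype, assuming a recent Lucene release (5.x or later) and illustrative field names and boost values:

import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.search.Query;

public class BoostedParserSketch {
    public static void main(String[] args) throws Exception {
        // Boost title matches over content matches (values are illustrative).
        Map<String, Float> boosts = new HashMap<>();
        boosts.put("title", 5.0f);
        boosts.put("content", 1.0f);

        MultiFieldQueryParser parser = new MultiFieldQueryParser(
                new String[] { "title", "content" }, new StandardAnalyzer(), boosts);

        // Each term is expanded across both fields with the configured boosts.
        Query query = parser.parse("apache lucene");
        System.out.println(query);
    }
}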

Usage

From source file:org.apache.derbyTesting.functionTests.tests.lang.LuceneSupportTest.java

License:Apache License

/**
 * Create a simple query parser for multiple fields, which uses
 * StandardAnalyzer instead of the XMLAnalyzer that was used to create
 * the index.
 */
public static QueryParser createXMLQueryParser(Version version, String[] fields, Analyzer analyzer) {
    // Note: the analyzer argument is deliberately ignored; a StandardAnalyzer
    // is substituted instead, as the javadoc above explains.
    return new MultiFieldQueryParser(version, fields, new StandardAnalyzer(version));
}

From source file:org.apache.roller.weblogger.business.search.operations.SearchOperation.java

License:Apache License

public void doRun() {
    final int docLimit = 500;
    searchresults = null;
    searcher = null;

    try {
        IndexReader reader = manager.getSharedIndexReader();
        searcher = new IndexSearcher(reader);

        MultiFieldQueryParser multiParser = new MultiFieldQueryParser(FieldConstants.LUCENE_VERSION,
                SEARCH_FIELDS, IndexManagerImpl.getAnalyzer());

        // Require all terms by default (AND); remove this line to keep the parser's default OR behavior.
        multiParser.setDefaultOperator(MultiFieldQueryParser.Operator.AND);

        // Create a query object out of our term
        Query query = multiParser.parse(term);

        Term tUsername = IndexUtil.getTerm(FieldConstants.WEBSITE_HANDLE, websiteHandle);

        if (tUsername != null) {
            BooleanQuery bQuery = new BooleanQuery();
            bQuery.add(query, BooleanClause.Occur.MUST);
            bQuery.add(new TermQuery(tUsername), BooleanClause.Occur.MUST);
            query = bQuery;
        }

        if (category != null) {
            Term tCategory = new Term(FieldConstants.CATEGORY, category.toLowerCase());
            BooleanQuery bQuery = new BooleanQuery();
            bQuery.add(query, BooleanClause.Occur.MUST);
            bQuery.add(new TermQuery(tCategory), BooleanClause.Occur.MUST);
            query = bQuery;
        }

        Term tLocale = IndexUtil.getTerm(FieldConstants.LOCALE, locale);

        if (tLocale != null) {
            BooleanQuery bQuery = new BooleanQuery();
            bQuery.add(query, BooleanClause.Occur.MUST);
            bQuery.add(new TermQuery(tLocale), BooleanClause.Occur.MUST);
            query = bQuery;
        }

        searchresults = searcher.search(query, null/* Filter */, docLimit, SORTER);

    } catch (IOException e) {
        mLogger.error("Error searching index", e);
        parseError = e.getMessage();

    } catch (ParseException e) {
            // a parse error just means the user typed an invalid query; record the message
        parseError = e.getMessage();
    }
    // don't need to close the reader, since we didn't do any writing!
}
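
This example targets an older Lucene where BooleanQuery is mutable. In newer releases (roughly 5.3 onward) clauses are assembled through BooleanQuery.Builder instead; a sketch of the same AND-combination, reusing the query and tUsername variables from above:

        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        builder.add(query, BooleanClause.Occur.MUST);
        builder.add(new TermQuery(tUsername), BooleanClause.Occur.MUST);
        query = builder.build();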

From source file:org.archive.l2r.TemRerank.java

License:Apache License

public static void precheck() throws Exception {
    //formal run queries
    ArrayList<TemQuery> temQueryList = TemLoader.loadTemporalQuery(TemRunType.FormalRun);

    StandardAnalyzer solrAnalyzer = new StandardAnalyzer(Version.LUCENE_48);
    //solrParser = new QueryParser(Version.LUCENE_48, field, solrAnalyzer);
    QueryParser solrParser = new MultiFieldQueryParser(Version.LUCENE_48, new String[] { "title", "content" },
            solrAnalyzer);

    int count = 1;
    for (TemQuery temQuery : temQueryList) {
        System.out.println((count++) + temQuery.getTitle());
        //
        StanfordNER.suitParsing(temQuery.getSearchQuery(SubtopicType.atemporal));
        solrParser.parse(temQuery.getSearchQuery(SubtopicType.atemporal));

        StanfordNER.suitParsing(temQuery.getSearchQuery(SubtopicType.future));
        solrParser.parse(temQuery.getSearchQuery(SubtopicType.future));

        StanfordNER.suitParsing(temQuery.getSearchQuery(SubtopicType.past));
        solrParser.parse(temQuery.getSearchQuery(SubtopicType.past));

        StanfordNER.suitParsing(temQuery.getSearchQuery(SubtopicType.recency));
        solrParser.parse(temQuery.getSearchQuery(SubtopicType.recency));

    }
}

From source file:org.archive.search.IndexSearch.java

License:Apache License

/**
 * Run searchQuery against the index and return the top slotNumber results as ResultSlot entries.
 */
public static ArrayList<ResultSlot> initialLuceneSearch(SimType simType, String searchQuery, int slotNumber)
        throws Exception {
    // String queryStr = "apple";
    //int resultNum = 10;
    //String field = "content";
    if (!solrIni) {
        solrIndexReader = DirectoryReader.open(FSDirectory.open(new File(solrIndexDir)));
        solrSearcher = new IndexSearcher(solrIndexReader);

        if (simType == SimType.LM) {
            solrSimilarity = new LMDirichletSimilarity();
            solrSearcher.setSimilarity(solrSimilarity);
        } else if (simType == SimType.TFIDF) {
            //use default
        } else {
            System.err.println("SimType Input Error!");
            System.exit(1);
        }

        solrAnalyzer = new StandardAnalyzer(Version.LUCENE_48);
        //solrParser = new QueryParser(Version.LUCENE_48, field, solrAnalyzer);
        solrParser = new MultiFieldQueryParser(Version.LUCENE_48, new String[] { "title", "content" },
                solrAnalyzer);

        solrIni = true;
    }

    Query query = solrParser.parse(searchQuery);

    // Collect the top slotNumber documents
    TopDocs resultList = solrSearcher.search(query, slotNumber);
    ScoreDoc[] hitList = resultList.scoreDocs;

    ArrayList<ResultSlot> slotList = new ArrayList<>();
    for (int i = 0; i < hitList.length; i++) {
        ScoreDoc hit = hitList[i];

        //

        Document doc = solrSearcher.doc(hit.doc);

        //

        String id = doc.get("id");

        slotList.add(new ResultSlot(id, (i + 1), hit.score));
    }

    if (debug) {
        System.out.println("search results:");
        System.out.println();
        for (ScoreDoc hit : hitList) {
            System.out.println("doc=" + hit.doc + " score=" + hit.score);
            Document doc = solrSearcher.doc(hit.doc);
            String id = doc.get("id");
            System.out.println("id\t" + id);
            System.out.println("-------- lp file -------");
            System.out.println(fetchLPFile(id).get("text"));
            System.out.println();
        }
    }

    return slotList;
}

From source file:org.archive.search.IndexSearch.java

License:Apache License

private static void getTop10Results(TemRunType runType) throws Exception {
    // queries
    String qFile;

    BufferedWriter top20IDWriter;
    BufferedWriter top20SolrWriter;
    BufferedWriter top20CheckWriter;

    if (runType == TemRunType.DryRun) {
        qFile = TDirectory.ROOT_DATASET
                + "Temporalia/DryRun/ntcir11_Temporalia_ntcir11-temporalia-tqic-dryrun.txt";

        top20IDWriter = IOBox.getBufferedWriter_UTF8(
                TDirectory.ROOT_OUTPUT + "/top10/idmap_" + TemRunType.DryRun.toString() + ".txt");
        top20SolrWriter = IOBox.getBufferedWriter_UTF8(
                TDirectory.ROOT_OUTPUT + "/top10/solr_" + TemRunType.DryRun.toString() + ".txt");
        top20CheckWriter = IOBox.getBufferedWriter_UTF8(
                TDirectory.ROOT_OUTPUT + "/top10/check_" + TemRunType.DryRun.toString() + ".txt");
    } else {
        qFile = TDirectory.ROOT_DATASET
                + "Temporalia/FormalRun/ntcir11_Temporalia_NTCIR-11TQICQueriesFormalRun.txt";

        top20IDWriter = IOBox.getBufferedWriter_UTF8(
                TDirectory.ROOT_OUTPUT + "/top10/idmap_" + TemRunType.FormalRun.toString() + ".txt");
        top20SolrWriter = IOBox.getBufferedWriter_UTF8(
                TDirectory.ROOT_OUTPUT + "/top10/solr_" + TemRunType.FormalRun.toString() + ".txt");
        top20CheckWriter = IOBox.getBufferedWriter_UTF8(
                TDirectory.ROOT_OUTPUT + "/top10/check_" + TemRunType.FormalRun.toString() + ".txt");
    }

    ArrayList<String> lineList = IOBox.getLinesAsAList_UTF8(qFile);

    //build a standard pseudo-xml file
    StringBuffer buffer = new StringBuffer();
    buffer.append("<add>");
    for (String line : lineList) {
        buffer.append(TemLoader.stripNonValidXMLCharacters(line));
    }
    buffer.append("</add>");

    SAXBuilder saxBuilder = new SAXBuilder();
    org.jdom.Document xmlDoc = saxBuilder
            .build(new InputStreamReader(new ByteArrayInputStream(buffer.toString().getBytes("UTF-8"))));
    Element webtrackElement = xmlDoc.getRootElement();
    List<Element> queryList = webtrackElement.getChildren("query");

    ArrayList<StrStr> qList = new ArrayList<>();
    for (Element query : queryList) {
        qList.add(new StrStr(query.getChildText("id").trim(), query.getChildText("query_string").trim()));
    }

    //solr search
    solrIndexReader = DirectoryReader.open(FSDirectory.open(new File(solrIndexDir)));
    solrSearcher = new IndexSearcher(solrIndexReader);
    solrSimilarity = new LMDirichletSimilarity();
    solrSearcher.setSimilarity(solrSimilarity);
    solrAnalyzer = new StandardAnalyzer(Version.LUCENE_48);
    solrParser = new MultiFieldQueryParser(Version.LUCENE_48, new String[] { "title", "content" },
            solrAnalyzer);

    //check search
    lpIndexReader = DirectoryReader.open(FSDirectory.open(new File(lpIndexDir)));
    lpSearcher = new IndexSearcher(lpIndexReader);

    //

    int count = 1;
    for (StrStr q : qList) {
        System.out.println((count++));
        //1
        Query solrQuery = solrParser.parse(q.second);
        TopDocs solrResultList = solrSearcher.search(solrQuery, 20);
        ScoreDoc[] solrHitList = solrResultList.scoreDocs;

        ArrayList<String> docidList = new ArrayList<>();

        for (int i = 0; i < solrHitList.length; i++) {
            ScoreDoc solrHit = solrHitList[i];
            Document doc = solrSearcher.doc(solrHit.doc);
            String docid = doc.get("id");
            docidList.add(docid);
        }

        //id map
        top20IDWriter.write(q.first);
        top20IDWriter.newLine();
        for (String docid : docidList) {
            top20IDWriter.write("\t" + docid);
            top20IDWriter.newLine();
        }

        //solr doc
        for (int i = 0; i < solrHitList.length; i++) {
            ScoreDoc solrHit = solrHitList[i];
            Document solrDoc = solrSearcher.doc(solrHit.doc);
            top20SolrWriter.write(TemLoader.toSolrXml(solrDoc));
            top20SolrWriter.newLine();
        }

        //check doc
        for (String docid : docidList) {
            Query checkQuery = lpParser.parse(docid);
            TopDocs checkResults = lpSearcher.search(checkQuery, 2);
            ScoreDoc[] checkHits = checkResults.scoreDocs;
            Document checkDoc = lpSearcher.doc(checkHits[0].doc);

            top20CheckWriter.write(TemLoader.toCheckXml(checkDoc));
            top20CheckWriter.newLine();
        }
    }

    //
    top20IDWriter.flush();
    top20IDWriter.close();

    top20SolrWriter.flush();
    top20SolrWriter.close();

    top20CheckWriter.flush();
    top20CheckWriter.close();
}

From source file:org.echocat.adam.profile.UserProfileDataQueryMapper.java

License:Open Source License

@Override
public Query convertToLuceneQuery(@Nonnull UserProfileDataQuery query) {
    final QueryParser parser = new MultiFieldQueryParser(LUCENE_VERSION, toFieldsArray(query),
            _luceneAnalyzerFactory.createAnalyzer());
    parser.setDefaultOperator(AND);
    final String searchTerm = query.getSearchTerm();
    try {
        return parser.parse(searchTerm != null ? searchTerm : "");
    } catch (final ParseException e) {
        throw new RuntimeException("Unable to parse query: " + searchTerm, e);
    }
}

From source file:org.segrada.search.lucene.LuceneSearchEngine.java

License:Apache License

@Override
public PaginationInfo<SearchHit> search(String searchTerm, Map<String, String> filters) {
    // to avoid NPEs
    if (filters == null)
        filters = new HashMap<>();

    // set defaults
    int page = 1;
    int entriesPerPage = 20;

    try {
        DirectoryReader iReader = DirectoryReader.open(directory);

        String[] containFields;
        // do we have a filter constraining the search to certain fields?
        if (filters.containsKey("fields")) {
            String fields = filters.get("fields");
            if (fields.isEmpty())
                containFields = new String[] { "title", "subTitles", "content" };
            else if (fields.equalsIgnoreCase("title"))
                containFields = new String[] { "title" };
            else if (fields.equalsIgnoreCase("subTitles"))
                containFields = new String[] { "subTitles" };
            else if (fields.equalsIgnoreCase("content"))
                containFields = new String[] { "content" };
            else if (fields.equalsIgnoreCase("allTitles"))
                containFields = new String[] { "title", "subTitles" };
            else
                throw new RuntimeException("fields-Filter " + fields + " is not known.");
        } else
            containFields = new String[] { "title", "subTitles", "content" };

        // build a multi-field parser over the selected fields
        MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_47, containFields, analyzer);

        // which operator do we use?
        parser.setDefaultOperator(QueryParser.Operator.AND);
        if (filters.containsKey("operator")) {
            String operator = filters.get("operator");
            if (operator.equalsIgnoreCase("or"))
                parser.setDefaultOperator(QueryParser.Operator.OR);
            else if (!operator.isEmpty() && !operator.equalsIgnoreCase("and"))
                throw new RuntimeException("operator-Filter " + operator + " is not and/or.");
        }

        // filters for query
        List<Filter> searchFilters = new ArrayList<>();

        // class filter
        if (filters.containsKey("class") && !filters.get("class").isEmpty()) {
            // multiple classes?
            String[] classes = filters.get("class").split(",");

            // single class
            if (classes.length <= 1) {
                TermQuery categoryQuery = new TermQuery(new Term("className", filters.get("class")));
                searchFilters.add(new QueryWrapperFilter(categoryQuery));
            } else { // multiple classes
                Filter[] categories = new Filter[classes.length];
                for (int i = 0; i < classes.length; i++) {
                    categories[i] = new QueryWrapperFilter(
                            new TermQuery(new Term("className", classes[i].trim())));
                }
                // add chained filter
                searchFilters.add(new ChainedFilter(categories, ChainedFilter.OR));
            }
        }

        // tag filter
        if (filters.containsKey("tags") && !filters.get("tags").isEmpty()) {
            // split tags into array
            String[] tags = filters.get("tags").split(",");
            BooleanQuery booleanQuery = new BooleanQuery();
            for (String tag : tags) {
                booleanQuery.add(new TermQuery(new Term("tag", tag.trim())), BooleanClause.Occur.SHOULD);
            }
            searchFilters.add(new QueryWrapperFilter(booleanQuery));
        }

        // create filter - if multiple filters applied, add chained filter
        Filter filter = null;
        if (searchFilters.size() == 1)
            filter = searchFilters.get(0);
        else if (searchFilters.size() > 1) {
            Filter[] filterArray = new Filter[searchFilters.size()];
            searchFilters.toArray(filterArray);
            filter = new ChainedFilter(filterArray, ChainedFilter.AND);
        }

        // define query
        Query query = null;
        if (searchTerm != null)
            query = parser.parse(searchTerm);
        if (query == null)
            query = new MatchAllDocsQuery(); // fallback to match all documents

        // get hits per page
        if (filters.containsKey("limit")) {
            try {
                entriesPerPage = Integer.valueOf(filters.get("limit"));
                if (entriesPerPage <= 0 || entriesPerPage > 1000)
                    entriesPerPage = 20;
            } catch (NumberFormatException e) {
                logger.warn("Could not parse limit " + filters.get("limit") + " to integer", e);
            }
        }

        // get page number
        if (filters.containsKey("page")) {
            try {
                page = Integer.valueOf(filters.get("page"));
            } catch (NumberFormatException e) {
                logger.warn("Could not parse page " + filters.get("page") + " to integer", e);
            }
        }

        // calculate start/stop indexes
        int startIndex = (page - 1) * entriesPerPage;
        int endIndex = page * entriesPerPage;

        IndexSearcher iSearcher = new IndexSearcher(iReader);
        // do search
        TopDocs topDocs = iSearcher.search(query, filter, 1000);

        // update end index
        if (topDocs.scoreDocs.length < endIndex)
            endIndex = topDocs.scoreDocs.length;
        // how many pages do we have?
        int pages = topDocs.scoreDocs.length / entriesPerPage + 1;
        // reset page to sane limit, if needed
        if (page <= 0 || page > pages)
            page = 1;

        // highlighter
        FastVectorHighlighter highlighter = new FastVectorHighlighter();
        FieldQuery fieldQuery = null;
        // field query for highlighted terms
        if (searchTerm != null)
            fieldQuery = highlighter.getFieldQuery(
                    new QueryParser(Version.LUCENE_47, "content", analyzer).parse(searchTerm), iReader);

        // cycle through hits
        List<SearchHit> hits = new ArrayList<>();

        for (int i = startIndex; i < endIndex; i++) {
            ScoreDoc scoreDoc = topDocs.scoreDocs[i];
            Document hitDoc = iSearcher.doc(scoreDoc.doc);

            SearchHit searchHit = new SearchHit();
            searchHit.setId(hitDoc.get("id"));
            searchHit.setClassName(hitDoc.get("className"));
            searchHit.setTitle(hitDoc.get("title"));
            searchHit.setSubTitles(hitDoc.get("subTitles"));
            searchHit.setTagIds(hitDoc.getValues("tag"));
            String color = hitDoc.get("color");
            searchHit.setColor(color != null ? Integer.valueOf(color) : null);
            searchHit.setIconFileIdentifier(hitDoc.get("iconFileIdentifier"));
            searchHit.setRelevance(scoreDoc.score);

            // get highlighted components
            if (searchTerm != null) {
                String[] bestFragments = highlighter.getBestFragments(fieldQuery, iReader, scoreDoc.doc,
                        "content", 18, 10);
                searchHit.setHighlightText(bestFragments);
            }

            // add hit
            hits.add(searchHit);
        }

        iReader.close();

        // return pagination info
        return new PaginationInfo<>(page, pages, topDocs.totalHits, entriesPerPage, hits);
    } catch (Throwable e) {
        logger.error("Error in search.", e);
    }

    // return empty list result in order to avoid NPEs
    return new PaginationInfo<>(page, 1, 0, entriesPerPage, new ArrayList<>());
}

From source file:org.segrada.search.lucene.LuceneSearchEngine.java

License:Apache License

@Override
public String[] searchInDocument(String searchTerm, String id) {
    // sanity check
    if (searchTerm == null || id == null || searchTerm.isEmpty() || id.isEmpty())
        return new String[] {};

    try {
        DirectoryReader iReader = DirectoryReader.open(directory);
        IndexSearcher iSearcher = new IndexSearcher(iReader);

        // only search content
        MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_47, new String[] { "content" },
                analyzer);

        // set AND operator; the id restriction is applied via the filter below
        parser.setDefaultOperator(QueryParser.Operator.AND);
        Query query = parser.parse(searchTerm);
        Filter filter = new QueryWrapperFilter(new TermQuery(new Term("id", id)));

        // do search, maximum of 1 document
        TopDocs topDocs = iSearcher.search(query, filter, 1);

        if (topDocs.scoreDocs.length > 0) {
            ScoreDoc scoreDoc = topDocs.scoreDocs[0];

            // get highlighted text
            FastVectorHighlighter highlighter = new FastVectorHighlighter();
            FieldQuery fieldQuery = highlighter.getFieldQuery(
                    new QueryParser(Version.LUCENE_47, "content", analyzer).parse(searchTerm), iReader);

            // return max of 100 highlighted elements
            return highlighter.getBestFragments(fieldQuery, iReader, scoreDoc.doc, "content", 100, 100);
        }
    } catch (Throwable e) {
        logger.error("Error in search.", e);
    }

    return new String[] {};
}
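
The Filter, QueryWrapperFilter and ChainedFilter classes used in these two methods were removed in later Lucene releases; the same id restriction can be expressed as a non-scoring FILTER clause instead. A sketch under that assumption, reusing the parser, searchTerm, id and iSearcher names from the example:

        // Lucene 6+ sketch: replace QueryWrapperFilter with a FILTER clause
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        builder.add(parser.parse(searchTerm), BooleanClause.Occur.MUST); // scoring clause
        builder.add(new TermQuery(new Term("id", id)), BooleanClause.Occur.FILTER); // non-scoring restriction
        TopDocs topDocs = iSearcher.search(builder.build(), 1);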

From source file:org.segrada.search.solr.SolrSearchEngine.java

License:Apache License

@Override
public PaginationInfo<SearchHit> search(String searchTerm, Map<String, String> filters) {
    // to avoid NPEs
    if (filters == null)
        filters = new HashMap<>();

    // set defaults
    int page = 1;
    int entriesPerPage = 20;

    try {
        // build a multi-field parser over the selected fields
        MultiFieldQueryParser parser;
        String[] containFields;
        // do we have a filter constraining the search to certain fields?
        if (filters.containsKey("fields")) {
            String fields = filters.get("fields");
            if (fields.isEmpty())
                containFields = new String[] { this.title, this.subTitles, this.content };
            else if (fields.equalsIgnoreCase(this.title))
                containFields = new String[] { this.title };
            else if (fields.equalsIgnoreCase(this.subTitles))
                containFields = new String[] { this.subTitles };
            else if (fields.equalsIgnoreCase(this.content))
                containFields = new String[] { this.content };
            else if (fields.equalsIgnoreCase("allTitles"))
                containFields = new String[] { this.title, this.subTitles };
            else
                throw new RuntimeException("fields-Filter " + fields + " is not known.");
        } else
            containFields = new String[] { this.title, this.subTitles, this.content };
        parser = new MultiFieldQueryParser(Version.LUCENE_47, containFields, analyzer);

        // which operator do we use?
        parser.setDefaultOperator(QueryParser.Operator.AND);
        if (filters.containsKey("operator")) {
            String operator = filters.get("operator");
            if (operator.equalsIgnoreCase("or"))
                parser.setDefaultOperator(QueryParser.Operator.OR);
            else if (!operator.isEmpty() && !operator.equalsIgnoreCase("and"))
                throw new RuntimeException("operator-Filter " + operator + " is not and/or.");
        }

        // filters for query
        SolrQuery query = new SolrQuery();
        // class filter
        if (filters.containsKey("class") && !filters.get("class").isEmpty()) {
            // multiple classes?
            String[] classes = filters.get("class").split(",");

            // single class
            if (classes.length <= 1) {
                query.addFilterQuery(this.className, filters.get("class"));
            } else { // multiple classes
                StringBuilder chained = new StringBuilder("(");
                for (int i = 0; i < classes.length; i++) {
                    if (i > 0)
                        chained.append(" OR ");
                    chained.append("className:").append(classes[i].trim());
                }
                query.addFilterQuery(this.className, chained + ")");
            }
        }

        // tag filter
        if (filters.containsKey("tags") && !filters.get("tags").isEmpty()) {
            // split tags into array
            String[] tags = filters.get("tags").split(",");
            BooleanQuery booleanQuery = new BooleanQuery();
            for (String tagLocal : tags) {
                booleanQuery.add(new TermQuery(new Term("tag", tagLocal.trim())), BooleanClause.Occur.SHOULD);
            }
            query.addFilterQuery(this.tag, booleanQuery.toString());
        }

        // define query
        Query queryTerm = null;
        if (searchTerm != null)
            queryTerm = parser.parse(searchTerm);
        if (queryTerm == null)
            queryTerm = new MatchAllDocsQuery(); // fallback to match all documents
        query.setQuery(queryTerm.toString());

        // get hits per page
        if (filters.containsKey("limit")) {
            try {
                entriesPerPage = Integer.valueOf(filters.get("limit"));
                if (entriesPerPage <= 0 || entriesPerPage > 1000)
                    entriesPerPage = 20;
            } catch (NumberFormatException e) {
                logger.warn("Could not parse limit " + filters.get("limit") + " to integer", e);
            }
        }

        // get page number
        if (filters.containsKey("page")) {
            try {
                page = Integer.valueOf(filters.get("page"));
            } catch (NumberFormatException e) {
                logger.warn("Could not parse page " + filters.get("page") + " to integer", e);
            }
        }

        // calculate start index
        int startIndex = (page - 1) * entriesPerPage;

        query.setStart(startIndex);
        query.setRows(entriesPerPage);

        query.setFields("*", "score");

        // define highlighting
        query.setHighlight(true);
        query.addHighlightField(this.content);
        query.setHighlightFragsize(18);
        query.setHighlightSnippets(10);
        query.setHighlightSimplePre("<b>");
        query.setHighlightSimplePost("</b>");

        // do query
        QueryResponse response = solr.query(query);
        SolrDocumentList results = response.getResults();

        // how many pages do we have?
        int pages = (int) (results.getNumFound() / entriesPerPage + 1);

        // cycle through hits
        List<SearchHit> hits = new ArrayList<>();

        for (SolrDocument doc : results) {
            SearchHit searchHit = createHitFromDocument(doc);

            // add score
            Object score = doc.get("score");
            if (score != null && score instanceof Float)
                searchHit.setRelevance((float) score);

            // get highlighted components
            if (searchTerm != null && response.getHighlighting().get(searchHit.getId()) != null) {
                List<String> fragments = response.getHighlighting().get(searchHit.getId()).get(this.content);
                if (fragments != null) {
                    String[] bestFragments = new String[fragments.size() > 10 ? 10 : fragments.size()];
                    for (int i = 0; i < bestFragments.length; i++)
                        bestFragments[i] = fragments.get(i);
                    searchHit.setHighlightText(bestFragments);
                }
            }

            // add hit
            hits.add(searchHit);
        }

        // return pagination info
        return new PaginationInfo<>(page, pages, (int) results.getNumFound(), entriesPerPage, hits);
    } catch (Throwable e) {
        logger.error("Error in search.", e);
    }

    // return empty list result in order to avoid NPEs
    return new PaginationInfo<>(page, 1, 0, entriesPerPage, new ArrayList<>());
}
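
Parsing locally with MultiFieldQueryParser and serializing the query string, as this example does, is one option; Solr can also do the multi-field expansion server-side with its edismax parser. A sketch of that alternative (the field names and boosts are illustrative, and solr is the same client instance used above):

        // Server-side multi-field search via edismax instead of local parsing
        SolrQuery q = new SolrQuery(searchTerm);
        q.set("defType", "edismax");
        q.set("qf", "title subTitles content"); // fields to search, optionally boosted like title^5
        QueryResponse response = solr.query(q);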

From source file:org.segrada.search.solr.SolrSearchEngine.java

License:Apache License

@Override
public String[] searchInDocument(String searchTerm, String id) {
    // sanity check
    if (searchTerm == null || id == null || searchTerm.isEmpty() || id.isEmpty())
        return new String[] {};

    try {
        // only search content
        MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_47, new String[] { "content" },
                analyzer);

        SolrQuery query = new SolrQuery();

        // set AND operator; the id restriction is applied via the filter query below
        parser.setDefaultOperator(QueryParser.Operator.AND);
        query.setQuery(parser.parse(searchTerm).toString());

        // filter by id
        query.addFilterQuery("id:" + id);
        query.setRows(1);

        // define highlighting
        query.setHighlight(true);
        query.addHighlightField(this.content);
        query.setHighlightFragsize(100);
        query.setHighlightSnippets(100);
        query.setHighlightSimplePre("<b>");
        query.setHighlightSimplePost("</b>");

        // do query
        QueryResponse response = solr.query(query);
        SolrDocumentList results = response.getResults();

        if (!results.isEmpty() && response.getHighlighting().get(id) != null) {
            List<String> fragments = response.getHighlighting().get(id).get(this.content);
            String[] bestFragments = new String[fragments.size() > 100 ? 100 : fragments.size()];
            for (int i = 0; i < bestFragments.length; i++)
                bestFragments[i] = fragments.get(i);
            return bestFragments;
        }
    } catch (Throwable e) {
        logger.error("Error in search.", e);
    }

    return new String[] {};
}