Example usage for org.apache.lucene.search.highlight Highlighter setTextFragmenter

Introduction

This page collects example usages of org.apache.lucene.search.highlight.Highlighter#setTextFragmenter from open-source projects.

Prototype

public void setTextFragmenter(Fragmenter fragmenter) 
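
Before the per-project examples, a minimal self-contained sketch of the call (written against the Lucene 3.6-era API that most of the examples below use; the field name and sample text are made up for illustration):

import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.util.Version;

public class SetTextFragmenterExample {
    public static void main(String[] args) throws Exception {
        String field = "content"; // hypothetical field name
        String text = "Lucene is a full-text search library. "
                + "The highlighter marks query terms in stored text.";

        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_36);
        Query query = new QueryParser(Version.LUCENE_36, field, analyzer).parse("highlighter");

        Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter("<b>", "</b>"),
                new QueryScorer(query, field));
        // The call this page documents: replace the default fragmenter
        // (a 100-character SimpleFragmenter) with an explicit one.
        highlighter.setTextFragmenter(new SimpleFragmenter(100));

        TokenStream tokens = analyzer.tokenStream(field, new StringReader(text));
        System.out.println(highlighter.getBestFragments(tokens, text, 3, " ... "));
    }
}

The choice of Fragmenter is the main thing the examples below differ on: NullFragmenter treats the whole text as a single fragment (useful when testing for a match rather than producing display snippets), SimpleFragmenter cuts fragments at a fixed character count, and SimpleSpanFragmenter sizes fragments around the query-term spans found by a QueryScorer.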

Usage

From source file:de.spartusch.nasfvi.server.NSearcher.java

License:Apache License

/**
 * Extracts a field's values from a document. This method is aware of
 * <i>collapsed</i> or <i>merged</i> fields and handles them properly. 
 * @param nquery NQuery used for searching
 * @param doc Document to extract the field's values from
 * @param field Name of the field to extract values for
 * @return Set of extracted values
 */
private Set<String> extractValues(final NQuery nquery, final Document doc, final String field) {
    Set<String> values = new HashSet<String>();

    if (NQuery.isFieldToCollapse(field)) {
        // process merged field
        String mfield = NQuery.getMergedField();
        QueryScorer scorer = new QueryScorer(nquery.getQuery(), mfield);
        Highlighter highlighter = new Highlighter(scorer);
        highlighter.setTextFragmenter(new NullFragmenter());

        try {
            Set<String> buffer = new HashSet<String>();

            for (Fieldable f : doc.getFieldables(mfield)) {
                String content = f.stringValue();
                String value = normalizeValue(NQuery.extractValue(field, content));

                // Test if the field was matched by the query
                TokenStream ts = TokenSources.getTokenStream(mfield, content, nquery.getAnalyzer());
                if (highlighter.getBestFragment(ts, content) != null) {
                    values.add(value);
                } else {
                    // Buffer the value - in case no field matches
                    buffer.add(value);
                }
            }

            if (values.isEmpty()) {
                // No field was matched by the query
                values.addAll(buffer);
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        } catch (InvalidTokenOffsetsException e) {
            throw new RuntimeException(e);
        }
    } else {
        for (String v : doc.getValues(field)) {
            values.add(normalizeValue(v));
        }
    }

    return values;
}

From source file:dk.defxws.fgslucene.Statement.java

License:Open Source License

public ResultSet executeQuery(IndexSearcher searcher, String queryString, int startRecord, int maxResults,
        int snippetsMax, int fieldMaxLength, Analyzer analyzer, String defaultQueryFields,
        boolean allowLeadingWildcard, boolean lowercaseExpandedTerms, String indexPath, String indexName,
        String snippetBegin, String snippetEnd, String sortFields) throws GenericSearchException {
    if (logger.isDebugEnabled())
        logger.debug("executeQuery" + " query=" + queryString + " startRecord=" + startRecord + " maxResults="
                + maxResults + " snippetsMax=" + snippetsMax + " fieldMaxLength=" + fieldMaxLength
                + " indexName=" + indexName + " sortFields=" + sortFields + " defaultQueryFields="
                + defaultQueryFields + " allowLeadingWildcard=" + allowLeadingWildcard
                + " lowercaseExpandedTerms=" + lowercaseExpandedTerms);
    this.searcher = searcher;
    ResultSet rs = null;
    StringTokenizer defaultFieldNames = new StringTokenizer(defaultQueryFields);
    int countFields = defaultFieldNames.countTokens();
    String[] defaultFields = new String[countFields];
    for (int i = 0; i < countFields; i++) {
        defaultFields[i] = defaultFieldNames.nextToken();
    }
    Query query = null;
    if (defaultFields.length == 1) {
        QueryParser queryParser = new QueryParser(Version.LUCENE_36, defaultFields[0], analyzer);
        queryParser.setAllowLeadingWildcard(allowLeadingWildcard);
        queryParser.setLowercaseExpandedTerms(lowercaseExpandedTerms);
        if (logger.isDebugEnabled())
            logger.debug("executeQuery queryParser" + " allowLeadingWildcard="
                    + queryParser.getAllowLeadingWildcard() + " lowercaseExpandedTerms="
                    + queryParser.getLowercaseExpandedTerms());
        try {
            query = queryParser.parse(queryString);
        } catch (ParseException e) {
            throw new GenericSearchException(e.toString());
        }
    } else {
        MultiFieldQueryParser queryParser = new MultiFieldQueryParser(Version.LUCENE_36, defaultFields,
                analyzer);
        queryParser.setAllowLeadingWildcard(allowLeadingWildcard);
        queryParser.setLowercaseExpandedTerms(lowercaseExpandedTerms);
        if (logger.isDebugEnabled())
            logger.debug("executeQuery mfqueryParser" + " allowLeadingWildcard="
                    + queryParser.getAllowLeadingWildcard() + " lowercaseExpandedTerms="
                    + queryParser.getLowercaseExpandedTerms());
        try {
            query = queryParser.parse(queryString);
        } catch (ParseException e) {
            throw new GenericSearchException(e.toString());
        }
    }
    if (logger.isDebugEnabled())
        logger.debug("executeQuery after parse query=" + query);
    try {
        query.rewrite(searcher.getIndexReader());
    } catch (Exception e) {
        throw new GenericSearchException(e.toString());
    }
    if (logger.isDebugEnabled())
        logger.debug("executeQuery after rewrite query=" + query);
    int start = startRecord;
    TopDocs hits = getHits(query, start + maxResults - 1, sortFields);
    ScoreDoc[] docs = hits.scoreDocs;
    int end = Math.min(hits.totalHits, start + maxResults - 1);
    if (logger.isDebugEnabled())
        logger.debug("executeQuery hits.totalHits=" + hits.totalHits);
    StringBuffer resultXml = new StringBuffer();
    resultXml.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
    String queryStringEncoded = null;
    try {
        queryStringEncoded = URLEncoder.encode(queryString, "UTF-8");
    } catch (UnsupportedEncodingException e) {
        errorExit(e.toString());
    }
    resultXml.append("<lucenesearch " + "   xmlns:dc=\"http://purl.org/dc/elements/1.1/" + "\" query=\""
            + queryStringEncoded + "\" indexName=\"" + indexName + "\" sortFields=\"" + sortFields
            + "\" hitPageStart=\"" + startRecord + "\" hitPageSize=\"" + maxResults + "\" hitTotal=\""
            + hits.totalHits + "\">");
    ScoreDoc hit = null;
    Document doc = null;
    String hitsScore = null;
    for (int i = start; i <= end; i++) {
        try {
            hit = docs[i - 1];
            doc = searcher.doc(hit.doc);
            hitsScore = "" + hit.score;
        } catch (CorruptIndexException e) {
            errorExit(e.toString());
        } catch (IOException e) {
            errorExit(e.toString());
        }
        resultXml.append("<hit no=\"" + i + "\" score=\"" + hitsScore + "\">");
        for (ListIterator li = doc.getFields().listIterator(); li.hasNext();) {
            Fieldable f = (Fieldable) li.next();
            resultXml.append("<field name=\"" + f.name() + "\"");
            String snippets = null;
            if (snippetsMax > 0) {
                SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("!!!SNIPPETBEGIN", "!!!SNIPPETEND");
                QueryScorer scorer = new QueryScorer(query, f.name());
                Highlighter highlighter = new Highlighter(formatter, scorer);
                Fragmenter fragmenter = new SimpleFragmenter(fieldMaxLength);
                highlighter.setTextFragmenter(fragmenter);
                TokenStream tokenStream = analyzer.tokenStream(f.name(), new StringReader(f.stringValue()));
                try {
                    snippets = highlighter.getBestFragments(tokenStream, f.stringValue(), snippetsMax, " ... ");
                } catch (Exception e) { // all Exceptions to be caught, not just IOException 
                    errorExit(e.toString());
                }
                snippets = checkTruncatedWords(snippets, " ... ");
                snippets = StreamUtility.enc(snippets);
                snippets = snippets.replaceAll("!!!SNIPPETBEGIN", snippetBegin);
                snippets = snippets.replaceAll("!!!SNIPPETEND", snippetEnd);
                if (snippets != null && !snippets.equals("")) {
                    resultXml.append(" snippet=\"yes\">" + snippets);
                }
            }
            if (snippets == null || snippets.equals("")) {
                if (fieldMaxLength > 0 && f.stringValue().length() > fieldMaxLength) {
                    String snippet = f.stringValue().substring(0, fieldMaxLength);
                    int iamp = snippet.lastIndexOf("&");
                    if (iamp > -1 && iamp > fieldMaxLength - 8)
                        snippet = snippet.substring(0, iamp);
                    resultXml.append(">" + StreamUtility.enc(snippet) + " ... ");
                } else {
                    resultXml.append(">" + StreamUtility.enc(f.stringValue()));
                }
            }
            resultXml.append("</field>");
        }
        resultXml.append("</hit>");
    }
    resultXml.append("</lucenesearch>");
    if (logger.isDebugEnabled()) {
        int size = 500;
        if (resultXml.length() < size)
            size = resultXml.length();
        String debugString = resultXml.substring(0, size);
        if (resultXml.length() > size)
            debugString += "...";
        logger.debug("executeQuery resultXml=" + debugString);
    }
    rs = new ResultSet(resultXml);
    return rs;
}

From source file:dk.defxws.fgssolr.Statement.java

License:Open Source License

public ResultSet executeQuery(IndexSearcher searcher, String queryString, int startRecord, int maxResults,
        int snippetsMax, int fieldMaxLength, Analyzer analyzer, String defaultQueryFields, String indexPath,
        String indexName, String snippetBegin, String snippetEnd, String sortFields)
        throws GenericSearchException {
    boolean allowLeadingWildcard = true;
    boolean lowercaseExpandedTerms = true;
    if (logger.isDebugEnabled())
        logger.debug("executeQuery" + " query=" + queryString + " startRecord=" + startRecord + " maxResults="
                + maxResults + " snippetsMax=" + snippetsMax + " fieldMaxLength=" + fieldMaxLength
                + " indexName=" + indexName + " sortFields=" + sortFields + " defaultQueryFields="
                + defaultQueryFields + " allowLeadingWildcard=" + allowLeadingWildcard
                + " lowercaseExpandedTerms=" + lowercaseExpandedTerms);
    this.searcher = searcher;
    ResultSet rs = null;
    StringTokenizer defaultFieldNames = new StringTokenizer(defaultQueryFields);
    int countFields = defaultFieldNames.countTokens();
    String[] defaultFields = new String[countFields];
    for (int i = 0; i < countFields; i++) {
        defaultFields[i] = defaultFieldNames.nextToken();
    }
    Query query = null;
    if (defaultFields.length == 1) {
        QueryParser queryParser = new QueryParser(Version.LUCENE_36, defaultFields[0], analyzer);
        queryParser.setAllowLeadingWildcard(allowLeadingWildcard);
        queryParser.setLowercaseExpandedTerms(lowercaseExpandedTerms);
        if (logger.isDebugEnabled())
            logger.debug("executeQuery queryParser" + " allowLeadingWildcard="
                    + queryParser.getAllowLeadingWildcard() + " lowercaseExpandedTerms="
                    + queryParser.getLowercaseExpandedTerms());
        try {
            query = queryParser.parse(queryString);
        } catch (ParseException e) {
            throw new GenericSearchException(e.toString());
        }
    } else {
        MultiFieldQueryParser queryParser = new MultiFieldQueryParser(Version.LUCENE_36, defaultFields,
                analyzer);
        queryParser.setAllowLeadingWildcard(allowLeadingWildcard);
        queryParser.setLowercaseExpandedTerms(lowercaseExpandedTerms);
        if (logger.isDebugEnabled())
            logger.debug("executeQuery mfqueryParser" + " allowLeadingWildcard="
                    + queryParser.getAllowLeadingWildcard() + " lowercaseExpandedTerms="
                    + queryParser.getLowercaseExpandedTerms());
        try {
            query = queryParser.parse(queryString);
        } catch (ParseException e) {
            throw new GenericSearchException(e.toString());
        }
    }
    if (logger.isDebugEnabled())
        logger.debug("executeQuery after parse query=" + query);
    try {
        query.rewrite(searcher.getIndexReader());
    } catch (Exception e) {
        throw new GenericSearchException(e.toString());
    }
    if (logger.isDebugEnabled())
        logger.debug("executeQuery after rewrite query=" + query);
    int start = startRecord;
    TopDocs hits = getHits(query, start + maxResults - 1, sortFields);
    ScoreDoc[] docs = hits.scoreDocs;
    int end = Math.min(hits.totalHits, start + maxResults - 1);
    if (logger.isDebugEnabled())
        logger.debug("executeQuery hits.totalHits=" + hits.totalHits);
    StringBuffer resultXml = new StringBuffer();
    resultXml.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
    String queryStringEncoded = null;
    try {
        queryStringEncoded = URLEncoder.encode(queryString, "UTF-8");
    } catch (UnsupportedEncodingException e) {
        errorExit(e.toString());
    }
    resultXml.append("<solrsearch " + "   xmlns:dc=\"http://purl.org/dc/elements/1.1/" + "\" query=\""
            + queryStringEncoded + "\" indexName=\"" + indexName + "\" sortFields=\"" + sortFields
            + "\" hitPageStart=\"" + startRecord + "\" hitPageSize=\"" + maxResults + "\" hitTotal=\""
            + hits.totalHits + "\">");
    ScoreDoc hit = null;
    Document doc = null;
    String hitsScore = null;
    for (int i = start; i <= end; i++) {
        try {
            hit = docs[i - 1];
            doc = searcher.doc(hit.doc);
            hitsScore = "" + hit.score;
        } catch (CorruptIndexException e) {
            errorExit(e.toString());
        } catch (IOException e) {
            errorExit(e.toString());
        }
        resultXml.append("<hit no=\"" + i + "\" score=\"" + hitsScore + "\">");
        for (ListIterator li = doc.getFields().listIterator(); li.hasNext();) {
            Fieldable f = (Fieldable) li.next();
            resultXml.append("<field name=\"" + f.name() + "\"");
            String snippets = null;
            if (snippetsMax > 0) {
                SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("!!!SNIPPETBEGIN", "!!!SNIPPETEND");
                QueryScorer scorer = new QueryScorer(query, f.name());
                Highlighter highlighter = new Highlighter(formatter, scorer);
                Fragmenter fragmenter = new SimpleFragmenter(fieldMaxLength);
                highlighter.setTextFragmenter(fragmenter);
                TokenStream tokenStream = analyzer.tokenStream(f.name(), new StringReader(f.stringValue()));
                try {
                    snippets = highlighter.getBestFragments(tokenStream, f.stringValue(), snippetsMax, " ... ");
                } catch (Exception e) { // all Exceptions to be caught, not just IOException 
                    errorExit(e.toString());
                }
                snippets = checkTruncatedWords(snippets, " ... ");
                snippets = StreamUtility.enc(snippets);
                snippets = snippets.replaceAll("!!!SNIPPETBEGIN", snippetBegin);
                snippets = snippets.replaceAll("!!!SNIPPETEND", snippetEnd);
                if (snippets != null && !snippets.equals("")) {
                    resultXml.append(" snippet=\"yes\">" + snippets);
                }
            }
            if (snippets == null || snippets.equals("")) {
                if (fieldMaxLength > 0 && f.stringValue().length() > fieldMaxLength) {
                    String snippet = f.stringValue().substring(0, fieldMaxLength);
                    int iamp = snippet.lastIndexOf("&");
                    if (iamp > -1 && iamp > fieldMaxLength - 8)
                        snippet = snippet.substring(0, iamp);
                    resultXml.append(">" + StreamUtility.enc(snippet) + " ... ");
                } else {
                    resultXml.append(">" + StreamUtility.enc(f.stringValue()));
                }
            }
            resultXml.append("</field>");
        }
        resultXml.append("</hit>");
    }
    resultXml.append("</solrsearch>");
    if (logger.isDebugEnabled()) {
        int size = 500;
        if (resultXml.length() < size)
            size = resultXml.length();
        String debugString = resultXml.substring(0, size);
        if (resultXml.length() > size)
            debugString += "...";
        logger.debug("executeQuery resultXml=" + debugString);
    }
    rs = new ResultSet(resultXml);
    return rs;
}

From source file:docet.engine.SimpleDocetDocSearcher.java

License:Apache License

@Override
public List<DocetPage> searchForMatchingDocuments(final String searchText, final String lang,
        final int maxNumResults) throws DocetDocumentSearchException {
    final List<DocetPage> results = new ArrayList<>();
    final String fallbackLang = this.getFallbackLangForLang(lang);
    final String actualSearchLang;
    if (fallbackLang.isEmpty()) {
        actualSearchLang = lang;
    } else {
        actualSearchLang = fallbackLang;
    }
    try {
        final IndexSearcher searcher = new IndexSearcher(reader);
        final Analyzer analyzer = new AnalyzerBuilder().language(actualSearchLang).build();
        QueryParser queryParser = new QueryParser(LUCENE_QUERY_CONTENT_PREFIX + actualSearchLang, analyzer);
        final Query query = queryParser.parse(constructLucenePhraseTermSearchQuery(searchText));
        final QueryScorer queryScorer = new QueryScorer(query, LUCENE_QUERY_CONTENT_PREFIX + actualSearchLang);

        final Fragmenter fragmenter = new SimpleSpanFragmenter(queryScorer);
        final Highlighter highlighter = new Highlighter(queryScorer);
        highlighter.setMaxDocCharsToAnalyze(Integer.MAX_VALUE);
        highlighter.setTextFragmenter(fragmenter);

        final TopDocs res = searcher.search(query, maxNumResults);
        final float maxScore = res.getMaxScore();
        final List<ScoreDoc> scoreDocs = Arrays.asList(res.scoreDocs);
        Map<org.apache.lucene.document.Document, String> docs = new HashMap<>();
        Map<String, ScoreDoc> scoresForDocs = new HashMap<>();
        for (final ScoreDoc sd : scoreDocs) {
            final org.apache.lucene.document.Document doc = searcher.doc(sd.doc);
            final String contents = doc.get(LUCENE_QUERY_CONTENT_PREFIX + actualSearchLang);
            final String docId = doc.get("id");
            final String[] fragments = highlighter.getBestFragments(analyzer,
                    LUCENE_QUERY_CONTENT_PREFIX + actualSearchLang, contents, MAX_NUM_FRAGMENTS);
            List<String> fragmentList = Arrays.asList(fragments);
            fragmentList = fragmentList.stream().map(s1 -> s1.trim().split("\n"))
                    .map(s1 -> Arrays.asList(s1).stream().filter(s -> !s.trim().isEmpty())
                            .reduce((sa, sb) -> sa + MACHING_EXCERPTS_SEPARATOR + sb)
                            .orElse(MACHING_EXCERPTS_SEPARATOR))
                    .collect(Collectors.toList());
            docs.put(doc,
                    MACHING_EXCERPTS_SEPARATOR
                            + fragmentList.stream().filter(s -> !s.isEmpty())
                                    .reduce((s1, s2) -> s1 + "..." + s2).orElse("")
                            + MACHING_EXCERPTS_SEPARATOR);
            scoresForDocs.putIfAbsent(docId, sd);
        }
        docs.entrySet().stream().forEach(e -> {
            final int relevance = Math.round((scoresForDocs.get(e.getKey().get("id")).score / maxScore) * 100);
            results.add(DocetPage.toDocetDocument(e.getKey(), e.getValue(), relevance));
        });
        return results;
    } catch (ParseException | IOException | InvalidTokenOffsetsException ex) {
        throw new DocetDocumentSearchException(
                "Error on searching query " + searchText + " for lang " + actualSearchLang, ex);
    }
}

From source file:edu.coeia.util.TextHighlighter.java

License:Open Source License

public static String getHighlightString(String text, String keyword) throws IOException {
    TermQuery query = new TermQuery(new Term("f", keyword));
    QueryScorer scorer = new QueryScorer(query);
    SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("<span class=\"highlight\">", "</span>");
    Highlighter highlighter = new Highlighter(formatter, scorer);
    Fragmenter fragmenter = new SimpleFragmenter(50);
    highlighter.setTextFragmenter(fragmenter);
    TokenStream tokenStream = new StandardAnalyzer(Version.LUCENE_20).tokenStream("f", new StringReader(text));
    String result;
    try {
        result = highlighter.getBestFragments(tokenStream, text, 30, "...");
    } catch (InvalidTokenOffsetsException e) {
        throw new IOException(e); // the method only declares IOException
    }

    StringBuilder writer = new StringBuilder();
    writer.append("<html>");
    writer.append("<style>\n" + ".highlight {\n" + " background: yellow;\n" + "}\n" + "</style>");
    writer.append("<body>");
    writer.append(result); // insert the highlighted fragments into the page body
    writer.append("</body></html>");

    return (writer.toString());
}

From source file:fr.mael.microrss.dao.impl.UserArticleDaoImpl.java

License:Open Source License

public List<UserArticle> search(String queryStr, User user, int start, int nb)
        throws ParseException, IOException, InvalidTokenOffsetsException {
    FullTextSession searchSession = Search.getFullTextSession(getSessionFactory().getCurrentSession());
    QueryParser parser = new MultiFieldQueryParser(Version.LUCENE_31,
            new String[] { "article.content", "article.title" }, new StandardAnalyzer(Version.LUCENE_31));
    org.apache.lucene.search.Query query = parser.parse(queryStr);
    FullTextQuery hibQuery = searchSession.createFullTextQuery(query, UserArticle.class);
    Criteria fetchingStrategy = searchSession.createCriteria(UserArticle.class);
    fetchingStrategy.setFetchMode("article.feed", FetchMode.JOIN);
    fetchingStrategy.setFetchMode("userLabels", FetchMode.JOIN);
    fetchingStrategy.add(Property.forName("user").eq(user));
    hibQuery.setCriteriaQuery(fetchingStrategy);
    hibQuery.setFirstResult(start);
    hibQuery.setMaxResults(nb);

    QueryScorer scorer = new QueryScorer(query);
    SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("[highlight]", "[/highlight]");
    Highlighter highlighter = new Highlighter(formatter, scorer);
    highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 200));

    List<UserArticle> userArticles = (List<UserArticle>) hibQuery.list();

    for (UserArticle userArticle : userArticles) {
        String highlight = highlighter.getBestFragment(new StandardAnalyzer(Version.LUCENE_32), "content",
                userArticle.getArticle().getContent());
        if (highlight != null) {
            highlight = highlight.replaceAll("\\<.*?>", "").replace("\n", " ");
            userArticle.getArticle().setHighlight(highlight);
        }
    }

    return userArticles;
}

From source file:framework.retrieval.engine.query.formatter.impl.HighlighterMaker.java

License:Apache License

public String getHighlighter(Query query, String fieldName, String keyWord, int resumeLength) {

    QueryScorer scorer = new QueryScorer(query);

    Highlighter highlighter = new Highlighter(getFormatter(), scorer);

    Fragmenter fragmenter = new SimpleFragmenter(resumeLength);
    highlighter.setTextFragmenter(fragmenter);

    String result = "";

    try {
        result = highlighter.getBestFragment(analyzer, fieldName, keyWord);
    } catch (Exception e) {
        throw new RetrievalQueryException(e);
    }

    return result;
}

From source file:index.IndexUtils.java

public static List highlight(IndexSearcher indexSearcher, String key) throws ClassNotFoundException {
    try {
        QueryParser queryParser = new QueryParser("name", new StandardAnalyzer());
        Query query = queryParser.parse(key);
        TopDocCollector collector = new TopDocCollector(800);
        indexSearcher.search(query, collector);
        ScoreDoc[] hits = collector.topDocs().scoreDocs;

        Highlighter highlighter = null;
        SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter("<font color='red'>", "</font>");
        highlighter = new Highlighter(simpleHTMLFormatter, new QueryScorer(query));
        highlighter.setTextFragmenter(new SimpleFragmenter(200));
        List list = new ArrayList();
        Document doc;
        for (int i = 0; i < hits.length; i++) {
            //System.out.println(hits[i].score);  
            doc = indexSearcher.doc(hits[i].doc);
            TokenStream tokenStream = new StandardAnalyzer().tokenStream("name",
                    new StringReader(doc.get("name")));
            // Add the highlighted field value to the result list (the original
            // project wrapped it in an IndexResult bean built via reflection)
            list.add(highlighter.getBestFragment(tokenStream, doc.get("name")));
        }
        return list;
    } catch (ParseException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (InvalidTokenOffsetsException e) {
        e.printStackTrace();
    }
    return null;

}

From source file:io.bdrc.lucene.bo.TibetanAnalyzerTest.java

License:Apache License

@Test
public void ewtsOffsetBug2() throws IOException, ParseException, InvalidTokenOffsetsException {
    String input = "(cha) bka' bkan gnyis kyi lung";
    String queryLucene = "test:\"bka'\"";
    Analyzer indexingAnalyzer = new TibetanAnalyzer(false, true, false, "ewts", "");
    Analyzer queryAnalyzer = new TibetanAnalyzer(false, true, false, "ewts", "");
    TokenStream indexTk = indexingAnalyzer.tokenStream("", input);
    QueryParser queryParser = new QueryParser("test", queryAnalyzer);
    Query query = queryParser.parse(queryLucene);
    SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("->", "<-");
    Highlighter highlighter = new Highlighter(formatter, new QueryScorer(query));
    highlighter.setTextFragmenter(new SimpleFragmenter(10));
    TextFragment[] frags = highlighter.getBestTextFragments(indexTk, input, true, 128);
    final String firstFrag = frags[0].toString();
    System.out.println(firstFrag);
    assert (firstFrag.equals("(cha) ->bka'<- bkan gnyis kyi lung"));
    indexingAnalyzer.close();
    queryAnalyzer.close();
}

From source file:io.jpress.module.article.searcher.LuceneSearcher.java

License:LGPL

@Override
public Page<Article> search(String keyword, int pageNum, int pageSize) {
    IndexReader indexReader = null;
    try {
        // Bug fix: escape special characters in the keyword with QueryParser.escape
        // (e.g. keyword = "I/O" would otherwise break the query built in buildQuery)
        keyword = QueryParser.escape(keyword);
        indexReader = DirectoryReader.open(directory);
        IndexSearcher indexSearcher = new IndexSearcher(indexReader);
        Query query = buildQuery(keyword);

        ScoreDoc lastScoreDoc = getLastScoreDoc(pageNum, pageSize, query, indexSearcher);
        TopDocs topDocs = indexSearcher.searchAfter(lastScoreDoc, query, pageSize);

        SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("<font class=\"" + HIGH_LIGHT_CLASS + "\">",
                "</font>");
        Highlighter highlighter = new Highlighter(formatter, new QueryScorer(query));
        highlighter.setTextFragmenter(new SimpleFragmenter(100));

        List<Article> articles = toArticleList(indexSearcher, topDocs, highlighter, keyword);
        int totalRow = getTotalRow(indexSearcher, query);
        return newPage(pageNum, pageSize, totalRow, articles);
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        CommonsUtils.quietlyClose(indexReader);
    }
    return null;
}