Example usage for org.apache.lucene.search.highlight Highlighter Highlighter

List of usage examples for org.apache.lucene.search.highlight Highlighter Highlighter

Introduction

In this page you can find the example usage for org.apache.lucene.search.highlight Highlighter Highlighter.

Prototype

public Highlighter(Formatter formatter, Scorer fragmentScorer) 

Source Link

Usage

From source file:com.liferay.portal.search.lucene.LuceneHelperImpl.java

License:Open Source License

/**
 * Returns a highlighted snippet of {@code s} for the given query/field,
 * built from up to {@code maxNumFragments} fragments of
 * {@code fragmentLength} characters, separated by {@code fragmentSuffix}
 * and with matches wrapped in {@code preTag}/{@code postTag}.
 *
 * @throws IOException if tokenization or highlighting fails
 */
public String getSnippet(Query query, String field, String s, int maxNumFragments, int fragmentLength,
        String fragmentSuffix, String preTag, String postTag) throws IOException {

    // Formatter wraps matched terms in the caller-supplied pre/post tags.
    SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter(preTag, postTag);

    // Scores fragments by how well they match the query on the given field.
    QueryScorer queryScorer = new QueryScorer(query, field);

    Highlighter highlighter = new Highlighter(simpleHTMLFormatter, queryScorer);

    highlighter.setTextFragmenter(new SimpleFragmenter(fragmentLength));

    TokenStream tokenStream = getAnalyzer().tokenStream(field, new UnsyncStringReader(s));

    try {
        String snippet = highlighter.getBestFragments(tokenStream, s, maxNumFragments, fragmentSuffix);

        // Make sure a non-empty snippet always ends with the suffix so
        // truncation is visible to the user.
        if (Validator.isNotNull(snippet) && !StringUtil.endsWith(snippet, fragmentSuffix)) {

            snippet = snippet.concat(fragmentSuffix);
        }

        return snippet;
    } catch (InvalidTokenOffsetsException itoe) {
        // Keep the original message but preserve the cause instead of
        // discarding the stack trace (the original dropped it).
        throw new IOException(itoe.getMessage(), itoe);
    }
}

From source file:com.liferay.portal.search.lucene31.LuceneHelperImpl.java

License:Open Source License

/**
 * Returns a highlighted snippet of {@code s} for the given query/field,
 * built from up to {@code maxNumFragments} fragments of
 * {@code fragmentLength} characters, separated by {@code fragmentSuffix}
 * and with matches wrapped in {@code preTag}/{@code postTag}.
 *
 * @throws IOException if tokenization or highlighting fails
 */
public String getSnippet(Query query, String field, String s, int maxNumFragments, int fragmentLength,
        String fragmentSuffix, String preTag, String postTag) throws IOException {

    // Formatter wraps matched terms in the caller-supplied pre/post tags.
    SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter(preTag, postTag);

    // Scores fragments by how well they match the query on the given field.
    QueryScorer queryScorer = new QueryScorer(query, field);

    Highlighter highlighter = new Highlighter(simpleHTMLFormatter, queryScorer);

    highlighter.setTextFragmenter(new SimpleFragmenter(fragmentLength));

    TokenStream tokenStream = getAnalyzer().tokenStream(field, new UnsyncStringReader(s));

    try {
        String snippet = highlighter.getBestFragments(tokenStream, s, maxNumFragments, fragmentSuffix);

        // Make sure a non-empty snippet always ends with the suffix so
        // truncation is visible to the user.
        if (Validator.isNotNull(snippet) && !StringUtil.endsWith(snippet, fragmentSuffix)) {

            snippet = snippet + fragmentSuffix;
        }

        return snippet;
    } catch (InvalidTokenOffsetsException itoe) {
        // Keep the original message but preserve the cause instead of
        // discarding the stack trace (the original dropped it).
        throw new IOException(itoe.getMessage(), itoe);
    }
}

From source file:com.main.Searcher.java

/**
 * Searches the index for two parsed queries combined according to
 * {@code radioBtn} ("conjunction", "disjunction", or negation otherwise),
 * returning the top 10 hits as {@link Bean}s with highlighted fragments.
 *
 * @param s1 first query string
 * @param s2 second query string
 * @param radioBtn boolean mode selector
 * @return the shared {@code beanList} populated with hit metadata and fragments
 */
public List<Bean> searching(String s1, String s2, String radioBtn)
        throws IOException, ParseException, InvalidTokenOffsetsException {
    // Open the index directory and a point-in-time reader over it.
    Directory dir = FSDirectory.open(Paths.get(Index_Dir));
    IndexReader reader = DirectoryReader.open(dir);

    IndexSearcher searcher = new IndexSearcher(reader);
    // Analyzer with the default stop words; takes the stop words out.
    Analyzer analyzer = new StandardAnalyzer();

    String contents = "contents";
    QueryParser parser = new QueryParser(contents, analyzer);

    Query q1 = parser.parse(s1);
    Query q2 = parser.parse(s2);

    // Combine the two queries: conjunction, disjunction, or negation.
    // (The original also looped over every document in the index doing
    // nothing, and called bq.build() three times discarding the result;
    // both removed. String constant first avoids an NPE on null radioBtn.)
    BooleanQuery.Builder bq = new BooleanQuery.Builder();
    if ("conjunction".equals(radioBtn)) {
        // Occur.MUST: both queries required in a doc.
        bq.add(q1, BooleanClause.Occur.MUST);
        bq.add(q2, BooleanClause.Occur.MUST);
    } else if ("disjunction".equals(radioBtn)) {
        // Occur.SHOULD: either query may be present in a doc.
        bq.add(q1, BooleanClause.Occur.SHOULD);
        bq.add(q2, BooleanClause.Occur.SHOULD);
    } else {
        // Negation: first must be present, second must not.
        bq.add(q1, BooleanClause.Occur.MUST);
        bq.add(q2, BooleanClause.Occur.MUST_NOT);
    }
    BooleanQuery query = bq.build();

    TopDocs hits = searcher.search(query, 10);

    // Markup for highlighted terms found in the best sections of a doc.
    Formatter formatter = new SimpleHTMLFormatter();
    QueryScorer scorer = new QueryScorer(query);
    Highlighter highlighter = new Highlighter(formatter, scorer);
    // Breaks content into same-size texts but does not split up spans.
    Fragmenter fragmenter = new SimpleSpanFragmenter(scorer, 10);
    highlighter.setTextFragmenter(fragmenter);

    for (ScoreDoc hit : hits.scoreDocs) {
        Bean bean = new Bean();

        bean.setNumFile(hits.scoreDocs.length);
        int docid = hit.doc;
        // Float score widens to double, as before.
        bean.setRankSc(hit.score);
        Document doc = searcher.doc(docid);

        // Title shown to the user is the stored "name" field.
        bean.setTitle(doc.get("name"));
        bean.setPath(doc.get("path"));

        String cont = doc.get("contents");
        // Re-tokenize (or reuse term vectors for) the stored contents.
        TokenStream stream = TokenSources.getAnyTokenStream(reader, docid, "contents", analyzer);
        // Get highlighted content fragments.
        String[] frags = highlighter.getBestFragments(stream, cont, 10);

        ArrayList<String> fragments = new ArrayList<>();
        for (String frag : frags) {
            fragments.add(frag);
        }

        bean.setContent(fragments);
        beanList.add(bean);
    }

    // Close the reader before the directory (the original leaked the reader).
    reader.close();
    dir.close();
    return beanList;
}

From source file:com.main.Searcher.java

/**
 * Searches the index for a single parsed query (wrapped as a required
 * boolean clause) and returns the top 10 hits as {@link Bean}s with
 * highlighted fragments of the "contents" field.
 *
 * @param s1 the query string
 * @return the shared {@code beanList} populated with hit metadata and fragments
 */
public List<Bean> searching(String s1) throws IOException, ParseException, InvalidTokenOffsetsException {
    // Get directory reference and a point-in-time reader over the index.
    Directory dir = FSDirectory.open(Paths.get(Index_Dir));
    IndexReader reader = DirectoryReader.open(dir);
    // Lucene searcher; it searches over a single IndexReader.
    IndexSearcher searcher = new IndexSearcher(reader);
    // Analyzer with the default stop words.
    Analyzer analyzer = new StandardAnalyzer();

    // (The original opened a BufferedReader on System.in behind a
    // condition that was always false and never closed it, and looped
    // over every document doing nothing; both removed.)
    String contents = "contents";
    QueryParser parser = new QueryParser(contents, analyzer);

    Query q1 = parser.parse(s1);

    BooleanQuery.Builder bq = new BooleanQuery.Builder();
    bq.add(q1, BooleanClause.Occur.MUST);
    BooleanQuery query = bq.build();

    // Search the Lucene documents for the top 10 hits.
    TopDocs hits = searcher.search(query, 10);

    /**
     * Highlighter Code Start ***
     */
    // Uses HTML &lt;B&gt;&lt;/B&gt; tags to highlight the searched terms.
    Formatter formatter = new SimpleHTMLFormatter();
    // Scores fragments by the number of unique query terms found —
    // basically the matching score in layman terms.
    QueryScorer scorer = new QueryScorer(query);
    // Used to markup highlighted terms found in the best sections of a doc.
    Highlighter highlighter = new Highlighter(formatter, scorer);
    // Breaks content into same-size texts but does not split up spans.
    Fragmenter fragmenter = new SimpleSpanFragmenter(scorer, 10);
    highlighter.setTextFragmenter(fragmenter);

    // Iterate over found results.
    for (ScoreDoc hit : hits.scoreDocs) {
        Bean bean = new Bean();

        bean.setNumFile(hits.scoreDocs.length);
        int docid = hit.doc;
        // Float score widens to double, as before.
        bean.setRankSc(hit.score);
        Document doc = searcher.doc(docid);

        // Title shown to the user is the stored "name" field.
        bean.setTitle(doc.get("name"));
        bean.setPath(doc.get("path"));

        String cont = doc.get("contents");
        // Re-tokenize (or reuse term vectors for) the stored contents.
        TokenStream stream = TokenSources.getAnyTokenStream(reader, docid, "contents", analyzer);
        // Get highlighted content fragments.
        String[] frags = highlighter.getBestFragments(stream, cont, 10);

        ArrayList<String> fragments = new ArrayList<>();
        for (String frag : frags) {
            fragments.add(frag);
        }

        bean.setContent(fragments);
        beanList.add(bean);
    }

    // Close the reader before the directory (the original leaked the reader).
    reader.close();
    dir.close();
    return beanList;
}

From source file:com.mathworks.xzheng.tools.HighlightIt.java

License:Apache License

/**
 * Demo: highlights occurrences of a fixed search term in {@code text}
 * and writes the result as a small HTML page to the file named by the
 * single command-line argument.
 */
public static void main(String[] args) throws Exception {

    if (args.length != 1) {
        System.err.println("Usage: HighlightIt <filename-out>");
        System.exit(-1);
    }

    String filename = args[0];

    String searchText = "term"; // #1
    QueryParser parser = new QueryParser(Version.LUCENE_46, // #1
            "f", // #1
            new StandardAnalyzer(Version.LUCENE_46));// #1
    Query query = parser.parse(searchText); // #1

    SimpleHTMLFormatter formatter = // #2
            new SimpleHTMLFormatter("<span class=\"highlight\">", // #2
                    "</span>"); // #2

    // #3: 'text' is defined elsewhere in this class — TODO confirm.
    TokenStream tokens = new StandardAnalyzer(Version.LUCENE_46) // #3
            .tokenStream("f", new StringReader(text)); // #3

    QueryScorer scorer = new QueryScorer(query, "f"); // #4

    Highlighter highlighter = new Highlighter(formatter, scorer); // #5
    highlighter.setTextFragmenter( // #6
            new SimpleSpanFragmenter(scorer)); // #6

    String result = // #7
            highlighter.getBestFragments(tokens, text, 3, "..."); // #7

    // #8: try-with-resources guarantees the writer is closed even when a
    // write throws (the original leaked the FileWriter on failure).
    try (FileWriter writer = new FileWriter(filename)) { // #8
        writer.write("<html>"); // #8
        writer.write("<style>\n" + // #8
                ".highlight {\n" + // #8
                " background: yellow;\n" + // #8
                "}\n" + // #8
                "</style>"); // #8
        writer.write("<body>"); // #8
        writer.write(result); // #8
        writer.write("</body></html>"); // #8
    } // #8
}

From source file:com.oneis.app.SearchResultExcerptHighlighter.java

License:Mozilla Public License

/**
 * Picks the best excerpts of {@code escapedText} for display, with the
 * given search terms wrapped in &lt;b&gt;&lt;/b&gt;. Falls back to plain
 * fixed-size excerpts when no search terms are supplied. Returns
 * {@code null} if anything goes wrong (logged, not rethrown).
 */
static public String[] bestHighlightedExcerpts(String escapedText, String searchTerms, int maxExcerptLength) {
    try {
        // Choose a scorer/fragmenter pair. With terms present, fragments are
        // scored against a query parsed from them; without terms, nothing is
        // highlighted and the text is simply cut into fixed-size pieces.
        Scorer scorer;
        Fragmenter fragmenter;
        boolean haveTerms = searchTerms != null && searchTerms.length() > 0;
        if (haveTerms) {
            Query query = new QueryParser("FIELD", new StandardAnalyzer()).parse(searchTerms);
            QueryScorer queryScorer = new QueryScorer(query);
            scorer = queryScorer;
            fragmenter = new SimpleSpanFragmenter(queryScorer, maxExcerptLength);
        } else {
            scorer = new NoHighlightingScorer();
            fragmenter = new SimpleFragmenter(maxExcerptLength);
        }

        // Tokenize the escaped text; tokens keep their offsets into it.
        TokenStream tokenStream =
                new StandardAnalyzer().tokenStream("FIELD", new StringReader(escapedText));

        // Run the highlighter, wrapping matches in bold tags.
        Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter("<b>", "</b>"), scorer);
        highlighter.setTextFragmenter(fragmenter);
        return highlighter.getBestFragments(tokenStream, escapedText, NUMBER_OF_FRAGMENTS);
    } catch (Exception e) {
        Logger.getLogger("com.oneis.app").info("Exception in SearchResultExcerptHighlighter: ", e);
        return null;
    }
}

From source file:com.paladin.common.Tools.java

License:Apache License

/**
 * Highlights occurrences of the query terms within the given content,
 * wrapping matches in the configured highlight style span.
 *
 * @param _query   the parsed query whose terms should be highlighted
 * @param _field   the field name used to tokenize the content
 * @param _content the raw text to highlight
 * @return the best highlighted fragment, or null if highlighting fails
 */
public static String highlight(final Query _query, final String _field, final String _content) {
    // Score fragments against the parsed query.
    Scorer scorer = new QueryScorer(_query);
    // Open the match with the configured highlight span, close with </span>.
    SimpleHTMLFormatter formatter = new SimpleHTMLFormatter(Constants.HIGHLIGHT_STYLE, "</span>");
    Highlighter hl = new Highlighter(formatter, scorer);
    TokenStream tokens = new IKAnalyzer().tokenStream(_field, new StringReader(_content));
    try {
        return hl.getBestFragment(tokens, _content);
    } catch (IOException | InvalidTokenOffsetsException e) {
        // Multi-catch replaces two identical catch bodies; both failures are
        // treated as non-fatal — log and fall through to null, as before.
        e.printStackTrace();
    }
    return null;
}

From source file:com.rapidminer.search.GlobalSearchHandler.java

License:Open Source License

/**
 * Creates the search result for search methods.
 *
 * @param searchTerm
 *       the search string
 * @param searcher
 *       the index searcher instance which was used to search
 * @param result
 *       the result of the search
 * @param highlightResult
 *       if {@code true}, the {@link GlobalSearchResult#getBestFragments()} will be created
 * @return the search result instance, never {@code null}
 * @throws IOException
 *       if something goes wrong
 */
private GlobalSearchResult createSearchResult(final String searchTerm, final Query parsedQuery,
        final IndexSearcher searcher, final TopDocs result, final boolean highlightResult) throws IOException {
    int resultNumber = result.scoreDocs.length;
    List<Document> resultList = new ArrayList<>(resultNumber);
    List<String[]> highlights = highlightResult ? new LinkedList<>() : null;
    ScoreDoc lastResult = resultNumber > 0 ? result.scoreDocs[result.scoreDocs.length - 1] : null;

    // The scorer, highlighter and fragmenter depend only on the parsed
    // query, so build them once instead of once per hit as before.
    QueryScorer scorer = null;
    Highlighter highlighter = null;
    if (highlightResult) {
        scorer = new QueryScorer(parsedQuery);
        highlighter = new Highlighter(HIGHLIGHT_FORMATTER, scorer);
        Fragmenter fragmenter = new SimpleSpanFragmenter(scorer, FRAGMENT_SIZE);
        highlighter.setTextFragmenter(fragmenter);
    }

    for (ScoreDoc scoreDoc : result.scoreDocs) {
        Document doc = searcher.doc(scoreDoc.doc);
        resultList.add(doc);

        if (highlightResult) {
            // Search result highlighting: best match on the name field.
            try {
                TokenStream stream = TokenSources.getTokenStream(GlobalSearchUtilities.FIELD_NAME,
                        searcher.getIndexReader().getTermVectors(scoreDoc.doc),
                        doc.get(GlobalSearchUtilities.FIELD_NAME), GlobalSearchUtilities.ANALYZER,
                        Highlighter.DEFAULT_MAX_CHARS_TO_ANALYZE - 1);
                if (stream != null) {
                    highlights.add(highlighter.getBestFragments(stream,
                            doc.get(GlobalSearchUtilities.FIELD_NAME), MAX_NUMBER_OF_FRAGMENTS));
                } else {
                    highlights.add(null);
                }
            } catch (InvalidTokenOffsetsException e) {
                // Offsets out of sync with the stored text: no fragments for
                // this document, keep position alignment with resultList.
                highlights.add(null);
            }
        }
    }
    return new GlobalSearchResult(resultList, searchTerm, lastResult, result.totalHits, highlights);
}

From source file:com.recomdata.search.Finder.java

License:Open Source License

/**
 * Prints a search hit's stored fields ("repository", "path", "extension",
 * "title") and a highlighted snippet of its "contents" field to stdout.
 * Matches are wrapped in &lt;b&gt;&lt;/b&gt; and joined with "...".
 *
 * NOTE(review): the {@code id} and {@code score} parameters are unused in
 * this method body — confirm whether callers expect them to be printed.
 */
private void display(Document doc, int id, float score, Query query, Analyzer analyzer) {

    System.out.println("repository = " + doc.get("repository"));
    System.out.println("path       = " + doc.get("path"));
    System.out.println("extension  = " + doc.get("extension"));
    System.out.println("title      = " + doc.get("title"));

    // Highlight against the "contents" field, 50-char fragments.
    Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter("<b>", "</b>"),
            new QueryScorer(query, "contents"));
    highlighter.setTextFragmenter(new SimpleFragmenter(50));
    String summary = doc.get("contents");
    TokenStream tokenStream = analyzer.tokenStream("contents", new StringReader(summary));
    try { // best 5 fragments, joined with "..."
        System.out.println("contents   = " + highlighter.getBestFragments(tokenStream, summary, 5, "..."));
    } catch (IOException e) {
        // Best-effort display: report the failure instead of propagating.
        System.out.println("exception: " + e.getMessage());
    }

    System.out.println();
}

From source file:com.searchlocal.lucene.ContentSearcher.java

License:Open Source License

/**
 * Searches the index for the keyword in the given parameters and returns
 * the requested page of results with highlighted content fragments.
 *
 * @param param the search parameters (index path, keyword, row range)
 * @return list of result beans for the requested page, possibly empty
 */
public static List<ResultBean> query(SearchParam param) throws IOException, LogicException {
    // Location of the on-disk index to search.
    String indexPath = param.getIndexPath();
    // Lazily open the shared directory handle.
    // NOTE(review): 'fsd' and 'is' are static caches that are never
    // refreshed or closed here — confirm their lifecycle is managed
    // elsewhere (not thread-safe as written).
    if (null == fsd) {
        fsd = SimpleFSDirectory.open(new File(indexPath));
    }
    List<ResultBean> beanList = new ArrayList<ResultBean>();
    try {
        // Parse the user's keyword with the Paoding (Chinese) analyzer.
        Analyzer analyzer = new PaodingAnalyzer();
        QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "content", analyzer);
        Query query;
        query = parser.parse(param.getKeyWord());

        // Collect up to the top 100 hits in score order.
        TopScoreDocCollector collector = TopScoreDocCollector.create(100, true);
        // Lazily open the shared searcher over the cached directory.
        if (null == is) {
            is = new IndexSearcher(fsd, true);
        }
        is.search(query, collector);

        ScoreDoc[] scoreDoc = collector.topDocs().scoreDocs;

        // Wrap matches in red <font> tags for display.
        SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter("<font color=\"red\">", "</font>");

        Highlighter highlighter = new Highlighter(simpleHTMLFormatter, new QueryScorer(query));
        highlighter.setTextFragmenter(new SimpleFragmenter(CONTENTS_SHOW_LENGTH));

        if (scoreDoc.length == 0) {
            return beanList;
        }
        // Page through [startRow, endRow), clamped to the number of hits.
        int startRow = param.getStartRow();
        int endRow = param.getEndRow();
        endRow = scoreDoc.length > endRow ? endRow : scoreDoc.length;
        for (int i = startRow; i < endRow; i++) {
            Document doc = is.doc(scoreDoc[i].doc);
            String content = doc.get("content");
            // Replace the raw content with its best highlighted fragment.
            TokenStream tokenStream = analyzer.tokenStream("content", new StringReader(content));
            content = highlighter.getBestFragment(tokenStream, content);
            ResultBean bean = BeanUtil.getBean(doc, content);
            beanList.add(bean);
        }
    } catch (IOException e) {
        // NOTE(review): errors are swallowed and an empty/partial list is
        // returned despite the declared 'throws IOException' — confirm intent.
        e.printStackTrace();
    } catch (InvalidTokenOffsetsException e) {
        e.printStackTrace();
    } catch (ParseException e) {
        e.printStackTrace();
    }
    return beanList;
}