Example usage for org.apache.lucene.search.highlight.Highlighter: the Highlighter(Formatter, Scorer) constructor

Introduction

This page collects example usages of the org.apache.lucene.search.highlight.Highlighter constructor that takes a Formatter and a Scorer.

Prototype

public Highlighter(Formatter formatter, Scorer fragmentScorer) 
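
Before the per-project examples under Usage, here is a minimal, self-contained sketch of this constructor: a Formatter that wraps matched terms and a Scorer that ranks candidate fragments against the query. The class name HighlighterExample, the field name "contents", and the sample text are illustrative only, and the sketch assumes a Lucene version in which StandardAnalyzer has a no-argument constructor (5.x or later).

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.highlight.*;

public class HighlighterExample {
    public static void main(String[] args) throws Exception {
        String text = "Apache Lucene is a high-performance search library.";
        Query query = new TermQuery(new Term("contents", "lucene"));

        // The two constructor arguments from the prototype above.
        Formatter formatter = new SimpleHTMLFormatter("<b>", "</b>");
        Scorer scorer = new QueryScorer(query);
        Highlighter highlighter = new Highlighter(formatter, scorer);
        highlighter.setTextFragmenter(new SimpleFragmenter(100));

        // Analyze the raw text and return the best-scoring fragment,
        // with matched terms wrapped in <b> tags.
        String fragment = highlighter.getBestFragment(new StandardAnalyzer(), "contents", text);
        System.out.println(fragment); // e.g. "Apache <b>Lucene</b> is a high-performance search library."
    }
}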

Usage

From source file: org.shredzone.cilla.service.search.strategy.LuceneSearchStrategy.java

License: Open Source License

/**
 * Creates a list of highlights for a search result.
 *
 * @param pq
 *            {@link Query} that was used
 * @param result
 *            List of {@link Page} results
 * @return matching list of text extracts with highlights
 */
private List<String> createHighlights(Query pq, List<Page> result) {
    QueryScorer scorer = new QueryScorer(pq, "text");
    Fragmenter fragmenter = searchResultRenderer.createFragmenter(scorer);
    Formatter formatter = searchResultRenderer.createFormatter();

    Highlighter highlighter = new Highlighter(formatter, scorer);
    highlighter.setTextFragmenter(fragmenter);

    PageBridge bridge = new PageBridge();

    return result.stream().parallel().map(bridge::objectToString).map(plain -> highlight(plain, highlighter))
            .collect(Collectors.toList());
}

From source file: org.zilverline.service.SearchServiceImpl.java

License: Open Source License

/**
 * Searches the given Collections for the query. Sending an empty or null array of names will be treated as if all
 * collections need to be searched, which is handy for external queries that can't know the names of collections.
 *
 * @param names array of collection names
 * @param queryString the query
 * @param startAt the first result to return (start counting from 0)
 * @param numberOfResults the (maximum) number of results to return
 * @return Object containing the results, and number of hits and the possibly changed startAt and endAt
 * 
 * @throws SearchException if query can't be executed
 */
public SearchResult doSearch(final String[] names, final String queryString, int startAt,
        final int numberOfResults) throws SearchException {
    try {
        MultiSearcher ms = null;
        try {
            // for given collections create a List of IndexSearchers
            IndexSearcher[] allSearchers = createSearchersForCollectionsByName(names);

            // prepare the query
            // TODO: which analyzer to use? Different collections may have different Analyzers
            Analyzer analyzer = getCollectionManager().createAnalyzer();
            log.debug("Using Analyzer " + analyzer.getClass());

            // for each occurrence of contents, add a boost for fields specified in BoostFactor
            BoostingParser zp = new BoostingParser("contents", analyzer);
            zp.setFactors(factors);

            Query localquery = zp.parse(queryString);
            log.debug("the Query: " + query);

            // and search in all collections
            ms = new MultiSearcher(allSearchers);
            Hits hits = ms.search(localquery);

            log.debug("Query " + queryString + ", with " + hits.length() + " hits");
            // if we're changing the query, and we've paged too far
            if (startAt >= hits.length() || startAt < 0) {
                startAt = 0;
            }
            // only get the number of results we're interested in that is from startAt to endAt, or last
            int endAt = startAt + numberOfResults;
            // set the max index to maxpage or last
            if (endAt > hits.length()) {
                endAt = hits.length();
            }
            log.debug("Returning hits " + startAt + " to " + (endAt - 1));

            // get all the hits into results array
            Result[] results = new Result[endAt - startAt];

            Highlighter highlighter = new Highlighter(
                    new SimpleHTMLFormatter("<span class=\"highlight\">", "</span>"),
                    new QueryScorer(localquery));

            // get all the hits into Result
            for (int j = startAt; j < endAt; j++) {
                Document doc = hits.doc(j);
                float score = hits.score(j);
                results[j - startAt] = doc2ResultHelper(doc, score, highlighter, analyzer);
            }
            log.info("The query '" + queryString + "', has " + hits.length() + " hits, returning " + maxResults
                    + " results: " + (startAt + 1) + " to " + endAt);

            return new SearchResult(results, hits.length(), startAt, endAt);
        } catch (org.apache.lucene.queryParser.ParseException pe) {
            throw new SearchException("Error executing query '" + queryString + "'", pe);
        } catch (TooManyClauses e) {
            throw new SearchException("Error executing query '" + queryString
                    + ". Too complex, possibly spanning more than 1024 days'", e);
        } finally {
            if (ms != null) {
                ms.close();
            }
        }
    } catch (Exception e) {
        log.error("Error executing query '" + queryString + "', " + e);
        throw new SearchException("Error executing query '" + queryString + "'", e);
    }
}

From source file: perf.SearchTask.java

License: Apache License

@Override
public void go(IndexState state) throws IOException {
    //System.out.println("go group=" + this.group + " single=" + singlePassGroup + " xxx=" + xxx + " this=" + this);
    final IndexSearcher searcher = state.mgr.acquire();

    //System.out.println("GO query=" + q);

    try {
        if (doHilite) {
            if (state.fastHighlighter != null) {
                fieldQuery = state.fastHighlighter.getFieldQuery(q, searcher.getIndexReader());
            } else if (state.useHighlighter) {
                highlighter = new Highlighter(new SimpleHTMLFormatter(), new QueryScorer(q));
            } else {
                // no setup for postingshighlighter
            }
        }

        if (group != null) {
            if (singlePassGroup) {
                final BlockGroupingCollector c = new BlockGroupingCollector(Sort.RELEVANCE, 10, true,
                        searcher.createNormalizedWeight(state.groupEndQuery, false));
                searcher.search(q, c);
                groupsResultBlock = c.getTopGroups(Sort.RELEVANCE, 0, 0, 10, true);

                if (doHilite) {
                    hilite(groupsResultBlock, state, searcher);
                }

            } else {
                //System.out.println("GB: " + group);
                final TermFirstPassGroupingCollector c1 = new TermFirstPassGroupingCollector(group,
                        Sort.RELEVANCE, 10);

                final Collector c;
                final TermAllGroupsCollector allGroupsCollector;
                // Turn off AllGroupsCollector for now -- it's very slow:
                if (false && doCountGroups) {
                    allGroupsCollector = new TermAllGroupsCollector(group);
                    //c = MultiCollector.wrap(allGroupsCollector, c1);
                    c = c1;
                } else {
                    allGroupsCollector = null;
                    c = c1;
                }

                searcher.search(q, c);

                final Collection<SearchGroup<BytesRef>> topGroups = c1.getTopGroups(0, true);
                if (topGroups != null) {
                    final TermSecondPassGroupingCollector c2 = new TermSecondPassGroupingCollector(group,
                            topGroups, Sort.RELEVANCE, Sort.RELEVANCE, 10, true, true, true);
                    searcher.search(q, c2);
                    groupsResultTerms = c2.getTopGroups(0);
                    if (allGroupsCollector != null) {
                        groupsResultTerms = new TopGroups<BytesRef>(groupsResultTerms,
                                allGroupsCollector.getGroupCount());
                    }
                    if (doHilite) {
                        hilite(groupsResultTerms, state, searcher);
                    }
                }
            }
        } else if (!facetRequests.isEmpty()) {
            // TODO: support sort, filter too!!
            // TODO: support other facet methods
            if (doDrillSideways) {
                // nocommit todo
                hits = null;
                facetResults = null;
            } else {
                facetResults = new ArrayList<FacetResult>();
                FacetsCollector fc = new FacetsCollector();
                hits = FacetsCollector.search(searcher, q, 10, fc);
                long t0 = System.nanoTime();

                Facets mainFacets = null;
                for (String request : facetRequests) {
                    if (request.startsWith("range:")) {
                        int i = request.indexOf(':', 6);
                        if (i == -1) {
                            throw new IllegalArgumentException("range facets request \"" + request
                                    + "\" is missing field; should be range:field:0-10,10-20");
                        }
                        String field = request.substring(6, i);
                        String[] rangeStrings = request.substring(i + 1, request.length()).split(",");
                        LongRange[] ranges = new LongRange[rangeStrings.length];
                        for (int rangeIDX = 0; rangeIDX < ranges.length; rangeIDX++) {
                            String rangeString = rangeStrings[rangeIDX];
                            int j = rangeString.indexOf('-');
                            if (j == -1) {
                                throw new IllegalArgumentException(
                                        "range facets request should be X-Y; got: " + rangeString);
                            }
                            long start = Long.parseLong(rangeString.substring(0, j));
                            long end = Long.parseLong(rangeString.substring(j + 1));
                            ranges[rangeIDX] = new LongRange(rangeString, start, true, end, true);
                        }
                        LongRangeFacetCounts facets = new LongRangeFacetCounts(field, fc, ranges);
                        facetResults.add(facets.getTopChildren(ranges.length, field));
                    } else {
                        Facets facets = new FastTaxonomyFacetCounts(state.taxoReader, state.facetsConfig, fc);
                        facetResults.add(facets.getTopChildren(10, request));
                    }
                }
                getFacetResultsMsec = (System.nanoTime() - t0) / 1000000.0;
            }
        } else if (s == null) {
            hits = searcher.search(q, topN);
            if (doHilite) {
                hilite(hits, state, searcher, q);
            }
        } else {
            hits = searcher.search(q, topN, s);
            if (doHilite) {
                hilite(hits, state, searcher, q);
            }
            /*
              final boolean fillFields = true;
              final boolean fieldSortDoTrackScores = true;
              final boolean fieldSortDoMaxScore = true;
              final TopFieldCollector c = TopFieldCollector.create(s, topN,
              fillFields,
              fieldSortDoTrackScores,
              fieldSortDoMaxScore,
              false);
              searcher.search(q, c);
              hits = c.topDocs();
            */
        }
        if (hits != null) {
            totalHitCount = hits.totalHits;

            if (doStoredLoads) {
                for (int i = 0; i < hits.scoreDocs.length; i++) {
                    ScoreDoc scoreDoc = hits.scoreDocs[i];
                    searcher.doc(scoreDoc.doc);
                }
            }

        } else if (groupsResultBlock != null) {
            totalHitCount = groupsResultBlock.totalHitCount;
        }
    } catch (Throwable t) {
        System.out.println("EXC: " + q);
        throw new RuntimeException(t);
        //System.out.println("TE: " + TermsEnum.getStats());
    } finally {
        state.mgr.release(searcher);
        fieldQuery = null;
        highlighter = null;
    }
}

From source file: Search.SearchExecutor.java

License: Educational Community License

/**
 * Search user-generated transcriptions. Results contain embedded images, and are restricted to transcriptions
 * that the user usrID has permission to view.
 * @param searchWord
 * @param language
 * @param order
 * @param paged
 * @param pageNumber
 * @param usrID
 * @return
 * @throws Exception
 */
public Stack<Transcription> transcriptionSearch(String searchWord, String language, int order, Boolean paged,
        int pageNumber, String usrID) throws Exception {
    Boolean wildcard = true;
    final int pageSize = 20; //Number of results per page, could be made a param one day
    final int maxResults = 1000; //No matter what, don't return more than this many results from Lucene. This is OK because result filtering occurs before this limitation is applied
    String returnStringArray = "";
    //we don't currently worry about language filtering, but ENAP did, so we could do it if we wanted to
    if (language != null && language.length() > 1) {
        //searchWord=searchWord+" AND lang:"+language;
    }
    /**@TODO the location should be a param*/
    IndexSearcher is = new IndexSearcher("/usr/indexTranscriptions");
    QueryParser parser = new QueryParser("text", analyser);
    Sort newsort;
    Query query = parser.parse(searchWord);
    is.rewrite(parser.parse(searchWord)); // note: the rewritten query is discarded here
    QueryScorer queryScorer = new QueryScorer(query);
    ScoreDoc[] hits;
    //If the person wasn't logged in, give them only public comments
    if (usrID.compareTo("") == 0) {
        usrID = "0";
    }
    Query secQuery = parser.parse("security:private OR creator:" + usrID);
    //This will filter search results so only comments owned by the user and public comments will be returned
    QueryFilter secFilter = new QueryFilter(secQuery);
    //If a sort was specified, use it, otherwise use the default sorting which is by hit quality
    if (order > 0) {
        //order=1 means sort by line number, first line of the text is first.
        //order=2 means inverse sort by line number, last line of the text is first.
        //Java, the Nanny language, doesn't want to let me use newsort even if I ensure it's not a null pointer
        //So if we're going to use a filter, set the filter to type 1, then check to see if it should be something else.

        try {
            hits = is.search(query, secFilter, maxResults).scoreDocs;
        } catch (org.apache.lucene.search.BooleanQuery.TooManyClauses e) {
            return null;
        }
    } else {
        try {
            newsort = new Sort("creator", false);
            hits = is.search(query, secFilter, maxResults, newsort).scoreDocs;

        } catch (org.apache.lucene.search.BooleanQuery.TooManyClauses e) {
            return null;
        }
    }
    //Start at the hit that belongs at the top of the page they requested. For page 2, that is hit index 20
    //Ensure we do not print more than pageNumber hits, or go beyond the end of the hit list
    String link = "";
    int ctr = 1;
    Stack<Transcription> results = new Stack<Transcription>();
    SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("<span class=\"highlight\">", "</span>");
    Highlighter highlighter = new Highlighter(formatter, queryScorer);

    if (pageSize * (pageNumber - 1) < hits.length) {
        returnStringArray += "Your search for \"<b>" + searchWord + "</b>\" returned " + hits.length
                + " results.<br/>";
        for (int i = pageSize * (pageNumber - 1); i < hits.length
                && i - (pageSize * (pageNumber - 1)) < pageSize; i++) {

            Document hitDoc = is.doc(hits[i].doc);

            field = hitDoc.getField("line");
            Transcription t = new Transcription(Integer.parseInt(hitDoc.getField("id").stringValue()));
            results.add(t);
            String paragraph = field.stringValue();
            String pageno = "";
            String creator = hitDoc.getField("creator").stringValue();
            user.User u = new User(Integer.parseInt(creator));
            creator = "" + u.getLname() + " " + u.getFname();
            if (isInteger(paragraph)) {

                field = hitDoc.getField("page");
                pageno = field.stringValue();
                if (pageno == null) {
                    pageno = "hi null";
                }
                if (paragraph == null) {
                    paragraph = "hola null";
                }
            } else {
                String folio = ""; // BUG in the original: folio is never assigned, so the parseInt below will throw
                Folio f = new Folio(Integer.parseInt(folio));
                link = "&nbsp;&nbsp;&nbsp;<a href=transcriptionImageTest.jsp?p=" + folio + ">"
                        + field.stringValue() + "(Archive:" + f.getArchive() + " Shelfmark:"
                        + f.getCollectionName() + " page:" + folio + ")</a>";
            }
            returnStringArray = returnStringArray + (ctr + ". " + link + "<br/>");
            ctr++;
        }
    } else /*we don't have any results for the page/search they gave us*/ {
        returnStringArray = "No results to display.";
    }
    totalHits = hits.length;
    totalPages = hits.length / pageSize;
    if (hits.length % pageSize > 0) {
        totalPages++;
    }
    // both branches of the original wildcard check returned the same value
    return results;
}

From source file: searchEngine.SearchFiles.java

License: Apache License

public static QueryResult doPagingSearch(BufferedReader in, IndexSearcher searcher, Query query,
        int hitsPerPage, int pageNumber, boolean raw, boolean interactive)
        throws IOException, InvalidTokenOffsetsException {

    QueryResult queryResults = new QueryResult();
    TopDocs results = searcher.search(query, pageNumber * hitsPerPage);
    ScoreDoc[] hits = results.scoreDocs;

    int numTotalHits = results.totalHits;
    System.out.println(numTotalHits + " total matching documents");

    Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter("<font color=\"red\">", "</font>"),
            new QueryScorer(query));
    highlighter.setTextFragmenter(new SimpleFragmenter(20));

    int start = 0;
    int end = Math.min(numTotalHits, pageNumber * hitsPerPage);
    for (int i = start; i < end; i++) {
        if (raw) {
            System.out.println("doc=" + hits[i].doc + " score=" + hits[i].score);
            continue;
        }

        Document doc = searcher.doc(hits[i].doc);
        Analyzer analyzer = new SmartChineseAnalyzer();
        String text = doc.get("contents");
        TokenStream tokenStream = analyzer.tokenStream("contents", new StringReader(text));
        String highlighterResult = highlighter.getBestFragments(tokenStream, text, 2, "");
        System.out.println("########### " + highlighterResult);

        String path = doc.get("path");
        if (path != null) {
            System.out.println((i + 1) + ". " + path + " Score: " + hits[i].score);
            //System.out.println(results[i].)
            queryResults.addUrl(path);
            String title = doc.get("title");
            queryResults.addTitle(title);
            queryResults.addContent(highlighterResult);
        } else {
            System.out.println((i + 1) + ". " + "No path for this document");
        }
    }
    return queryResults;
}

From source file: searcher.CollStat.java

String getSnippet(Query q, Document doc, int docid) throws Exception {
    StringBuffer buff = new StringBuffer();
    SimpleHTMLFormatter htmlFormatter = new SimpleHTMLFormatter();
    Highlighter highlighter = new Highlighter(htmlFormatter, new QueryScorer(q));
    System.out.println("enterEd");
    // Get the decompressed html
    String html = IndexHtmlToText.decompress(doc.getBinaryValue(WTDocument.WTDOC_FIELD_HTML).bytes);

    // Generate snippet...
    InputStream input = new ByteArrayInputStream(html.getBytes(StandardCharsets.UTF_8));
    ContentHandler handler = new BodyContentHandler(-1);
    Metadata metadata = new Metadata();
    new HtmlParser().parse(input, handler, metadata, new ParseContext());
    String text = handler.toString();

    TokenStream tokenStream = analyzer.tokenStream("dummy", new StringReader(text));
    TextFragment[] frag = highlighter.getBestTextFragments(tokenStream, text, false, 5);
    for (int j = 0; j < frag.length; j++) {
        if ((frag[j] != null) && (frag[j].getScore() > 0)) {
            buff.append((frag[j].toString()));
        }
    }
    String snippet = buff.toString();
    String modifiedText = snippet;

    String pattern = "<(\\s*)[a-zA-Z0-9]+[^>]+$";
    Pattern r = Pattern.compile(pattern);
    Matcher m = r.matcher(snippet);
    if (m.find()) {
        modifiedText = m.replaceAll("");
    }
    snippet = modifiedText;
    return snippet;
    //byte[] encodedBytes = Base64.encodeBase64(snippet.getBytes());
    //return new String(encodedBytes);
}

From source file: servlet.Checkcopy.java

/**
 * Processes requests for both HTTP <code>GET</code> and <code>POST</code>
 * methods.
 *
 * @param request servlet request
 * @param response servlet response
 * @throws ServletException if a servlet-specific error occurs
 * @throws IOException if an I/O error occurs
 */
protected void processRequest(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    HttpSession ss = request.getSession();
    Assignment a = (Assignment) ss.getAttribute("curAm");
    int safv_id = Integer.parseInt(request.getParameter("safv_id"));
    String studentAmPath = getServletContext().getRealPath("/") + "/file/student_assignment_file/";
    if (a.getAss_type().equalsIgnoreCase("file")) {
        StAssignmentFile sa = (StAssignmentFile) ss.getAttribute("sa");
        StAmFileList f = StAmFileList.getSafvByListIdSafv(safv_id, sa.getList_id());
        String filename = f.getPath_file();
        String fileExtension = filename.substring(filename.lastIndexOf(".") + 1);
        String keyword = "";
        if (fileExtension.equalsIgnoreCase("docx")) {
            keyword = DocumentFunction.readDocxFile(studentAmPath + filename);
        } else if (fileExtension.equalsIgnoreCase("doc")) {
            keyword = DocumentFunction.readDocFile(studentAmPath + filename);
        } else if (fileExtension.equalsIgnoreCase("xls")) {
            keyword = DocumentFunction.readXlsFile(studentAmPath + filename);
        } else if (fileExtension.equalsIgnoreCase("xlsx")) {
            keyword = DocumentFunction.readXlsxFile(studentAmPath + filename);
        } else if (fileExtension.equalsIgnoreCase("pdf")) {
            keyword = DocumentFunction.readPdfFile(studentAmPath + filename);
        }

        if (!keyword.equals("")) {
            System.out.println("----------------------search...");
            Directory directory = null;
            IndexReader indexReader;
            ArrayList<String[]> indexsetList = null;
            try {
                directory = FSDirectory.open(
                        new File(studentAmPath + "//" + a.getCourse().getCourse_id() + "//" + sa.getAm_id()));
                indexReader = DirectoryReader.open(directory);
                IndexSearcher searcher = new IndexSearcher(indexReader);
                BooleanQuery.setMaxClauseCount(20000);
                QueryParser parser = new QueryParser(Version.LUCENE_47, "student_assignment",
                        new ThaiAnalyzer(Version.LUCENE_47));
                Query query = parser.parse(QueryParser.escape(keyword));

                int hitsPerPage = 10;
                Sort sort = new Sort(new SortField[] { SortField.FIELD_SCORE,
                        new SortField("student_assignment", SortField.Type.STRING) });
                TopFieldCollector topField = TopFieldCollector.create(sort, hitsPerPage, true, true, true,
                        false);
                searcher.search(query, topField);
                TopDocs docs = topField.topDocs();
                SimpleHTMLFormatter htmlFormatter = new SimpleHTMLFormatter("<font color=red>", "</font>");
                Highlighter highlighter = new Highlighter(htmlFormatter, new QueryScorer(query));

                indexsetList = new ArrayList<>();
                for (int i = 0; i < docs.scoreDocs.length; i++) {
                    String[] indexset = new String[5];
                    int id = docs.scoreDocs[i].doc;
                    float score = docs.scoreDocs[i].score;
                    Document doc = searcher.doc(id);
                    String text = doc.get("student_assignment");
                    String st_am_id = doc.get("st_am_id");
                    String owner_safv_id = doc.get("safv_id");
                    //                    System.out.println(text);
                    //                    System.out.println(st_am_id);
                    //                    System.out.println(owner_safv_id);
                    //                    System.out.println("-----------");
                    TokenStream tokenStream = TokenSources.getAnyTokenStream(searcher.getIndexReader(), id,
                            "student_assignment", new ThaiAnalyzer(Version.LUCENE_47));

                    String[] hltextArr = highlighter.getBestFragments(tokenStream, text, hitsPerPage);
                    String hltext = "";
                    for (String string : hltextArr) {
                        hltext += string.toString() + "<br/>";
                    }
                    indexset[0] = st_am_id;
                    indexset[1] = hltext;
                    //getting owner of
                    StAmFileList file = StAmFileList.getSafvBySafv(Integer.parseInt(owner_safv_id));
                    if (file != null) {
                        System.out.println((a.getAm_id() + " /" + file.getList_id()));
                        StAssignmentFile stam = StAssignmentFile.getStAmBbyAmIDAndList(a.getAm_id(),
                                file.getList_id());
                        String html = "";
                        // decide whether this match should be included in the results
                        boolean add = true;
                        if (stam.getG_id() == 0) {
                            //if there is no group, it's an individual work
                            if (sa.getAcc_id() != stam.getAcc_id()) {
                                Account owneracc = Account.getNameByID(stam.getAcc_id());
                                html = "<img style=\"width:30px\" src=\"" + owneracc.getProfile_pic()
                                        + "\" data-toggle=\"tooltip\" data-placement=\"top\" title=\"\" class=\"img-circle\" data-original-title=\""
                                        + owneracc.getFirstname() + "\">";
                            } else {
                                add = false;
                            }
                        } else {
                            if (sa.getG_id() != stam.getG_id()) {
                                List<Account> ownerlist = Account.getNameByGIDandAmID(stam.getG_id(),
                                        stam.getAm_id());
                                html = "<a class=\"showGroup\" data-toggle=\"popover\" data-html=\"true\" data-content=\""
                                        + Util.createPopoverGroup(ownerlist) + "\">Group no. "
                                        + Group_member.getGNOById(stam.getG_id()) + "</a>";
                            } else {
                                add = false;
                            }
                        }
                        indexset[2] = html;
                        indexset[3] = score + "";
                        indexset[4] = owner_safv_id;
                        if (add) {
                            indexsetList.add(indexset);
                        }
                    }
                }
            } catch (IOException ex) {
                Logger.getLogger(TestDriver.class.getName()).log(Level.SEVERE, null, ex);
            } catch (ParseException ex) {
                Logger.getLogger(TestDriver.class.getName()).log(Level.SEVERE, null, ex);
            } catch (InvalidTokenOffsetsException ex) {
                Logger.getLogger(TestDriver.class.getName()).log(Level.SEVERE, null, ex);
            }
            //            for (String[] strings : indexsetList) {
            //                System.out.println(strings[0] + " : "+ strings[2] +" : " + strings[1] );
            //            }
            request.setAttribute("nowUUid", f.getUuid());
            request.setAttribute("keyword", keyword);
            request.setAttribute("indexsetList", indexsetList);
        } else {
            request.setAttribute("error_msg", "This assignment cannot use for check copy.");
        }
        //            System.out.println(keyword);

        getServletContext().getRequestDispatcher("/Checkcopy.jsp?tab=AllAssignment").forward(request, response);
    }
}

From source file: suneido.runtime.builtin.Lucene.java

License: Open Source License

@Params("dir, query, limit, block")
public static Object Search(Object self, Object a, Object b, Object c, Object d) {
    Path path = getPath(a);
    if (!path.toFile().exists())
        throw dirNotFound("Search", path);
    String queryStr = Ops.toStr(b);
    int limit = Ops.toInt(c);
    try (FSDirectory dir = FSDirectory.open(getPath(a)); IndexReader ir = DirectoryReader.open(dir)) {
        IndexSearcher searcher = new IndexSearcher(ir);
        Analyzer analyzer = analyzer();
        QueryParser parser = new QueryParser("content", analyzer);
        Query query = parser.parse(queryStr);
        TopDocs results = searcher.search(query, limit);
        SimpleHTMLFormatter htmlFormatter = new SimpleHTMLFormatter();
        Highlighter highlighter = new Highlighter(htmlFormatter, new QueryScorer(query));
        ScoreDoc[] hits = results.scoreDocs;
        for (ScoreDoc hit : hits) {
            int id = hit.doc;
            Document doc = ir.document(id);
            String key = doc.get("key");
            String content = doc.get("content");
            TokenStream tokenStream = analyzer.tokenStream("content", content);
            TextFragment[] frag = highlighter.getBestTextFragments(tokenStream, content, false, 4);
            SuObject fragments = new SuObject();

            for (int j = 0; j < frag.length; j++) {
                if ((frag[j] != null) && (frag[j].getScore() > 0)) {
                    fragments.add(frag[j].toString());
                }
            }

            Ops.call2(d, key, fragments);
        }
        return null;
    } catch (Exception e) {
        throw new SuException("Lucene.Search: failed", e);
    }
}

From source file: top.sj.lucene.LuceneSearchUtil.java

License: Open Source License

/**
 * Searches a single indexed field with a single query condition and returns
 * the matching documents with highlighted fragments.
 *
 * @param primaryKeyByHibernateEntity
 *            name of the primary key property of the Hibernate entity
 * @param analysisTarget
 *            name of the indexed field to search
 * @param analysisCondition
 *            the query string to search for
 * @return list of search results with highlighted fragments
 * @throws IOException
 * @throws ParseException
 * @throws InvalidTokenOffsetsException
 */
public static List<LuceneSearchDTO> searchOfSingleAreaAndSingleCondition(String primaryKeyByHibernateEntity,
        String analysisTarget, String analysisCondition)
        throws IOException, ParseException, InvalidTokenOffsetsException {

    String configPath = PropertiesTool.getPropertiesFileAsObject("lucene_config.properties")
            .getProperty("index_location");
    Directory dir = null;
    try {
        dir = FSDirectory.open(new File(configPath));
    } catch (Exception e) {
        e.printStackTrace();
    }

    // Directory dir = FSDirectory.open(new File("D:\\lucene"));

    IndexSearcher searcher = new IndexSearcher(dir);

    QueryParser parser = new QueryParser(Version.LUCENE_30, analysisTarget,
            new StandardAnalyzer(Version.LUCENE_30));

    // parse the search condition into a Lucene query
    Query query = parser.parse(analysisCondition);

    TopDocs topDocs = searcher.search(query, MAX_SEARCH_RESULT);

    // highlight matching terms with a simple HTML formatter, scored against the query
    QueryScorer queryScorer = new QueryScorer(query);
    Fragmenter fragmenter = new SimpleSpanFragmenter(queryScorer);
    Formatter formatter = new SimpleHTMLFormatter("<b>", "</b>");
    Highlighter highlighter = new Highlighter(formatter, queryScorer);
    highlighter.setTextFragmenter(fragmenter);

    List<LuceneSearchDTO> analysisResults = new ArrayList<LuceneSearchDTO>();

    for (int i = 0; i < topDocs.scoreDocs.length; i++) {
        int docId = topDocs.scoreDocs[i].doc;
        Document doc = searcher.doc(docId);
        String attr = highlighter.getBestFragment(new StandardAnalyzer(Version.LUCENE_30), analysisTarget,
                doc.get(analysisTarget));
        analysisResults.add(new LuceneSearchDTO(Integer.valueOf(doc.get(primaryKeyByHibernateEntity)), attr));
    }
    return analysisResults;
}

From source file: top.sj.lucene.LuceneSearchUtil.java

License: Open Source License

/**
 * Searches a single indexed field with multiple query conditions; the first
 * condition is required (MUST) and the remaining conditions are optional (SHOULD).
 *
 * @param primaryKeyByHibernateEntity
 *            name of the primary key property of the Hibernate entity
 * @param analysisTarget
 *            name of the indexed field to search
 * @param analysisConditions
 *            the query strings to search for
 * @return list of search results with highlighted fragments
 * @throws IOException
 * @throws ParseException
 * @throws InvalidTokenOffsetsException
 */
public static List<LuceneSearchDTO> searchOfSingleAreaAndMultiCondition(String primaryKeyByHibernateEntity,
        String analysisTarget, String... analysisConditions)
        throws IOException, ParseException, InvalidTokenOffsetsException {
    String configPath = PropertiesTool.getPropertiesFileAsObject("lucene_config.properties")
            .getProperty("index_location");
    Directory dir = FSDirectory.open(new File(configPath));

    // Directory dir = FSDirectory.open(new File("D://lucene"));
    IndexSearcher searcher = new IndexSearcher(dir);

    QueryParser parser = new QueryParser(Version.LUCENE_30, analysisTarget,
            new StandardAnalyzer(Version.LUCENE_30));

    BooleanQuery query = new BooleanQuery();

    for (int i = 0; i < analysisConditions.length; i++) {
        Query query1 = parser.parse(analysisConditions[i]);
        query.add(query1, i == 0 ? Occur.MUST : Occur.SHOULD);
    }
    TopDocs topDocs = searcher.search(query, MAX_SEARCH_RESULT);

    // highlight matching terms with a simple HTML formatter, scored against the query
    QueryScorer queryScorer = new QueryScorer(query);
    Fragmenter fragmenter = new SimpleSpanFragmenter(queryScorer);
    Formatter formatter = new SimpleHTMLFormatter("<b>", "</b>");
    Highlighter highlighter = new Highlighter(formatter, queryScorer);
    highlighter.setTextFragmenter(fragmenter);

    List<LuceneSearchDTO> analysisResults = new ArrayList<LuceneSearchDTO>();

    for (int i = 0; i < topDocs.scoreDocs.length; i++) {
        int docId = topDocs.scoreDocs[i].doc;
        Document doc = searcher.doc(docId);
        String attr = highlighter.getBestFragment(new StandardAnalyzer(Version.LUCENE_30), analysisTarget,
                doc.get(analysisTarget));
        analysisResults.add(new LuceneSearchDTO(Integer.valueOf(doc.get(primaryKeyByHibernateEntity)), attr));
    }
    return analysisResults;
}