Example usage for org.apache.lucene.search Sort Sort

List of usage examples for org.apache.lucene.search Sort Sort

Introduction

On this page you can find example usage for org.apache.lucene.search Sort Sort.

Prototype

public Sort(SortField... fields) 

Source Link

Document

Sets the sort to the given criteria in succession: the first SortField is checked first, but if it produces a tie, then the second SortField is used to break the tie, etc.

Usage

From source file:org.apache.solr.uninverting.TestFieldCacheSort.java

License:Apache License

/**
 * Verifies that sorting by score ranks a document matching two query
 * clauses above a document matching only one.
 */
public void testScore() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

    // Index two documents: doc 0 holds "bar", doc 1 holds "foo".
    Document doc = new Document();
    doc.add(newStringField("value", "bar", Field.Store.NO));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(newStringField("value", "foo", Field.Store.NO));
    writer.addDocument(doc);
    IndexReader reader = writer.getReader();
    writer.close();

    IndexSearcher searcher = newSearcher(reader);
    Sort scoreSort = new Sort(SortField.FIELD_SCORE);

    // "foo" matches both SHOULD clauses, so it should score above "bar",
    // which matches only the MatchAllDocsQuery clause.
    final BooleanQuery.Builder query = new BooleanQuery.Builder();
    query.add(new TermQuery(new Term("value", "foo")), Occur.SHOULD);
    query.add(new MatchAllDocsQuery(), Occur.SHOULD);
    TopDocs hits = searcher.search(query.build(), 10, scoreSort);
    assertEquals(2, hits.totalHits);
    // Only compare ranks when both scores are real numbers.
    if (Float.isNaN(hits.scoreDocs[0].score) == false && Float.isNaN(hits.scoreDocs[1].score) == false) {
        assertEquals(1, hits.scoreDocs[0].doc);
        assertEquals(0, hits.scoreDocs[1].doc);
    }
    TestUtil.checkReader(reader);
    reader.close();
    dir.close();
}

From source file:org.apache.solr.uninverting.TestFieldCacheSortRandom.java

License:Apache License

/**
 * Indexes random string values (about 10% of documents are missing the
 * value), then, over many iterations, sorts with a random direction and
 * missing-value policy and checks the hits against an expected order
 * computed by an equivalent in-memory comparator.
 *
 * Fix: removed the dead local {@code br} that was declared, assigned only
 * in the missing-value branch, and never read.
 */
private void testRandomStringSort(SortField.Type type) throws Exception {
    Random random = new Random(random().nextLong());

    final int NUM_DOCS = atLeast(100);
    final Directory dir = newDirectory();
    final RandomIndexWriter writer = new RandomIndexWriter(random, dir);
    final boolean allowDups = random.nextBoolean();
    final Set<String> seen = new HashSet<>();
    final int maxLength = TestUtil.nextInt(random, 5, 100);
    if (VERBOSE) {
        System.out
                .println("TEST: NUM_DOCS=" + NUM_DOCS + " maxLength=" + maxLength + " allowDups=" + allowDups);
    }

    int numDocs = 0;
    final List<BytesRef> docValues = new ArrayList<>();
    // TODO: deletions
    while (numDocs < NUM_DOCS) {
        final Document doc = new Document();

        // 10% of the time, the document is missing the value:
        if (random().nextInt(10) != 7) {
            final String s;
            if (random.nextBoolean()) {
                s = TestUtil.randomSimpleString(random, maxLength);
            } else {
                s = TestUtil.randomUnicodeString(random, maxLength);
            }

            // When duplicates are disallowed, retry until we draw a fresh value.
            if (!allowDups) {
                if (seen.contains(s)) {
                    continue;
                }
                seen.add(s);
            }

            if (VERBOSE) {
                System.out.println("  " + numDocs + ": s=" + s);
            }

            doc.add(new StringField("stringdv", s, Field.Store.NO));
            docValues.add(new BytesRef(s));

        } else {
            if (VERBOSE) {
                System.out.println("  " + numDocs + ": <missing>");
            }
            docValues.add(null);
        }

        doc.add(new IntPoint("id", numDocs));
        doc.add(new StoredField("id", numDocs));
        writer.addDocument(doc);
        numDocs++;

        if (random.nextInt(40) == 17) {
            // force flush
            writer.getReader().close();
        }
    }

    // Uninvert stringdv/id so they can be sorted on without doc values.
    Map<String, UninvertingReader.Type> mapping = new HashMap<>();
    mapping.put("stringdv", Type.SORTED);
    mapping.put("id", Type.INTEGER_POINT);
    final IndexReader r = UninvertingReader.wrap(writer.getReader(), mapping);
    writer.close();
    if (VERBOSE) {
        System.out.println("  reader=" + r);
    }

    final IndexSearcher s = newSearcher(r, false);
    final int ITERS = atLeast(100);
    for (int iter = 0; iter < ITERS; iter++) {
        final boolean reverse = random.nextBoolean();

        final TopFieldDocs hits;
        final SortField sf;
        final boolean sortMissingLast;
        final boolean missingIsNull;
        sf = new SortField("stringdv", type, reverse);
        sortMissingLast = random().nextBoolean();
        missingIsNull = true;

        if (sortMissingLast) {
            sf.setMissingValue(SortField.STRING_LAST);
        }

        // Randomly add a tiebreak on doc id.
        final Sort sort;
        if (random.nextBoolean()) {
            sort = new Sort(sf);
        } else {
            sort = new Sort(sf, SortField.FIELD_DOC);
        }
        final int hitCount = TestUtil.nextInt(random, 1, r.maxDoc() + 20);
        final RandomQuery f = new RandomQuery(random.nextLong(), random.nextFloat(), docValues);
        int queryType = random.nextInt(2);
        if (queryType == 0) {
            hits = s.search(new ConstantScoreQuery(f), hitCount, sort, random.nextBoolean(),
                    random.nextBoolean());
        } else {
            hits = s.search(f, hitCount, sort, random.nextBoolean(), random.nextBoolean());
        }

        if (VERBOSE) {
            System.out.println("\nTEST: iter=" + iter + " " + hits.totalHits + " hits; topN=" + hitCount
                    + "; reverse=" + reverse + "; sortMissingLast=" + sortMissingLast + " sort=" + sort);
        }

        // Compute expected results: null (missing) sorts first unless
        // sortMissingLast is set, otherwise natural BytesRef order.
        Collections.sort(f.matchValues, new Comparator<BytesRef>() {
            @Override
            public int compare(BytesRef a, BytesRef b) {
                if (a == null) {
                    if (b == null) {
                        return 0;
                    }
                    if (sortMissingLast) {
                        return 1;
                    } else {
                        return -1;
                    }
                } else if (b == null) {
                    if (sortMissingLast) {
                        return -1;
                    } else {
                        return 1;
                    }
                } else {
                    return a.compareTo(b);
                }
            }
        });

        if (reverse) {
            Collections.reverse(f.matchValues);
        }
        final List<BytesRef> expected = f.matchValues;
        if (VERBOSE) {
            System.out.println("  expected:");
            for (int idx = 0; idx < expected.size(); idx++) {
                BytesRef br = expected.get(idx);
                if (br == null && missingIsNull == false) {
                    br = new BytesRef();
                }
                System.out.println("    " + idx + ": " + (br == null ? "<missing>" : br.utf8ToString()));
                if (idx == hitCount - 1) {
                    break;
                }
            }
        }

        if (VERBOSE) {
            System.out.println("  actual:");
            for (int hitIDX = 0; hitIDX < hits.scoreDocs.length; hitIDX++) {
                final FieldDoc fd = (FieldDoc) hits.scoreDocs[hitIDX];
                BytesRef br = (BytesRef) fd.fields[0];

                System.out.println("    " + hitIDX + ": " + (br == null ? "<missing>" : br.utf8ToString())
                        + " id=" + s.doc(fd.doc).get("id"));
            }
        }
        for (int hitIDX = 0; hitIDX < hits.scoreDocs.length; hitIDX++) {
            final FieldDoc fd = (FieldDoc) hits.scoreDocs[hitIDX];
            BytesRef br = expected.get(hitIDX);
            if (br == null && missingIsNull == false) {
                br = new BytesRef();
            }

            // Normally, the old codecs (that don't support
            // docsWithField via doc values) will always return
            // an empty BytesRef for the missing case; however,
            // if all docs in a given segment were missing, in
            // that case it will return null!  So we must map
            // null here, too:
            BytesRef br2 = (BytesRef) fd.fields[0];
            if (br2 == null && missingIsNull == false) {
                br2 = new BytesRef();
            }

            assertEquals(br, br2);
        }
    }

    r.close();
    dir.close();
}

From source file:org.apache.solr.uninverting.TestNumericTerms32.java

License:Apache License

/**
 * Checks that a reverse INT sort over random range queries returns the
 * documents in strictly descending field order.
 */
private void testSorting(int precisionStep) throws Exception {
    final String field = "field" + precisionStep;
    // 10-20 random tests; the index order is ascending, so using a
    // reverse sort field should return descending documents.
    final int rounds = TestUtil.nextInt(random(), 10, 20);
    for (int round = 0; round < rounds; round++) {
        int lo = (int) (random().nextDouble() * noDocs * distance) + startOffset;
        int hi = (int) (random().nextDouble() * noDocs * distance) + startOffset;
        if (lo > hi) {
            final int tmp = lo;
            lo = hi;
            hi = tmp;
        }
        Query rangeQuery = LegacyNumericRangeQuery.newIntRange(field, precisionStep, lo, hi, true, true);
        TopDocs topDocs = searcher.search(rangeQuery, noDocs,
                new Sort(new SortField(field, SortField.Type.INT, true)));
        if (topDocs.totalHits == 0) {
            continue;
        }
        ScoreDoc[] hits = topDocs.scoreDocs;
        assertNotNull(hits);
        int previous = searcher.doc(hits[0].doc).getField(field).numericValue().intValue();
        for (int j = 1; j < hits.length; j++) {
            int current = searcher.doc(hits[j].doc).getField(field).numericValue().intValue();
            assertTrue("Docs should be sorted backwards", previous > current);
            previous = current;
        }
    }
}

From source file:org.apache.solr.uninverting.TestNumericTerms64.java

License:Apache License

/**
 * Checks that a reverse LONG sort over random range queries returns the
 * documents in strictly descending field order.
 */
private void testSorting(int precisionStep) throws Exception {
    final String field = "field" + precisionStep;
    // 10-20 random tests; the index order is ascending, so using a
    // reverse sort field should return descending documents.
    final int rounds = TestUtil.nextInt(random(), 10, 20);
    for (int round = 0; round < rounds; round++) {
        long lo = (long) (random().nextDouble() * noDocs * distance) + startOffset;
        long hi = (long) (random().nextDouble() * noDocs * distance) + startOffset;
        if (lo > hi) {
            final long tmp = lo;
            lo = hi;
            hi = tmp;
        }
        Query rangeQuery = LegacyNumericRangeQuery.newLongRange(field, precisionStep, lo, hi, true, true);
        TopDocs topDocs = searcher.search(rangeQuery, noDocs,
                new Sort(new SortField(field, SortField.Type.LONG, true)));
        if (topDocs.totalHits == 0) {
            continue;
        }
        ScoreDoc[] hits = topDocs.scoreDocs;
        assertNotNull(hits);
        long previous = searcher.doc(hits[0].doc).getField(field).numericValue().longValue();
        for (int j = 1; j < hits.length; j++) {
            long current = searcher.doc(hits[j].doc).getField(field).numericValue().longValue();
            assertTrue("Docs should be sorted backwards", previous > current);
            previous = current;
        }
    }
}

From source file:org.azrul.langkuik.dao.HibernateGenericDAO.java

/**
 * Full-text search over entities of the given class.
 *
 * Falls back to {@link #getAll} when no search terms are given. Results
 * are sorted on {@code orderBy}, or on the entity's identifier field when
 * {@code orderBy} is null; unknown sort fields yield an empty result.
 *
 * Fixes: (1) the EntityManager was leaked when a NoSuchFieldException was
 * thrown (it was only closed on the success path); (2) {@code asc} was
 * passed directly as SortField's third argument, which is {@code reverse},
 * so the requested direction was inverted — it is now negated.
 *
 * @param searchTerms terms to match; may be null or empty
 * @param daoClass    the entity class to search
 * @param orderBy     field name to sort on, or null to sort by the id field
 * @param asc         true for ascending order, false for descending
 * @param startIndex  index of the first result to return
 * @param offset      maximum number of results to return
 * @return matching entities, or an empty list when the sort field is unknown
 */
public Collection<T> search(Collection<SearchTerm> searchTerms, Class<T> daoClass, String orderBy, boolean asc,
        int startIndex, int offset) {
    if (searchTerms == null || searchTerms.isEmpty()) {
        return getAll(daoClass, orderBy, asc, startIndex, offset);
    }
    EntityManager em = emf.createEntityManager();
    try {
        FullTextEntityManager fullTextEntityManager = org.hibernate.search.jpa.Search
                .getFullTextEntityManager(em);

        String currentIdField = this.getIdentifierFieldName();
        org.apache.lucene.search.Query luceneQuery = buildSearchQuery(fullTextEntityManager, daoClass,
                searchTerms);

        // Sort on the requested field, or on the id field when none was given.
        // SortField's third argument means "reverse", hence !asc.
        String sortBy = (orderBy != null) ? orderBy : currentIdField;
        Class<?> fieldType = daoClass.getDeclaredField(sortBy).getType();
        Sort sort = Sort.INDEXORDER;
        if (Long.TYPE.equals(fieldType)) {
            sort = new Sort(new SortField(sortBy, SortField.LONG, !asc));
        } else if (Integer.TYPE.equals(fieldType)) {
            sort = new Sort(new SortField(sortBy, SortField.INT, !asc));
        } else if (orderBy != null || String.class.equals(fieldType)) {
            // All else fails, sort as string — but only when a sort field was
            // explicitly requested; an id field of another type keeps index order.
            sort = new Sort(new SortField(sortBy, SortField.STRING, !asc));
        }

        Query jpaQuery = fullTextEntityManager.createFullTextQuery(luceneQuery, daoClass).setSort(sort)
                .setFirstResult(startIndex).setMaxResults(offset);

        return jpaQuery.getResultList();
    } catch (NoSuchFieldException ex) {
        Logger.getLogger(HibernateGenericDAO.class.getName()).log(Level.SEVERE, null, ex);
    } finally {
        // Always release the EntityManager, also on the exception path.
        em.close();
    }
    return new ArrayList<T>();
}

From source file:org.bibsonomy.lucene.index.LuceneResourceIndex.java

License:Open Source License

/**
 * @return the latest log_date[ms] stored in the index, or Long.MAX_VALUE
 *         when the index is disabled or the value cannot be determined
 */
public long getLastLogDate() {
    // FIXME: this synchronisation is very inefficient
    synchronized (this) {
        if (!isIndexEnabled()) {
            return Long.MAX_VALUE;
        }
        if (this.lastLogDate != null) {
            return this.lastLogDate;
        }

        // Match every document, sort by last_log_date (LONG) in reverse
        // order, and take the single top document — the newest one.
        final Query matchAll = new MatchAllDocsQuery();
        final Sort byLogDateDesc = new Sort(new SortField(FLD_LAST_LOG_DATE, SortField.LONG, true));

        final Document newest = searchIndex(matchAll, 1, byLogDateDesc);
        if (newest != null) {
            try {
                return Long.parseLong(newest.get(FLD_LAST_LOG_DATE));
            } catch (final NumberFormatException e) {
                log.error("Error parsing last_log_date " + newest.get(FLD_LAST_LOG_DATE));
            }
        }

        return Long.MAX_VALUE;
    }
}

From source file:org.bibsonomy.lucene.index.LuceneResourceIndex.java

License:Open Source License

/** 
 * @return the newest tas_id from index/*from  www .  j  a  v a 2 s  .  c  o  m*/
 */
public Integer getLastTasId() {
    synchronized (this) {
        if (!isIndexEnabled()) {
            return Integer.MAX_VALUE;
        } else if (this.lastTasId != null) {
            return this.lastTasId;
        }

        //----------------------------------------------------------------
        // search over all elements sort them reverse by last_tas_id
        // and return 1 top document (newest one)
        //----------------------------------------------------------------
        // get all documents
        final Query matchAll = new MatchAllDocsQuery();
        // order by last_tas_id of type INT in reversed order
        final Sort sort = new Sort(new SortField(FLD_LAST_TAS_ID, SortField.INT, true));

        Integer lastTasId = null;
        final Document doc = searchIndex(matchAll, 1, sort);
        if (doc != null) {
            try {
                lastTasId = Integer.parseInt(doc.get(FLD_LAST_TAS_ID));
            } catch (final NumberFormatException e) {
                log.error("Error parsing last_tas_id " + doc.get(FLD_LAST_TAS_ID));
            }
        }

        return lastTasId != null ? lastTasId : Integer.MAX_VALUE;
    }
}

From source file:org.bibsonomy.lucene.search.LuceneResourceSearch.java

License:Open Source License

/**
 * Builds the overall Lucene search query, its sort order (newest first by
 * date), and a tag-cloud collector, bundled in a QuerySortContainer.
 *
 * @param userName the logged-in user (used to restrict visible posts)
 * @param requestedUserName restrict the resulting posts to those which are owned by this user name
 * @param requestedGroupName restrict the resulting posts to those which are owned by this group
 * @param allowedGroups groups whose posts the user is allowed to see
 * @param searchTerms free-text search terms
 * @param titleSearchTerms search terms restricted to the title field
 * @param authorSearchTerms search terms restricted to the author field
 * @param tagIndex tags to restrict the search to
 * @param year exact year restriction (optional)
 * @param firstYear lower bound of a year range (optional)
 * @param lastYear upper bound of a year range (optional)
 * @return overall lucene search query wrapped with sort and tag collector
 */
protected QuerySortContainer buildQuery(final String userName, final String requestedUserName,
        final String requestedGroupName, final Collection<String> allowedGroups, final String searchTerms,
        final String titleSearchTerms, final String authorSearchTerms, final Collection<String> tagIndex,
        final String year, final String firstYear, final String lastYear) {
    //--------------------------------------------------------------------
    // build the query
    //--------------------------------------------------------------------
    // the resulting main query
    final BooleanQuery mainQuery = new BooleanQuery();
    final BooleanQuery searchQuery = this.buildSearchQuery(userName, searchTerms, titleSearchTerms,
            authorSearchTerms, tagIndex);

    // restrict result to given group (only when the group query produced
    // at least one clause; an empty BooleanQuery would match nothing)
    if (present(requestedGroupName)) {
        final BooleanQuery groupQuery = this.buildGroupSearchQuery(requestedGroupName);
        if (groupQuery.getClauses().length >= 1) {
            mainQuery.add(groupQuery, Occur.MUST);
        }
    }

    // restricting access to posts visible to the user
    final Query accessModeQuery = buildAccessModeQuery(userName, allowedGroups);

    //--------------------------------------------------------------------
    // post owned by user
    //--------------------------------------------------------------------
    if (present(requestedUserName)) {
        mainQuery.add(new TermQuery(new Term(FLD_USER, requestedUserName)), Occur.MUST);
    }

    //--------------------------------------------------------------------
    // post owned by group
    //--------------------------------------------------------------------
    // TODO: remove code below??!
    //      if ( false && present(requestedGroupName) ) {
    //         mainQuery.add(new TermQuery(new Term(FLD_GROUP, requestedGroupName)), Occur.MUST);
    //      }

    //--------------------------------------------------------------------
    // build final query
    //--------------------------------------------------------------------

    // combine query terms
    mainQuery.add(searchQuery, Occur.MUST);
    mainQuery.add(accessModeQuery, Occur.MUST);

    // set ordering: by date of type LONG, reversed (newest first)
    final Sort sort = new Sort(new SortField(FLD_DATE, SortField.LONG, true));

    // all done
    log.debug("[Full text] Search query: " + mainQuery.toString());

    final QuerySortContainer qf = new QuerySortContainer();
    qf.setQuery(makeTimeRangeQuery(mainQuery, year, firstYear, lastYear));
    qf.setSort(sort);

    // set up collector for the tag cloud; on failure we log and proceed
    // without one (collector stays null) rather than aborting the search
    TagCountCollector collector;
    try {
        collector = new TagCountCollector(null, CFG_TAG_CLOUD_LIMIT, qf.getSort());
    } catch (final IOException e) {
        log.error("Error building tag cloud collector");
        collector = null;
    }
    qf.setTagCountCollector(collector);

    return qf;
}

From source file:org.bibsonomy.lucene.util.LuceneCommandLine.java

License:Open Source License

/**
 * Interactive query loop: opens searchers over the first BibTex and
 * Bookmark index shards and runs queries read from stdin until the user
 * types "!quit".
 */
private void doQuerying() throws Exception {
    // Open a searcher over shard 0 of the BibTex index.
    final String bibTexIndexPath = LuceneBase.getIndexBasePath() + "lucene_BibTex" + CFG_INDEX_ID_DELIMITER + "0";
    final Directory bibTexDirectory = FSDirectory.open(new File(bibTexIndexPath));
    final IndexReader bibTexReader = IndexReader.open(bibTexDirectory, false);
    final IndexSearcher bibTexSearcher = new IndexSearcher(bibTexReader);

    // Open a searcher over shard 0 of the Bookmark index.
    final String bookmarkIndexPath = LuceneBase.getIndexBasePath() + "lucene_Bookmark" + CFG_INDEX_ID_DELIMITER + "0";
    final Directory bookmarkDirectory = FSDirectory.open(new File(bookmarkIndexPath));
    final IndexReader bookmarkReader = IndexReader.open(bookmarkDirectory, false);
    final IndexSearcher bookmarkSearcher = new IndexSearcher(bookmarkReader);

    // Newest entries first: reverse INT sort on last_tas_id.
    final Sort sort = new Sort(new SortField("last_tas_id", SortField.INT, true));

    // NOTE(review): "!quit" itself is still searched once before the loop
    // exits — preserved from the original behavior.
    String searchTerms = null;
    while (!"!quit".equals(searchTerms)) {
        System.out.print("Query: ");
        searchTerms = readStdIn();

        doSearching(bookmarkSearcher, sort, searchTerms);
        doSearching(bibTexSearcher, sort, searchTerms);
    }
}

From source file:org.codelibs.elasticsearch.search.sort.SortBuilder.java

License:Apache License

/**
 * Builds the combined Sort (plus doc-value formats) from the given sort
 * builders. Returns {@link Optional#empty()} when there is nothing to
 * build, or when the only sort is a non-reversed score sort — that is the
 * default ordering, so no explicit sorting is needed.
 */
public static Optional<SortAndFormats> buildSort(List<SortBuilder<?>> sortBuilders, QueryShardContext context)
        throws IOException {
    final List<SortField> fields = new ArrayList<>(sortBuilders.size());
    final List<DocValueFormat> formats = new ArrayList<>(sortBuilders.size());
    for (SortBuilder<?> builder : sortBuilders) {
        SortFieldAndFormat fieldAndFormat = builder.build(context);
        fields.add(fieldAndFormat.field);
        formats.add(fieldAndFormat.format);
    }
    if (fields.isEmpty()) {
        return Optional.empty();
    }
    // Optimize: a single non-reversed score sort is what Lucene does by
    // default, so we don't really need sorting in that case.
    if (fields.size() == 1) {
        final SortField only = fields.get(0);
        if (only.getType() == SortField.Type.SCORE && !only.getReverse()) {
            return Optional.empty();
        }
    }
    return Optional.of(new SortAndFormats(
            new Sort(fields.toArray(new SortField[fields.size()])),
            formats.toArray(new DocValueFormat[formats.size()])));
}