List of usage examples for the org.apache.lucene.search.Sort constructor
public Sort(SortField... fields)
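Before the project examples below, here is a minimal sketch of the constructor in use. The field name "title" and the wrapper class are illustrative assumptions, not taken from any project on this page: it builds a compound Sort from a descending string field plus relevance score as a tie-breaker, which can then be passed to IndexSearcher.search(query, n, sort).

import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;

public class SortConstructorSketch {
    // Hypothetical example: sort by the "title" field in descending order
    // (the boolean flag reverses the natural order), then break ties by score.
    static Sort titleThenScore() {
        return new Sort(
                new SortField("title", SortField.Type.STRING, true), // true = descending
                SortField.FIELD_SCORE);                              // secondary key: relevance
    }
}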
From source file:org.compass.core.lucene.engine.LuceneSearchEngineQuery.java
License:Apache License
public Sort getSort() {
    if (sortFields.size() == 0) {
        return null;
    }
    SortField[] sortFieldsArr = sortFields.toArray(new SortField[sortFields.size()]);
    return new Sort(sortFieldsArr);
}
From source file:org.dspace.search.DSQuery.java
License:BSD License
private static TopDocs performQuery(QueryArgs args, IndexSearcher searcher, Query myquery, int max)
        throws IOException {
    TopDocs hits;
    try {
        if (args.getSortOption() == null) {
            SortField[] sortFields = new SortField[] {
                    new SortField("search.resourcetype", SortField.Type.INT, true),
                    new SortField(null, SortField.FIELD_SCORE.getType(),
                            SortOption.ASCENDING.equals(args.getSortOrder())) };
            hits = searcher.search(myquery, max, new Sort(sortFields));
        } else {
            SortField[] sortFields = new SortField[] {
                    new SortField("search.resourcetype", SortField.Type.INT, true),
                    new SortField("sort_" + args.getSortOption().getName(), SortField.Type.STRING,
                            SortOption.DESCENDING.equals(args.getSortOrder())),
                    SortField.FIELD_SCORE };
            hits = searcher.search(myquery, max, new Sort(sortFields));
        }
    } catch (Exception e) {
        // Lucene can throw an exception if it is unable to determine a sort time from the specified field.
        // Provide a fallback that just works on relevancy.
        log.error("Unable to use specified sort option: "
                + (args.getSortOption() == null ? "type/relevance" : args.getSortOption().getName()));
        hits = searcher.search(myquery, max, new Sort(SortField.FIELD_SCORE));
    }
    return hits;
}
From source file:org.dspace.search.LuceneIndex.java
License:BSD License
private static TopDocs performQuery(QueryArgs args, IndexSearcher searcher, Query myquery, int max)
        throws IOException {
    TopDocs hits;
    try {
        if (args.getSortOption() == null) {
            SortField[] sortFields = new SortField[] {
                    new SortField("search.resourcetype", Type.INT, true),
                    new SortField(null, Type.SCORE, SortOption.ASCENDING.equals(args.getSortOrder())) };
            hits = searcher.search(myquery, max, new Sort(sortFields));
        } else {
            SortField[] sortFields = new SortField[] {
                    new SortField("search.resourcetype", Type.INT, true),
                    new SortField("sort_" + args.getSortOption().getName(), Type.STRING,
                            SortOption.DESCENDING.equals(args.getSortOrder())),
                    SortField.FIELD_SCORE };
            hits = searcher.search(myquery, max, new Sort(sortFields));
        }
    } catch (Exception e) {
        // Lucene can throw an exception if it is unable to determine a sort time from the specified field.
        // Provide a fallback that just works on relevancy.
        log.error("Unable to use specified sort option: "
                + (args.getSortOption() == null ? "type/relevance" : args.getSortOption().getName()));
        hits = searcher.search(myquery, max, new Sort(SortField.FIELD_SCORE));
    }
    return hits;
}
From source file:org.efaps.admin.index.Searcher.java
License:Apache License
/**
 * Search.
 *
 * @param _search the search
 * @return the search result
 * @throws EFapsException on error
 */
protected SearchResult executeSearch(final ISearch _search)
        throws EFapsException {
    final SearchResult ret = new SearchResult();
    try {
        LOG.debug("Starting search with: {}", _search.getQuery());
        final StandardQueryParser queryParser = new StandardQueryParser(Index.getAnalyzer());
        queryParser.setAllowLeadingWildcard(true);
        if (EFapsSystemConfiguration.get().containsAttributeValue(KernelSettings.INDEXDEFAULTOP)) {
            queryParser.setDefaultOperator(EnumUtils.getEnum(StandardQueryConfigHandler.Operator.class,
                    EFapsSystemConfiguration.get().getAttributeValue(KernelSettings.INDEXDEFAULTOP)));
        } else {
            queryParser.setDefaultOperator(StandardQueryConfigHandler.Operator.AND);
        }
        final Query query = queryParser.parse(_search.getQuery(), "ALL");
        final IndexReader reader = DirectoryReader.open(Index.getDirectory());
        Sort sort = _search.getSort();
        if (sort == null) {
            sort = new Sort(new SortField(Key.CREATED.name(), SortField.Type.LONG, true));
        }
        final FacetsConfig facetConfig = Index.getFacetsConfig();
        final DirectoryTaxonomyReader taxoReader = new DirectoryTaxonomyReader(Index.getTaxonomyDirectory());
        final IndexSearcher searcher = new IndexSearcher(reader);
        final FacetsCollector fc = new FacetsCollector();
        final TopFieldDocs topFieldDocs = FacetsCollector.search(searcher, query, _search.getNumHits(), sort, fc);
        if (_search.getConfigs().contains(SearchConfig.ACTIVATE_DIMENSION)) {
            final Facets facets = new FastTaxonomyFacetCounts(taxoReader, facetConfig, fc);
            for (final FacetResult result : facets.getAllDims(1000)) {
                LOG.debug("FacetResult {}.", result);
                final DimConfig dimConfig = facetConfig.getDimConfig(result.dim);
                final Dimension retDim = new Dimension().setKey(result.dim);
                ret.getDimensions().add(retDim);
                for (final LabelAndValue labelValue : result.labelValues) {
                    final DimValue dimValue = new DimValue().setLabel(labelValue.label)
                            .setValue(labelValue.value.intValue());
                    dimValue.setPath(new String[] { retDim.getKey() });
                    retDim.getValues().add(dimValue);
                    if (dimConfig.hierarchical) {
                        addSubDimension(facets, dimValue, result.dim, labelValue.label);
                    }
                }
            }
        }
        ret.setHitCount(topFieldDocs.totalHits);
        if (ret.getHitCount() > 0) {
            final ScoreDoc[] hits = topFieldDocs.scoreDocs;
            LOG.debug("Found {} hits.", hits.length);
            for (int i = 0; i < hits.length; ++i) {
                final Document doc = searcher.doc(hits[i].doc);
                final String oid = doc.get(Key.OID.name());
                final String text = doc.get(Key.MSGPHRASE.name());
                LOG.debug("{}. {}\t {}", i + 1, oid, text);
                final Instance instance = Instance.get(oid);
                final List<Instance> list;
                if (this.typeMapping.containsKey(instance.getType())) {
                    list = this.typeMapping.get(instance.getType());
                } else {
                    list = new ArrayList<Instance>();
                    this.typeMapping.put(instance.getType(), list);
                }
                list.add(instance);
                final Element element = new Element().setOid(oid).setText(text);
                for (final Entry<String, Collection<String>> entry : _search.getResultFields().entrySet()) {
                    for (final String name : entry.getValue()) {
                        final String value = doc.get(name);
                        if (value != null) {
                            element.addField(name, value);
                        }
                    }
                }
                this.elements.put(instance, element);
            }
        }
        reader.close();
        checkAccess();
        ret.getElements().addAll(this.elements.values());
    } catch (final IOException | QueryNodeException e) {
        LOG.error("Caught exception", e);
    }
    return ret;
}
From source file:org.elasticsearch.action.admin.indices.create.ShrinkIndexIT.java
License:Apache License
public void testCreateShrinkWithIndexSort() throws Exception {
    SortField expectedSortField = new SortedSetSortField("id", true, SortedSetSelector.Type.MAX);
    expectedSortField.setMissingValue(SortedSetSortField.STRING_FIRST);
    Sort expectedIndexSort = new Sort(expectedSortField);
    internalCluster().ensureAtLeastNumDataNodes(2);
    prepareCreate("source")
            .setSettings(Settings.builder().put(indexSettings()).put("sort.field", "id")
                    .put("sort.order", "desc").put("number_of_shards", 8).put("number_of_replicas", 0))
            .addMapping("type", "id", "type=keyword,doc_values=true").get();
    for (int i = 0; i < 20; i++) {
        client().prepareIndex("source", "type", Integer.toString(i))
                .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON).get();
    }
    ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get()
            .getState().nodes().getDataNodes();
    assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
    DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
    String mergeNode = discoveryNodes[0].getName();
    // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node.
    // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
    // to the require._name below.
    ensureGreen();
    flushAndRefresh();
    assertSortedSegments("source", expectedIndexSort);
    // relocate all shards to one node such that we can merge it.
    client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder()
            .put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true)).get();
    ensureGreen();
    // check that index sort cannot be set on the target index
    IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
            () -> client().admin().indices().prepareShrinkIndex("source", "target")
                    .setSettings(Settings.builder().put("index.number_of_replicas", 0)
                            .put("index.number_of_shards", "2").put("index.sort.field", "foo").build())
                    .get());
    assertThat(exc.getMessage(), containsString("can't override index sort when shrinking index"));
    // check that the index sort order of `source` is correctly applied to the `target`
    assertAcked(client().admin().indices().prepareShrinkIndex("source", "target").setSettings(
            Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", "2").build())
            .get());
    ensureGreen();
    flushAndRefresh();
    GetSettingsResponse settingsResponse = client().admin().indices().prepareGetSettings("target").execute()
            .actionGet();
    assertEquals(settingsResponse.getSetting("target", "index.sort.field"), "id");
    assertEquals(settingsResponse.getSetting("target", "index.sort.order"), "desc");
    assertSortedSegments("target", expectedIndexSort);
    // ... and that the index sort is also applied to updates
    for (int i = 20; i < 40; i++) {
        client().prepareIndex("target", "type")
                .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
    }
    flushAndRefresh();
    assertSortedSegments("target", expectedIndexSort);
}
From source file:org.elasticsearch.action.admin.indices.create.SplitIndexIT.java
License:Apache License
public void testCreateSplitWithIndexSort() throws Exception {
    SortField expectedSortField = new SortedSetSortField("id", true, SortedSetSelector.Type.MAX);
    expectedSortField.setMissingValue(SortedSetSortField.STRING_FIRST);
    Sort expectedIndexSort = new Sort(expectedSortField);
    internalCluster().ensureAtLeastNumDataNodes(2);
    prepareCreate("source")
            .setSettings(Settings.builder().put(indexSettings()).put("sort.field", "id")
                    .put("sort.order", "desc").put("number_of_shards", 2).put("number_of_replicas", 0))
            .addMapping("type", "id", "type=keyword,doc_values=true").get();
    for (int i = 0; i < 20; i++) {
        client().prepareIndex("source", "type", Integer.toString(i))
                .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON).get();
    }
    ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get()
            .getState().nodes().getDataNodes();
    assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
    DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
    String mergeNode = discoveryNodes[0].getName();
    // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node.
    // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
    // to the require._name below.
    ensureGreen();
    flushAndRefresh();
    assertSortedSegments("source", expectedIndexSort);
    client().admin().indices().prepareUpdateSettings("source")
            .setSettings(Settings.builder().put("index.blocks.write", true)).get();
    ensureYellow();
    // check that index sort cannot be set on the target index
    IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
            () -> client().admin().indices().prepareResizeIndex("source", "target")
                    .setResizeType(ResizeType.SPLIT)
                    .setSettings(Settings.builder().put("index.number_of_replicas", 0)
                            .put("index.number_of_shards", 4).put("index.sort.field", "foo").build())
                    .get());
    assertThat(exc.getMessage(), containsString("can't override index sort when resizing an index"));
    // check that the index sort order of `source` is correctly applied to the `target`
    assertAcked(client().admin().indices().prepareResizeIndex("source", "target")
            .setResizeType(ResizeType.SPLIT).setSettings(Settings.builder().put("index.number_of_replicas", 0)
                    .put("index.number_of_shards", 4).build())
            .get());
    ensureGreen();
    flushAndRefresh();
    GetSettingsResponse settingsResponse = client().admin().indices().prepareGetSettings("target").execute()
            .actionGet();
    assertEquals(settingsResponse.getSetting("target", "index.sort.field"), "id");
    assertEquals(settingsResponse.getSetting("target", "index.sort.order"), "desc");
    assertSortedSegments("target", expectedIndexSort);
    // ... and that the index sort is also applied to updates
    for (int i = 20; i < 40; i++) {
        client().prepareIndex("target", "type")
                .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
    }
    flushAndRefresh();
    assertSortedSegments("target", expectedIndexSort);
}
From source file:org.elasticsearch.action.search.SearchPhaseController.java
License:Apache License
/**
 * Returns a score doc array of top N search docs across all shards, followed by top suggest docs for each
 * named completion suggestion across all shards. If more than one named completion suggestion is specified in the
 * request, the suggest docs for a named suggestion are ordered by the suggestion name.
 *
 * Note: The order of the sorted score docs depends on the shard index in the result array if the merge process
 * needs to disambiguate the result. In order to obtain stable results the shard index (index of the result in the
 * result array) must be the same.
 *
 * @param ignoreFrom Whether to ignore the from and sort all hits in each shard result.
 *                   Enabled only for scroll search, because that only retrieves hits of length 'size' in the query phase.
 * @param resultsArr Shard result holder
 */
public ScoreDoc[] sortDocs(boolean ignoreFrom, AtomicArray<? extends QuerySearchResultProvider> resultsArr)
        throws IOException {
    List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> results = resultsArr.asList();
    if (results.isEmpty()) {
        return EMPTY_DOCS;
    }
    final QuerySearchResult result;
    boolean canOptimize = false;
    int shardIndex = -1;
    if (results.size() == 1) {
        canOptimize = true;
        result = results.get(0).value.queryResult();
        shardIndex = results.get(0).index;
    } else {
        boolean hasResult = false;
        QuerySearchResult resultToOptimize = null;
        // lets see if we only got hits from a single shard, if so, we can optimize...
        for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : results) {
            if (entry.value.queryResult().hasHits()) {
                if (hasResult) {
                    // we already have one, can't really optimize
                    canOptimize = false;
                    break;
                }
                canOptimize = true;
                hasResult = true;
                resultToOptimize = entry.value.queryResult();
                shardIndex = entry.index;
            }
        }
        result = canOptimize ? resultToOptimize : results.get(0).value.queryResult();
        assert result != null;
    }
    if (canOptimize) {
        int offset = result.from();
        if (ignoreFrom) {
            offset = 0;
        }
        ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
        ScoreDoc[] docs;
        int numSuggestDocs = 0;
        final Suggest suggest = result.queryResult().suggest();
        final List<CompletionSuggestion> completionSuggestions;
        if (suggest != null) {
            completionSuggestions = suggest.filter(CompletionSuggestion.class);
            for (CompletionSuggestion suggestion : completionSuggestions) {
                numSuggestDocs += suggestion.getOptions().size();
            }
        } else {
            completionSuggestions = Collections.emptyList();
        }
        int docsOffset = 0;
        if (scoreDocs.length == 0 || scoreDocs.length < offset) {
            docs = new ScoreDoc[numSuggestDocs];
        } else {
            int resultDocsSize = result.size();
            if ((scoreDocs.length - offset) < resultDocsSize) {
                resultDocsSize = scoreDocs.length - offset;
            }
            docs = new ScoreDoc[resultDocsSize + numSuggestDocs];
            for (int i = 0; i < resultDocsSize; i++) {
                ScoreDoc scoreDoc = scoreDocs[offset + i];
                scoreDoc.shardIndex = shardIndex;
                docs[i] = scoreDoc;
                docsOffset++;
            }
        }
        for (CompletionSuggestion suggestion : completionSuggestions) {
            for (CompletionSuggestion.Entry.Option option : suggestion.getOptions()) {
                ScoreDoc doc = option.getDoc();
                doc.shardIndex = shardIndex;
                docs[docsOffset++] = doc;
            }
        }
        return docs;
    }
    final int topN = result.queryResult().size();
    final int from = ignoreFrom ? 0 : result.queryResult().from();
    final TopDocs mergedTopDocs;
    final int numShards = resultsArr.length();
    if (result.queryResult().topDocs() instanceof CollapseTopFieldDocs) {
        CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) result.queryResult().topDocs();
        final Sort sort = new Sort(firstTopDocs.fields);
        final CollapseTopFieldDocs[] shardTopDocs = new CollapseTopFieldDocs[numShards];
        fillTopDocs(shardTopDocs, results, new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0],
                sort.getSort(), new Object[0], Float.NaN));
        mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs);
    } else if (result.queryResult().topDocs() instanceof TopFieldDocs) {
        TopFieldDocs firstTopDocs = (TopFieldDocs) result.queryResult().topDocs();
        final Sort sort = new Sort(firstTopDocs.fields);
        final TopFieldDocs[] shardTopDocs = new TopFieldDocs[resultsArr.length()];
        fillTopDocs(shardTopDocs, results, new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN));
        mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs, true);
    } else {
        final TopDocs[] shardTopDocs = new TopDocs[resultsArr.length()];
        fillTopDocs(shardTopDocs, results, Lucene.EMPTY_TOP_DOCS);
        mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs, true);
    }
    ScoreDoc[] scoreDocs = mergedTopDocs.scoreDocs;
    final Map<String, List<Suggestion<CompletionSuggestion.Entry>>> groupedCompletionSuggestions = new HashMap<>();
    // group suggestions and assign shard index
    for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
        Suggest shardSuggest = sortedResult.value.queryResult().suggest();
        if (shardSuggest != null) {
            for (CompletionSuggestion suggestion : shardSuggest.filter(CompletionSuggestion.class)) {
                suggestion.setShardIndex(sortedResult.index);
                List<Suggestion<CompletionSuggestion.Entry>> suggestions = groupedCompletionSuggestions
                        .computeIfAbsent(suggestion.getName(), s -> new ArrayList<>());
                suggestions.add(suggestion);
            }
        }
    }
    if (groupedCompletionSuggestions.isEmpty() == false) {
        int numSuggestDocs = 0;
        List<Suggestion<? extends Entry<? extends Entry.Option>>> completionSuggestions = new ArrayList<>(
                groupedCompletionSuggestions.size());
        for (List<Suggestion<CompletionSuggestion.Entry>> groupedSuggestions : groupedCompletionSuggestions
                .values()) {
            final CompletionSuggestion completionSuggestion = CompletionSuggestion.reduceTo(groupedSuggestions);
            assert completionSuggestion != null;
            numSuggestDocs += completionSuggestion.getOptions().size();
            completionSuggestions.add(completionSuggestion);
        }
        scoreDocs = new ScoreDoc[mergedTopDocs.scoreDocs.length + numSuggestDocs];
        System.arraycopy(mergedTopDocs.scoreDocs, 0, scoreDocs, 0, mergedTopDocs.scoreDocs.length);
        int offset = mergedTopDocs.scoreDocs.length;
        Suggest suggestions = new Suggest(completionSuggestions);
        for (CompletionSuggestion completionSuggestion : suggestions.filter(CompletionSuggestion.class)) {
            for (CompletionSuggestion.Entry.Option option : completionSuggestion.getOptions()) {
                scoreDocs[offset++] = option.getDoc();
            }
        }
    }
    return scoreDocs;
}
From source file:org.elasticsearch.deps.lucene.SimpleLuceneTests.java
License:Apache License
@Test
public void testSortValues() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir,
            new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
    for (int i = 0; i < 10; i++) {
        Document document = new Document();
        document.add(new TextField("str", new String(new char[] { (char) (97 + i), (char) (97 + i) }),
                Field.Store.YES));
        indexWriter.addDocument(document);
    }
    IndexReader reader = DirectoryReader.open(indexWriter, true);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), null, 10,
            new Sort(new SortField("str", SortField.Type.STRING)));
    for (int i = 0; i < 10; i++) {
        FieldDoc fieldDoc = (FieldDoc) docs.scoreDocs[i];
        assertThat((BytesRef) fieldDoc.fields[0],
                equalTo(new BytesRef(new String(new char[] { (char) (97 + i), (char) (97 + i) }))));
    }
}
From source file:org.elasticsearch.index.engine.SegmentTests.java
License:Apache License
static Sort randomIndexSort() {
    if (randomBoolean()) {
        return null;
    }
    int size = randomIntBetween(1, 5);
    SortField[] fields = new SortField[size];
    for (int i = 0; i < size; i++) {
        fields[i] = randomSortField();
    }
    return new Sort(fields);
}
From source file:org.elasticsearch.index.fielddata.AbstractFieldDataImplTestCase.java
License:Apache License
@Test
public void testSingleValueAllSet() throws Exception {
    fillSingleValueAllSet();
    IndexFieldData indexFieldData = getForField("value");
    LeafReaderContext readerContext = refreshReader();
    AtomicFieldData fieldData = indexFieldData.load(readerContext);
    assertThat(fieldData.ramBytesUsed(), greaterThanOrEqualTo(minRamBytesUsed()));
    SortedBinaryDocValues bytesValues = fieldData.getBytesValues();
    bytesValues.setDocument(0);
    assertThat(bytesValues.count(), equalTo(1));
    assertThat(bytesValues.valueAt(0), equalTo(new BytesRef(two())));
    bytesValues.setDocument(1);
    assertThat(bytesValues.count(), equalTo(1));
    assertThat(bytesValues.valueAt(0), equalTo(new BytesRef(one())));
    bytesValues.setDocument(2);
    assertThat(bytesValues.count(), equalTo(1));
    assertThat(bytesValues.valueAt(0), equalTo(new BytesRef(three())));
    assertValues(bytesValues, 0, two());
    assertValues(bytesValues, 1, one());
    assertValues(bytesValues, 2, three());
    IndexSearcher searcher = new IndexSearcher(readerContext.reader());
    TopFieldDocs topDocs;
    topDocs = searcher.search(new MatchAllDocsQuery(), 10,
            new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MIN, null))));
    assertThat(topDocs.totalHits, equalTo(3));
    assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
    assertThat(toString(((FieldDoc) topDocs.scoreDocs[0]).fields[0]), equalTo(one()));
    assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
    assertThat(toString(((FieldDoc) topDocs.scoreDocs[1]).fields[0]), equalTo(two()));
    assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
    assertThat(toString(((FieldDoc) topDocs.scoreDocs[2]).fields[0]), equalTo(three()));
    topDocs = searcher.search(new MatchAllDocsQuery(), 10,
            new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MAX, null),
                    true)));
    assertThat(topDocs.totalHits, equalTo(3));
    assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
    assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
    assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
}