Example usage for org.apache.solr.common SolrDocumentList getMaxScore

List of usage examples for org.apache.solr.common SolrDocumentList getMaxScore

Introduction

On this page you can find example usage of org.apache.solr.common SolrDocumentList getMaxScore.

Prototype

public Float getMaxScore() 

Source Link

Usage

From source file:net.hasor.search.server.rsf.service.SorlSearchService.java

License:Apache License

@Override
@Override
public QuerySearchResult query(SearchQuery searchQuery) throws Throwable {
    // Build the Solr query straight from the caller's parameter map.
    SolrQuery solrQuery = new SolrQuery();
    solrQuery.add(new MultiMapSolrParams(searchQuery.toMap()));
    QueryResponse response = getSolrClient().query(solrQuery);
    SolrDocumentList docList = response.getResults();

    // Convert each Solr document into the service's SearchDocument type.
    List<SearchDocument> documentList = new ArrayList<SearchDocument>();
    if (docList != null) {
        for (SolrDocument solrDocument : docList) {
            SearchDocument document = convetTo(solrDocument);
            documentList.add(document);
        }
    }

    QuerySearchResult searchResult = new QuerySearchResult(documentList);
    searchResult.setElapsedTime(response.getElapsedTime());
    // Guard: docList can be null (the null-check above acknowledges this),
    // but the original dereferenced it unconditionally here, which would
    // throw a NullPointerException for responses without a result section.
    if (docList != null) {
        searchResult.setMaxScore(docList.getMaxScore());
        searchResult.setNumFound(docList.getNumFound());
        searchResult.setStart(docList.getStart());
    }
    searchResult.setStatus(response.getStatus());
    searchResult.setQueryTime(response.getQTime());
    return searchResult;
}

From source file:net.yacy.cora.federate.solr.instance.ResponseAccumulator.java

License:Open Source License

/**
 * Merges one raw Solr response into this accumulator's running state:
 * the header (taken from the first response only), the result documents
 * (with numFound summed and maxScore merged via max), plus highlighting,
 * facet counts, index counts and schema/field listings (appended as-is).
 *
 * @param response a raw Solr response as a NamedList
 */
public void addResponse(NamedList<Object> response) {
    // set the header; this is mostly always the same (well this is not evaluated much)
    @SuppressWarnings("unchecked")
    SimpleOrderedMap<Object> header = (SimpleOrderedMap<Object>) response.get("responseHeader");
    //Integer status = (Integer) header.get("status");
    //Integer QTime = (Integer) header.get("QTime");
    //SimpleOrderedMap<Object> params = (SimpleOrderedMap<Object>) header.get("params");
    if (headerAcc.size() == 0) {
        // only the first response contributes header entries
        for (Map.Entry<String, Object> e : header)
            headerAcc.add(e.getKey(), e.getValue());
    }

    // accumulate the results
    SolrDocumentList results = (SolrDocumentList) response.get("response");
    if (results != null) {
        long found = results.size();
        for (int i = 0; i < found; i++)
            resultsAcc.add(results.get(i));
        resultsAcc.setNumFound(resultsAcc.getNumFound() + results.getNumFound());
        // merge maxScore, treating an absent (null) score as 0f on either side
        resultsAcc.setMaxScore(
                Math.max(resultsAcc.getMaxScore() == null ? 0f : resultsAcc.getMaxScore().floatValue(),
                        results.getMaxScore() == null ? 0f : results.getMaxScore().floatValue()));
    }

    // accumulate the highlighting
    @SuppressWarnings("unchecked")
    SimpleOrderedMap<Object> highlighting = (SimpleOrderedMap<Object>) response.get("highlighting");
    if (highlighting != null) {
        for (Map.Entry<String, Object> e : highlighting)
            highlightingAcc.add(e.getKey(), e.getValue());
    }

    // accumulate the facets (well this is not correct at this time...)
    @SuppressWarnings("unchecked")
    SimpleOrderedMap<Object> facet_counts = (SimpleOrderedMap<Object>) response.get("facet_counts");
    if (facet_counts != null) {
        for (Map.Entry<String, Object> e : facet_counts)
            facet_countsAcc.add(e.getKey(), e.getValue());
    }

    // accumulate the index
    @SuppressWarnings("unchecked")
    SimpleOrderedMap<Object> index_counts = (SimpleOrderedMap<Object>) response.get("index");
    if (index_counts != null) {
        for (Map.Entry<String, Object> e : index_counts)
            index_countsAcc.add(e.getKey(), e.getValue());
    }

    // accumulate the fields; they may arrive nested under "schema" or top-level
    // under "fields" — both paths feed the same fieldsAcc
    @SuppressWarnings("unchecked")
    SimpleOrderedMap<Object> schema = (SimpleOrderedMap<Object>) response.get("schema");
    if (schema != null) {
        @SuppressWarnings("unchecked")
        SimpleOrderedMap<Object> fields = (SimpleOrderedMap<Object>) schema.get("fields");
        if (fields != null) {
            for (Map.Entry<String, Object> e : fields)
                fieldsAcc.add(e.getKey(), e.getValue());
        }
    }
    @SuppressWarnings("unchecked")
    SimpleOrderedMap<Object> fields = (SimpleOrderedMap<Object>) response.get("fields");
    if (fields != null) {
        for (Map.Entry<String, Object> e : fields)
            fieldsAcc.add(e.getKey(), e.getValue());
    }
}

From source file:net.yacy.cora.federate.solr.responsewriter.EnhancedXMLResponseWriter.java

License:Open Source License

/**
 * Serializes a SolrDocumentList as an XML {@code <result>} element with
 * numFound/start attributes, followed by one {@code <doc>} per document.
 *
 * NOTE(review): includeScore is hard-coded to false, so the maxScore
 * attribute branch below is currently dead code. If it is ever enabled,
 * docs.getMaxScore() may be null and Float.toString would then throw a
 * NullPointerException — confirm before activating.
 */
private static final void writeDocs(final Writer writer,
        @SuppressWarnings("unused") final SolrQueryRequest request, final SolrDocumentList docs)
        throws IOException {
    boolean includeScore = false; // never set to true in this method
    final int sz = docs.size();
    writer.write("<result");
    writeAttr(writer, "name", "response");
    writeAttr(writer, "numFound", Long.toString(docs.getNumFound()));
    writeAttr(writer, "start", Long.toString(docs.getStart()));
    if (includeScore) {
        writeAttr(writer, "maxScore", Float.toString(docs.getMaxScore()));
    }
    if (sz == 0) {
        // empty result: emit a self-closing element and stop
        writer.write("/>");
        return;
    }
    writer.write('>');
    writer.write(lb);
    Iterator<SolrDocument> iterator = docs.iterator();
    for (int i = 0; i < sz; i++) {
        SolrDocument doc = iterator.next();
        writeDoc(writer, doc);
    }
    writer.write("</result>");
    writer.write(lb);
}

From source file:nl.knaw.dans.common.solr.converter.SolrQueryResponseConverter.java

License:Apache License

/**
 * Converts a SolrJ QueryResponse into a SimpleSearchResult: paging info,
 * per-document fields, normalized relevance scores, highlight snippets and
 * facet fields.
 *
 * @param queryResponse the raw Solr response
 * @param index         index metadata used to locate the primary-key field;
 *                      may be null, in which case no key is resolved
 * @return the converted search result
 */
public static SimpleSearchResult<Document> convert(QueryResponse queryResponse, Index index) {
    SimpleSearchResult<Document> result = new SimpleSearchResult<Document>();

    Map<String, Map<String, List<String>>> hl = queryResponse.getHighlighting();
    SolrDocumentList sdl = queryResponse.getResults();

    // paging info
    result.setNumFound((int) sdl.getNumFound());

    // getMaxScore() is null when scores were not requested; capture it once and
    // guard every use (the original auto-unboxed it unconditionally, which
    // throws a NullPointerException for score-less responses).
    Float maxScore = sdl.getMaxScore();

    // Relevance scores in Solr are calculated from the base
    // of 1.0f. If any document is scored any different then 
    // the maximum relevance score is not 1.0f anymore. The
    // chances of a maximum relevance score of 1.0f with actual
    // meaning is pretty slim. This therefore assumes that if
    // a maximum relevance score of 1.0f is returned that it
    // is then better for the receiver of the search results
    // to ignore the relevancy score completely.
    result.setUseRelevanceScore(maxScore != null && maxScore.floatValue() != 1.0f);

    // add the documents
    List<SearchHit<Document>> hits = new ArrayList<SearchHit<Document>>(sdl.size());
    for (SolrDocument solrDoc : sdl) {
        // Don't change class type! SimpleSearchHit is assumed in SolrSearchEngine!
        SimpleDocument resultDoc = new SimpleDocument();
        float score = 0;
        List<SnippetField> snippetFields = null;
        // Reset per document: the original declared this outside the loop, so a
        // document lacking the primary-key field silently inherited the previous
        // document's key and picked up the wrong highlight snippets.
        String primaryKeyValue = null;

        // copy all fields
        for (Entry<String, Object> fieldEntry : solrDoc.entrySet()) {
            if (index != null) {
                if (fieldEntry.getKey().equals(index.getPrimaryKey())) {
                    primaryKeyValue = fieldEntry.getValue().toString();
                }
            }

            if (fieldEntry.getKey().equals("score")) {
                // normalize against the maximum score; fall back to the raw
                // score if no maximum is available
                float rawScore = ((Float) fieldEntry.getValue()).floatValue();
                score = maxScore != null ? rawScore / maxScore.floatValue() : rawScore;
            } else {
                SimpleField<Object> field = new SimpleField<Object>(fieldEntry.getKey(), fieldEntry.getValue());
                resultDoc.addField(field);
            }
        }

        // add highlight info to SearchHit
        if (hl != null && primaryKeyValue != null) {
            Map<String, List<String>> hlMap = hl.get(primaryKeyValue);
            if (hlMap != null && hlMap.size() > 0) {
                snippetFields = new ArrayList<SnippetField>(hlMap.size());
                for (Entry<String, List<String>> hlEntry : hlMap.entrySet()) {
                    SimpleSnippetField snippetField = new SimpleSnippetField(hlEntry.getKey(),
                            hlEntry.getValue());
                    snippetFields.add(snippetField);
                }
            }
        }

        SimpleSearchHit<Document> hit = new SimpleSearchHit<Document>(resultDoc);
        hit.setRelevanceScore(score);
        if (snippetFields != null)
            hit.setSnippets(snippetFields);
        hits.add(hit);
    }
    result.setHits(hits);

    // add facet fields to response
    List<org.apache.solr.client.solrj.response.FacetField> solrFacets = queryResponse.getFacetFields();
    if (solrFacets != null) {
        List<FacetField> facetFields = new ArrayList<FacetField>(solrFacets.size());
        for (org.apache.solr.client.solrj.response.FacetField solrFacet : solrFacets) {
            List<Count> solrFacetValues = solrFacet.getValues();
            if (solrFacetValues == null)
                continue;
            List<FacetValue<?>> facetValues = new ArrayList<FacetValue<?>>(solrFacetValues.size());
            for (Count solrFacetValue : solrFacetValues) {
                SimpleFacetValue<String> facetValue = new SimpleFacetValue<String>();
                facetValue.setCount((int) solrFacetValue.getCount());
                facetValue.setValue(solrFacetValue.getName());
                facetValues.add(facetValue);
            }

            facetFields.add(new SimpleFacetField(solrFacet.getName(), facetValues));
        }
        result.setFacets(facetFields);
    }

    return result;
}

From source file:org.ambraproject.search.service.SolrSearchService.java

License:Apache License

/**
 * Converts a Solr QueryResponse into a SearchResultSinglePage: logs query
 * diagnostics (including spell-check suggestions), maps each document to a
 * SearchHit, and attaches the standard facet fields.
 *
 * @param queryResponse the raw Solr response
 * @param query         the query that produced the response (used for
 *                      logging and highlight lookup)
 * @return the assembled result page
 */
private SearchResultSinglePage readQueryResults(QueryResponse queryResponse, SolrQuery query) {
    SolrDocumentList documentList = queryResponse.getResults();

    if (log.isInfoEnabled()) {
        StringBuilder filterQueriesForLog = new StringBuilder();
        if (query.getFilterQueries() != null && query.getFilterQueries().length > 0) {
            for (String filterQuery : query.getFilterQueries()) {
                filterQueriesForLog.append(filterQuery).append(" , ");
            }
            if (filterQueriesForLog.length() > 3) {
                filterQueriesForLog.replace(filterQueriesForLog.length() - 3, filterQueriesForLog.length(), "");
            } else {
                filterQueriesForLog.append("No Filter Queries");
            }
        }

        log.info("query.getQuery():{ " + query.getQuery() + " }" + ", query.getSortFields():{ "
                + (query.getSortFields() == null ? null : Arrays.asList(query.getSortFields())) + " }"
                + ", query.getFilterQueries():{ " + filterQueriesForLog.toString() + " }" + ", found:"
                + documentList.getNumFound() + ", start:" + documentList.getStart() + ", max_score:"
                + documentList.getMaxScore() + ", QTime:" + queryResponse.getQTime() + "ms");

        // TODO: implement spell-checking in a meaningful manner.  This loop exists only to generate log output.
        // TODO: Add "spellcheckAlternatives" or something like it to the SearchHits class so it can be displayed to the user like Google's "did you mean..."
        // TODO: Turn off spellchecking for the "author" field.
        if (queryResponse.getSpellCheckResponse() != null
                && queryResponse.getSpellCheckResponse().getSuggestionMap() != null
                && queryResponse.getSpellCheckResponse().getSuggestionMap().keySet().size() > 0) {
            StringBuilder sb = new StringBuilder("Spellcheck alternative suggestions:");
            for (String token : queryResponse.getSpellCheckResponse().getSuggestionMap().keySet()) {
                sb.append(" { ").append(token).append(" : ");
                if (queryResponse.getSpellCheckResponse().getSuggestionMap().get(token).getAlternatives()
                        .size() < 1) {
                    sb.append("NO ALTERNATIVES");
                } else {
                    for (String alternative : queryResponse.getSpellCheckResponse().getSuggestionMap()
                            .get(token).getAlternatives()) {
                        sb.append(alternative).append(", ");
                    }
                    sb.replace(sb.length() - 2, sb.length(), ""); // Remove last comma and space.
                }
                sb.append(" } ,");
            }
            log.info(sb.replace(sb.length() - 2, sb.length(), "").toString()); // Remove last comma and space.
        } else {
            log.info("Solr thinks everything in the query is spelled correctly.");
        }
    }

    Map<String, Map<String, List<String>>> highlightings = queryResponse.getHighlighting();

    List<SearchHit> searchResults = new ArrayList<SearchHit>();
    for (SolrDocument document : documentList) {

        String id = getFieldValue(document, "id", String.class, query.toString());
        String message = id == null ? query.toString() : id;
        Float score = getFieldValue(document, "score", Float.class, message);
        String title = getFieldValue(document, "title_display", String.class, message);
        Date publicationDate = getFieldValue(document, "publication_date", Date.class, message);
        String eissn = getFieldValue(document, "eissn", String.class, message);
        String journal = getFieldValue(document, "journal", String.class, message);
        String articleType = getFieldValue(document, "article_type", String.class, message);

        List<String> authorList = getFieldMultiValue(document, message, String.class, "author_display");

        String highlights = null;
        // Guard: getHighlighting() can return null even when highlighting was
        // requested; the original dereferenced it unconditionally here and
        // could throw a NullPointerException.
        if (query.getHighlight() && highlightings != null) {
            highlights = getHighlights(highlightings.get(id));
        }

        SearchHit hit = new SearchHit(score, id, title, highlights, authorList, publicationDate, eissn, journal,
                articleType);

        if (log.isDebugEnabled())
            log.debug(hit.toString());

        searchResults.add(hit);
    }

    //here we assume that number of hits is always going to be withing range of int
    SearchResultSinglePage results = new SearchResultSinglePage((int) documentList.getNumFound(), -1,
            searchResults, query.getQuery());

    if (queryResponse.getFacetField("subject_facet") != null) {
        results.setSubjectFacet(facetCountsToHashMap(queryResponse.getFacetField("subject_facet")));
    }

    if (queryResponse.getFacetField("author_facet") != null) {
        results.setAuthorFacet(facetCountsToHashMap(queryResponse.getFacetField("author_facet")));
    }

    if (queryResponse.getFacetField("editor_facet") != null) {
        results.setEditorFacet(facetCountsToHashMap(queryResponse.getFacetField("editor_facet")));
    }

    if (queryResponse.getFacetField("article_type_facet") != null) {
        results.setArticleTypeFacet(facetCountsToHashMap(queryResponse.getFacetField("article_type_facet")));
    }

    if (queryResponse.getFacetField("affiliate_facet") != null) {
        results.setInstitutionFacet(facetCountsToHashMap(queryResponse.getFacetField("affiliate_facet")));
    }

    if (queryResponse.getFacetField("cross_published_journal_key") != null) {
        results.setJournalFacet(
                facetCountsToHashMap(queryResponse.getFacetField("cross_published_journal_key")));
    }

    return results;
}

From source file:org.ambraproject.service.search.SolrSearchService.java

License:Apache License

@SuppressWarnings("unchecked")
/**
 * Converts a Solr QueryResponse into a SearchResultSinglePage: logs query
 * diagnostics (including spell-check suggestions), maps each document to a
 * SearchHit built via SearchHit.builder(), flattens subject hierarchies,
 * and attaches the standard facet fields (filtering top-level subjects out
 * of the subject facet per FEND-805).
 */
@SuppressWarnings("unchecked")
private SearchResultSinglePage readQueryResults(QueryResponse queryResponse, SolrQuery query) {
    SolrDocumentList documentList = queryResponse.getResults();

    if (log.isInfoEnabled()) {
        StringBuilder filterQueriesForLog = new StringBuilder();
        if (query.getFilterQueries() != null && query.getFilterQueries().length > 0) {
            for (String filterQuery : query.getFilterQueries()) {
                filterQueriesForLog.append(filterQuery).append(" , ");
            }
            if (filterQueriesForLog.length() > 3) {
                filterQueriesForLog.replace(filterQueriesForLog.length() - 3, filterQueriesForLog.length(), "");
            } else {
                filterQueriesForLog.append("No Filter Queries");
            }
        }

        // NOTE: getMaxScore() may be null here; string concatenation renders it as "null"
        log.info("query.getQuery():{ " + query.getQuery() + " }" + ", query.getSortFields():{ "
                + (query.getSortFields() == null ? null : Arrays.asList(query.getSortFields())) + " }"
                + ", query.getFilterQueries():{ " + filterQueriesForLog.toString() + " }" + ", found:"
                + documentList.getNumFound() + ", start:" + documentList.getStart() + ", max_score:"
                + documentList.getMaxScore() + ", QTime:" + queryResponse.getQTime() + "ms");

        // TODO: implement spell-checking in a meaningful manner.  This loop exists only to generate log output.
        // TODO: Add "spellcheckAlternatives" or something like it to the SearchHits class so it can be displayed to the user like Google's "did you mean..."
        // TODO: Turn off spellchecking for the "author" field.
        if (queryResponse.getSpellCheckResponse() != null
                && queryResponse.getSpellCheckResponse().getSuggestionMap() != null
                && queryResponse.getSpellCheckResponse().getSuggestionMap().keySet().size() > 0) {
            StringBuilder sb = new StringBuilder("Spellcheck alternative suggestions:");
            for (String token : queryResponse.getSpellCheckResponse().getSuggestionMap().keySet()) {
                sb.append(" { ").append(token).append(" : ");
                if (queryResponse.getSpellCheckResponse().getSuggestionMap().get(token).getAlternatives()
                        .size() < 1) {
                    sb.append("NO ALTERNATIVES");
                } else {
                    for (String alternative : queryResponse.getSpellCheckResponse().getSuggestionMap()
                            .get(token).getAlternatives()) {
                        sb.append(alternative).append(", ");
                    }
                    sb.replace(sb.length() - 2, sb.length(), ""); // Remove last comma and space.
                }
                sb.append(" } ,");
            }
            log.info(sb.replace(sb.length() - 2, sb.length(), "").toString()); // Remove last comma and space.
        } else {
            log.info("Solr thinks everything in the query is spelled correctly.");
        }
    }

    List<SearchHit> searchResults = new ArrayList<SearchHit>();
    for (SolrDocument document : documentList) {

        // extract the scalar and multi-valued fields for this document;
        // "message" is the context string used in field-extraction error reports
        String id = SolrServiceUtil.getFieldValue(document, "id", String.class, query.toString());
        String message = id == null ? query.toString() : id;
        Float score = SolrServiceUtil.getFieldValue(document, "score", Float.class, message);
        String title = SolrServiceUtil.getFieldValue(document, "title_display", String.class, message);
        Date publicationDate = SolrServiceUtil.getFieldValue(document, "publication_date", Date.class, message);
        String eissn = SolrServiceUtil.getFieldValue(document, "eissn", String.class, message);
        String journal = SolrServiceUtil.getFieldValue(document, "journal", String.class, message);
        String articleType = SolrServiceUtil.getFieldValue(document, "article_type", String.class, message);
        String strikingImage = SolrServiceUtil.getFieldValue(document, "striking_image", String.class, message);
        List<String> abstractText = SolrServiceUtil.getFieldMultiValue(document, "abstract", String.class,
                message);
        List<String> abstractPrimary = SolrServiceUtil.getFieldMultiValue(document, "abstract_primary_display",
                String.class, message);
        List<String> authorList = SolrServiceUtil.getFieldMultiValue(document, "author_display", String.class,
                message);
        // TODO create a dedicated field for checking the existence of assets for a given article.
        List<String> figureTableCaptions = SolrServiceUtil.getFieldMultiValue(document, "figure_table_caption",
                String.class, message);
        List<String> subjects = SolrServiceUtil.getFieldMultiValue(document, "subject", String.class, message);
        List<String> expressionOfconcern = SolrServiceUtil.getFieldMultiValue(document, "expression_of_concern",
                String.class, message);
        String retraction = SolrServiceUtil.getFieldValue(document, "retraction", String.class, message);
        String abstractResult = "";

        //Use the primary abstract if it exists
        if (abstractPrimary.size() > 0) {
            abstractResult = StringUtils.join(abstractPrimary, ", ");
        } else {
            if (abstractText.size() > 0) {
                abstractResult = StringUtils.join(abstractText, ", ");
            }
        }

        //Flatten the list of subjects to a unique set
        for (String subject : subjects) {
            for (String temp : subject.split("/")) {
                if (temp.trim().length() > 0) {
                    flattenedSubjects.add(temp);
                }
            }
        }

        SearchHit hit = SearchHit.builder().setHitScore(score).setUri(id).setTitle(title)
                .setListOfCreators(authorList).setDate(publicationDate).setIssn(eissn).setJournalTitle(journal)
                .setArticleTypeForDisplay(articleType).setAbstractText(abstractResult)
                .setStrikingImage(strikingImage).setHasAssets(figureTableCaptions.size() > 0)
                .setSubjects(flattenedSubjects).setSubjectsPolyhierarchy(subjects)
                .setExpressionOfConcern(expressionOfconcern).setRetraction(retraction).build();

        if (log.isDebugEnabled())
            log.debug(hit.toString());

        searchResults.add(hit);
    }

    //here we assume that number of hits is always going to be withing range of int
    SearchResultSinglePage results = new SearchResultSinglePage((int) documentList.getNumFound(), -1,
            searchResults, query.getQuery());

    if (queryResponse.getFacetField("subject_facet") != null) {
        List<Map> subjects = facetCountsToHashMap(queryResponse.getFacetField("subject_facet"));

        if (subjects != null) {
            List<Map> subjectResult = new ArrayList<Map>();
            SortedMap<String, Long> topSubjects = null;

            try {
                topSubjects = getTopSubjects();
            } catch (ApplicationException ex) {
                throw new RuntimeException(ex.getMessage(), ex);
            }

            //Remove top level 1 subjects from list, FEND-805
            for (Map<String, Object> m : subjects) {
                if (!topSubjects.containsKey(m.get("name"))) {
                    HashMap<String, Object> hm = new HashMap<String, Object>();
                    hm.put("name", m.get("name"));
                    hm.put("count", m.get("count"));
                    subjectResult.add(hm);
                }
            }

            results.setSubjectFacet(subjectResult);
        } else {
            results.setSubjectFacet(null);
        }
    }

    if (queryResponse.getFacetField("author_facet") != null) {
        results.setAuthorFacet(facetCountsToHashMap(queryResponse.getFacetField("author_facet")));
    }

    if (queryResponse.getFacetField("editor_facet") != null) {
        results.setEditorFacet(facetCountsToHashMap(queryResponse.getFacetField("editor_facet")));
    }

    if (queryResponse.getFacetField("article_type_facet") != null) {
        results.setArticleTypeFacet(facetCountsToHashMap(queryResponse.getFacetField("article_type_facet")));
    }

    if (queryResponse.getFacetField("affiliate_facet") != null) {
        results.setInstitutionFacet(facetCountsToHashMap(queryResponse.getFacetField("affiliate_facet")));
    }

    if (queryResponse.getFacetField("cross_published_journal_key") != null) {
        results.setJournalFacet(
                facetCountsToHashMap(queryResponse.getFacetField("cross_published_journal_key")));
    }

    return results;
}

From source file:org.codelibs.elasticsearch.solr.solr.XMLWriter.java

License:Apache License

/**
 * Serializes a SolrDocumentList under the given element name by adapting it
 * to the DocumentListInfo callback interface and delegating the actual
 * output to writeDocuments.
 *
 * @param name   element name for the serialized list
 * @param docs   the Solr documents to write
 * @param fields the field names to include for each document
 * @throws IOException if writing fails
 */
public final void writeSolrDocumentList(final String name, final SolrDocumentList docs,
        final Set<String> fields) throws IOException {
    writeDocuments(name, new DocumentListInfo() {
        @Override
        public long getNumFound() {
            return docs.getNumFound();
        }

        @Override
        public long getStart() {
            return docs.getStart();
        }

        @Override
        public int getCount() {
            return docs.size();
        }

        @Override
        public Float getMaxScore() {
            return docs.getMaxScore();
        }

        @Override
        public void writeDocs(final boolean includeScore, final Set<String> fieldSet) throws IOException {
            // emit each document through the enclosing writer
            for (final SolrDocument document : docs) {
                XMLWriter.this.writeDoc(null, document, fieldSet, includeScore);
            }
        }
    }, fields);
}

From source file:org.craftercms.search.service.impl.SolrSearchService.java

License:Open Source License

@SuppressWarnings("unchecked")
/**
 * Normalizes a value taken from a Solr NamedList into something directly
 * serializable: NamedList values become maps (recursively), SolrDocumentList
 * values become a map of their list attributes plus the contained documents,
 * and anything else is returned unchanged.
 *
 * @param namedListValue the raw value from a NamedList entry
 * @return a serializable representation of the value
 */
@SuppressWarnings("unchecked")
protected Object toSerializableValue(Object namedListValue) {
    // Nested NamedList: convert recursively to a plain map.
    if (namedListValue instanceof NamedList) {
        return toMap((NamedList<Object>) namedListValue);
    }

    // Document list: flatten its attributes and documents into a map.
    if (namedListValue instanceof SolrDocumentList) {
        SolrDocumentList documents = (SolrDocumentList) namedListValue;
        Map<String, Object> attributes = new HashMap<String, Object>(4);

        attributes.put(DOCUMENT_LIST_START_PROPERTY_NAME, documents.getStart());
        attributes.put(DOCUMENT_LIST_NUM_FOUND_PROPERTY_NAME, documents.getNumFound());
        attributes.put(DOCUMENT_LIST_MAX_SCORE_PROPERTY_NAME, documents.getMaxScore());
        attributes.put(DOCUMENT_LIST_DOCUMENTS_PROPERTY_NAME, extractDocs(documents));

        return attributes;
    }

    // Anything else is assumed serializable as-is.
    return namedListValue;
}

From source file:org.intermine.api.searchengine.solr.SolrKeywordSearchHandler.java

License:GNU General Public License

@Override
/**
 * Runs a keyword search against Solr and assembles paged results together
 * with the configured facets.
 *
 * @param im          the InterMine API handle
 * @param queryString the user's search term(s)
 * @param facetValues currently selected facet restrictions
 * @param ids         object ids to restrict the search to
 * @param offSet      paging offset into the result set
 * @return search hits plus facet data and the total hit count
 */
@Override
public KeywordSearchResults doKeywordSearch(InterMineAPI im, String queryString,
        Map<String, String> facetValues, List<Integer> ids, int offSet) {

    KeywordSearchPropertiesManager keywordSearchPropertiesManager = KeywordSearchPropertiesManager
            .getInstance(im.getObjectStore());
    Vector<KeywordSearchFacetData> facets = keywordSearchPropertiesManager.getFacets();

    QueryResponse resp = performSearch(im, queryString, facetValues, ids, offSet,
            keywordSearchPropertiesManager.PER_PAGE);

    SolrDocumentList results = resp.getResults();

    Set<Integer> objectIds = getObjectIds(results);

    Map<Integer, InterMineObject> objMap = null;

    try {
        objMap = Objects.getObjects(im, objectIds);

    } catch (ObjectStoreException e) {
        // NOTE(review): the exception is only logged; objMap stays null and is
        // still passed to getSearchHits below — confirm getSearchHits tolerates
        // a null map, otherwise this can fail later with an NPE.
        LOG.error("ObjectStoreException for query term : " + queryString, e);
    }

    // NOTE(review): results.getMaxScore() can be null when scores were not
    // returned — presumably getSearchHits handles that; verify.
    Vector<KeywordSearchResultContainer> searchHits = getSearchHits(results, objMap, results.getMaxScore());

    Collection<KeywordSearchFacet> searchResultsFacets = parseFacets(resp, facets, facetValues);

    return new KeywordSearchResults(searchHits, searchResultsFacets, (int) results.getNumFound());

}

From source file:org.opensextant.extraction.SolrMatcherSupport.java

License:Apache License

/**
 * Solr call: tag input buffer, returning all candiate reference data that
 * matched during tagging.
 *
 * @param buffer text to tag
 * @param docid  id for text, only for tracking purposes
 * @param refDataMap
 *            - a map of reference data in solr, It will store caller's
 *            domain objects. e.g., rec.id =&gt; domain(rec)
 * @return solr response
 * @throws ExtractionException tagger error
 */
protected QueryResponse tagTextCallSolrTagger(String buffer, String docid,
        final Map<Integer, Object> refDataMap) throws ExtractionException {
    SolrTaggerRequest tagRequest = new SolrTaggerRequest(getMatcherParameters(), buffer);
    tagRequest.setPath(requestHandler);
    // Stream the response to avoid serialization and to save memory by
    // only keeping one SolrDocument materialized at a time
    tagRequest.setStreamingResponseCallback(new StreamingResponseCallback() {
        @Override
        public void streamDocListInfo(long numFound, long start, Float maxScore) {
            // intentionally empty: only per-document streaming is of interest
        }

        // Future optimization: it would be nice if Solr could give us the
        // doc id without giving us a SolrDocument, allowing us to
        // conditionally get it. It would save disk IO & speed, at the
        // expense of putting ids into memory.
        @Override
        public void streamSolrDocument(final SolrDocument solrDoc) {
            Integer id = (Integer) solrDoc.getFirstValue("id");
            // create a domain object for the given tag;
            // this callback handler caches such domain obj in simple k/v
            // map.
            Object domainObj = createTag(solrDoc);
            if (domainObj != null) {
                refDataMap.put(id, domainObj);
            }
        }
    });

    QueryResponse response;
    try {
        response = tagRequest.process(solr.getInternalSolrServer());
    } catch (Exception err) {
        // wrap with context and preserve the cause for diagnosis
        throw new ExtractionException("Failed to tag document=" + docid, err);
    }

    // see https://issues.apache.org/jira/browse/SOLR-5154
    // Workaround: some Solr versions do not stream the tagger response, so
    // replay any materialized result list through the callback manually.
    SolrDocumentList docList = response.getResults();
    if (docList != null) {
        // log.debug("Not streaming docs from Solr (not supported)");
        StreamingResponseCallback callback = tagRequest.getStreamingResponseCallback();
        callback.streamDocListInfo(docList.getNumFound(), docList.getStart(), docList.getMaxScore());
        for (SolrDocument solrDoc : docList) {
            /**
             * This appears to be an empty list; what is this explicit
             * callback loop for?
             */
            callback.streamSolrDocument(solrDoc);
        }
    }

    return response;
}