Example usage for org.apache.solr.common SolrDocumentList SolrDocumentList

Introduction

This page lists example usages of the org.apache.solr.common.SolrDocumentList constructor, SolrDocumentList().

Prototype

SolrDocumentList()
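
Before the examples, here is a minimal, self-contained sketch (not taken from any of the source files listed below; the field names are illustrative) showing the typical pattern around this constructor: create an empty SolrDocumentList, add SolrDocument instances to it, and set the result metadata that callers usually read back.

import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;

public class SolrDocumentListSketch {
    public static void main(String[] args) {
        // SolrDocumentList extends ArrayList<SolrDocument>, so it is populated
        // like any list and then decorated with result metadata.
        SolrDocumentList results = new SolrDocumentList();

        SolrDocument doc = new SolrDocument();
        doc.addField("id", "example-1");
        doc.addField("title", "Example document");
        results.add(doc);

        // Metadata commonly set in the usage examples below.
        results.setNumFound(1);    // total number of hits matching the query
        results.setStart(0);       // offset of the first returned document
        results.setMaxScore(1.0f); // highest relevance score, if scores are tracked

        System.out.println("numFound=" + results.getNumFound() + ", size=" + results.size());
    }
}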

Usage

From source file:de.qaware.chronix.solr.query.analysis.AnalysisHandler.java

License:Apache License

@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    LOGGER.debug("Handling analysis request {}", req);
    //First check if the request should return documents => rows > 0
    SolrParams params = req.getParams();
    String rowsParam = params.get(CommonParams.ROWS, null);
    int rows = -1;
    if (rowsParam != null) {
        rows = Integer.parseInt(rowsParam);
    }

    SolrDocumentList results = new SolrDocumentList();
    String[] filterQueries = req.getParams().getParams(CommonParams.FQ);

    //Do a query and collect them on the join function
    Map<String, List<SolrDocument>> collectedDocs = findDocuments(req,
            JoinFunctionEvaluator.joinFunction(filterQueries));

    //If no rows should be returned, we only return the number of documents found
    if (rows == 0) {
        results.setNumFound(collectedDocs.keySet().size());
    } else {
        //Otherwise return the aggregated time series
        long queryStart = Long.parseLong(params.get(ChronixQueryParams.QUERY_START_LONG));
        long queryEnd = Long.parseLong(params.get(ChronixQueryParams.QUERY_END_LONG));

        //We have an analysis query
        List<SolrDocument> aggregatedDocs = analyze(collectedDocs,
                AnalysisQueryEvaluator.buildAnalysis(filterQueries), queryStart, queryEnd);

        results.addAll(aggregatedDocs);
        results.setNumFound(aggregatedDocs.size());
    }
    rsp.add("response", results);
    LOGGER.debug("Sending response {}",
            rsp.getToLogAsString(String.join("-", filterQueries == null ? "" : "")) + "/");

}

From source file:edu.cmu.lti.oaqa.openqa.hellobioqa.retrieval.team16.HeuristicSolrRetrievalStrategist.java

License:Apache License

/**
 * Retrieves documents. It will do:
 * 1. synonym expansion
 * 2. gene expansion
 * 3. increase recall by changing AND to OR one by one.
 * 
 * It accumulates the results from the above 3 steps.
 * 
 * @param query
 * @return
 */
private List<RetrievalResult> retrieveDocuments(String query) {
    String originalQuery = query;
    String newQuery = query;
    List<RetrievalResult> result = new ArrayList<RetrievalResult>();
    try {
        SolrDocumentList docs = runQuery(newQuery, hitListSize);
        int temp = 0;
        NormalSynonymProvider syn = new NormalSynonymProvider();
        while (docs.size() < this.minimumResult && temp < keyterms.size() - 1) {
            // do synonym expansion 
            newQuery = syn.reformWithSynonym(this.keyterms, originalQuery);
            SolrDocumentList tempDocs = runQuery(newQuery, hitListSize);
            SolrDocumentList duplicate = new SolrDocumentList();
            if (tempDocs != null) {
                for (SolrDocument sall : docs) {
                    for (SolrDocument stemp : tempDocs) {
                        if (stemp.getFieldValue("id").equals(sall.getFieldValue("id"))) {
                            duplicate.add(stemp);
                        }
                    }
                }
                docs.addAll(tempDocs);
                docs.removeAll(duplicate);
            }
            temp++;
        }
        // reset
        newQuery = originalQuery;
        temp = 0;
        // do gene expansion
        GeneSynonymGenerator geneGen = new GeneSynonymGenerator();
        while (docs.size() < this.minimumResult && temp < keyterms.size() - 1) {
            newQuery = geneGen.generalizeGene(this.keyterms, originalQuery);
            SolrDocumentList tempDocs = runQuery(newQuery, hitListSize);
            SolrDocumentList duplicate = new SolrDocumentList();
            if (tempDocs != null) {
                for (SolrDocument sall : docs) {
                    for (SolrDocument stemp : tempDocs) {
                        if (stemp.getFieldValue("id").equals(sall.getFieldValue("id"))) {
                            duplicate.add(stemp);
                        }
                    }
                }
                docs.addAll(tempDocs);
                docs.removeAll(duplicate);
            }
            temp++;
        }

        // do the expansion and synonym
        temp = 0;
        syn = new NormalSynonymProvider();
        newQuery = originalQuery;
        while (temp < keyterms.size() - 1) {
            newQuery = syn.reformWithSynonymForOR(this.keyterms, newQuery);
            temp++;
        }
        temp = 0;
        geneGen = new GeneSynonymGenerator();
        while (temp < keyterms.size() - 1) {
            newQuery = geneGen.generalizeGeneForOR(this.keyterms, newQuery);
            temp++;
        }

        temp = 0;
        while (docs.size() < this.minimumResult && temp < keyterms.size() - 1) {
            // do AND -> OR replace
            newQuery = OperatorSpecialist.changeOperator(newQuery);
            SolrDocumentList tempDocs = runQuery(newQuery, hitListSize);
            SolrDocumentList duplicate = new SolrDocumentList();
            if (tempDocs != null) {
                for (SolrDocument sall : docs) {
                    for (SolrDocument stemp : tempDocs) {
                        if (stemp.getFieldValue("id").equals(sall.getFieldValue("id"))) {
                            duplicate.add(stemp);
                        }
                    }
                }
                docs.addAll(tempDocs);
                docs.removeAll(duplicate);
            }
            temp++;
        }

        // add the result into result set
        for (SolrDocument doc : docs) {
            RetrievalResult r = new RetrievalResult((String) doc.getFieldValue("id"),
                    (Float) doc.getFieldValue("score"), query);
            boolean duplicate = false;
            for (RetrievalResult rr : result) {
                if (rr.getDocID().equals(r.getDocID())) { // compare IDs by value, not by reference
                    duplicate = true;
                    break;
                }
            }
            if (!duplicate) {
                result.add(r);
                System.out.println(doc.getFieldValue("id"));
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        System.err.println("Error retrieving documents from Solr: " + e);
    }
    return result;
}

From source file:edu.cornell.mannlib.vitro.webapp.searchengine.solr.SolrSearchResultDocumentList.java

License:Open Source License

public SolrSearchResultDocumentList(SolrDocumentList solrDocs) {
    if (solrDocs == null) {
        SolrDocumentList list = new SolrDocumentList();
        list.setStart(0L);
        list.setNumFound(0L);
        list.setMaxScore(0.0F);
        this.solrDocs = list;
    } else {
        this.solrDocs = solrDocs;
    }
}

From source file:fr.mcc.ginco.tests.solr.SearcherServiceUtilTest.java

License:CeCILL license

@Test
public void testGetSearchResultList() {
    SolrDocumentList fakeDocList = new SolrDocumentList();
    SolrDocument fakeDoc = new SolrDocument();
    fakeDoc.addField(SolrField.IDENTIFIER, "id1");
    fakeDoc.addField(SolrField.LEXICALVALUE, "lex1");
    fakeDoc.addField(SolrField.THESAURUSID, "th1");
    fakeDoc.addField(SolrField.THESAURUSTITLE, "title1");
    fakeDoc.addField(SolrField.TYPE, ThesaurusTerm.class.getSimpleName());
    fakeDoc.addField(SolrField.EXT_TYPE, ExtEntityType.TERM_NON_PREF);
    fakeDoc.addField(SolrField.MODIFIED, DateUtil.dateFromString("2013-11-21 18:19:47"));
    fakeDoc.addField(SolrField.CREATED, DateUtil.dateFromString("2013-11-21 15:51:00"));
    fakeDoc.addField(SolrField.STATUS, 0);
    fakeDoc.addField(SolrField.LANGUAGE, "lang1");
    fakeDocList.add(fakeDoc);
    fakeDocList.setNumFound(1);

    SearchResultList searchResultList = searcherServiceUtil.getSearchResultList(fakeDocList);
    Assert.assertEquals(1, searchResultList.getNumFound());

    SearchResult searchResult = searchResultList.get(0);
    Assert.assertEquals(searchResult.getIdentifier(), "id1");
    Assert.assertEquals(searchResult.getLexicalValue(), "lex1");
    Assert.assertEquals(searchResult.getThesaurusId(), "th1");
    Assert.assertEquals(searchResult.getThesaurusTitle(), "title1");
    Assert.assertEquals(searchResult.getType(), ThesaurusTerm.class.getSimpleName());
    Assert.assertEquals(searchResult.getTypeExt(), String.valueOf(ExtEntityType.TERM_NON_PREF));
    Assert.assertEquals(searchResult.getModified(), "2013-11-21 18:19:47");
    Assert.assertEquals(searchResult.getCreated(), "2013-11-21 15:51:00");
    Assert.assertEquals(searchResult.getStatus(), Integer.valueOf(0));
    Assert.assertEquals(searchResult.getLanguages().get(0), "lang1");

}

From source file:geocluster.GeoclusterComponent.java

License:Apache License

private SolrDocumentList finalizeClusters(Map<String, SolrDocument> clusterMap) {
    SolrDocumentList resultClusters = new SolrDocumentList();
    for (Entry<String, SolrDocument> clusterEntry : clusterMap.entrySet()) {
        String geohashPrefix = clusterEntry.getKey();
        if (geohashPrefix == null) {
            continue;
        }
        SolrDocument cluster = clusterEntry.getValue();
        this.finishCluster(cluster, geohashPrefix);
        resultClusters.add(cluster);
    }
    return resultClusters;
}

From source file:info.papyri.dispatch.atom.AtomFeedServlet.java

License:Creative Commons License

protected void processRequest(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {

    request.setCharacterEncoding("UTF-8");
    response.setContentType("xml");
    ServletOutputStream out = response.getOutputStream();
    int page = pullOutPageNumber(request);
    EnumMap<TimeParam, String> dateParams = pullOutDateParams(request);
    SearchType typeFlag = pullOutTypeFlag(request);
    SolrDocumentList results = new SolrDocumentList();
    try {

        SolrQuery sq = buildSolrQuery(dateParams, page, typeFlag);
        results = queryServer(sq);

    } catch (ParseException pe) {

        results = buildErrorDocumentList("Parse error in submitted dates " + request.getQueryString());

    }
    if (results.size() == 0)
        results = buildNoResultsDocumentList(buildErrorMsg(dateParams));
    Feed emptyFeed = initFeed(results, typeFlag);
    paginateFeed(emptyFeed, dateParams, page, results);
    ArrayList<EmendationRecord> emendationRecords = buildEmendationRecords(results, typeFlag);
    addEntries(emptyFeed, emendationRecords);
    Writer writer = abdera.getWriterFactory().getWriterByMediaType("application/atom+xml");
    emptyFeed.writeTo(writer, out);

}

From source file:info.papyri.dispatch.atom.AtomFeedServlet.java

License:Creative Commons License

/**
 * Builds an atom:entry element to display information to the user in the event that
 * an exception is thrown.
 * 
 * @param msg
 * @return 
 */

SolrDocumentList buildErrorDocumentList(String msg) {

    SolrDocumentList sdl = new SolrDocumentList();

    SolrDocument doc = new SolrDocument();
    doc.addField(SolrField.id.name(), SELF + "error");
    doc.addField(SolrField.title.name(), "There has been an error in processing your request");
    doc.addField(SolrField.metadata.name(), msg);
    doc.addField(SolrField.edit_date.name(), new Date());
    sdl.add(doc);

    return sdl;
}

From source file:info.papyri.dispatch.atom.AtomFeedServlet.java

License:Creative Commons License

/**
 * Builds an atom:entry element to display information to the user in the event that
 * no results and no error condition are returned from the server.
 * 
 * @param msg
 * @return 
 */

SolrDocumentList buildNoResultsDocumentList(String msg) {

    SolrDocumentList sdl = new SolrDocumentList();

    SolrDocument doc = new SolrDocument();
    doc.addField(SolrField.id.name(), SELF + "none");
    doc.addField(SolrField.title.name(), "No results returned");
    doc.addField(SolrField.metadata.name(), msg);
    doc.addField(SolrField.edit_date.name(), new Date());
    sdl.add(doc);

    return sdl;
}

From source file:net.yacy.cora.federate.solr.connector.CachedSolrConnector.java

License:Open Source License

/**
 * Get a query result from Solr.
 * To get all results, set the query string to "*:*".
 * @param querystring
 * @throws IOException
 */
@Override
public SolrDocumentList getDocumentListByQuery(final String querystring, final String sort, final int offset,
        final int count, final String... fields) throws IOException {
    if (offset == 0 && count == 1 && querystring.startsWith("id:")
            && ((querystring.length() == 17 && querystring.charAt(3) == '"' && querystring.charAt(16) == '"')
                    || querystring.length() == 15)) {
        final SolrDocumentList list = new SolrDocumentList();
        SolrDocument doc = getDocumentById(
                querystring.charAt(3) == '"' ? querystring.substring(4, querystring.length() - 1)
                        : querystring.substring(3),
                fields);
        list.add(doc);
        // no addToCache(list) here because that was already handled in get();
        return list;
    }
    if (this.solr != null) {
        SolrDocumentList list = this.solr.getDocumentListByQuery(querystring, sort, offset, count, fields);
        addToCache(list, fields.length == 0);
        return list;
    }

    // combine both lists
    SolrDocumentList list;
    list = this.solr.getDocumentListByQuery(querystring, sort, offset, count, fields);

    // add caching
    addToCache(list, fields.length == 0);
    return list;
}

From source file:net.yacy.cora.federate.solr.connector.ConcurrentUpdateSolrConnector.java

License:Open Source License

@Override
public SolrDocumentList getDocumentListByQuery(String querystring, String sort, int offset, int count,
        String... fields) throws IOException, SolrException {
    commitDocBuffer();
    if (offset == 0 && count == 1 && querystring.startsWith("id:")
            && ((querystring.length() == 17 && querystring.charAt(3) == '"' && querystring.charAt(16) == '"')
                    || querystring.length() == 15)) {
        final SolrDocumentList list = new SolrDocumentList();
        SolrDocument doc = getDocumentById(
                querystring.charAt(3) == '"' ? querystring.substring(4, querystring.length() - 1)
                        : querystring.substring(3),
                fields);
        list.add(doc);
        return list;
    }

    SolrDocumentList sdl = this.connector.getDocumentListByQuery(querystring, sort, offset, count,
            AbstractSolrConnector.ensureEssentialFieldsIncluded(fields));
    return sdl;
}