Example usage for org.apache.solr.common SolrDocumentList getNumFound

List of usage examples for org.apache.solr.common SolrDocumentList getNumFound

Introduction

On this page you can find example usages of org.apache.solr.common.SolrDocumentList.getNumFound().

Prototype

public long getNumFound() 
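
The returned value is the total number of documents that matched the query, which can exceed the number of documents actually returned when rows caps the page size. A minimal sketch of the distinction, assuming a connected SolrJ SolrClient is passed in (the method name and the match-all query are placeholders):

public long printHitCount(SolrClient client) throws SolrServerException, IOException {
    SolrQuery query = new SolrQuery("*:*"); // match-all query, for illustration only
    query.setRows(10); // return at most 10 documents in this page
    QueryResponse response = client.query(query);
    SolrDocumentList results = response.getResults();
    long total = results.getNumFound(); // total matches, independent of rows
    System.out.println("Returned " + results.size() + " of " + total + " matching documents");
    return total;
}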

Usage

From source file:org.mousephenotype.cda.solr.service.AnatomyService.java

License:Apache License

public AnatomogramDataBean getUberonIdAndTopLevelMaTerm(AnatomogramDataBean bean)
        throws SolrServerException, IOException {
    SolrQuery solrQuery = new SolrQuery();
    solrQuery.setQuery(AnatomyDTO.ANATOMY_ID + ":\"" + bean.getMaId() + "\"");
    solrQuery.setFields(AnatomyDTO.UBERON_ID, AnatomyDTO.ALL_AE_MAPPED_UBERON_ID,
            AnatomyDTO.SELECTED_TOP_LEVEL_ANATOMY_ID, AnatomyDTO.SELECTED_TOP_LEVEL_ANATOMY_TERM);

    QueryResponse rsp = anatomyCore.query(solrQuery);
    SolrDocumentList res = rsp.getResults();

    ArrayList<String> uberonIds = new ArrayList<String>();
    Set<String> mappedEfoIds = new HashSet<>();
    Set<String> mappedUberonIds = new HashSet<>();

    if (res.getNumFound() > 1) {
        System.err.println("Warning - more than 1 anatomy term found where only one doc is expected!");
    }

    for (SolrDocument doc : res) {
        if (doc.containsKey(AnatomyDTO.UBERON_ID)) {
            for (Object child : doc.getFieldValues(AnatomyDTO.UBERON_ID)) {
                mappedUberonIds.add((String) child);
            }
            bean.setMappedUberonIdsForAnatomogram(new ArrayList<>(mappedUberonIds));
        }
        if (doc.containsKey(AnatomyDTO.EFO_ID)) {
            for (Object child : doc.getFieldValues(AnatomyDTO.EFO_ID)) {
                mappedEfoIds.add((String) child);
            }
            // note: this call overwrites the Uberon IDs set just above with the EFO IDs;
            // a dedicated EFO setter on the bean may have been intended here
            bean.setMappedUberonIdsForAnatomogram(new ArrayList<>(mappedEfoIds));
        }

        if (doc.containsKey(AnatomyDTO.ALL_AE_MAPPED_UBERON_ID)) {
            for (Object child : doc.getFieldValues(AnatomyDTO.ALL_AE_MAPPED_UBERON_ID)) {
                uberonIds.add((String) child);
            }
            bean.setUberonIds(uberonIds);
        }

        if (doc.containsKey(AnatomyDTO.SELECTED_TOP_LEVEL_ANATOMY_ID)) {
            List<String> selectedTopLevelAnas = (List<String>) doc
                    .get(AnatomyDTO.SELECTED_TOP_LEVEL_ANATOMY_ID);
            bean.addTopLevelMaIds(selectedTopLevelAnas);
        }
        if (doc.containsKey(AnatomyDTO.SELECTED_TOP_LEVEL_ANATOMY_TERM)) {
            List<String> selectedTopLevelMaTerms = (List<String>) doc
                    .get(AnatomyDTO.SELECTED_TOP_LEVEL_ANATOMY_TERM);
            bean.addTopLevelMaNames(selectedTopLevelMaTerms);
        }

        if (doc.containsKey(AnatomyDTO.ALL_AE_MAPPED_EFO_ID)) {
            List<String> efoIds = (List<String>) doc.get(AnatomyDTO.ALL_AE_MAPPED_EFO_ID);
            bean.addEfoIds(efoIds);
        }

    }
    return bean;
}
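
Note: the numFound > 1 sanity check above works without raising the row count, because getNumFound() reports the total number of matching documents rather than the number returned in the current page of results.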

From source file:org.mousephenotype.cda.solr.service.ObservationService.java

License:Apache License

public Map<String, List<DiscreteTimePoint>> getTimeSeriesMutantData(String parameter, List<String> genes,
        List<String> strains, String[] center, String[] sex) throws SolrServerException, IOException {

    Map<String, List<DiscreteTimePoint>> finalRes = new HashMap<String, List<DiscreteTimePoint>>(); // <allele_accession, timeSeriesData>

    SolrQuery query = new SolrQuery().addFilterQuery(ObservationDTO.BIOLOGICAL_SAMPLE_GROUP + ":experimental")
            .addFilterQuery(ObservationDTO.PARAMETER_STABLE_ID + ":" + parameter);

    String q = (strains.size() > 1) ? "(" + ObservationDTO.STRAIN_ACCESSION_ID + ":\""
            + StringUtils.join(strains.toArray(), "\" OR " + ObservationDTO.STRAIN_ACCESSION_ID + ":\"") + "\")"
            : ObservationDTO.STRAIN_ACCESSION_ID + ":\"" + strains.get(0) + "\"";

    if (genes != null && genes.size() > 0) {
        q += " AND (";
        q += (genes.size() > 1) ? ObservationDTO.GENE_ACCESSION_ID + ":\""
                + StringUtils.join(genes.toArray(), "\" OR " + ObservationDTO.GENE_ACCESSION_ID + ":\"") + "\""
                : ObservationDTO.GENE_ACCESSION_ID + ":\"" + genes.get(0) + "\"";
        q += ")";
    }

    if (center != null && center.length > 0) {
        q += " AND (";
        q += (center.length > 1)
                ? ObservationDTO.PHENOTYPING_CENTER + ":\""
                        + StringUtils.join(center, "\" OR " + ObservationDTO.PHENOTYPING_CENTER + ":\"") + "\""
                : ObservationDTO.PHENOTYPING_CENTER + ":\"" + center[0] + "\"";
        q += ")";
    }

    if (sex != null && sex.length == 1) {
        q += " AND " + ObservationDTO.SEX + ":\"" + sex[0] + "\"";
    }

    query.setQuery(q);
    query.set("group.field", ObservationDTO.GENE_SYMBOL);
    query.set("group", true);
    query.set("fl", ObservationDTO.DATA_POINT + "," + ObservationDTO.DISCRETE_POINT);
    query.set("group.limit", 100000); // number of documents to be returned
    // per group
    query.set("group.sort", ObservationDTO.DISCRETE_POINT + " asc");
    query.setRows(10000);

    // logger.info("+_+_+ " + SolrUtils.getBaseURL(experimentCore) + "/select?" +
    // query);
    List<Group> groups = experimentCore.query(query).getGroupResponse().getValues().get(0).getValues();
    // for mutants we don't seem to need binning
    // groups are the alleles
    for (Group gr : groups) {
        SolrDocumentList resDocs = gr.getResult();
        DescriptiveStatistics stats = new DescriptiveStatistics();
        float discreteTime = (float) resDocs.get(0).getFieldValue(ObservationDTO.DISCRETE_POINT);
        List<DiscreteTimePoint> res = new ArrayList<DiscreteTimePoint>();
        for (int i = 0; i < resDocs.getNumFound(); i++) {
            SolrDocument doc = resDocs.get(i);
            stats.addValue((float) doc.getFieldValue(ObservationDTO.DATA_POINT));
            if (discreteTime != (float) doc.getFieldValue(ObservationDTO.DISCRETE_POINT)
                    || i == resDocs.getNumFound() - 1) { // we are at the end of the document list
                // add to list
                float discreteDataPoint = (float) stats.getMean();
                DiscreteTimePoint dp = new DiscreteTimePoint(discreteTime, discreteDataPoint,
                        new Float(stats.getStandardDeviation()));
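                // note: unlike getTimeSeriesControlData below, the error pair here spans
                // zero width; both bounds equal the mean data point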
                List<Float> errorPair = new ArrayList<>();
                Float lower = new Float(discreteDataPoint);
                Float higher = new Float(discreteDataPoint);
                errorPair.add(lower);
                errorPair.add(higher);
                dp.setErrorPair(errorPair);
                res.add(dp);
                // update discrete point
                discreteTime = Float.valueOf(doc.getFieldValue(ObservationDTO.DISCRETE_POINT).toString());
                // update stats
                stats = new DescriptiveStatistics();
            }
        }
        // add list
        finalRes.put(gr.getGroupValue(), res);
    }
    return finalRes;
}
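
Note: the inner loop uses resDocs.getNumFound() as its bound. That is only safe because group.limit is set to 100000; in general numFound can exceed the number of documents actually present in the list, so resDocs.size() is the safer bound.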

From source file:org.mousephenotype.cda.solr.service.ObservationService.java

License:Apache License

public List<DiscreteTimePoint> getTimeSeriesControlData(String parameter, List<String> strains, String[] center,
        String[] sex) throws SolrServerException, IOException {

    List<DiscreteTimePoint> res = new ArrayList<DiscreteTimePoint>();
    SolrQuery query = new SolrQuery().addFilterQuery(ObservationDTO.BIOLOGICAL_SAMPLE_GROUP + ":control")
            .addFilterQuery(ObservationDTO.PARAMETER_STABLE_ID + ":" + parameter);
    String q = (strains.size() > 1) ? "(" + ObservationDTO.STRAIN_ACCESSION_ID + ":\""
            + StringUtils.join(strains.toArray(), "\" OR " + ObservationDTO.STRAIN_ACCESSION_ID + ":\"") + "\")"
            : ObservationDTO.STRAIN_ACCESSION_ID + ":\"" + strains.get(0) + "\"";

    if (center != null && center.length > 0) {
        q += " AND (";
        q += (center.length > 1)
                ? ObservationDTO.PHENOTYPING_CENTER + ":\""
                        + StringUtils.join(center, "\" OR " + ObservationDTO.PHENOTYPING_CENTER + ":\"") + "\""
                : ObservationDTO.PHENOTYPING_CENTER + ":\"" + center[0] + "\"";
        q += ")";
    }

    if (sex != null && sex.length == 1) {
        q += " AND " + ObservationDTO.SEX + ":\"" + sex[0] + "\"";
    }

    query.setQuery(q);
    query.set("group.field", ObservationDTO.DISCRETE_POINT);
    query.set("group", true);
    query.set("fl", ObservationDTO.DATA_POINT + "," + ObservationDTO.DISCRETE_POINT);
    query.set("group.limit", 100000); // number of documents to be returned
    // per group
    query.set("sort", ObservationDTO.DISCRETE_POINT + " asc");
    query.setRows(10000);

    // logger.info("+_+_+ " + SolrUtils.getBaseURL(solr) + "/select?" +
    // query);
    List<Group> groups = experimentCore.query(query).getGroupResponse().getValues().get(0).getValues();
    boolean rounding = false;
    // decide if binning is needed, i.e. if the increment points are too
    // scattered, as for calorimetry
    if (groups.size() > 30) { // arbitrary value, picked because it seems
        // reasonable for the size of our graphs
        if (Float.valueOf(groups.get(groups.size() - 1).getGroupValue())
                - Float.valueOf(groups.get(0).getGroupValue()) <= 30) { // then rounding will be enough
            rounding = true;
        }
    }
    if (rounding) {
        int bin = Math.round(Float.valueOf(groups.get(0).getGroupValue()));
        for (Group gr : groups) {
            int discreteTime = Math.round(Float.valueOf(gr.getGroupValue()));
            // for calorimetry ignore what's before -5 and after 16
            if (parameter.startsWith("IMPC_CAL") || parameter.startsWith("ESLIM_003_001")
                    || parameter.startsWith("M-G-P_003_001")) {
                if (discreteTime < -5) {
                    continue;
                } else if (discreteTime > 16) {
                    break;
                }
            }
            float sum = 0;
            SolrDocumentList resDocs = gr.getResult();
            DescriptiveStatistics stats = new DescriptiveStatistics();
            for (SolrDocument doc : resDocs) {
                sum += (float) doc.getFieldValue(ObservationDTO.DATA_POINT);
                stats.addValue((float) doc.getFieldValue(ObservationDTO.DATA_POINT));
            }
            if (bin < discreteTime || groups.indexOf(gr) == groups.size() - 1) { // finished the groups or filled the bin
                float discreteDataPoint = sum / resDocs.getNumFound();
                DiscreteTimePoint dp = new DiscreteTimePoint((float) discreteTime, discreteDataPoint,
                        new Float(stats.getStandardDeviation()));
                List<Float> errorPair = new ArrayList<>();
                double std = stats.getStandardDeviation();
                Float lower = new Float(discreteDataPoint - std);
                Float higher = new Float(discreteDataPoint + std);
                errorPair.add(lower);
                errorPair.add(higher);
                dp.setErrorPair(errorPair);
                res.add(dp);
                bin = discreteTime;
            }
        }
    } else {
        for (Group gr : groups) {
            Float discreteTime = Float.valueOf(gr.getGroupValue());
            float sum = 0;
            SolrDocumentList resDocs = gr.getResult();
            DescriptiveStatistics stats = new DescriptiveStatistics();
            for (SolrDocument doc : resDocs) {
                sum += (float) doc.getFieldValue(ObservationDTO.DATA_POINT);
                stats.addValue((float) doc.getFieldValue(ObservationDTO.DATA_POINT));
            }
            float discreteDataPoint = sum / resDocs.getNumFound();
            DiscreteTimePoint dp = new DiscreteTimePoint(discreteTime, discreteDataPoint,
                    new Float(stats.getStandardDeviation()));
            List<Float> errorPair = new ArrayList<>();
            double std = stats.getStandardDeviation();
            Float lower = new Float(discreteDataPoint - std);
            Float higher = new Float(discreteDataPoint + std);
            errorPair.add(lower);
            errorPair.add(higher);
            dp.setErrorPair(errorPair);
            res.add(dp);
        }
    }
    return res;
}

From source file:org.mycore.restapi.v1.MCRRestAPIClassifications.java

License:Open Source License

private void filterNonEmpty(String classId, Element e) {
    SolrClient solrClient = MCRSolrClientFactory.getSolrClient();
    for (int i = 0; i < e.getChildren("category").size(); i++) {
        Element cat = e.getChildren("category").get(i);

        SolrQuery solrQuery = new SolrQuery();
        solrQuery.setQuery("category:\""
                + MCRSolrUtils.escapeSearchValue(classId + ":" + cat.getAttributeValue("ID")) + "\"");
        solrQuery.setRows(0); // count only; no documents need to be returned
        try {
            QueryResponse response = solrClient.query(solrQuery);
            SolrDocumentList solrResults = response.getResults();
            if (solrResults.getNumFound() == 0) {
                e.removeContent(cat);
                i--;
            }
        } catch (SolrServerException | IOException exc) {
            LOGGER.error(exc);
        }
    }
    for (int i = 0; i < e.getChildren("category").size(); i++) {
        filterNonEmpty(classId, e.getChildren("category").get(i));
    }
}
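
Note: setting rows to 0 and reading only getNumFound() is a cheap existence test: Solr computes the total hit count without retrieving or serializing any documents.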

From source file:org.mycore.restapi.v1.utils.MCRRestAPIObjectsHelper.java

License:Open Source License

private static MCRObject retrieveMCRObject(String idString) throws MCRRestAPIException {
    String key = "mcr"; // the default value for the key
    if (idString.contains(":")) {
        int pos = idString.indexOf(":");
        key = idString.substring(0, pos);
        idString = idString.substring(pos + 1);
        if (!key.equals("mcr")) {
            try {
                idString = URLDecoder.decode(idString, "UTF-8");
            } catch (UnsupportedEncodingException e) {
                // will not happen: UTF-8 is always supported
            }
            //ToDo - Shall we restrict the key set with a property?

            //throw new MCRRestAPIException(MCRRestAPIError.create(Response.Status.BAD_REQUEST,
            //        "The ID is not valid.", "The prefix is unkown. Only 'mcr' is allowed."));
        }
    }
    if (key.equals("mcr")) {

        MCRObjectID mcrID = null;
        try {
            mcrID = MCRObjectID.getInstance(idString);
        } catch (Exception e) {
            throw new MCRRestAPIException(MCRRestAPIError.create(Response.Status.BAD_REQUEST,
                    "The MyCoRe ID '" + idString
                            + "' is not valid. Did you use the proper format: '{project}_{type}_{number}'?",
                    e.getMessage()));
        }

        if (!MCRMetadataManager.exists(mcrID)) {
            throw new MCRRestAPIException(MCRRestAPIError.create(Response.Status.NOT_FOUND,
                    "There is no object with the given MyCoRe ID '" + idString + "'.", null));
        }

        return MCRMetadataManager.retrieveMCRObject(mcrID);
    } else {
        SolrClient solrClient = MCRSolrClientFactory.getSolrClient();
        SolrQuery query = new SolrQuery();
        query.setQuery(key + ":" + idString);
        try {
            QueryResponse response = solrClient.query(query);
            SolrDocumentList solrResults = response.getResults();
            if (solrResults.getNumFound() == 1) {
                String id = solrResults.get(0).getFieldValue("returnId").toString();
                return retrieveMCRObject(id);
            } else {
                if (solrResults.getNumFound() == 0) {
                    throw new MCRRestAPIException(MCRRestAPIError.create(Response.Status.NOT_FOUND,
                            "There is no object with the given ID '" + key + ":" + idString + "'.", null));
                } else {
                    throw new MCRRestAPIException(
                            MCRRestAPIError.create(Response.Status.NOT_FOUND,
                                    "The ID is not unique. There are " + solrResults.getNumFound()
                                            + " objecst fore the given ID '" + key + ":" + idString + "'.",
                                    null));
                }
            }
        } catch (SolrServerException | IOException e) {
            LOGGER.error(e);
        }
        return null;
    }
}
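
Note: getNumFound() drives the disambiguation here: zero matches yields a 404, exactly one match resolves the alternate key to a MyCoRe ID, and more than one is reported as a non-unique ID.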

From source file:org.opencastproject.workflow.impl.WorkflowServiceSolrIndex.java

License:Educational Community License

/**
 * {@inheritDoc}
 * 
 * @see org.opencastproject.workflow.impl.WorkflowServiceIndex#getWorkflowInstances(org.opencastproject.workflow.api.WorkflowQuery,
 *      String, boolean)
 */
@Override
public WorkflowSet getWorkflowInstances(WorkflowQuery query, String action, boolean applyPermissions)
        throws WorkflowDatabaseException {
    int count = query.getCount() > 0 ? (int) query.getCount() : 20; // default to 20 items if not specified
    int startPage = query.getStartPage() > 0 ? (int) query.getStartPage() : 0; // default to page zero

    SolrQuery solrQuery = new SolrQuery();
    solrQuery.setRows(count);
    solrQuery.setStart(startPage * count);

    String solrQueryString = createQuery(query, action, applyPermissions);
    solrQuery.setQuery(solrQueryString);

    if (query.getSort() != null) {
        ORDER order = query.isSortAscending() ? ORDER.asc : ORDER.desc;
        solrQuery.addSortField(getSortField(query.getSort()) + "_sort", order);
    }

    if (!Sort.DATE_CREATED.equals(query.getSort())) {
        solrQuery.addSortField(getSortField(Sort.DATE_CREATED) + "_sort", ORDER.desc);
    }

    long totalHits;
    long time = System.currentTimeMillis();
    WorkflowSetImpl set = null;
    try {
        QueryResponse response = solrServer.query(solrQuery);
        SolrDocumentList items = response.getResults();
        long searchTime = System.currentTimeMillis() - time;
        totalHits = items.getNumFound();

        set = new WorkflowSetImpl();
        set.setPageSize(count);
        set.setTotalCount(totalHits);
        set.setStartPage(query.getStartPage());
        set.setSearchTime(searchTime);

        // Iterate through the results
        for (SolrDocument doc : items) {
            String xml = (String) doc.get(XML_KEY);
            try {
                set.addItem(WorkflowParser.parseWorkflowInstance(xml));
            } catch (Exception e) {
                throw new IllegalStateException("can not parse workflow xml", e);
            }
        }
    } catch (Exception e) {
        throw new WorkflowDatabaseException(e);
    }
    long totalTime = System.currentTimeMillis() - time;
    logger.debug("Workflow query took {} ms", totalTime);
    return set;
}
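
Note: getNumFound() supplies the total hit count for the result set while setRows()/setStart() bound the returned page, which is the usual way to report "showing x of y results" in a paginated view.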

From source file:org.opencms.search.solr.CmsSolrIndex.java

License:Open Source License

/**
 * @see org.opencms.search.CmsSearchIndex#getDocument(java.lang.String, java.lang.String)
 */
@Override
public synchronized I_CmsSearchDocument getDocument(String fieldname, String term) {

    try {
        SolrQuery query = new SolrQuery();
        if (CmsSearchField.FIELD_PATH.equals(fieldname)) {
            query.setQuery(fieldname + ":\"" + term + "\"");
        } else {
            query.setQuery(fieldname + ":" + term);
        }
        QueryResponse res = m_solr.query(query);
        if (res != null) {
            SolrDocumentList sdl = res.getResults(); // reuse the response instead of querying Solr a second time
            if ((sdl.getNumFound() > 0L) && (sdl.get(0) != null)) {
                return new CmsSolrDocument(sdl.get(0));
            }
        }
    } catch (Exception e) {
        // ignore and assume that the document could not be found
        LOG.error(e.getMessage(), e);
    }
    return null;
}

From source file:org.opencommercesearch.RuleManager.java

License:Apache License

/**
 * Loads the rules that match the given query
 *
 * @param q is the user query
 * @param categoryPath is the current category path, used to filter out rules (i.e. rule based pages)
 * @param categoryFilterQuery is the current category search token that will be used for filtering out rules and facets
 * @param isSearch indicates if we are browsing or searching the site
 * @param isRuleBasedPage tells whether or not we are on a rule based page
 * @param catalog the current catalog we are browsing/searching
 * @param isOutletPage tells whether or not the current page is outlet
 * @param brandId is the current brand id currently browsed, if any.
 * @throws RepositoryException if an exception happens retrieving a rule from the repository
 * @throws SolrServerException if an exception happens querying the search engine
 */
void loadRules(String q, String categoryPath, String categoryFilterQuery, boolean isSearch,
        boolean isRuleBasedPage, RepositoryItem catalog, boolean isOutletPage, String brandId,
        Set<String> includeExperiments, Set<String> excludeExperiments)
        throws RepositoryException, SolrServerException {
    if (isSearch && StringUtils.isBlank(q)) {
        throw new IllegalArgumentException("Missing query");
    }

    SolrQuery query = new SolrQuery("*:*");
    query.setStart(DEFAULT_START);
    query.setRows(DEFAULT_ROWS);
    query.addSort(FIELD_SORT_PRIORITY, ORDER.asc);
    query.addSort(FIELD_SCORE, ORDER.asc);
    query.addSort(FIELD_ID, ORDER.asc);
    query.add(CommonParams.FL, FIELD_ID, FIELD_BOOST_FUNCTION, FIELD_FACET_FIELD, FIELD_COMBINE_MODE,
            FIELD_QUERY, FIELD_CATEGORY, FIELD_EXPERIMENTAL);

    StringBuilder reusableStringBuilder = new StringBuilder();
    query.addFilterQuery(getTargetFilter(reusableStringBuilder, isSearch, q));
    query.addFilterQuery(getCategoryFilter(reusableStringBuilder, categoryFilterQuery, categoryPath));
    query.addFilterQuery(getSiteFilter(reusableStringBuilder, catalog));
    query.addFilterQuery(getBrandFilter(reusableStringBuilder, brandId));
    query.addFilterQuery(getSubTargetFilter(reusableStringBuilder, isOutletPage));

    StringBuilder catalogFilter = reuseStringBuilder(reusableStringBuilder);
    catalogFilter.append("catalogId:").append(WILDCARD).append(" OR ").append("catalogId:")
            .append(catalog.getRepositoryId());
    query.addFilterQuery(catalogFilter.toString());

    //Notice how the current datetime (NOW wildcard on Solr) is rounded to days (NOW/DAY). This allows filter caches
    //to be reused and hopefully improve performance. If you don't round to day, NOW is very precise (up to milliseconds); so every query
    //would need a new entry on the filter cache...
    //Also, notice that NOW/DAY is midnight from last night, and NOW/DAY+1DAY is midnight today.
    //The below query is intended to match rules with null start or end dates, or start and end dates in the proper range.
    query.addFilterQuery(
            "-(((startDate:[* TO *]) AND -(startDate:[* TO NOW/DAY+1DAY])) OR (endDate:[* TO *] AND -endDate:[NOW/DAY+1DAY TO *]))");

    int queryTime = 0;
    QueryResponse res = server.query(query);
    queryTime += res.getQTime();

    if (res.getResults() == null || res.getResults().getNumFound() == 0) {
        rules = Collections.emptyMap();
        loadRulesTime = queryTime;
        return;
    }

    rules = new HashMap<String, List<RepositoryItem>>(RuleType.values().length);
    ruleDocs = new HashMap<String, SolrDocument>();
    SolrDocumentList docs = res.getResults();
    int total = (int) docs.getNumFound();
    int processed = 0;
    while (processed < total) {
        for (int i = 0; i < docs.size(); i++) {
            ++processed;
            SolrDocument doc = docs.get(i);

            if (isSearch && !matchesQuery(q, doc)) {
                // skip this rule
                continue;
            }

            RepositoryItem rule = searchRepository.getItem((String) doc.getFieldValue("id"),
                    SearchRepositoryItemDescriptor.RULE);

            //for rule based categories, include all facet rules and ranking rules of only that category
            if (rule != null) {

                if (excludeExperiments.contains(rule.getRepositoryId())) {
                    continue;
                }

                Boolean experimental = (Boolean) doc.getFieldValue(FIELD_EXPERIMENTAL);
                if (experimental != null && experimental
                        && !includeExperiments.contains(rule.getRepositoryId())) {
                    continue;
                }

                String ruleType = (String) rule.getPropertyValue(RuleProperty.RULE_TYPE);
                if (ruleType.equals(RuleProperty.TYPE_FACET_RULE)) {
                    buildRuleLists(ruleType, rule, doc);
                } else {
                    if (categoryPath != null && isRuleBasedPage) {
                        List<String> ruleCategories = (List<String>) doc.getFieldValue(FIELD_CATEGORY);
                        if (ruleCategories != null) {
                            if (ruleCategories.contains(categoryPath)) {
                                buildRuleLists(ruleType, rule, doc);
                            }
                        }
                    } else {
                        buildRuleLists(ruleType, rule, doc);
                    }
                }
            } else {
                //TODO gsegura: add logging that we couldn't find the rule item in the DB
            }
        }
        if (processed < total) {
            query.setStart(processed);
            res = server.query(query);
            queryTime += res.getQTime();
            docs = res.getResults();
        }
    }

    loadRulesTime = queryTime;
}
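
Note: the paging loop treats getNumFound() as the fixed total and re-issues the query with an increasing start offset until every matching rule document has been processed.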

From source file:org.opensextant.extraction.SolrMatcherSupport.java

License:Apache License

/**
 * Solr call: tag input buffer, returning all candidate reference data that
 * matched during tagging.
 *
 * @param buffer text to tag
 * @param docid  id for text, only for tracking purposes
 * @param refDataMap
 *            - a map of reference data in solr, It will store caller's
 *            domain objects. e.g., rec.id =&gt; domain(rec)
 * @return solr response
 * @throws ExtractionException tagger error
 */
protected QueryResponse tagTextCallSolrTagger(String buffer, String docid,
        final Map<Integer, Object> refDataMap) throws ExtractionException {
    SolrTaggerRequest tagRequest = new SolrTaggerRequest(getMatcherParameters(), buffer);
    tagRequest.setPath(requestHandler);
    // Stream the response to avoid serialization and to save memory by
    // only keeping one SolrDocument materialized at a time
    tagRequest.setStreamingResponseCallback(new StreamingResponseCallback() {
        @Override
        public void streamDocListInfo(long numFound, long start, Float maxScore) {
        }

        // Future optimization: it would be nice if Solr could give us the
        // doc id without giving us a SolrDocument, allowing us to
        // conditionally get it. It would save disk IO & speed, at the
        // expense of putting ids into memory.
        @Override
        public void streamSolrDocument(final SolrDocument solrDoc) {
            Integer id = (Integer) solrDoc.getFirstValue("id");
            // create a domain object for the given tag;
            // this callback handler caches such domain obj in simple k/v
            // map.
            Object domainObj = createTag(solrDoc);
            if (domainObj != null) {
                refDataMap.put(id, domainObj);
            }
        }
    });

    QueryResponse response;
    try {
        response = tagRequest.process(solr.getInternalSolrServer());
    } catch (Exception err) {
        throw new ExtractionException("Failed to tag document=" + docid, err);
    }

    // see https://issues.apache.org/jira/browse/SOLR-5154
    SolrDocumentList docList = response.getResults();
    if (docList != null) {
        // log.debug("Not streaming docs from Solr (not supported)");
        StreamingResponseCallback callback = tagRequest.getStreamingResponseCallback();
        callback.streamDocListInfo(docList.getNumFound(), docList.getStart(), docList.getMaxScore());
        for (SolrDocument solrDoc : docList) {
            /**
             * This appears to be an empty list; what is this explicit
             * callback loop for?
             */
            callback.streamSolrDocument(solrDoc);
        }
    }

    return response;
}

From source file:org.phenotips.ontology.internal.solr.AbstractSolrOntologyService.java

License:Open Source License

/**
 * Get the number of entries that match a specific Lucene query.
 *
 * @param query a valid Lucene query, as a string
 * @return the number of entries matching the query
 */
protected long count(String query) {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set(CommonParams.Q, query);
    params.set(CommonParams.START, "0");
    params.set(CommonParams.ROWS, "0");
    SolrDocumentList results;
    try {
        results = this.externalServicesAccess.getServer().query(params).getResults();
        return results.getNumFound();
    } catch (Exception ex) {
        this.logger.error("Failed to count ontology terms: {}", ex.getMessage(), ex);
        return 0;
    }
}
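
Note: because no rows are requested, the returned SolrDocumentList is empty, yet getNumFound() still carries the full match count, which the method passes through as a long.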