List of usage examples for the org.apache.solr.common.SolrDocumentList constructor
SolrDocumentList
From source file:at.pagu.soldockr.core.SolrTemplateTest.java
License:Apache License
@Test public void testCount() throws SolrServerException { ArgumentCaptor<SolrQuery> captor = ArgumentCaptor.forClass(SolrQuery.class); QueryResponse responseMock = Mockito.mock(QueryResponse.class); SolrDocumentList resultList = new SolrDocumentList(); resultList.setNumFound(10);//from w ww .j a v a2s . c om Mockito.when(responseMock.getResults()).thenReturn(resultList); Mockito.when(solrServerMock.query(Mockito.any(SolrQuery.class))).thenReturn(responseMock); long result = solrTemplate.executeCount(new SimpleQuery(new Criteria("field_1").is("value1"))); Assert.assertEquals(resultList.getNumFound(), result); Mockito.verify(solrServerMock, Mockito.times(1)).query(captor.capture()); Assert.assertEquals(Integer.valueOf(0), captor.getValue().getStart()); Assert.assertEquals(Integer.valueOf(0), captor.getValue().getRows()); }
From source file:at.pagu.soldockr.core.SolrTemplateTest.java
License:Apache License
/**
 * Even when the query carries an explicit page request, executeCount must
 * override it with a count-only query (start = 0, rows = 0).
 */
@Test
public void testCountWhenPagingSet() throws SolrServerException {
    // Stub the server so any query reports 10 total hits.
    QueryResponse mockedResponse = Mockito.mock(QueryResponse.class);
    SolrDocumentList documents = new SolrDocumentList();
    documents.setNumFound(10);
    Mockito.when(mockedResponse.getResults()).thenReturn(documents);
    Mockito.when(solrServerMock.query(Mockito.any(SolrQuery.class))).thenReturn(mockedResponse);

    // A page request on the source query must not leak into the count query.
    Query query = new SimpleQuery(new Criteria("field_1").is("value1"));
    query.setPageRequest(new PageRequest(0, 5));

    long count = solrTemplate.executeCount(query);

    Assert.assertEquals(documents.getNumFound(), count);

    ArgumentCaptor<SolrQuery> queryCaptor = ArgumentCaptor.forClass(SolrQuery.class);
    Mockito.verify(solrServerMock, Mockito.times(1)).query(queryCaptor.capture());
    Assert.assertEquals(Integer.valueOf(0), queryCaptor.getValue().getStart());
    Assert.assertEquals(Integer.valueOf(0), queryCaptor.getValue().getRows());
}
From source file:com.basho.yokozuna.handler.EntropyData.java
License:Open Source License
@Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception, InstantiationException, IllegalAccessException { String contParam = req.getParams().get("continue"); BytesRef cont = contParam != null ? decodeCont(contParam) : DEFAULT_CONT; // TODO: Make before required in handler config String before = req.getParams().get("before"); if (before == null) { throw new Exception("Parameter 'before' is required"); }/* w w w. j a va2 s. c om*/ int n = req.getParams().getInt("n", DEFAULT_N); SolrDocumentList docs = new SolrDocumentList(); // Add docs here and modify object inline in code rsp.add("response", docs); try { SolrIndexSearcher searcher = req.getSearcher(); AtomicReader rdr = searcher.getAtomicReader(); BytesRef tmp = null; Terms terms = rdr.terms(ENTROPY_DATA_FIELD); TermsEnum te = terms.iterator(null); if (isContinue(cont)) { log.debug("continue from " + cont); TermsEnum.SeekStatus status = te.seekCeil(cont, true); if (status == TermsEnum.SeekStatus.END) { rsp.add("more", false); return; } else if (status == TermsEnum.SeekStatus.FOUND) { // If this term has already been seen then skip it. tmp = te.next(); if (endOfItr(tmp)) { rsp.add("more", false); return; } } else if (status == TermsEnum.SeekStatus.NOT_FOUND) { tmp = te.next(); } } else { tmp = te.next(); } String text = null; String[] vals = null; String ts = null; String docId = null; String vectorClock = null; int count = 0; BytesRef current = null; while (!endOfItr(tmp) && count < n) { current = BytesRef.deepCopyOf(tmp); text = tmp.utf8ToString(); log.debug("text: " + text); vals = text.split(" "); ts = vals[0]; // TODO: what if null? 
if (!(ts.compareTo(before) < 0)) { rsp.add("more", false); docs.setNumFound(count); return; } docId = vals[1]; vectorClock = vals[2]; SolrDocument tmpDoc = new SolrDocument(); tmpDoc.addField("doc_id", docId); tmpDoc.addField("base64_vclock", Base64.encodeBase64String(sha(vectorClock))); docs.add(tmpDoc); count++; tmp = te.next(); } if (count < n) { rsp.add("more", false); } else { rsp.add("more", true); String newCont = Base64.encodeBase64URLSafeString(current.bytes); // The continue context for next req to start where // this one finished. rsp.add("continuation", newCont); } docs.setNumFound(count); } catch (Exception e) { e.printStackTrace(); } }
From source file:com.digitalpebble.storm.crawler.solr.persistence.SolrSpout.java
License:Apache License
private void populateBuffer() { // TODO Sames as the ElasticSearchSpout? // TODO Use the cursor feature? // https://cwiki.apache.org/confluence/display/solr/Pagination+of+Results SolrQuery query = new SolrQuery(); query.setQuery("*:*").addFilterQuery("nextFetchDate:[* TO NOW]").setStart(lastStartOffset) .setRows(this.bufferSize); if (StringUtils.isNotBlank(diversityField)) { query.addFilterQuery(String.format("{!collapse field=%s}", diversityField)); query.set("expand", "true").set("expand.rows", diversityBucketSize); }//from w w w .j a va2 s.c om try { QueryResponse response = connection.getClient().query(query); SolrDocumentList docs = new SolrDocumentList(); if (StringUtils.isNotBlank(diversityField)) { // Add the main documents collapsed by the CollapsingQParser // plugin docs.addAll(response.getResults()); Map<String, SolrDocumentList> expandedResults = response.getExpandedResults(); for (String key : expandedResults.keySet()) { docs.addAll(expandedResults.get(key)); } } else { docs = response.getResults(); } int numhits = response.getResults().size(); // no more results? if (numhits == 0) lastStartOffset = 0; else lastStartOffset += numhits; for (SolrDocument doc : docs) { String url = (String) doc.get("url"); // is already being processed - skip it! if (beingProcessed.containsKey(url)) continue; Metadata metadata = new Metadata(); String mdAsString = (String) doc.get("metadata"); // get the serialized metadata information if (mdAsString != null) { // parse the string and generate the MD accordingly // url.path: http://www.lemonde.fr/ // depth: 1 String[] kvs = mdAsString.split("\n"); for (String pair : kvs) { String[] kv = pair.split(": "); if (kv.length != 2) { LOG.info("Invalid key value pair {}", pair); continue; } metadata.addValue(kv[0], kv[1]); } } buffer.add(new Values(url, metadata)); } } catch (Exception e) { LOG.error("Can't query Solr: {}", e); } }
From source file:com.digitalpebble.stormcrawler.solr.persistence.SolrSpout.java
License:Apache License
private void populateBuffer() { // TODO Sames as the ElasticSearchSpout? // TODO Use the cursor feature? // https://cwiki.apache.org/confluence/display/solr/Pagination+of+Results SolrQuery query = new SolrQuery(); query.setQuery("*:*").addFilterQuery("nextFetchDate:[* TO NOW]").setStart(lastStartOffset) .setRows(this.bufferSize); if (StringUtils.isNotBlank(diversityField)) { query.addFilterQuery(String.format("{!collapse field=%s}", diversityField)); query.set("expand", "true").set("expand.rows", diversityBucketSize); }//from w ww. ja v a 2s . com try { QueryResponse response = connection.getClient().query(query); SolrDocumentList docs = new SolrDocumentList(); if (StringUtils.isNotBlank(diversityField)) { // Add the main documents collapsed by the CollapsingQParser // plugin docs.addAll(response.getResults()); Map<String, SolrDocumentList> expandedResults = response.getExpandedResults(); for (String key : expandedResults.keySet()) { docs.addAll(expandedResults.get(key)); } } else { docs = response.getResults(); } int numhits = response.getResults().size(); // no more results? if (numhits == 0) lastStartOffset = 0; else lastStartOffset += numhits; String prefix = mdPrefix.concat("."); for (SolrDocument doc : docs) { String url = (String) doc.get("url"); // is already being processed - skip it! if (beingProcessed.containsKey(url)) continue; Metadata metadata = new Metadata(); Iterator<String> keyIterators = doc.getFieldNames().iterator(); while (keyIterators.hasNext()) { String key = keyIterators.next(); if (key.startsWith(prefix)) { Collection<Object> values = doc.getFieldValues(key); key = StringUtils.replace(key, prefix, "", 1); Iterator<Object> valueIterator = values.iterator(); while (valueIterator.hasNext()) { String value = (String) valueIterator.next(); metadata.addValue(key, value); } } } buffer.add(new Values(url, metadata)); } } catch (Exception e) { LOG.error("Can't query Solr: {}", e); } }
From source file:com.doculibre.constellio.opensearch.OpenSearchSolrServer.java
License:Open Source License
/**
 * Converts an OpenSearch RSS response into a SolrDocumentList: the channel
 * totals become numFound/start and each &lt;item&gt; becomes one document
 * with title/description/link fields (re-encoded UTF-8 -> ISO-8859-1).
 */
@SuppressWarnings("unchecked")
private static SolrDocumentList parse(Element rootElement) throws IOException {
    SolrDocumentList results = new SolrDocumentList();

    // The OpenSearch totals live on the <channel> element.
    Element channel = rootElement.element("channel");
    String totalResults = channel.elementText(new QName("totalResults", NS_OPENSEARCH));
    String startIndex = channel.elementText(new QName("startIndex", NS_OPENSEARCH));
    results.setNumFound(Long.parseLong(totalResults));
    results.setStart(Long.parseLong(startIndex));

    for (Iterator<Element> it = channel.elementIterator("item"); it.hasNext();) {
        Element item = it.next();

        String title = CharSetUtils.convert(item.elementText("title"), CharSetUtils.UTF_8,
                CharSetUtils.ISO_8859_1);
        String description = CharSetUtils.convert(item.elementText("description"), CharSetUtils.UTF_8,
                CharSetUtils.ISO_8859_1);
        String link = CharSetUtils.convert(item.elementText("link"), CharSetUtils.UTF_8,
                CharSetUtils.ISO_8859_1);

        SolrDocument document = new SolrDocument();
        document.addField("title", title);
        document.addField("description", description);
        document.addField("link", link);
        results.add(document);
    }
    return results;
}
From source file:com.doculibre.constellio.utils.NamedListUtilsTest.java
License:Open Source License
/** * The conversion of a SolrDocument and his NamedList form should be equal. * Servlets use the NamedList form/*from w w w. j a v a2s . co m*/ * * @throws IOException */ @Test public void testSolrDocumentListVSNamedList() throws IOException { NamedList<Object> l1 = new NamedList<Object>(); SolrDocumentList l = new SolrDocumentList(); SolrDocument d = new SolrDocument(); d.setField("a", 123); d.setField("myArray", Arrays.asList(new String[] { "A", "B", "C" })); d.setField("title", ""); l.add(d); l.setStart(22); l.setNumFound(1); l1.add(ServletsConstants.RESPONSE, l); NamedList<Object> l2 = new NamedList<Object>(); NamedList<Object> nl = new NamedList<Object>(); NamedList<Object> attr = new NamedList<Object>(); attr.add("numFound", 1); attr.add("start", 22); nl.add("attr", attr); l2.add(ServletsConstants.RESPONSE, nl); NamedList<Object> nlDoc = new NamedList<Object>(); nlDoc.add("a", 123); nlDoc.add("myArray", Arrays.asList(new String[] { "A", "B", "C" })); nlDoc.add("title", ""); nl.add("doc", nlDoc); File tempFile1 = File.createTempFile("temp", ".xml"); NamedListUtils.convertResponseNamedListToXML(l1, new FileOutputStream(tempFile1)); String xml1 = readFileAsString(tempFile1); File tempFile2 = File.createTempFile("temp", ".xml"); NamedListUtils.convertResponseNamedListToXML(l2, new FileOutputStream(tempFile2)); String xml2 = readFileAsString(tempFile2); TestCase.assertEquals(xml1, xml2); }
From source file:com.francelabs.datafari.statistics.StatsProcessor.java
License:Apache License
/**
 * Rewrites a statistics QueryResponse: aggregates the per-query facet and
 * field-stats data into one SolrDocument per distinct query string, then
 * replaces the response body with that document list.
 *
 * NOTE(review): the second loop indexes noHitsStats/clickStats/numClicksStats/
 * numFoundStats with the same i as QTimeStats — this assumes all "q" facets
 * come back in the same order and length; verify against the Solr stats
 * component configuration.
 */
public static void processStatsResponse(final QueryResponse queryResponse) throws Exception {
    final NamedList responseHeader = queryResponse.getResponseHeader();
    final FacetField QFacet = queryResponse.getFacetField("q");
    // Total number of recorded queries; distinct query strings become docs.
    final Long numTot = queryResponse.getResults().getNumFound();
    final SolrDocumentList solrDocumentList = new SolrDocumentList();
    solrDocumentList.setNumFound(QFacet.getValueCount());
    solrDocumentList.setStart(0);
    if (numTot != 0) {
        // Per-field statistics, each faceted by the query string "q".
        final Map<String, FieldStatsInfo> stats = queryResponse.getFieldStatsInfo();
        final List<FieldStatsInfo> noHitsStats = stats.get("noHits").getFacets().get("q");
        final List<FieldStatsInfo> QTimeStats = stats.get("QTime").getFacets().get("q");
        List<FieldStatsInfo> positionClickTotStats = null;
        try {
            positionClickTotStats = stats.get("positionClickTot").getFacets().get("q");
        } catch (final Exception e) {
            // best-effort: presumably the positionClickTot stats field may be
            // absent; the null list is only dereferenced when click > 0 —
            // TODO confirm that click > 0 implies the field exists
        }
        final List<FieldStatsInfo> clickStats = stats.get("click").getFacets().get("q");
        final List<FieldStatsInfo> numClicksStats = stats.get("numClicks").getFacets().get("q");
        final List<FieldStatsInfo> numFoundStats = stats.get("numFound").getFacets().get("q");
        final List<Count> QFacetValues = QFacet.getValues();
        // Index documents by query string so the stats loop below can find them.
        final Map<String, SolrDocument> mapDocuments = new HashMap<String, SolrDocument>();
        // Pass 1: one document per distinct query with count and frequency (%).
        for (int i = 0; i < QFacetValues.size(); i++) {
            final SolrDocument doc = new SolrDocument();
            final String query = QFacetValues.get(i).getName();
            final double count = QFacetValues.get(i).getCount();
            final double frequency = StatsUtils.round(count * 100 / numTot, 2, BigDecimal.ROUND_HALF_UP);
            doc.addField("query", query);
            doc.addField("count", count);
            doc.addField("frequency", frequency);
            mapDocuments.put(query, doc);
            solrDocumentList.add(doc);
        }
        // Pass 2: enrich each document with averaged/aggregated stats.
        for (int i = 0; i < QTimeStats.size(); i++) {
            final String query = QTimeStats.get(i).getName();
            final SolrDocument doc = mapDocuments.get(query);
            final int AVGHits = new Double((Double) numFoundStats.get(i).getMean()).intValue();
            final Double noHits = new Double((Double) noHitsStats.get(i).getSum());
            final int AVGQTime = new Double((Double) QTimeStats.get(i).getMean()).intValue();
            final int MAXQTime = new Double((Double) QTimeStats.get(i).getMax()).intValue();
            final double click = new Double((Double) clickStats.get(i).getSum());
            final double clickRatio = StatsUtils.round(click * 100 / (Double) doc.getFirstValue("count"), 2,
                    BigDecimal.ROUND_HALF_UP);
            if (click > 0) {
                // Mean click position = total of click positions / number of clicks.
                final double AVGClickPosition = new Double(
                        (Double) positionClickTotStats.get(i).getSum() / (Double) numClicksStats.get(i).getSum())
                                .intValue();
                doc.addField("AVGClickPosition", AVGClickPosition);
            } else {
                // No clicks recorded for this query: placeholder value.
                doc.addField("AVGClickPosition", "-");
            }
            doc.addField("withClickRatio", clickRatio);
            doc.addField("AVGHits", AVGHits);
            doc.addField("numNoHits", noHits);
            doc.addField("withClick", click);
            doc.addField("AVGQTime", AVGQTime);
            doc.addField("MaxQTime", MAXQTime);
        }
    }
    // Replace the original response body with the aggregated document list.
    final NamedList<Object> response = new SimpleOrderedMap<Object>();
    response.add("responseHeader", responseHeader);
    response.add("response", solrDocumentList);
    queryResponse.setResponse(response);
}
From source file:com.ibm.watson.developer_cloud.professor_languo.pipeline.primary_search.RetrieveAndRankSearcherTest.java
License:Open Source License
/**
 * Installs a single-document search response: the first serialized
 * StackExchangeThread from the unique thread directory, carrying its
 * serialized bytes plus extreme score / feature-vector values.
 */
private void set_non_empty_repsonse() throws IngestionException {
    // Deserialize one thread from the first file in the directory.
    String threadDirPath = get_unique_thread_path();
    File serializedFile = new File(threadDirPath).listFiles()[0];
    StackExchangeThread thread = StackExchangeThreadSerializer
            .deserializeThreadFromBinFile(serializedFile.getPath());

    // Map it to a Lucene document and pull out the serialized payload.
    final Document luceneDoc = new LuceneDocumentMapper().createDocument(thread);
    BytesRef serializedBytes = luceneDoc.getBinaryValue(IndexDocumentFieldName.SERIALIZED_THREAD.toString());

    SolrDocument doc = new SolrDocument();
    doc.addField(IndexDocumentFieldName.SERIALIZED_THREAD.toString(), serializedBytes.bytes);
    doc.addField("score", Integer.MAX_VALUE);
    doc.addField("featureVector", Double.MAX_VALUE);

    SolrDocumentList documents = new SolrDocumentList();
    documents.add(doc);
    set_response(documents);
}
From source file:com.ibm.watson.developer_cloud.professor_languo.pipeline.primary_search.RetrieveAndRankSearcherTest.java
License:Open Source License
/** Installs an empty search response (a document list with no hits). */
private void set_empty_repsonse() {
    SolrDocumentList emptyResults = new SolrDocumentList();
    set_response(emptyResults);
}