Example usage for org.apache.lucene.queryparser.classic MultiFieldQueryParser MultiFieldQueryParser

Introduction

This page collects example usages of the MultiFieldQueryParser constructor from org.apache.lucene.queryparser.classic.

Prototype

public MultiFieldQueryParser(String[] fields, Analyzer analyzer, Map<String, Float> boosts) 

Document

Creates a MultiFieldQueryParser that searches the given fields with the supplied analyzer, applying the per-field boosts from the boosts map.
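
A minimal sketch of this constructor, assuming a recent Lucene release (5.x or later) where the Version argument seen in some of the older examples below is no longer required. The field names "title" and "body" and the boost values are hypothetical; they only illustrate how matches in one field can be weighted more heavily than matches in another.

import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.search.Query;

public class BoostedMultiFieldExample {
    public static void main(String[] args) throws ParseException {
        String[] fields = { "title", "body" }; // hypothetical field names
        Analyzer analyzer = new StandardAnalyzer();

        // Per-field boosts: a match in "title" counts twice as much as one in "body".
        Map<String, Float> boosts = new HashMap<>();
        boosts.put("title", 2.0f);
        boosts.put("body", 1.0f);

        MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, analyzer, boosts);
        Query query = parser.parse("lucene search");

        // Each query term is expanded across both fields, with the boosts applied.
        System.out.println(query);
    }
}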

Usage

From source file:org.ujmp.lucene.LuceneMap.java

License:Open Source License

public synchronized ObjectMatrix2D search(String searchString) {
    try {
        MultiFieldQueryParser p = new MultiFieldQueryParser(Version.LUCENE_47, new String[] { VALUESTRING },
                getAnalyzer());
        Query query = p.parse(searchString);
        TopDocs docs = getIndexSearcher().search(query, 100);
        // iterate over the returned hits (at most 100), not totalHits, which can be larger
        ScoreDoc[] hits = docs.scoreDocs;
        ObjectMatrix2D result = ObjectMatrix2D.Factory.zeros(hits.length, 3);
        for (int row = 0; row < hits.length; row++) {
            ScoreDoc match = hits[row];
            Document doc = getIndexSearcher().doc(match.doc);
            result.setAsFloat(match.score, row, 0);
            result.setAsObject(SerializationUtil.deserialize(doc.getBinaryValue(KEYDATA).bytes), row, 1);
            result.setAsObject(SerializationUtil.deserialize(doc.getBinaryValue(VALUEDATA).bytes), row, 2);
        }
        return result;
    } catch (Exception e) {
        throw new RuntimeException("could not search documents: " + searchString, e);
    }
}

From source file:part2.SearchFiles.java

License:Apache License

/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details. :-)";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    String index = "index";
    String field = "contents";
    String[] fields = { "contents", "author" };
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    String queryString = null;
    int hitsPerPage = 10;

    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-field".equals(args[i])) {
            field = args[i + 1];
            i++;
        } else if ("-queries".equals(args[i])) {
            queries = args[i + 1];
            i++;
        } else if ("-query".equals(args[i])) {
            queryString = args[i + 1];
            i++;
        } else if ("-repeat".equals(args[i])) {
            repeat = Integer.parseInt(args[i + 1]);
            i++;
        } else if ("-raw".equals(args[i])) {
            raw = true;
        } else if ("-paging".equals(args[i])) {
            hitsPerPage = Integer.parseInt(args[i + 1]);
            if (hitsPerPage <= 0) {
                System.err.println("There must be at least 1 hit per page.");
                System.exit(1);
            }
            i++;
        }
    }

    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    // :Post-Release-Update-Version.LUCENE_XY:
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_4_10_0);

    BufferedReader in = null;
    if (queries != null) {
        in = new BufferedReader(new InputStreamReader(new FileInputStream(queries), StandardCharsets.UTF_8));
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8));
    }
    // :Post-Release-Update-Version.LUCENE_XY:
    MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_4_10_0, fields, analyzer);
    //QueryParser parser = new QueryParser(Version.LUCENE_4_10_0, field, analyzer);
    while (true) {
        if (queries == null && queryString == null) { // prompt the user
            System.out.println("Enter query: ");
        }

        String line = queryString != null ? queryString : in.readLine();

        if (line == null) {
            break;
        }

        line = line.trim();
        if (line.length() == 0) {
            break;
        }

        Query query = parser.parse(line);
        System.out.println("Searching for: " + query.toString(field));

        if (repeat > 0) { // repeat & time as benchmark
            Date start = new Date();
            for (int i = 0; i < repeat; i++) {
                searcher.search(query, null, 100);
            }
            Date end = new Date();
            System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        }

        doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);

        if (queryString != null) {
            break;
        }
    }
    reader.close();
}

From source file:ri.trabri.LuceneAbordagem2.java

@Override
/*
Multi-field search that limits the results based on their score
difference from the first hit (the highest score).
*/
public ArrayList<String> search(String querystr) throws IOException, ParseException {
    QueryParser queryParser;
    queryParser = new MultiFieldQueryParser(Version.LUCENE_40, typesGood, analyzer);

    Query q = queryParser.parse(querystr);

    int hitsPerPage = 1000;
    IndexReader reader = DirectoryReader.open(index);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs docs = searcher.search(q, hitsPerPage);
    ScoreDoc[] hits = docs.scoreDocs;
    ArrayList<String> result = new ArrayList<>();
    if (hits.length == 0) { // no matches: avoid indexing into an empty array below
        reader.close();
        return result;
    }
    float bestScore = hits[0].score;
    // 4. display results
    int count = 0;
    for (int i = 0; i < hits.length; ++i) {
        int docId = hits[i].doc;
        if (bestScore - hits[i].score < bestScore * 0.9) { // keep hits scoring at least 10% of the best score
            count++;
            Document d = searcher.doc(docId);
            result.add(d.get("RN"));
            System.out.println((i + 1) + ". " + d.get("RN") + " |score :" + hits[i].score); //+ "\t" + d.get("data"));
        }
    }
    System.out.println("Found " + count + " hits.");

    reader.close();
    for (int i = 0; i < result.size(); ++i) {
        // strip leading zeros and remove any spaces
        result.set(i, result.get(i).replaceFirst("^0+(?!$)", "").replaceAll(" ", ""));
    }
    return result;
}

From source file:spimedb.SpimeDB.java

License:Apache License

private SpimeDB(File file, Directory dir) {

    this.file = file;
    this.dir = dir;
    this.analyzer = new StandardAnalyzer();

    this.facetsConfig.setHierarchical(NObject.ID, true);
    this.facetsConfig.setMultiValued(NObject.ID, false);

    this.facetsConfig.setHierarchical(NObject.TAG, false);
    this.facetsConfig.setMultiValued(NObject.TAG, true);

    final String[] defaultFindFields = new String[] { NObject.NAME, NObject.DESC, NObject.TAG, NObject.ID };

    this.defaultFindQueryParser = ThreadLocal.withInitial(() -> new MultiFieldQueryParser(defaultFindFields,
            analyzer,
            Maps.mutable.with(NObject.NAME, 1f, NObject.ID, 1f, NObject.DESC, 0.25f, NObject.TAG, 0.5f)));

    writerConf = new IndexWriterConfig(analyzer);
    writerConf.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    writerConf.setCommitOnClose(true);
    try {
        writer = new IndexWriter(dir, writerConf);
        readerMgr = new ReaderManager(writer, true, true);
        searcherMgr = new SearcherManager(writer, true, true, new SearcherFactory());
    } catch (IOException e) {
        e.printStackTrace();
    }

}

From source file:trabajo.SearchFiles.java

License:Apache License

/** Simple command-line based search demo. */
@SuppressWarnings("deprecation")
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.trabajo.SearchFiles -index <indexPath> -infoNeeds <infoNeedsFile> -output <resultsFile>\n\n";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    String index = "index";
    String infoNeeds = null;
    String output = null;
    int hitsPerPage = 50;

    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-infoNeeds".equals(args[i])) {
            infoNeeds = args[i + 1];
            i++;
        } else if ("-output".equals(args[i])) {
            output = args[i + 1];
            i++;
        }

    }
    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new SpanishAnalyzer(Version.LUCENE_44);

    BufferedReader in = null;
    FileWriter bw = new FileWriter(output);

    if (infoNeeds != null) {
        in = new BufferedReader(new InputStreamReader(new FileInputStream(infoNeeds), "UTF-8"));
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    }

    QueryParser parser = new MultiFieldQueryParser(Version.LUCENE_44, new String[] { "description", "title" },
            analyzer);

    QueryParser lineParser = new QueryParser(Version.LUCENE_44, "title", analyzer);
    ArrayList<String> aux = getNeeds(infoNeeds);
    ArrayList<String> ids = getIds(infoNeeds);
    int numNeed = 0;
    for (String need : aux) {
        String id = ids.get(numNeed);
        numNeed++;
        while (true) {
            if (infoNeeds == null && output == null) { // prompt the user
                System.out.println("Enter query: ");
            }

            String line = need;
            if (line == null) {
                break;
            }

            line = line.trim();
            if (line.length() == 0) {
                break;
            }

            // String lineParsed = lineParser.parse(line).toString("title");

            ArrayList<String> author = new ArrayList<String>();
            ArrayList<String> tipo = new ArrayList<String>();
            ArrayList<String> date = new ArrayList<String>();
            ArrayList<String> interval = new ArrayList<String>();

            Dictionare d = new Dictionare();
            if (line.endsWith(".")) {
                line = line.substring(0, line.length() - 1);
            }
            // Split the string using the regular expression so that the
            // different kinds of searches built below can be handled efficiently.
            String[] palabras = line.split("[^a-zA-Z0-9]+");
            System.out.println(line);
            for (int j = 0; j < palabras.length; j++) {
                // CREATOR
                if (d.map.get("author").containsKey(palabras[j])) {
                    author.add(palabras[j]);
                }
                // IDENTIFIER
                if (d.map.get("identifier").containsKey(palabras[j])) {
                    tipo.add(palabras[j]);
                }
                // DATE
                if (isValidDate(palabras[j])) {
                    // Look at the next few words for a second date, which
                    // would indicate a date interval.
                    int nIntervalo = j + 4;
                    if (nIntervalo > palabras.length) {
                        nIntervalo = palabras.length;
                    }
                    for (int h = j + 1; h < nIntervalo; h++) {
                        if (isValidDate(palabras[h])) {
                            interval.add(palabras[j]);
                            interval.add(palabras[h]);
                        }
                    }
                    date.add(palabras[j]);

                }

            }

            /*
             * Build boolean queries if we detected any author or work type,
             * adding each clause with occurrence level "SHOULD".
             */
            BooleanQuery bool = new BooleanQuery();
            if (!author.isEmpty()) {
                Term t = new Term("creator", arrayToQuery(author));
                TermQuery termQuery = new TermQuery(t);
                bool.add(termQuery, BooleanClause.Occur.SHOULD);
            }
            if (!tipo.isEmpty()) {
                Term t = new Term("identifier", arrayToQuery(tipo));
                TermQuery termQuery = new TermQuery(t);
                bool.add(termQuery, BooleanClause.Occur.SHOULD);
            }

            if (!date.isEmpty()) {
                Term t = new Term("date", arrayToQuery(date));
                TermQuery termQuery = new TermQuery(t);
                bool.add(termQuery, BooleanClause.Occur.SHOULD);
            }

            if (!interval.isEmpty()) {
                for (int i = 0; i < interval.size(); i += 2) {
                    NumericRangeQuery<Integer> intervalQuery = NumericRangeQuery.newIntRange("date",
                            Integer.parseInt(interval.get(i)), Integer.parseInt(interval.get(i + 1)), true,
                            true);
                    bool.add(intervalQuery, BooleanClause.Occur.SHOULD);
                }

            }
            Query query = parser.parse(line);

            /*
             * Query for the whole phrase over the "title" and "description"
             * fields, also with occurrence level "SHOULD".
             */
            bool.add(query, BooleanClause.Occur.SHOULD);
            doPagingSearch(in, searcher, bool, hitsPerPage, infoNeeds == null && output == null, bw, id);

            if (output != null) {
                break;
            }
        }
    }
    reader.close();
    bw.close();
}