Example usage for java.util Locale getAvailableLocales

List of usage examples for java.util Locale getAvailableLocales

Introduction

On this page you can find example usage of java.util Locale getAvailableLocales.

Prototype

public static Locale[] getAvailableLocales() 

Document

Returns an array of all installed locales.
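
Before the project examples below, here is a minimal, self-contained sketch of the call itself (the class name and output formatting are illustrative, not taken from any of the projects): it fetches the installed locales and prints them sorted by their human-readable display names.

import java.util.Arrays;
import java.util.Comparator;
import java.util.Locale;

public class AvailableLocalesDemo {
    public static void main(String[] args) {
        // All locales installed in the running JVM
        Locale[] locales = Locale.getAvailableLocales();

        // Sort by human-readable display name before printing
        Arrays.sort(locales, Comparator.comparing(Locale::getDisplayName));

        for (Locale locale : locales) {
            System.out.printf("%-15s %s%n", locale, locale.getDisplayName());
        }
    }
}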

Usage

From source file:org.tolven.web.RegisterAction.java

public List<SelectItem> getLocalesList() {
    if (localesList == null) {
        Locale list[] = Locale.getAvailableLocales();
        localesList = new ArrayList<SelectItem>();

        //Add "default" as en_US at the top of the list
        //The leading space is used so that it doesn't get messed up in the sorting.
        localesList.add(new SelectItem(null, " Use Default"));
        for (Locale userLocale : list) {
            String localeDesc;
            String localeCode = userLocale.toString();
            if (userLocale.getCountry().isEmpty()) {
                localeDesc = userLocale.getDisplayLanguage() + " (" + userLocale.getLanguage() + ")";
            } else {
                localeDesc = userLocale.getDisplayLanguage() + " - " + userLocale.getDisplayCountry() + " ("
                        + userLocale.getLanguage() + "_" + userLocale.getCountry() + ")";
            }

            //This list is unsorted
            localesList.add(new SelectItem(localeCode, localeDesc));
        }

        //Create a comparator for locale display language-country pairs
        class LabelSort implements Comparator<Object> {

            public int compare(Object o1, Object o2) {
                SelectItem localeLabel1 = (SelectItem) o1;
                SelectItem localeLabel2 = (SelectItem) o2;
                if (localeLabel1.getLabel().equals(localeLabel2.getLabel()))
                    return 0;
                if (localeLabel1.getLabel().compareTo(localeLabel2.getLabel()) < 0)
                    return -1;
                return 1;
            }
        }

        //Sorted list
        java.util.Collections.sort(localesList, new LabelSort());
    }
    return localesList;
}

From source file:edu.ku.brc.specify.tasks.SystemSetupTask.java

protected void doSchemaConfig(final Byte schemaType, final DBTableIdMgr tableMgr) {
    SpecifyUser spUser = AppContextMgr.getInstance().getClassObject(SpecifyUser.class);
    boolean ok = ((SpecifyAppContextMgr) AppContextMgr.getInstance()).checkToOverrideLogins(spUser.getName());
    if (!ok) {
        TaskMgr.getTask("Startup").requestContext();
        return;
    }

    UsageTracker.incrUsageCount("SS.SCHEMACFG");

    getStatusBar().setIndeterminate(SYSTEMSETUPTASK, true);
    getStatusBar().setText(getResourceString(getI18NKey("LOADING_LOCALES"))); //$NON-NLS-1$
    getStatusBar().repaint();

    SwingWorker workerThread = new SwingWorker() {
        @Override
        public Object construct() {
            Locale.getAvailableLocales(); // load all the locales
            return null;
        }

        @Override
        public void finished() {
            getStatusBar().setText(""); //$NON-NLS-1$
            getStatusBar().setProgressDone(SYSTEMSETUPTASK);

            JComponent videoBtn = CollapsableSepExtraCompFactory.getInstance().getComponent("Schema", "Config");
            SchemaToolsDlg dlg = new SchemaToolsDlg((Frame) getTopWindow(), schemaType, tableMgr);
            dlg.setExtraBtn(videoBtn);
            dlg.setVisible(true);
            if (!dlg.isCancelled()) {
                TaskMgr.getTask("Startup").requestContext();
            }
        }
    };

    // start the background task
    workerThread.start();
}

From source file:im.vector.VectorApp.java

/**
 * Provides the supported application locales list
 *
 * @param context the context
 * @return the supported application locales list
 */
public static List<Locale> getApplicationLocales(Context context) {
    if (mApplicationLocales.isEmpty()) {

        Set<Pair<String, String>> knownLocalesSet = new HashSet<>();

        try {
            final Locale[] availableLocales = Locale.getAvailableLocales();

            for (Locale locale : availableLocales) {
                knownLocalesSet.add(new Pair<>(getString(context, locale, R.string.resouces_language),
                        getString(context, locale, R.string.resouces_country)));
            }
        } catch (Exception e) {
            Log.e(LOG_TAG, "## getApplicationLocales() : failed " + e.getMessage());
            knownLocalesSet.add(new Pair<>(context.getString(R.string.resouces_language),
                    context.getString(R.string.resouces_country)));
        }

        for (Pair<String, String> knownLocale : knownLocalesSet) {
            mApplicationLocales.add(new Locale(knownLocale.first, knownLocale.second));
        }
    }

    List<Locale> sortedLocalesList = new ArrayList<>(mApplicationLocales);

    // sort by human display names
    Collections.sort(sortedLocalesList, new Comparator<Locale>() {
        @Override
        public int compare(Locale lhs, Locale rhs) {
            return localeToLocalisedString(lhs).compareTo(localeToLocalisedString(rhs));
        }
    });

    return sortedLocalesList;
}

From source file:org.alfresco.repo.search.impl.lucene.AbstractLuceneQueryParser.java

@SuppressWarnings("unchecked")
protected Query getFieldQueryImpl(String field, String queryText, AnalysisMode analysisMode,
        LuceneFunction luceneFunction) throws ParseException {
    // Use the analyzer to get all the tokens, and then build a TermQuery,
    // PhraseQuery, or nothing based on the term count

    // TODO: Untokenised columns with functions require special handling

    if (luceneFunction != LuceneFunction.FIELD) {
        throw new UnsupportedOperationException(
                "Field queries are not supported on lucene functions (UPPER, LOWER, etc)");
    }

    // if the incoming string already has a language identifier we strip it off and add it back on again

    String localePrefix = "";

    String toTokenise = queryText;

    if (queryText.startsWith("{")) {
        int position = queryText.indexOf("}");
        String language = queryText.substring(0, position + 1);
        Locale locale = new Locale(queryText.substring(1, position));
        String token = queryText.substring(position + 1);
        boolean found = false;
        if (!locale.toString().isEmpty()) {
            for (Locale current : Locale.getAvailableLocales()) {
                if (current.toString().equalsIgnoreCase(locale.toString())) {
                    found = true;
                    break;
                }
            }
        }
        if (found) {
            localePrefix = language;
            toTokenise = token;
        } else {
            toTokenise = token;
        }
    }

    String testText = toTokenise;
    boolean requiresMLTokenDuplication = false;
    String localeString = null;
    if (field.startsWith(PROPERTY_FIELD_PREFIX) && (localePrefix.length() == 0)) {
        if ((queryText.length() > 0) && (queryText.charAt(0) == '\u0000')) {
            int position = queryText.indexOf("\u0000", 1);
            testText = queryText.substring(position + 1);
            requiresMLTokenDuplication = true;
            localeString = queryText.substring(1, position);
        }
    }

    // find the positions of any escaped * and ? and ignore them

    Set<Integer> wildcardPoistions = getWildcardPositions(testText);

    TokenStream source;
    if ((localePrefix.length() == 0) || (wildcardPoistions.size() > 0)
            || (analysisMode == AnalysisMode.IDENTIFIER)) {
        source = getAnalyzer().tokenStream(field, new StringReader(toTokenise), analysisMode);
    } else {
        source = getAnalyzer().tokenStream(field, new StringReader(
                "\u0000" + localePrefix.substring(1, localePrefix.length() - 1) + "\u0000" + toTokenise),
                analysisMode);
        localePrefix = "";
    }

    ArrayList<org.apache.lucene.analysis.Token> list = new ArrayList<org.apache.lucene.analysis.Token>();
    org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();
    org.apache.lucene.analysis.Token nextToken;
    int positionCount = 0;
    boolean severalTokensAtSamePosition = false;

    while (true) {
        try {
            nextToken = source.next(reusableToken);
        } catch (IOException e) {
            nextToken = null;
        }
        if (nextToken == null)
            break;
        list.add((org.apache.lucene.analysis.Token) nextToken.clone());
        if (nextToken.getPositionIncrement() != 0)
            positionCount += nextToken.getPositionIncrement();
        else
            severalTokensAtSamePosition = true;
    }
    try {
        source.close();
    } catch (IOException e) {
        // ignore
    }

    // add any alpha numeric wildcards that have been missed
    // Fixes most stop word and wild card issues

    for (int index = 0; index < testText.length(); index++) {
        char current = testText.charAt(index);
        if (((current == '*') || (current == '?')) && wildcardPoistions.contains(index)) {
            StringBuilder pre = new StringBuilder(10);
            if (index == 0) {
                // "*" and "?" at the start

                boolean found = false;
                for (int j = 0; j < list.size(); j++) {
                    org.apache.lucene.analysis.Token test = list.get(j);
                    if ((test.startOffset() <= 0) && (0 < test.endOffset())) {
                        found = true;
                        break;
                    }
                }
                if (!found && (testText.length() == 1)) {
                    // Add new token followed by * not given by the tokeniser
                    org.apache.lucene.analysis.Token newToken = new org.apache.lucene.analysis.Token(0, 0);
                    newToken.setTermBuffer("");
                    newToken.setType("ALPHANUM");
                    if (requiresMLTokenDuplication) {
                        Locale locale = I18NUtil.parseLocale(localeString);
                        MLAnalysisMode mlAnalysisMode = searchParameters.getMlAnalaysisMode() == null
                                ? defaultSearchMLAnalysisMode
                                : searchParameters.getMlAnalaysisMode();
                        MLTokenDuplicator duplicator = new MLTokenDuplicator(locale, mlAnalysisMode);
                        Iterator<org.apache.lucene.analysis.Token> it = duplicator.buildIterator(newToken);
                        if (it != null) {
                            int count = 0;
                            while (it.hasNext()) {
                                list.add(it.next());
                                count++;
                                if (count > 1) {
                                    severalTokensAtSamePosition = true;
                                }
                            }
                        }
                    }
                    // content
                    else {
                        list.add(newToken);
                    }
                }
            } else if (index > 0) {
                // Add * and ? back into any tokens from which it has been removed

                boolean tokenFound = false;
                for (int j = 0; j < list.size(); j++) {
                    org.apache.lucene.analysis.Token test = list.get(j);
                    if ((test.startOffset() <= index) && (index < test.endOffset())) {
                        if (requiresMLTokenDuplication) {
                            String termText = new String(test.termBuffer(), 0, test.termLength());
                            int position = termText.indexOf("}");
                            String language = termText.substring(0, position + 1);
                            String token = termText.substring(position + 1);
                            if (index >= test.startOffset() + token.length()) {
                                test.setTermBuffer(language + token + current);
                            }
                        } else {
                            if (index >= test.startOffset() + test.termLength()) {
                                test.setTermBuffer(test.term() + current);
                            }
                        }
                        tokenFound = true;
                        break;
                    }
                }

                if (!tokenFound) {
                    for (int i = index - 1; i >= 0; i--) {
                        char c = testText.charAt(i);
                        if (Character.isLetterOrDigit(c)) {
                            boolean found = false;
                            for (int j = 0; j < list.size(); j++) {
                                org.apache.lucene.analysis.Token test = list.get(j);
                                if ((test.startOffset() <= i) && (i < test.endOffset())) {
                                    found = true;
                                    break;
                                }
                            }
                            if (found) {
                                break;
                            } else {
                                pre.insert(0, c);
                            }
                        } else {
                            break;
                        }
                    }
                    if (pre.length() > 0) {
                        // Add new token followed by * not given by the tokeniser
                        org.apache.lucene.analysis.Token newToken = new org.apache.lucene.analysis.Token(
                                index - pre.length(), index);
                        newToken.setTermBuffer(pre.toString());
                        newToken.setType("ALPHANUM");
                        if (requiresMLTokenDuplication) {
                            Locale locale = I18NUtil.parseLocale(localeString);
                            MLAnalysisMode mlAnalysisMode = searchParameters.getMlAnalaysisMode() == null
                                    ? defaultSearchMLAnalysisMode
                                    : searchParameters.getMlAnalaysisMode();
                            MLTokenDuplicator duplicator = new MLTokenDuplicator(locale, mlAnalysisMode);
                            Iterator<org.apache.lucene.analysis.Token> it = duplicator.buildIterator(newToken);
                            if (it != null) {
                                int count = 0;
                                while (it.hasNext()) {
                                    list.add(it.next());
                                    count++;
                                    if (count > 1) {
                                        severalTokensAtSamePosition = true;
                                    }
                                }
                            }
                        }
                        // content
                        else {
                            list.add(newToken);
                        }
                    }
                }
            }

            StringBuilder post = new StringBuilder(10);
            if (index > 0) {
                for (int i = index + 1; i < testText.length(); i++) {
                    char c = testText.charAt(i);
                    if (Character.isLetterOrDigit(c)) {
                        boolean found = false;
                        for (int j = 0; j < list.size(); j++) {
                            org.apache.lucene.analysis.Token test = list.get(j);
                            if ((test.startOffset() <= i) && (i < test.endOffset())) {
                                found = true;
                                break;
                            }
                        }
                        if (found) {
                            break;
                        } else {
                            post.append(c);
                        }
                    } else {
                        break;
                    }
                }
                if (post.length() > 0) {
                    // Add new token followed by * not given by the tokeniser
                    org.apache.lucene.analysis.Token newToken = new org.apache.lucene.analysis.Token(index + 1,
                            index + 1 + post.length());
                    newToken.setTermBuffer(post.toString());
                    newToken.setType("ALPHANUM");
                    if (requiresMLTokenDuplication) {
                        Locale locale = I18NUtil.parseLocale(localeString);
                        MLAnalysisMode mlAnalysisMode = searchParameters.getMlAnalaysisMode() == null
                                ? defaultSearchMLAnalysisMode
                                : searchParameters.getMlAnalaysisMode();
                        MLTokenDuplicator duplicator = new MLTokenDuplicator(locale, mlAnalysisMode);
                        Iterator<org.apache.lucene.analysis.Token> it = duplicator.buildIterator(newToken);
                        if (it != null) {
                            int count = 0;
                            while (it.hasNext()) {
                                list.add(it.next());
                                count++;
                                if (count > 1) {
                                    severalTokensAtSamePosition = true;
                                }
                            }
                        }
                    }
                    // content
                    else {
                        list.add(newToken);
                    }
                }
            }

        }
    }

    Collections.sort(list, new Comparator<org.apache.lucene.analysis.Token>() {

        public int compare(Token o1, Token o2) {
            int dif = o1.startOffset() - o2.startOffset();
            if (dif != 0) {
                return dif;
            } else {
                return o2.getPositionIncrement() - o1.getPositionIncrement();
            }
        }
    });

    // Combined * and ? based strings - should redo the tokeniser

    // Build tokens by position

    LinkedList<LinkedList<org.apache.lucene.analysis.Token>> tokensByPosition = new LinkedList<LinkedList<org.apache.lucene.analysis.Token>>();
    LinkedList<org.apache.lucene.analysis.Token> currentList = null;
    for (org.apache.lucene.analysis.Token c : list) {
        if (c.getPositionIncrement() == 0) {
            if (currentList == null) {
                currentList = new LinkedList<org.apache.lucene.analysis.Token>();
                tokensByPosition.add(currentList);
            }
            currentList.add(c);
        } else {
            currentList = new LinkedList<org.apache.lucene.analysis.Token>();
            tokensByPosition.add(currentList);
            currentList.add(c);
        }
    }

    // Build all the token sequences and see which ones get strung together

    LinkedList<LinkedList<org.apache.lucene.analysis.Token>> allTokenSequences = new LinkedList<LinkedList<org.apache.lucene.analysis.Token>>();
    for (LinkedList<org.apache.lucene.analysis.Token> tokensAtPosition : tokensByPosition) {
        if (allTokenSequences.size() == 0) {
            for (org.apache.lucene.analysis.Token t : tokensAtPosition) {
                LinkedList<org.apache.lucene.analysis.Token> newEntry = new LinkedList<org.apache.lucene.analysis.Token>();
                newEntry.add(t);
                allTokenSequences.add(newEntry);
            }
        } else {
            LinkedList<LinkedList<org.apache.lucene.analysis.Token>> newAllTokeSequences = new LinkedList<LinkedList<org.apache.lucene.analysis.Token>>();

            FOR_FIRST_TOKEN_AT_POSITION_ONLY: for (org.apache.lucene.analysis.Token t : tokensAtPosition) {
                boolean tokenFoundSequence = false;
                for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : allTokenSequences) {
                    LinkedList<org.apache.lucene.analysis.Token> newEntry = new LinkedList<org.apache.lucene.analysis.Token>();
                    newEntry.addAll(tokenSequence);
                    if (newEntry.getLast().endOffset() <= t.startOffset()) {
                        newEntry.add(t);
                        tokenFoundSequence = true;
                    }
                    newAllTokeSequences.add(newEntry);
                }
                if (false == tokenFoundSequence) {
                    LinkedList<org.apache.lucene.analysis.Token> newEntry = new LinkedList<org.apache.lucene.analysis.Token>();
                    newEntry.add(t);
                    newAllTokeSequences.add(newEntry);
                }
                // Limit the max number of permutations we consider
                if (newAllTokeSequences.size() > 64) {
                    break FOR_FIRST_TOKEN_AT_POSITION_ONLY;
                }
            }
            allTokenSequences = newAllTokeSequences;
        }
    }

    // build the unique token sequences

    LinkedList<LinkedList<org.apache.lucene.analysis.Token>> fixedTokenSequences = new LinkedList<LinkedList<org.apache.lucene.analysis.Token>>();
    for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : allTokenSequences) {
        LinkedList<org.apache.lucene.analysis.Token> fixedTokenSequence = new LinkedList<org.apache.lucene.analysis.Token>();
        fixedTokenSequences.add(fixedTokenSequence);
        org.apache.lucene.analysis.Token replace = null;
        for (org.apache.lucene.analysis.Token c : tokenSequence) {
            if (replace == null) {
                StringBuilder prefix = new StringBuilder();
                for (int i = c.startOffset() - 1; i >= 0; i--) {
                    char test = testText.charAt(i);
                    if (((test == '*') || (test == '?')) && wildcardPoistions.contains(i)) {
                        prefix.insert(0, test);
                    } else {
                        break;
                    }
                }
                String pre = prefix.toString();
                if (requiresMLTokenDuplication) {
                    String termText = new String(c.termBuffer(), 0, c.termLength());
                    int position = termText.indexOf("}");
                    String language = termText.substring(0, position + 1);
                    String token = termText.substring(position + 1);
                    replace = new org.apache.lucene.analysis.Token(c.startOffset() - pre.length(),
                            c.endOffset());
                    replace.setTermBuffer(language + pre + token);
                    replace.setType(c.type());
                    replace.setPositionIncrement(c.getPositionIncrement());
                } else {
                    String termText = new String(c.termBuffer(), 0, c.termLength());
                    replace = new org.apache.lucene.analysis.Token(c.startOffset() - pre.length(),
                            c.endOffset());
                    replace.setTermBuffer(pre + termText);
                    replace.setType(c.type());
                    replace.setPositionIncrement(c.getPositionIncrement());
                }
            } else {
                StringBuilder prefix = new StringBuilder();
                StringBuilder postfix = new StringBuilder();
                StringBuilder builder = prefix;
                for (int i = c.startOffset() - 1; i >= replace.endOffset(); i--) {
                    char test = testText.charAt(i);
                    if (((test == '*') || (test == '?')) && wildcardPoistions.contains(i)) {
                        builder.insert(0, test);
                    } else {
                        builder = postfix;
                        postfix.setLength(0);
                    }
                }
                String pre = prefix.toString();
                String post = postfix.toString();

                // Does it bridge?
                if ((pre.length() > 0) && (replace.endOffset() + pre.length()) == c.startOffset()) {
                    String termText = new String(c.termBuffer(), 0, c.termLength());
                    if (requiresMLTokenDuplication) {
                        int position = termText.indexOf("}");
                        @SuppressWarnings("unused")
                        String language = termText.substring(0, position + 1);
                        String token = termText.substring(position + 1);
                        int oldPositionIncrement = replace.getPositionIncrement();
                        String replaceTermText = new String(replace.termBuffer(), 0, replace.termLength());
                        replace = new org.apache.lucene.analysis.Token(replace.startOffset(), c.endOffset());
                        replace.setTermBuffer(replaceTermText + pre + token);
                        replace.setType(replace.type());
                        replace.setPositionIncrement(oldPositionIncrement);
                    } else {
                        int oldPositionIncrement = replace.getPositionIncrement();
                        String replaceTermText = new String(replace.termBuffer(), 0, replace.termLength());
                        replace = new org.apache.lucene.analysis.Token(replace.startOffset(), c.endOffset());
                        replace.setTermBuffer(replaceTermText + pre + termText);
                        replace.setType(replace.type());
                        replace.setPositionIncrement(oldPositionIncrement);
                    }
                } else {
                    String termText = new String(c.termBuffer(), 0, c.termLength());
                    if (requiresMLTokenDuplication) {
                        int position = termText.indexOf("}");
                        String language = termText.substring(0, position + 1);
                        String token = termText.substring(position + 1);
                        String replaceTermText = new String(replace.termBuffer(), 0, replace.termLength());
                        org.apache.lucene.analysis.Token last = new org.apache.lucene.analysis.Token(
                                replace.startOffset(), replace.endOffset() + post.length());
                        last.setTermBuffer(replaceTermText + post);
                        last.setType(replace.type());
                        last.setPositionIncrement(replace.getPositionIncrement());
                        fixedTokenSequence.add(last);
                        replace = new org.apache.lucene.analysis.Token(c.startOffset() - pre.length(),
                                c.endOffset());
                        replace.setTermBuffer(language + pre + token);
                        replace.setType(c.type());
                        replace.setPositionIncrement(c.getPositionIncrement());
                    } else {
                        String replaceTermText = new String(replace.termBuffer(), 0, replace.termLength());
                        org.apache.lucene.analysis.Token last = new org.apache.lucene.analysis.Token(
                                replace.startOffset(), replace.endOffset() + post.length());
                        last.setTermBuffer(replaceTermText + post);
                        last.setType(replace.type());
                        last.setPositionIncrement(replace.getPositionIncrement());
                        fixedTokenSequence.add(last);
                        replace = new org.apache.lucene.analysis.Token(c.startOffset() - pre.length(),
                                c.endOffset());
                        replace.setTermBuffer(pre + termText);
                        replace.setType(c.type());
                        replace.setPositionIncrement(c.getPositionIncrement());
                    }
                }
            }
        }
        // finish last
        if (replace != null) {
            StringBuilder postfix = new StringBuilder();
            if ((replace.endOffset() >= 0) && (replace.endOffset() < testText.length())) {
                for (int i = replace.endOffset(); i < testText.length(); i++) {
                    char test = testText.charAt(i);
                    if (((test == '*') || (test == '?')) && wildcardPoistions.contains(i)) {
                        postfix.append(test);
                    } else {
                        break;
                    }
                }
            }
            String post = postfix.toString();
            int oldPositionIncrement = replace.getPositionIncrement();
            String replaceTermText = new String(replace.termBuffer(), 0, replace.termLength());
            replace = new org.apache.lucene.analysis.Token(replace.startOffset(),
                    replace.endOffset() + post.length());
            replace.setTermBuffer(replaceTermText + post);
            replace.setType(replace.type());
            replace.setPositionIncrement(oldPositionIncrement);
            fixedTokenSequence.add(replace);
        }
    }

    // rebuild fixed list

    ArrayList<org.apache.lucene.analysis.Token> fixed = new ArrayList<org.apache.lucene.analysis.Token>();
    for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : fixedTokenSequences) {
        for (org.apache.lucene.analysis.Token token : tokenSequence) {
            fixed.add(token);
        }
    }

    // reorder by start position and increment

    Collections.sort(fixed, new Comparator<org.apache.lucene.analysis.Token>() {

        public int compare(Token o1, Token o2) {
            int dif = o1.startOffset() - o2.startOffset();
            if (dif != 0) {
                return dif;
            } else {
                return o1.getPositionIncrement() - o2.getPositionIncrement();
            }
        }
    });

    // make sure we remove any tokens we have duplicated

    @SuppressWarnings("rawtypes")
    OrderedHashSet unique = new OrderedHashSet();
    unique.addAll(fixed);
    fixed = new ArrayList<org.apache.lucene.analysis.Token>(unique);

    list = fixed;

    // add any missing locales back to the tokens

    if (localePrefix.length() > 0) {
        for (int j = 0; j < list.size(); j++) {
            org.apache.lucene.analysis.Token currentToken = list.get(j);
            String termText = new String(currentToken.termBuffer(), 0, currentToken.termLength());
            currentToken.setTermBuffer(localePrefix + termText);
        }
    }

    if (list.size() == 0)
        return null;
    else if (list.size() == 1) {
        nextToken = list.get(0);
        String termText = new String(nextToken.termBuffer(), 0, nextToken.termLength());
        if (termText.contains("*") || termText.contains("?")) {
            return newWildcardQuery(
                    new Term(field, getLowercaseExpandedTerms() ? termText.toLowerCase() : termText));
        } else {
            return newTermQuery(new Term(field, termText));
        }
    } else {
        if (severalTokensAtSamePosition) {
            if (positionCount == 1) {
                // no phrase query:
                BooleanQuery q = newBooleanQuery(true);
                for (int i = 0; i < list.size(); i++) {
                    Query currentQuery;
                    nextToken = list.get(i);
                    String termText = new String(nextToken.termBuffer(), 0, nextToken.termLength());
                    if (termText.contains("*") || termText.contains("?")) {
                        currentQuery = newWildcardQuery(new Term(field,
                                getLowercaseExpandedTerms() ? termText.toLowerCase() : termText));
                    } else {
                        currentQuery = newTermQuery(new Term(field, termText));
                    }
                    q.add(currentQuery, BooleanClause.Occur.SHOULD);
                }
                return q;
            }
            // Consider if we can use a multi-phrase query (e.g. for synonym use rather than WordDelimiterFilterFactory)
            else if (canUseMultiPhraseQuery(fixedTokenSequences)) {
                // phrase query:
                MultiPhraseQuery mpq = newMultiPhraseQuery();
                mpq.setSlop(internalSlop);
                ArrayList<Term> multiTerms = new ArrayList<Term>();
                int position = 0;
                for (int i = 0; i < list.size(); i++) {
                    nextToken = list.get(i);
                    String termText = new String(nextToken.termBuffer(), 0, nextToken.termLength());

                    Term term = new Term(field, termText);
                    if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                        addWildcardTerms(multiTerms, term);
                    } else {
                        multiTerms.add(term);
                    }

                    if (nextToken.getPositionIncrement() > 0 && multiTerms.size() > 0) {
                        if (getEnablePositionIncrements()) {
                            mpq.add(multiTerms.toArray(new Term[0]), position);
                        } else {
                            mpq.add(multiTerms.toArray(new Term[0]));
                        }
                        checkTermCount(field, queryText, mpq);
                        multiTerms.clear();
                    }
                    position += nextToken.getPositionIncrement();

                }
                if (getEnablePositionIncrements()) {
                    if (multiTerms.size() > 0) {
                        mpq.add(multiTerms.toArray(new Term[0]), position);
                    }
                    //                        else
                    //                        {
                    //                            mpq.add(new Term[] { new Term(field, "\u0000") }, position);
                    //                        }
                } else {
                    if (multiTerms.size() > 0) {
                        mpq.add(multiTerms.toArray(new Term[0]));
                    }
                    //                        else
                    //                        {
                    //                            mpq.add(new Term[] { new Term(field, "\u0000") });
                    //                        }
                }
                checkTermCount(field, queryText, mpq);
                return mpq;

            }
            // Word delimiter factory and other odd things generate complex token patterns
            // Smart-skip token sequences with small tokens that generate too many wildcards
            // Fall back to the larger pattern
            // e.g. Site1* will not do (S ite 1*) or (Site 1*) if 1* matches too much; (S ite1*) and (Site1*) will still be OK
            // If we skip all (for just 1* in the input) this is still an issue.
            else {
                boolean skippedTokens = false;
                BooleanQuery q = newBooleanQuery(true);
                TOKEN_SEQUENCE: for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : fixedTokenSequences) {
                    // phrase query:
                    MultiPhraseQuery mpq = newMultiPhraseQuery();
                    mpq.setSlop(internalSlop);
                    int position = 0;
                    for (int i = 0; i < tokenSequence.size(); i++) {
                        nextToken = (org.apache.lucene.analysis.Token) tokenSequence.get(i);
                        String termText = new String(nextToken.termBuffer(), 0, nextToken.termLength());

                        Term term = new Term(field, termText);

                        if (getEnablePositionIncrements()) {
                            if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                                mpq.add(getMatchingTerms(field, term), position);
                            } else {
                                mpq.add(new Term[] { term }, position);
                            }
                            if (exceedsTermCount(mpq)) {
                                // We could duplicate the token sequence without the failing wildcard expansion and try again ??
                                skippedTokens = true;
                                continue TOKEN_SEQUENCE;
                            }
                            if (nextToken.getPositionIncrement() > 0) {
                                position += nextToken.getPositionIncrement();
                            } else {
                                position++;
                            }

                        } else {
                            if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                                mpq.add(getMatchingTerms(field, term));
                            } else {
                                mpq.add(term);
                            }
                            if (exceedsTermCount(mpq)) {
                                skippedTokens = true;
                                continue TOKEN_SEQUENCE;
                            }
                        }
                    }
                    q.add(mpq, BooleanClause.Occur.SHOULD);
                }
                if (skippedTokens && (q.clauses().size() == 0)) {
                    throw new LuceneQueryParserException(
                            "Query skipped all token sequences as wildcards generated too many clauses: "
                                    + field + " " + queryText);
                }
                return q;
            }
        } else {
            MultiPhraseQuery q = new MultiPhraseQuery();
            q.setSlop(internalSlop);
            int position = 0;
            for (int i = 0; i < list.size(); i++) {
                nextToken = list.get(i);
                String termText = new String(nextToken.termBuffer(), 0, nextToken.termLength());
                Term term = new Term(field, termText);
                if (getEnablePositionIncrements()) {
                    if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                        q.add(getMatchingTerms(field, term), position);
                    } else {
                        q.add(new Term[] { term }, position);
                    }
                    checkTermCount(field, queryText, q);
                    if (nextToken.getPositionIncrement() > 0) {
                        position += nextToken.getPositionIncrement();
                    } else {
                        position++;
                    }
                } else {
                    if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                        q.add(getMatchingTerms(field, term));
                    } else {
                        q.add(term);
                    }
                    checkTermCount(field, queryText, q);
                }
            }
            return q;
        }
    }
}

From source file:org.alfresco.solr.query.Solr4QueryParser.java

@SuppressWarnings("unchecked")
protected Query getFieldQueryImpl(String field, String queryText, AnalysisMode analysisMode,
        LuceneFunction luceneFunction) throws ParseException, IOException {
    // make sure the field exists or return a dummy query so we have no error ....ACE-3231
    SchemaField schemaField = schema.getFieldOrNull(field);
    boolean isNumeric = false;
    if (schemaField == null) {
        return new TermQuery(new Term("_dummy_", "_miss_"));
    } else {
        isNumeric = (schemaField.getType().getNumericType() != null);
    }

    // Use the analyzer to get all the tokens, and then build a TermQuery,
    // PhraseQuery, or nothing based on the term count

    // TODO: Untokenised columns with functions require special handling

    if (luceneFunction != LuceneFunction.FIELD) {
        throw new UnsupportedOperationException(
                "Field queries are not supported on lucene functions (UPPER, LOWER, etc)");
    }

    // if the incoming string already has a language identifier we strip it off and add it back on again

    String localePrefix = "";

    String toTokenise = queryText;

    if (queryText.startsWith("{")) {
        int position = queryText.indexOf("}");
        if (position > 0) {
            String language = queryText.substring(0, position + 1);
            Locale locale = new Locale(queryText.substring(1, position));
            String token = queryText.substring(position + 1);
            boolean found = false;
            for (Locale current : Locale.getAvailableLocales()) {
                if (current.toString().equalsIgnoreCase(locale.toString())) {
                    found = true;
                    break;
                }
            }
            if (found) {
                localePrefix = language;
                toTokenise = token;
            } else {
                //toTokenise = token;
            }
        }
    }

    String testText = toTokenise;
    boolean requiresMLTokenDuplication = false;
    String localeString = null;
    if (isPropertyField(field) && (localePrefix.length() == 0)) {
        if ((queryText.length() > 0) && (queryText.charAt(0) == '\u0000')) {
            int position = queryText.indexOf("\u0000", 1);
            testText = queryText.substring(position + 1);
            requiresMLTokenDuplication = true;
            localeString = queryText.substring(1, position);

        }
    }

    // find the positions of any escaped * and ? and ignore them

    Set<Integer> wildcardPoistions = getWildcardPositions(testText);

    TokenStream source = null;
    ArrayList<org.apache.lucene.analysis.Token> list = new ArrayList<org.apache.lucene.analysis.Token>();
    boolean severalTokensAtSamePosition = false;
    org.apache.lucene.analysis.Token nextToken;
    int positionCount = 0;

    try {
        org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();

        source = getAnalyzer().tokenStream(field, new StringReader(toTokenise));
        source.reset();
        while (source.incrementToken()) {
            CharTermAttribute cta = source.getAttribute(CharTermAttribute.class);
            OffsetAttribute offsetAtt = source.getAttribute(OffsetAttribute.class);
            TypeAttribute typeAtt = null;
            if (source.hasAttribute(TypeAttribute.class)) {
                typeAtt = source.getAttribute(TypeAttribute.class);
            }
            PositionIncrementAttribute posIncAtt = null;
            if (source.hasAttribute(PositionIncrementAttribute.class)) {
                posIncAtt = source.getAttribute(PositionIncrementAttribute.class);
            }
            nextToken = new Token(cta.buffer(), 0, cta.length(), offsetAtt.startOffset(),
                    offsetAtt.endOffset());
            if (typeAtt != null) {
                nextToken.setType(typeAtt.type());
            }
            if (posIncAtt != null) {
                nextToken.setPositionIncrement(posIncAtt.getPositionIncrement());
            }

            list.add(nextToken);
            if (nextToken.getPositionIncrement() != 0)
                positionCount += nextToken.getPositionIncrement();
            else
                severalTokensAtSamePosition = true;
        }
    } catch (SolrException e) {
        // MNT-15336
        // Text against a numeric field should fail silently rather than tell you it is not possible.
        if (isNumeric && e.getMessage() != null && e.getMessage().startsWith("Invalid Number:")) {
            // Generate a query that does not match any document - rather than nothing
            return createNoMatchQuery();
        } else {
            throw e;
        }
    } finally {
        try {
            if (source != null) {
                source.close();
            }
        } catch (IOException e) {
            // ignore
        }
    }

    // add any alpha numeric wildcards that have been missed
    // Fixes most stop word and wild card issues

    for (int index = 0; index < testText.length(); index++) {
        char current = testText.charAt(index);
        if (((current == '*') || (current == '?')) && wildcardPoistions.contains(index)) {
            StringBuilder pre = new StringBuilder(10);
            if (index == 0) {
                // "*" and "?" at the start

                boolean found = false;
                for (int j = 0; j < list.size(); j++) {
                    org.apache.lucene.analysis.Token test = list.get(j);
                    if ((test.startOffset() <= 0) && (0 < test.endOffset())) {
                        found = true;
                        break;
                    }
                }
                if (!found && (list.size() == 0)) {
                    // Add new token followed by * not given by the tokeniser
                    org.apache.lucene.analysis.Token newToken = new org.apache.lucene.analysis.Token("", 0, 0);
                    newToken.setType("ALPHANUM");
                    if (requiresMLTokenDuplication) {
                        Locale locale = I18NUtil.parseLocale(localeString);
                        MLTokenDuplicator duplicator = new MLTokenDuplicator(locale,
                                MLAnalysisMode.EXACT_LANGUAGE);
                        Iterator<org.apache.lucene.analysis.Token> it = duplicator.buildIterator(newToken);
                        if (it != null) {
                            int count = 0;
                            while (it.hasNext()) {
                                list.add(it.next());
                                count++;
                                if (count > 1) {
                                    severalTokensAtSamePosition = true;
                                }
                            }
                        }
                    }
                    // content
                    else {
                        list.add(newToken);
                    }
                }
            } else if (index > 0) {
                // Add * and ? back into any tokens from which it has been removed

                boolean tokenFound = false;
                for (int j = 0; j < list.size(); j++) {
                    org.apache.lucene.analysis.Token test = list.get(j);
                    if ((test.startOffset() <= index) && (index < test.endOffset())) {
                        if (requiresMLTokenDuplication) {
                            String termText = test.toString();
                            int position = termText.indexOf("}");
                            String language = termText.substring(0, position + 1);
                            String token = termText.substring(position + 1);
                            if (index >= test.startOffset() + token.length()) {
                                test.setEmpty();
                                test.append(language + token + current);
                            }
                        } else {
                            if (index >= test.startOffset() + test.length()) {
                                test.setEmpty();
                                test.append(test.toString() + current);
                            }
                        }
                        tokenFound = true;
                        break;
                    }
                }

                if (!tokenFound) {
                    for (int i = index - 1; i >= 0; i--) {
                        char c = testText.charAt(i);
                        if (Character.isLetterOrDigit(c)) {
                            boolean found = false;
                            for (int j = 0; j < list.size(); j++) {
                                org.apache.lucene.analysis.Token test = list.get(j);
                                if ((test.startOffset() <= i) && (i < test.endOffset())) {
                                    found = true;
                                    break;
                                }
                            }
                            if (found) {
                                break;
                            } else {
                                pre.insert(0, c);
                            }
                        } else {
                            break;
                        }
                    }
                    if (pre.length() > 0) {
                        // Add new token followed by * not given by the tokeniser
                        org.apache.lucene.analysis.Token newToken = new org.apache.lucene.analysis.Token(
                                pre.toString(), index - pre.length(), index);
                        newToken.setType("ALPHANUM");
                        if (requiresMLTokenDuplication) {
                            Locale locale = I18NUtil.parseLocale(localeString);
                            MLTokenDuplicator duplicator = new MLTokenDuplicator(locale,
                                    MLAnalysisMode.EXACT_LANGUAGE);
                            Iterator<org.apache.lucene.analysis.Token> it = duplicator.buildIterator(newToken);
                            if (it != null) {
                                int count = 0;
                                while (it.hasNext()) {
                                    list.add(it.next());
                                    count++;
                                    if (count > 1) {
                                        severalTokensAtSamePosition = true;
                                    }
                                }
                            }
                        }
                        // content
                        else {
                            list.add(newToken);
                        }
                    }
                }
            }

            StringBuilder post = new StringBuilder(10);
            if (index > 0) {
                for (int i = index + 1; i < testText.length(); i++) {
                    char c = testText.charAt(i);
                    if (Character.isLetterOrDigit(c)) {
                        boolean found = false;
                        for (int j = 0; j < list.size(); j++) {
                            org.apache.lucene.analysis.Token test = list.get(j);
                            if ((test.startOffset() <= i) && (i < test.endOffset())) {
                                found = true;
                                break;
                            }
                        }
                        if (found) {
                            break;
                        } else {
                            post.append(c);
                        }
                    } else {
                        break;
                    }
                }
                if (post.length() > 0) {
                    // Add new token followed by * not given by the tokeniser
                    org.apache.lucene.analysis.Token newToken = new org.apache.lucene.analysis.Token(
                            post.toString(), index + 1, index + 1 + post.length());
                    newToken.setType("ALPHANUM");
                    if (requiresMLTokenDuplication) {
                        Locale locale = I18NUtil.parseLocale(localeString);
                        MLTokenDuplicator duplicator = new MLTokenDuplicator(locale,
                                MLAnalysisMode.EXACT_LANGUAGE);
                        Iterator<org.apache.lucene.analysis.Token> it = duplicator.buildIterator(newToken);
                        if (it != null) {
                            int count = 0;
                            while (it.hasNext()) {
                                list.add(it.next());
                                count++;
                                if (count > 1) {
                                    severalTokensAtSamePosition = true;
                                }
                            }
                        }
                    }
                    // content
                    else {
                        list.add(newToken);
                    }
                }
            }

        }
    }

    // Put in real position increments as we treat them correctly

    int curentIncrement = -1;
    for (org.apache.lucene.analysis.Token c : list) {
        if (curentIncrement == -1) {
            curentIncrement = c.getPositionIncrement();
        } else if (c.getPositionIncrement() > 0) {
            curentIncrement = c.getPositionIncrement();
        } else {
            c.setPositionIncrement(curentIncrement);
        }
    }

    // Remove small bits already covered in larger fragments 
    list = getNonContained(list);

    Collections.sort(list, new Comparator<org.apache.lucene.analysis.Token>() {

        public int compare(Token o1, Token o2) {
            int dif = o1.startOffset() - o2.startOffset();
            return dif;

        }
    });

    // Combined * and ? based strings - should redo the tokeniser

    // Build tokens by position

    LinkedList<LinkedList<org.apache.lucene.analysis.Token>> tokensByPosition = new LinkedList<LinkedList<org.apache.lucene.analysis.Token>>();
    LinkedList<org.apache.lucene.analysis.Token> currentList = null;
    int lastStart = 0;
    for (org.apache.lucene.analysis.Token c : list) {
        if (c.startOffset() == lastStart) {
            if (currentList == null) {
                currentList = new LinkedList<org.apache.lucene.analysis.Token>();
                tokensByPosition.add(currentList);
            }
            currentList.add(c);
        } else {
            currentList = new LinkedList<org.apache.lucene.analysis.Token>();
            tokensByPosition.add(currentList);
            currentList.add(c);
        }
        lastStart = c.startOffset();
    }

    // Build all the token sequences and see which ones get strung together

    OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>> allTokenSequencesSet = new OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>>();
    for (LinkedList<org.apache.lucene.analysis.Token> tokensAtPosition : tokensByPosition) {
        OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>> positionalSynonymSequencesSet = new OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>>();

        OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>> newAllTokenSequencesSet = new OrderedHashSet<LinkedList<org.apache.lucene.analysis.Token>>();

        FOR_FIRST_TOKEN_AT_POSITION_ONLY: for (org.apache.lucene.analysis.Token t : tokensAtPosition) {
            org.apache.lucene.analysis.Token replace = new org.apache.lucene.analysis.Token(t, t.startOffset(),
                    t.endOffset());
            replace.setType(t.type());
            replace.setPositionIncrement(t.getPositionIncrement());

            boolean tokenFoundSequence = false;
            for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : allTokenSequencesSet) {
                LinkedList<org.apache.lucene.analysis.Token> newEntry = new LinkedList<org.apache.lucene.analysis.Token>();
                newEntry.addAll(tokenSequence);
                if ((newEntry.getLast().endOffset() == replace.endOffset())
                        && replace.type().equals(SynonymFilter.TYPE_SYNONYM)) {
                    if ((newEntry.getLast().startOffset() == replace.startOffset())
                            && newEntry.getLast().type().equals(SynonymFilter.TYPE_SYNONYM)) {
                        positionalSynonymSequencesSet.add(tokenSequence);
                        newEntry.add(replace);
                        tokenFoundSequence = true;
                    } else if (newEntry.getLast().type().equals(CommonGramsFilter.GRAM_TYPE)) {
                        if (newEntry.toString().endsWith(replace.toString())) {
                            // already in the gram
                            positionalSynonymSequencesSet.add(tokenSequence);
                            tokenFoundSequence = true;
                        } else {
                            // need to replace the synonym in the current gram
                            tokenFoundSequence = true;
                            StringBuffer old = new StringBuffer(newEntry.getLast().toString());
                            old.replace(replace.startOffset() - newEntry.getLast().startOffset(),
                                    replace.endOffset() - newEntry.getLast().startOffset(), replace.toString());
                            Token newToken = new org.apache.lucene.analysis.Token(old.toString(),
                                    newEntry.getLast().startOffset(), newEntry.getLast().endOffset());
                            newEntry.removeLast();
                            newEntry.add(newToken);
                        }
                    }
                } else if ((newEntry.getLast().startOffset() < replace.startOffset())
                        && (newEntry.getLast().endOffset() < replace.endOffset())) {
                    if (newEntry.getLast().type().equals(SynonymFilter.TYPE_SYNONYM)
                            && replace.type().equals(SynonymFilter.TYPE_SYNONYM)) {
                        positionalSynonymSequencesSet.add(tokenSequence);
                    }
                    newEntry.add(replace);
                    tokenFoundSequence = true;
                }
                newAllTokenSequencesSet.add(newEntry);
            }
            if (!tokenFoundSequence) {
                for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : newAllTokenSequencesSet) {
                    LinkedList<org.apache.lucene.analysis.Token> newEntry = new LinkedList<org.apache.lucene.analysis.Token>();
                    newEntry.addAll(tokenSequence);
                    if ((newEntry.getLast().endOffset() == replace.endOffset())
                            && replace.type().equals(SynonymFilter.TYPE_SYNONYM)) {
                        if ((newEntry.getLast().startOffset() == replace.startOffset())
                                && newEntry.getLast().type().equals(SynonymFilter.TYPE_SYNONYM)) {
                            positionalSynonymSequencesSet.add(tokenSequence);
                            newEntry.add(replace);
                            tokenFoundSequence = true;
                        } else if (newEntry.getLast().type().equals(CommonGramsFilter.GRAM_TYPE)) {
                            if (newEntry.toString().endsWith(replace.toString())) {
                                // already in the gram
                                positionalSynonymSequencesSet.add(tokenSequence);
                                tokenFoundSequence = true;
                            } else {
                                // need to replace the synonym in the current gram
                                tokenFoundSequence = true;
                                StringBuffer old = new StringBuffer(newEntry.getLast().toString());
                                old.replace(replace.startOffset() - newEntry.getLast().startOffset(),
                                        replace.endOffset() - newEntry.getLast().startOffset(),
                                        replace.toString());
                                Token newToken = new org.apache.lucene.analysis.Token(old.toString(),
                                        newEntry.getLast().startOffset(), newEntry.getLast().endOffset());
                                newEntry.removeLast();
                                newEntry.add(newToken);
                                positionalSynonymSequencesSet.add(newEntry);
                            }
                        }
                    } else if ((newEntry.getLast().startOffset() < replace.startOffset())
                            && (newEntry.getLast().endOffset() < replace.endOffset())) {
                        if (newEntry.getLast().type().equals(SynonymFilter.TYPE_SYNONYM)
                                && replace.type().equals(SynonymFilter.TYPE_SYNONYM)) {
                            positionalSynonymSequencesSet.add(tokenSequence);
                            newEntry.add(replace);
                            tokenFoundSequence = true;
                        }
                    }
                }
            }
            if (!tokenFoundSequence) {
                LinkedList<org.apache.lucene.analysis.Token> newEntry = new LinkedList<org.apache.lucene.analysis.Token>();
                newEntry.add(replace);
                newAllTokenSequencesSet.add(newEntry);
            }
            // Limit the max number of permutations we consider
            if (newAllTokenSequencesSet.size() > 64) {
                break FOR_FIRST_TOKEN_AT_POSITION_ONLY;
            }
        }
        allTokenSequencesSet = newAllTokenSequencesSet;
        allTokenSequencesSet.addAll(positionalSynonymSequencesSet);

    }

    LinkedList<LinkedList<org.apache.lucene.analysis.Token>> allTokenSequences = new LinkedList<LinkedList<org.apache.lucene.analysis.Token>>(
            allTokenSequencesSet);

    // build the unique, fixed token sequences

    LinkedList<LinkedList<org.apache.lucene.analysis.Token>> fixedTokenSequences = new LinkedList<LinkedList<org.apache.lucene.analysis.Token>>();
    for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : allTokenSequences) {
        LinkedList<org.apache.lucene.analysis.Token> fixedTokenSequence = new LinkedList<org.apache.lucene.analysis.Token>();
        fixedTokenSequences.add(fixedTokenSequence);
        org.apache.lucene.analysis.Token replace = null;
        for (org.apache.lucene.analysis.Token c : tokenSequence) {
            if (replace == null) {
                StringBuilder prefix = new StringBuilder();
                for (int i = c.startOffset() - 1; i >= 0; i--) {
                    char test = testText.charAt(i);
                    if (((test == '*') || (test == '?')) && wildcardPoistions.contains(i)) {
                        prefix.insert(0, test);
                    } else {
                        break;
                    }
                }
                String pre = prefix.toString();
                if (requiresMLTokenDuplication) {
                    String termText = c.toString();
                    int position = termText.indexOf("}");
                    String language = termText.substring(0, position + 1);
                    String token = termText.substring(position + 1);
                    replace = new org.apache.lucene.analysis.Token(language + pre + token,
                            c.startOffset() - pre.length(), c.endOffset());
                    replace.setType(c.type());
                    replace.setPositionIncrement(c.getPositionIncrement());
                } else {
                    String termText = c.toString();
                    replace = new org.apache.lucene.analysis.Token(pre + termText,
                            c.startOffset() - pre.length(), c.endOffset());
                    replace.setType(c.type());
                    replace.setPositionIncrement(c.getPositionIncrement());
                }
            } else {
                StringBuilder prefix = new StringBuilder();
                StringBuilder postfix = new StringBuilder();
                StringBuilder builder = prefix;
                for (int i = c.startOffset() - 1; i >= replace.endOffset(); i--) {
                    char test = testText.charAt(i);
                    if (((test == '*') || (test == '?')) && wildcardPoistions.contains(i)) {
                        builder.insert(0, test);
                    } else {
                        builder = postfix;
                        postfix.setLength(0);
                    }
                }
                String pre = prefix.toString();
                String post = postfix.toString();

                // Does it bridge?
                if ((pre.length() > 0) && (replace.endOffset() + pre.length()) == c.startOffset()) {
                    String termText = c.toString();
                    if (requiresMLTokenDuplication) {
                        int position = termText.indexOf("}");
                        @SuppressWarnings("unused")
                        String language = termText.substring(0, position + 1);
                        String token = termText.substring(position + 1);
                        int oldPositionIncrement = replace.getPositionIncrement();
                        String replaceTermText = replace.toString();
                        replace = new org.apache.lucene.analysis.Token(replaceTermText + pre + token,
                                replace.startOffset(), c.endOffset());
                        replace.setType(replace.type());
                        replace.setPositionIncrement(oldPositionIncrement);
                    } else {
                        int oldPositionIncrement = replace.getPositionIncrement();
                        String replaceTermText = replace.toString();
                        replace = new org.apache.lucene.analysis.Token(replaceTermText + pre + termText,
                                replace.startOffset(), c.endOffset());
                        replace.setType(replace.type());
                        replace.setPositionIncrement(oldPositionIncrement);
                    }
                } else {
                    String termText = c.toString();
                    if (requiresMLTokenDuplication) {
                        int position = termText.indexOf("}");
                        String language = termText.substring(0, position + 1);
                        String token = termText.substring(position + 1);
                        String replaceTermText = replace.toString();
                        org.apache.lucene.analysis.Token last = new org.apache.lucene.analysis.Token(
                                replaceTermText + post, replace.startOffset(),
                                replace.endOffset() + post.length());
                        last.setType(replace.type());
                        last.setPositionIncrement(replace.getPositionIncrement());
                        fixedTokenSequence.add(last);
                        replace = new org.apache.lucene.analysis.Token(language + pre + token,
                                c.startOffset() - pre.length(), c.endOffset());
                        replace.setType(c.type());
                        replace.setPositionIncrement(c.getPositionIncrement());
                    } else {
                        String replaceTermText = replace.toString();
                        org.apache.lucene.analysis.Token last = new org.apache.lucene.analysis.Token(
                                replaceTermText + post, replace.startOffset(),
                                replace.endOffset() + post.length());
                        last.setType(replace.type());
                        last.setPositionIncrement(replace.getPositionIncrement());
                        fixedTokenSequence.add(last);
                        replace = new org.apache.lucene.analysis.Token(pre + termText,
                                c.startOffset() - pre.length(), c.endOffset());
                        replace.setType(c.type());
                        replace.setPositionIncrement(c.getPositionIncrement());
                    }
                }
            }
        }
        // finish last
        if (replace != null) {
            StringBuilder postfix = new StringBuilder();
            if ((replace.endOffset() >= 0) && (replace.endOffset() < testText.length())) {
                for (int i = replace.endOffset(); i < testText.length(); i++) {
                    char test = testText.charAt(i);
                    if (((test == '*') || (test == '?')) && wildcardPoistions.contains(i)) {
                        postfix.append(test);
                    } else {
                        break;
                    }
                }
            }
            String post = postfix.toString();
            int oldPositionIncrement = replace.getPositionIncrement();
            String replaceTermText = replace.toString();
            replace = new org.apache.lucene.analysis.Token(replaceTermText + post, replace.startOffset(),
                    replace.endOffset() + post.length());
            replace.setType(replace.type());
            replace.setPositionIncrement(oldPositionIncrement);
            fixedTokenSequence.add(replace);
        }
    }

    // rebuild fixed list

    ArrayList<org.apache.lucene.analysis.Token> fixed = new ArrayList<org.apache.lucene.analysis.Token>();
    for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : fixedTokenSequences) {
        for (org.apache.lucene.analysis.Token token : tokenSequence) {
            fixed.add(token);
        }
    }

    // reorder by start position and increment

    Collections.sort(fixed, new Comparator<org.apache.lucene.analysis.Token>() {

        public int compare(Token o1, Token o2) {
            int dif = o1.startOffset() - o2.startOffset();
            if (dif != 0) {
                return dif;
            } else {
                return o1.getPositionIncrement() - o2.getPositionIncrement();
            }
        }
    });

    // make sure we remove any tokens we have duplicated

    @SuppressWarnings("rawtypes")
    OrderedHashSet unique = new OrderedHashSet();
    unique.addAll(fixed);
    fixed = new ArrayList<org.apache.lucene.analysis.Token>(unique);

    list = fixed;

    // add any missing locales back to the tokens

    if (localePrefix.length() > 0) {
        for (int j = 0; j < list.size(); j++) {
            org.apache.lucene.analysis.Token currentToken = list.get(j);
            String termText = currentToken.toString();
            currentToken.setEmpty();
            currentToken.append(localePrefix + termText);
        }
    }

    SchemaField sf = schema.getField(field);
    TokenizerChain tokenizerChain = (sf.getType().getQueryAnalyzer() instanceof TokenizerChain)
            ? ((TokenizerChain) sf.getType().getQueryAnalyzer())
            : null;
    boolean isShingled = false;
    if (tokenizerChain != null) {
        for (TokenFilterFactory factory : tokenizerChain.getTokenFilterFactories()) {
            if (factory instanceof ShingleFilterFactory) {
                isShingled = true;
                break;
            }
        }
    }
    AlfrescoAnalyzerWrapper analyzerWrapper = (sf.getType()
            .getQueryAnalyzer() instanceof AlfrescoAnalyzerWrapper)
                    ? ((AlfrescoAnalyzerWrapper) sf.getType().getQueryAnalyzer())
                    : null;
    if (analyzerWrapper != null) {
        // assume it is shingled if there are no term positions ...
        isShingled = true;
    }

    boolean forceConjunction = rerankPhase == RerankPhase.QUERY_PHASE;

    if (list.size() == 0)
        return null;
    else if (list.size() == 1) {
        nextToken = list.get(0);
        String termText = nextToken.toString();
        if (!isNumeric && (termText.contains("*") || termText.contains("?"))) {
            return newWildcardQuery(new Term(field, termText));
        } else {
            return newTermQuery(new Term(field, termText));
        }
    } else {
        if (severalTokensAtSamePosition) {
            if (positionCount == 1) {
                // no phrase query:
                BooleanQuery q = newBooleanQuery(true);
                for (int i = 0; i < list.size(); i++) {
                    Query currentQuery;
                    nextToken = list.get(i);
                    String termText = nextToken.toString();
                    if (termText.contains("*") || termText.contains("?")) {
                        currentQuery = newWildcardQuery(new Term(field, termText));
                    } else {
                        currentQuery = newTermQuery(new Term(field, termText));
                    }
                    q.add(currentQuery, BooleanClause.Occur.SHOULD);
                }
                return q;
            } else if (forceConjunction) {
                BooleanQuery or = new BooleanQuery();

                for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : fixedTokenSequences) {
                    BooleanQuery and = new BooleanQuery();
                    for (int i = 0; i < tokenSequence.size(); i++) {
                        nextToken = (org.apache.lucene.analysis.Token) tokenSequence.get(i);
                        String termText = nextToken.toString();

                        Term term = new Term(field, termText);
                        if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                            org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery(
                                    term);
                            and.add(wildQuery, Occur.MUST);
                        } else {
                            TermQuery termQuery = new TermQuery(term);
                            and.add(termQuery, Occur.MUST);
                        }
                    }
                    if (and.clauses().size() > 0) {
                        or.add(and, Occur.SHOULD);
                    }
                }
                return or;
            }
            // shingle
            else if (sf.omitPositions() && isShingled) {

                ArrayList<org.apache.lucene.analysis.Token> nonContained = getNonContained(list);
                Query currentQuery;

                BooleanQuery weakPhrase = new BooleanQuery();
                for (org.apache.lucene.analysis.Token shingleToken : nonContained) {
                    String termText = shingleToken.toString();
                    Term term = new Term(field, termText);

                    if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                        currentQuery = new org.apache.lucene.search.WildcardQuery(term);
                    } else {
                        currentQuery = new TermQuery(term);
                    }
                    weakPhrase.add(currentQuery, Occur.MUST);
                }

                return weakPhrase;

            }
            // Consider if we can use a multi-phrase query (e.g. for synonym use rather than WordDelimiterFilterFactory)
            else if (canUseMultiPhraseQuery(fixedTokenSequences)) {
                // phrase query:
                MultiPhraseQuery mpq = newMultiPhraseQuery();
                mpq.setSlop(internalSlop);
                ArrayList<Term> multiTerms = new ArrayList<Term>();
                int position = 0;
                for (int i = 0; i < list.size(); i++) {
                    nextToken = list.get(i);
                    String termText = nextToken.toString();

                    Term term = new Term(field, termText);
                    if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                        throw new IllegalStateException("Wildcards are not allowed in multi phrase anymore");
                    } else {
                        multiTerms.add(term);
                    }

                    if (nextToken.getPositionIncrement() > 0 && multiTerms.size() > 0) {
                        if (getEnablePositionIncrements()) {
                            mpq.add(multiTerms.toArray(new Term[0]), position);
                        } else {
                            mpq.add(multiTerms.toArray(new Term[0]));
                        }
                        checkTermCount(field, queryText, mpq);
                        multiTerms.clear();
                    }
                    position += nextToken.getPositionIncrement();

                }
                if (getEnablePositionIncrements()) {
                    if (multiTerms.size() > 0) {
                        mpq.add(multiTerms.toArray(new Term[0]), position);
                    }
                    //                        else
                    //                        {
                    //                            mpq.add(new Term[] { new Term(field, "\u0000") }, position);
                    //                        }
                } else {
                    if (multiTerms.size() > 0) {
                        mpq.add(multiTerms.toArray(new Term[0]));
                    }
                    //                        else
                    //                        {
                    //                            mpq.add(new Term[] { new Term(field, "\u0000") });
                    //                        }
                }
                checkTermCount(field, queryText, mpq);
                return mpq;

            }
            // Word delimiter factory and other odd things generate complex token patterns.
            // Smart-skip token sequences with small tokens that generate too many wildcards
            // and fall back to the larger pattern.
            // e.g. Site1* will not be expanded to (S ite 1*) or (Site 1*) if 1* matches too much; (S ite1*) and (Site1*) will still be OK.
            // If we skip all of them (for just 1* in the input) this is still an issue.
            else {

                return generateSpanOrQuery(field, fixedTokenSequences);

            }
        } else {
        if (forceConjunction) {
                BooleanQuery or = new BooleanQuery();

                for (LinkedList<org.apache.lucene.analysis.Token> tokenSequence : fixedTokenSequences) {
                    BooleanQuery and = new BooleanQuery();
                    for (int i = 0; i < tokenSequence.size(); i++) {
                        nextToken = (org.apache.lucene.analysis.Token) tokenSequence.get(i);
                        String termText = nextToken.toString();

                        Term term = new Term(field, termText);
                        if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                            org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery(
                                    term);
                            and.add(wildQuery, Occur.MUST);
                        } else {
                            TermQuery termQuery = new TermQuery(term);
                            and.add(termQuery, Occur.MUST);
                        }
                    }
                    if (and.clauses().size() > 0) {
                        or.add(and, Occur.SHOULD);
                    }
                }
                return or;
            } else {
                SpanQuery spanQuery = null;
                SpanOrQuery atSamePosition = new SpanOrQuery();
                int gap = 0;
                for (int i = 0; i < list.size(); i++) {
                    nextToken = list.get(i);
                    String termText = nextToken.toString();
                    Term term = new Term(field, termText);
                    if (getEnablePositionIncrements()) {
                        SpanQuery nextSpanQuery;
                        if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                            org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery(
                                    term);
                            SpanMultiTermQueryWrapper wrapper = new SpanMultiTermQueryWrapper<>(wildQuery);
                            wrapper.setRewriteMethod(
                                    new TopTermsSpanBooleanQueryRewrite(topTermSpanRewriteLimit));
                            nextSpanQuery = wrapper;
                        } else {
                            nextSpanQuery = new SpanTermQuery(term);
                        }
                        if (gap == 0) {
                            atSamePosition.addClause(nextSpanQuery);
                        } else {
                            if (atSamePosition.getClauses().length == 0) {
                                if (spanQuery == null) {
                                    spanQuery = nextSpanQuery;
                                } else {
                                    spanQuery = new SpanNearQuery(new SpanQuery[] { spanQuery, nextSpanQuery },
                                            (gap - 1) + internalSlop, internalSlop < 2);
                                }
                                atSamePosition = new SpanOrQuery();
                            } else if (atSamePosition.getClauses().length == 1) {
                                if (spanQuery == null) {
                                    spanQuery = atSamePosition.getClauses()[0];
                                } else {
                                    spanQuery = new SpanNearQuery(
                                            new SpanQuery[] { spanQuery, atSamePosition.getClauses()[0] },
                                            (gap - 1) + internalSlop, internalSlop < 2);
                                }
                                atSamePosition = new SpanOrQuery();
                                atSamePosition.addClause(nextSpanQuery);
                            } else {
                                if (spanQuery == null) {
                                    spanQuery = atSamePosition;
                                } else {
                                    spanQuery = new SpanNearQuery(new SpanQuery[] { spanQuery, atSamePosition },
                                            (gap - 1) + internalSlop, internalSlop < 2);
                                }
                                atSamePosition = new SpanOrQuery();
                                atSamePosition.addClause(nextSpanQuery);
                            }
                        }
                        gap = nextToken.getPositionIncrement();
                    } else {
                        SpanQuery nextSpanQuery;
                        if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                            org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery(
                                    term);
                            SpanMultiTermQueryWrapper wrapper = new SpanMultiTermQueryWrapper<>(wildQuery);
                            wrapper.setRewriteMethod(
                                    new TopTermsSpanBooleanQueryRewrite(topTermSpanRewriteLimit));
                            nextSpanQuery = wrapper;
                        } else {
                            nextSpanQuery = new SpanTermQuery(term);
                        }
                        if (spanQuery == null) {
                            spanQuery = new SpanOrQuery();
                            ((SpanOrQuery) spanQuery).addClause(nextSpanQuery);
                        } else {
                            ((SpanOrQuery) spanQuery).addClause(nextSpanQuery);
                        }
                    }
                }
                if (atSamePosition.getClauses().length == 0) {
                    return spanQuery;
                } else if (atSamePosition.getClauses().length == 1) {
                    if (spanQuery == null) {
                        spanQuery = atSamePosition.getClauses()[0];
                    } else {
                        spanQuery = new SpanNearQuery(
                                new SpanQuery[] { spanQuery, atSamePosition.getClauses()[0] },
                                (gap - 1) + internalSlop, internalSlop < 2);
                    }
                    return spanQuery;
                } else {
                    if (spanQuery == null) {
                        spanQuery = atSamePosition;
                    } else {
                        spanQuery = new SpanNearQuery(new SpanQuery[] { spanQuery, atSamePosition },
                                (gap - 1) + internalSlop, internalSlop < 2);
                    }
                    return spanQuery;
                }
            }
        }
    }
}
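
The fragment above first groups the analyzer tokens by start offset (one list per position), then builds token-sequence permutations and turns them into term, wildcard, phrase, or span queries. As a minimal standalone sketch of just that grouping step, using a simplified token class in place of org.apache.lucene.analysis.Token (the names here are illustrative, not part of the Alfresco source):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

// Hypothetical, simplified stand-in for org.apache.lucene.analysis.Token.
class SimpleToken {
    final String text;
    final int startOffset;

    SimpleToken(String text, int startOffset) {
        this.text = text;
        this.startOffset = startOffset;
    }
}

public class TokenGroupingSketch {
    // Group tokens that share a start offset into one list per position,
    // preserving the order in which positions first appear (the same idea as
    // the loop at the top of the fragment above).
    static List<LinkedList<SimpleToken>> groupByStartOffset(List<SimpleToken> tokens) {
        List<LinkedList<SimpleToken>> tokensByPosition = new ArrayList<LinkedList<SimpleToken>>();
        LinkedList<SimpleToken> currentList = null;
        int lastStart = -1;
        for (SimpleToken t : tokens) {
            if (currentList == null || t.startOffset != lastStart) {
                currentList = new LinkedList<SimpleToken>();
                tokensByPosition.add(currentList);
            }
            currentList.add(t);
            lastStart = t.startOffset;
        }
        return tokensByPosition;
    }

    public static void main(String[] args) {
        List<SimpleToken> tokens = Arrays.asList(
                new SimpleToken("quick", 0), new SimpleToken("fast", 0),
                new SimpleToken("brown", 6));
        System.out.println(groupByStartOffset(tokens).size()); // prints 2
    }
}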

From source file:org.apache.flex.compiler.config.Configuration.java

/**
 * Configures the LocalizationManager's locale, which is used when reporting
 * compile time errors, warnings, and info.
 *
 * @param toolsLocale A locale in Java format. For example, "en" or "ja_JP".
 * @throws ConfigurationException When the specified toolsLocale is not
 * available, a ToolsLocaleNotAvailable error is reported.
 */
@Config
@Mapping("tools-locale")
public void setToolsLocale(ConfigurationValue cv, String toolsLocale) throws ConfigurationException {
    Locale[] locales = Locale.getAvailableLocales();

    for (int i = 0; i < locales.length; i++) {
        if (locales[i].toString().equals(toolsLocale)) {
            this.toolsLocale = locales[i];

            LocalizationManager.get().setLocale(locales[i]);
            return;
        }
    }

    throw new ConfigurationException.ToolsLocaleNotAvailable(cv.getVar(), cv.getSource(), cv.getLine());
}
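
The lookup above can be reproduced on its own: scan Locale.getAvailableLocales() for a locale whose toString() matches the requested code. A minimal sketch (the class and method names are illustrative, not part of the Flex compiler API):

import java.util.Locale;
import java.util.Optional;

public class ToolsLocaleLookup {
    // Return the installed Locale whose toString() matches the given code
    // (e.g. "en" or "ja_JP"), or empty if the JVM does not provide it.
    static Optional<Locale> findInstalledLocale(String code) {
        for (Locale l : Locale.getAvailableLocales()) {
            if (l.toString().equals(code)) {
                return Optional.of(l);
            }
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        System.out.println(findInstalledLocale("ja_JP").isPresent()); // usually true
        System.out.println(findInstalledLocale("xx_YY").isPresent()); // false
    }
}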

From source file:edu.ku.brc.specify.tools.StrLocalizerApp.java

/**
 * @param hideExisting
 * @return
 */
private Locale doChooseLangLocale(final boolean hideExisting) {

    HashSet<String> existingLocs = new HashSet<String>();
    if (hideExisting) {
        for (String nm : rootDir.list()) {
            if (!nm.startsWith(".")) {
                existingLocs.add(nm);
            }
        }
    }

    Vector<Locale> locales = new Vector<Locale>();
    for (Locale l : Locale.getAvailableLocales()) {
        if (!hideExisting || !existingLocs.contains(getFullLang(l))) {
            locales.add(l);
        }
    }
    Collections.sort(locales, new Comparator<Locale>() {
        public int compare(Locale o1, Locale o2) {
            return o1.getDisplayName().compareTo(o2.getDisplayName());
        }
    });

    Vector<String> localeNames = new Vector<String>();
    for (Locale l : locales) {
        localeNames.add(l.getDisplayName());
    }

    ToggleButtonChooserDlg<String> chooser = new ToggleButtonChooserDlg<String>((Frame) null, "CHOOSE_LOCALE",
            localeNames, ToggleButtonChooserPanel.Type.RadioButton);
    chooser.setUseScrollPane(true);
    chooser.setVisible(true);
    if (!chooser.isCancelled()) {
        return locales.get(chooser.getSelectedIndex());
    }
    return null;
}
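
Stripped of the Specify dialog classes, the core idea above is to list the installed locales, skip any that are already present, and sort by display name. A minimal sketch of that step, using Locale.toString() in place of the getFullLang() helper (class and method names are illustrative):

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Locale;
import java.util.Set;

public class LocaleChooserSketch {
    // Return the installed locales whose identifiers are not in the excluded
    // set, sorted by display name as in the chooser above.
    static List<Locale> selectableLocales(Set<String> excludedCodes) {
        List<Locale> locales = new ArrayList<Locale>();
        for (Locale l : Locale.getAvailableLocales()) {
            if (!excludedCodes.contains(l.toString())) {
                locales.add(l);
            }
        }
        locales.sort(Comparator.comparing(Locale::getDisplayName));
        return locales;
    }

    public static void main(String[] args) {
        for (Locale l : selectableLocales(Collections.singleton("en_US"))) {
            System.out.println(l.getDisplayName());
        }
    }
}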

From source file:org.joget.apps.app.controller.ConsoleWebController.java

@SuppressWarnings({ "unchecked", "rawtypes" })
@RequestMapping("/console/profile")
public String profile(ModelMap map, HttpServletResponse response) throws IOException {
    User user = userDao.getUser(workflowUserManager.getCurrentUsername());

    if (user == null) {
        response.sendError(HttpServletResponse.SC_NOT_FOUND);
        return null;
    }

    map.addAttribute("user", user);
    map.addAttribute("timezones", TimeZoneUtil.getList());

    String enableUserLocale = SetupManager.getSettingValue("enableUserLocale");
    Map<String, String> localeStringList = new TreeMap<String, String>();
    if (enableUserLocale != null && enableUserLocale.equalsIgnoreCase("true")) {
        String userLocale = SetupManager.getSettingValue("userLocale");
        Collection<String> locales = new HashSet();
        locales.addAll(Arrays.asList(userLocale.split(",")));

        Locale[] localeList = Locale.getAvailableLocales();
        for (int x = 0; x < localeList.length; x++) {
            String code = localeList[x].toString();
            if (locales.contains(code)) {
                localeStringList.put(code,
                        code + " - " + localeList[x].getDisplayName(LocaleContextHolder.getLocale()));
            }
        }
    }

    UserSecurity us = DirectoryUtil.getUserSecurity();
    if (us != null) {
        map.addAttribute("policies", us.passwordPolicies());
        map.addAttribute("userProfileFooter", us.getUserProfileFooter(user));
    } else {
        map.addAttribute("policies", "");
        map.addAttribute("userProfileFooter", "");
    }

    map.addAttribute("enableUserLocale", enableUserLocale);
    map.addAttribute("localeStringList", localeStringList);

    return "console/profile";
}
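
The locale handling above boils down to intersecting Locale.getAvailableLocales() with a comma-separated whitelist from the settings and mapping each code to a display label. A minimal sketch of that step, with illustrative names and no Joget dependencies:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

public class UserLocaleOptions {
    // Build a sorted code -> "code - display name" map for the locales listed
    // in a comma-separated setting, keeping only locales the JVM actually has.
    static Map<String, String> buildOptions(String userLocaleSetting, Locale displayIn) {
        Set<String> allowed = new HashSet<String>(Arrays.asList(userLocaleSetting.split(",")));
        Map<String, String> options = new TreeMap<String, String>();
        for (Locale l : Locale.getAvailableLocales()) {
            String code = l.toString();
            if (allowed.contains(code)) {
                options.put(code, code + " - " + l.getDisplayName(displayIn));
            }
        }
        return options;
    }

    public static void main(String[] args) {
        System.out.println(buildOptions("en_US,fr_FR,de_DE", Locale.ENGLISH));
    }
}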

From source file:org.pentaho.big.data.kettle.plugins.hdfs.trans.HadoopFileInputDialog.java

protected void setLocales() {
    Locale[] locale = Locale.getAvailableLocales();
    dateLocale = new String[locale.length];
    for (int i = 0; i < locale.length; i++) {
        dateLocale[i] = locale[i].toString();
    }
    if (dateLocale != null) {
        wDateLocale.setItems(dateLocale);
    }
}
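
For comparison, the same array can be built with streams; this is a sketch only (and sorted here for readability, whereas the dialog above keeps the JVM's order):

import java.util.Arrays;
import java.util.Locale;

public class DateLocaleList {
    // Produce the locale identifiers as a sorted String[], e.g. for a combo box.
    static String[] availableLocaleIds() {
        return Arrays.stream(Locale.getAvailableLocales())
                .map(Locale::toString)
                .sorted()
                .toArray(String[]::new);
    }

    public static void main(String[] args) {
        System.out.println(availableLocaleIds().length);
    }
}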