Example usage for java.util LinkedList subList

List of usage examples for java.util LinkedList subList

Introduction

On this page you can find an example of usage for java.util LinkedList subList.

Prototype

List<E> subList(int fromIndex, int toIndex);

Source Link

Document

Returns a view of the portion of this list between the specified fromIndex , inclusive, and toIndex , exclusive.

Usage

From source file:Main.java

public static void main(String[] args) {
    // Build a list holding the numeric strings "1" through "5".
    LinkedList<String> values = new LinkedList<String>();
    for (int i = 1; i <= 5; i++) {
        values.add(String.valueOf(i));
    }

    // subList returns a live view: clearing it removes indices 2..4
    // from the backing list as well.
    System.out.println(values);
    values.subList(2, 5).clear();
    System.out.println(values);
}

From source file:Main.java

public static void main(String[] args) {
    // Populate a linked list with the strings "1" through "5".
    LinkedList<String> items = new LinkedList<String>();
    for (String s : new String[] { "1", "2", "3", "4", "5" }) {
        items.add(s);
    }

    System.out.println(items);
    // Take a view of elements at indices 1..3 (raw List, as in the original).
    List view = items.subList(1, 4);
    System.out.println(view);

    // Removing through the view also removes from the backing list.
    view.remove(2);
    System.out.println(view);
    System.out.println(items);
}

From source file:acromusashi.stream.component.rabbitmq.AbstractContextBuilder.java

/**
 * Builds the ordered list of RabbitMQ processes for each queue defined in the
 * cluster contexts, reordered so that the process this client connects to
 * comes first.
 *
 * @param contextList list of RabbitMQ cluster contexts
 * @return map from queue name to its ordered list of RabbitMQ processes
 * @throws RabbitmqCommunicateException if the context map cannot be initialized
 */
protected Map<String, List<String>> initProcessLists(List<RabbitmqClusterContext> contextList)
        throws RabbitmqCommunicateException {
    // Lazily initialize the queue-name -> cluster-context map.
    if (this.contextMap == null) {
        this.contextMap = initContextMap(contextList);
    }

    Map<String, List<String>> processLists = new HashMap<String, List<String>>();

    for (Map.Entry<String, RabbitmqClusterContext> entry : this.contextMap.entrySet()) {
        String queueName = entry.getKey();
        RabbitmqClusterContext context = entry.getValue();
        LinkedList<String> processList = new LinkedList<String>(context.getMqProcessList());

        // Start from the process this client last connected to; default to the head (index 0).
        int processIndex = 0;
        String connectionProcess = context.getConnectionProcessMap().get(getClientId(queueName));
        if (connectionProcess != null) {
            // FIX(review): indexOf returns -1 when the recorded process is no longer
            // in the list; the old code then called subList(0, -1), which throws
            // IndexOutOfBoundsException. Clamp to 0 so we fall back to the head.
            processIndex = Math.max(0, processList.indexOf(connectionProcess));
        }

        // Rotate the list so the element at processIndex becomes the first entry,
        // preserving the relative order of all processes.
        LinkedList<String> rotated = new LinkedList<String>(
                processList.subList(processIndex, processList.size()));
        rotated.addAll(processList.subList(0, processIndex));

        processLists.put(queueName, rotated);
    }

    return processLists;
}

From source file:elh.eus.absa.Features.java

/**
 *  Extract word form n-grams up to a certain length from a kaf/naf file
 *
 * @param int length : which 'n' to use for 'n-grams'
 * @param KAFDocument kafDoc : postagged kaf document to extract ngrams from.
 * @param boolean save : whether to save the ngrams to a file or not.
 * @return int : 0 if length is 0 (extraction disabled), 1 otherwise
 */
private int extractWfNgramsKAF(int length, KAFDocument kafDoc, boolean save) {
    // Slides a window of at most 'length' word forms over every sentence,
    // registering each window prefix as a "wf" feature via addNgram().
    // Returns 0 when length is 0 (extraction disabled), 1 otherwise.
    // NOTE(review): 'save' is not read inside this method — persistence
    // presumably happens in addNgram(); confirm against that helper.
    //System.err.println("ngram extraction: _"+length+"_");
    if (length == 0) {
        return 0;
    }

    for (List<WF> sent : kafDoc.getSentences()) {
        // Sliding window of the most recent word forms (size capped at 'length').
        LinkedList<String> ngrams = new LinkedList<String>();
        for (WF wf : sent) {
            if (ngrams.size() >= length) {
                ngrams.removeFirst();
            }
            ngrams.add(wf.getForm());
            //ngrams.add(normalize(wf.getForm(), params.getProperty("normalization", "none")));

            // Register every prefix of the window: 1-gram up to the full window.
            for (int i = 0; i < ngrams.size(); i++) {
                String ng = featureFromArray(ngrams.subList(0, i + 1), "wf");
                addNgram("wf", ng);
            }
        }
        // Drain the window at sentence end, registering each remaining suffix.
        while (!ngrams.isEmpty()) {
            String ng = featureFromArray(ngrams, "wf");
            addNgram("wf", ng);
            ngrams.removeFirst();
        }
    }
    return 1;
}

From source file:elh.eus.absa.Features.java

/**
 *     POS ngram extraction from a kaf document
 *
 * @param int length : which 'n' to use for 'n-grams'
 * @param KAFDocument kafDoc : postagged kaf document to extract ngrams from.
 * @param List<String> discardPos : PoS tags to exclude from the ngrams.
 * @param boolean save : whether to save the ngrams to a file or not.
 * @return int : 0 if length is 0 (extraction disabled), 1 otherwise
 */
public int extractPosNgrams(int length, KAFDocument kafDoc, List<String> discardPos, boolean save) {
    /*
     * Slides a window of at most 'length' POS tags over each sentence and
     * registers every window prefix as a "pos" feature via addNgram().
     * Returns 0 when length is 0 (extraction disabled), 1 otherwise.
     */
    if (length == 0) {
        return 0;
    }

    int sentenceCount = kafDoc.getSentences().size();
    for (int sentIdx = 0; sentIdx < sentenceCount; sentIdx++) {
        LinkedList<String> window = new LinkedList<String>();
        for (Term term : kafDoc.getTermsBySent(sentIdx)) {
            // Keep the sliding window below the requested size.
            if (window.size() >= length) {
                window.removeFirst();
            }

            // Tags listed in discardPos are skipped and never enter the window.
            String posTag = term.getPos();
            if (!discardPos.contains(posTag)) {
                window.add(posTag);
            }

            // Register every prefix of the current window (1-gram .. full window).
            for (int n = 1; n <= window.size(); n++) {
                addNgram("pos", featureFromArray(window.subList(0, n), "pos"));
            }
        }

        // Drain the window at sentence end, registering each remaining suffix.
        while (!window.isEmpty()) {
            addNgram("pos", featureFromArray(window, "pos"));
            window.removeFirst();
        }
    }
    return 1;
}

From source file:elh.eus.absa.Features.java

/**
 * Given a window check if the ngrams inside (all of them) are present in the feature set, and if so,
 * update the feature vector accordingly
 *
 * @param ngrams
 * @param prefix String : possible prefix used to differentiate ngram groups in the attribute set.
 * @param double[] fVector : feature vector for the corresponding instance
 * @param int tokens : number of tokens in the sentence (in case we want to add not a frequency value
 * but a normalized value)
 *
 */
private void checkNgramFeatures(LinkedList<String> ngrams, double[] fVector, String prefix, int tokens,
        boolean empty) {
    //System.err.println("features::checkNgramFeatures ->"+Arrays.asList(ngrams).toString());

    // if empty is active means that we are checking the end of the sentence and 
    // the ngram list must be emptied 
    // NOTE: in that case this method consumes the caller's list as a side effect.
    if (empty) {
        while (!ngrams.isEmpty()) {
            // Feature name for the whole remaining window ('prefix' distinguishes ngram groups).
            String ng = featureFromArray(ngrams, prefix);
            //add occurrence to feature vector (the functions checks if the given ngram feature exists).
            addNumericToFeatureVector(ng, fVector, tokens); //tokNum

            // Drop the first token so the next iteration handles the next suffix.
            ngrams.removeFirst();
        }
    }
    // if empty is false search for all ngrams in the window
    else {
        // add ngrams to the feature list: every prefix (1-gram .. full window).
        for (int i = 0; i < ngrams.size(); i++) {
            String ng = featureFromArray(ngrams.subList(0, i + 1), prefix);
            // add occurrence to feature vector (the functions checks if the given ngram feature exists). 
            addNumericToFeatureVector(ng, fVector, tokens);//tokNum
        }
    }
}

From source file:elh.eus.absa.Features.java

/**
 *     Lemma ngram extraction from a kaf document
 *
 * @param int length : which 'n' to use for 'n-grams'
 * @param KAFDocument kafDoc : postagged kaf document to extract ngrams from.
 * @param List<String> discardPos : PoS tags whose lemmas are excluded from the ngrams.
 * @param boolean save : whether to save the ngrams to a file or not.
 * @return int : 0 if length is 0 (extraction disabled), 1 otherwise
 */
private int extractLemmaNgrams(int length, KAFDocument kafDoc, List<String> discardPos, boolean save) {
    // Slides a window of at most 'length' lemmas over each sentence and
    // registers every window prefix as a "lemma" feature via addNgram().
    // Returns 0 when length is 0 (extraction disabled), 1 otherwise.
    // NOTE(review): 'save' is not read inside this method — presumably
    // persistence happens in addNgram(); confirm.
    //System.err.println("lemma ngram extraction: _"+length+"_");
    if (length == 0) {
        return 0;
    }

    int sentNum = kafDoc.getSentences().size();
    for (int s = 0; s < sentNum; s++) {
        // Sliding window of accepted lemmas (size capped at 'length').
        LinkedList<String> ngrams = new LinkedList<String>();
        for (Term term : kafDoc.getTermsBySent(s)) {
            if (ngrams.size() >= length) {
                ngrams.removeFirst();
            }

            //if no alphanumeric char is present discard the element as invalid ngram. Or if it has a PoS tag that
            //should be discarded              
            String lCurrent = term.getLemma();
            if ((!discardPos.contains(term.getPos()))
                    && (!lCurrent.matches("[^\\p{L}\\p{M}\\p{Nd}\\p{InEmoticons}]+"))
                    && (lCurrent.length() > 1)) {
                ngrams.add(lCurrent);
                //ngrams.add(normalize(term.getLemma(), params.getProperty("normalization", "none")));
            }
            //certain punctuation marks and emoticons are allowed as lemmas
            // (the regex only ever matches a single character, so length <= 2 is not restrictive)
            else if ((lCurrent.length() <= 2) && (lCurrent.matches("[,;.?!]"))) {
                ngrams.add(lCurrent);
            }

            // add ngrams to the feature list: every prefix (1-gram .. full window).
            for (int i = 0; i < ngrams.size(); i++) {
                String ng = featureFromArray(ngrams.subList(0, i + 1), "lemma");
                addNgram("lemma", ng);
            }
        }
        //empty ngram list and add remaining ngrams (window suffixes) to the feature list
        while (!ngrams.isEmpty()) {
            String ng = featureFromArray(ngrams, "lemma");
            addNgram("lemma", ng);
            ngrams.removeFirst();
        }
    }
    return 1;
}

From source file:elh.eus.absa.Features.java

/**
 * Check if the given word/lemma/ngram exists both in the ngram list and in the general or domain polarity
 * lexicons, and if yes updates the corresponding attributes in the feature vector
 *
 * @param ngrams
 * @param fVector
 * @param prefix
 * @param toknumNgram
 * @param toknumPol
 * @param empty
 * @param ngram
 */
private void checkNgramsAndPolarLexicons(LinkedList<String> ngrams, double[] fVector, String prefix,
        int toknumNgram, int toknumPol, boolean empty, boolean ngram) {
    //System.err.println(Arrays.asList(ngrams).toString());
    // if empty is active means that we are checking the end of the sentence and 
    // the ngram list must be emptied 
    // NOTE: in that case this method consumes the caller's list as a side effect.
    if (empty) {
        // add ngrams to the feature vector
        while (!ngrams.isEmpty()) {
            String ng = featureFromArray(ngrams, prefix);
            //if the current lemma is in the ngram list activate the feature in the vector
            if (params.containsKey("lemmaNgrams")
                    && (!params.getProperty("lemmaNgrams").equalsIgnoreCase("0"))) {
                // add occurrence to feature vector (the functions checks if the given ngram feature exists).
                addNumericToFeatureVector(ng, fVector, toknumNgram); //tokNum
            }

            // Re-derive the feature name without the prefix for the polarity lexicon lookup.
            ng = featureFromArray(ngrams, "");
            if (params.containsKey("polarLexiconGeneral") || params.containsKey("polarLexiconDomain")) {
                checkPolarityLexicons(ng, fVector, toknumPol, ngram);
            } //end polarity ngram checker

            ngrams.removeFirst();

        } //end ngram checking
    }
    // if empty is false search for all ngrams in the window
    else {
        // add ngrams to the feature vector: every prefix (1-gram .. full window).
        for (int i = 0; i < ngrams.size(); i++) {
            String ng = featureFromArray(ngrams.subList(0, i + 1), prefix);
            //if the current lemma is in the ngram list activate the feature in the vector
            if (params.containsKey("lemmaNgrams")
                    && (!params.getProperty("lemmaNgrams").equalsIgnoreCase("0"))) {
                // add occurrence to feature vector (the functions checks if the given ngram feature exists).
                addNumericToFeatureVector(ng, fVector, toknumNgram); //tokNum                                    
            }

            // Same prefix, but without the group marker, for the polarity lexicons.
            ng = featureFromArray(ngrams.subList(0, i + 1), "");
            if (params.containsKey("polarLexiconGeneral") || params.containsKey("polarLexiconDomain")) {
                checkPolarityLexicons(ng, fVector, toknumPol, ngram);
            } //end polarity ngram checker
        } //end ngram checking                          
    }
}

From source file:edu.harvard.iq.dvn.ingest.dsb.impl.DvnRJobRequest.java

/**
 * Parses a comma-separated list of value/range tokens (e.g. "[3-5]", "(-9--3)",
 * "!=7", "1") into rows of the form {start-code, v1, end-code, v2}.
 * Operator codes: "3" = '[' (inclusive LHS), "5" = '(' (exclusive LHS),
 * "4" = ']' (inclusive RHS), "6" = ')' (exclusive RHS), "2" = negation ("!=").
 *
 * @param tkn raw token string to parse
 * @return one 4-element row per parsed token
 */
public List<List<String>> getValueRange(String tkn) {

    dbgLog.fine("received token=" + tkn);
    String step0 = StringUtils.strip(tkn);
    dbgLog.fine("step0=" + step0);

    // split the string into comma-separated tokens
    String[] step1raw = step0.split(",");

    dbgLog.fine("step1raw=" + StringUtils.join(step1raw, ","));

    // remove meaningless commas if exist

    List<String> step1 = new ArrayList<String>();

    for (String el : step1raw) {
        if (!el.equals("")) {
            step1.add(el);
        }
    }

    dbgLog.fine("step1=" + StringUtils.join(step1, ","));

    List<List<String>> rangeData = new ArrayList<List<String>>();

    // for each token, check the range operator

    for (int i = 0; i < step1.size(); i++) {
        // explode the token into single-character strings
        LinkedList<String> tmp = new LinkedList<String>(
                Arrays.asList(String2StringArray(String.valueOf(step1.get(i)))));

        Map<String, String> token = new HashMap<String, String>();
        boolean rangeMode = false;

        // .get(i) below CAN'T possibly be right (??) -- replacing
        // it with .get(0). -- L.A., v3.6
        //if ((!tmp.get(i).equals("[")) && (!tmp.get(i).equals("("))){
        if ((!tmp.get(0).equals("[")) && (!tmp.get(0).equals("("))) {
            // no LHS range operator
            // assume [
            token.put("start", "3");
        } else if (tmp.get(0).equals("[")) {
            rangeMode = true;
            token.put("start", "3");
            tmp.removeFirst();
        } else if (tmp.get(0).equals("(")) {
            rangeMode = true;
            token.put("start", "5");
            tmp.removeFirst();
        }

        if ((!tmp.getLast().equals("]")) && (!tmp.getLast().equals(")"))) {
            // no RHS range operator
            // assume ]
            token.put("end", "4");
        } else if (tmp.getLast().equals("]")) {
            rangeMode = true;
            tmp.removeLast();
            token.put("end", "4");
        } else if (tmp.getLast().equals(")")) {
            rangeMode = true;
            tmp.removeLast();
            token.put("end", "6");
        }

        // I'm now enforcing the following rules:
        // the "rangeMode" above - a range must have at least one range
        // operator, a square bracket or parenthesis, on one end, at
        // least; i.e., either on the left, or on the right. 
        // If there are no range operators, even if there are dashes
        // inside the token, they are not going to be interpreted as 
        // range definitions.  
        // still TODO: (possibly?) add more validation; figure out how 
        // to encode *date* ranges ("-" is not optimal, since dates already
        // contain dashes... although, since dates are (supposed to be) 
        // normalized it should still be possible to parse it unambiguously)
        //          -- L.A., v3.6

        if (rangeMode) {
            // after these steps, the string does not have range operators;
            // i.e., '-9--3', '--9', '-9-','-9', '-1-1', '1', '3-4', '6-'

            if ((tmp.get(0).equals("!")) && (tmp.get(1).equals("="))) {
                // != negation string is found
                token.put("start", "2");
                token.put("end", "");
                token.put("v1", StringUtils.join(tmp.subList(2, tmp.size()), ""));
                token.put("v2", "");
                dbgLog.fine("value=" + StringUtils.join(tmp.subList(2, tmp.size()), ","));

            } else if ((tmp.get(0).equals("-")) && (tmp.get(1).equals("-"))) {
                // type 2: --9
                token.put("v1", "");
                tmp.removeFirst();
                token.put("v2", StringUtils.join(tmp, ""));
            } else if ((tmp.get(0).equals("-")) && (tmp.getLast().equals("-"))) {
                // type 3: -9-
                token.put("v2", "");
                tmp.removeLast();
                token.put("v1", StringUtils.join(tmp, ""));
            } else if ((!tmp.get(0).equals("-")) && (tmp.getLast().equals("-"))) {
                // type 8: 6-
                token.put("v2", "");
                tmp.removeLast();
                token.put("v1", StringUtils.join(tmp, ""));
            } else {
                // count the hyphens and remember their positions
                int count = 0;
                List<Integer> index = new ArrayList<Integer>();
                for (int j = 0; j < tmp.size(); j++) {
                    if (tmp.get(j).equals("-")) {
                        count++;
                        index.add(j);
                    }
                }

                if (count >= 2) {
                    // range type
                    // divide on the second hyphen
                    // types 1 and 5: -9--3, -1-1
                    // FIX(review): v1 was never populated in this branch (the intended
                    // line existed only as a comment), leaving it null in the output
                    // row. Set it to everything before the second hyphen, mirroring v2.
                    token.put("v1", StringUtils.join(tmp.subList(0, index.get(1)), ""));
                    token.put("v2", StringUtils.join(tmp.subList((index.get(1) + 1), tmp.size()), ""));

                } else if (count == 1) {
                    if (tmp.get(0).equals("-")) {
                        // point negative type
                        // type 4: -9 or -inf,9
                        // do nothing
                        if ((token.get("start").equals("5"))
                                && ((token.get("end").equals("6")) || (token.get("end").equals("4")))) {
                            token.put("v1", "");
                            tmp.removeFirst();
                            token.put("v2", StringUtils.join(tmp, ""));
                        } else {
                            token.put("v1", StringUtils.join(tmp, ""));
                            token.put("v2", StringUtils.join(tmp, ""));
                        }
                    } else {
                        // type 7: 3-4
                        // both positive value and range type
                        String[] vset = (StringUtils.join(tmp, "")).split("-");
                        token.put("v1", vset[0]);
                        token.put("v2", vset[1]);
                    }

                } else {
                    // type 6: 1
                    token.put("v1", StringUtils.join(tmp, ""));
                    token.put("v2", StringUtils.join(tmp, ""));
                }
            }
        } else {
            // assume that this is NOT a range; treat the entire sequence 
            // of symbols as a single token:
            // type 6: 1
            token.put("v1", StringUtils.join(tmp, ""));
            token.put("v2", StringUtils.join(tmp, ""));
        }

        dbgLog.fine(i + "-th result=" + token.get("start") + "|" + token.get("v1") + "|" + token.get("end")
                + "|" + token.get("v2"));

        List<String> rangeSet = new ArrayList<String>();
        rangeSet.add(token.get("start"));
        rangeSet.add(token.get("v1"));
        rangeSet.add(token.get("end"));
        rangeSet.add(token.get("v2"));
        rangeData.add(rangeSet);

    }

    dbgLog.fine("rangeData:\n" + rangeData);
    return rangeData;
}

From source file:elh.eus.absa.Features.java

/**
 *  Extract n-grams up to a certain length from a CoNLL tabulated format corpus.
 *
 * @param int length : which 'n' to use for 'n-grams'
 * @param String type (wf|lemma|pos): what type of ngrams we want to extract.
 * @param List<String> discardPos : PoS tags to exclude from the ngrams.
 * @param boolean save : whether to save the ngrams to a file or not.
 * @return int : 0 if length is 0 (extraction disabled), 1 otherwise
 */
private int extractNgramsTAB(int length, String type, List<String> discardPos, boolean save) {
    // Slides a window of at most 'length' items of the requested type
    // (wf = word form, lemma, pos) over a tab-separated corpus, registering
    // every window prefix as a feature via addNgram().
    // Returns 0 when length is 0 (extraction disabled), 1 otherwise.
    // NOTE(review): 'save' is not read inside this method — presumably
    // persistence happens in addNgram(); confirm.
    //System.err.println("ngram extraction Tab: _"+length+"_"+type);
    if (length == 0) {
        return 0;
    }

    for (String sent : corpus.getSentences().keySet()) {
        //System.err.println("ngram extraction, corpus sentences: "+corpus.getSentences().get(sent));           
        // One token per line; tab-separated columns: 0 = word form, 1 = lemma, 2 = PoS tag.
        String[] tokens = corpus.getSentences().get(sent).split("\n");
        LinkedList<String> ngrams = new LinkedList<String>();
        for (String row : tokens) {
            String ngram = "";
            String[] fields = row.split("\t");
            String pos = "";
            switch (type) {
            case "wf":
                ngram = fields[0];
                break;
            case "lemma":
                if (fields.length > 1) {
                    ngram = fields[1];
                }
                if (fields.length > 2) {
                    pos = fields[2];
                }
                break;
            case "pos":
                // PoS tags are truncated to at most their first two characters;
                // an empty tag becomes "-".
                if (fields.length > 2) {
                    ngram = fields[2];
                    switch (ngram.length()) {
                    case 0:
                        ngram = "-";
                        break;
                    case 1:
                        ngram = ngram.substring(0, 1);
                        break;
                    default:
                        ngram = ngram.substring(0, 2);
                        break;
                    }
                }
            }

            //if the is a blank line we assume sentence has ended and we empty and re-initialize the n-gram list 
            if (ngram.equals("")) {
                //empty n-gram list and add remaining n-grams to the feature list
                while (!ngrams.isEmpty()) {
                    String ng = featureFromArray(ngrams, type);
                    addNgram(type, ng);
                    ngrams.removeFirst();
                }
                continue;
            }

            // Keep the sliding window below the requested size.
            if (ngrams.size() >= length) {
                ngrams.removeFirst();
            }

            //if no alphanumeric char is present discard the element as invalid ngram. Or if it has a PoS tag that
            //should be discarded
            String lCurrent = ngram;
            if ((!discardPos.contains(pos)) && (!ngram.matches("^[^\\p{L}\\p{M}\\p{Nd}\\p{InEmoticons}]+$"))
                    && (lCurrent.length() > 1)) {
                //standarize numeric values to NUMNUM lemma value
                //ngram.replaceFirst("^[0-9]$", "NUMNUM");
                // Word forms and lemmas are normalized before entering the window; PoS tags are not.
                if (!type.equalsIgnoreCase("pos")) {
                    ngrams.add(normalize(ngram, params.getProperty("normalization", "none")));
                } else {
                    ngrams.add(ngram);
                }
            }
            //certain punctuation marks are allowed as lemmas
            else if ((lCurrent.length() < 2) && (lCurrent.matches("[,;.?!]"))) {
                ngrams.add(lCurrent);
            }

            // add ngrams to the feature list: every prefix (1-gram .. full window).
            for (int i = 0; i < ngrams.size(); i++) {
                String ng = featureFromArray(ngrams.subList(0, i + 1), type);
                addNgram(type, ng);
            }
        }
        //empty ngram list and add remaining ngrams (window suffixes) to the feature list
        while (!ngrams.isEmpty()) {
            String ng = featureFromArray(ngrams, type);
            addNgram(type, ng);
            ngrams.removeFirst();
        }
    }
    return 1;
}